Remove the clean_unfenced function.
[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44  * both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
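/*
 * Editorial sketch (not part of the original file): the restart pattern
 * described above, modelled on drm_putback_buffer_objects() further down.
 * The helper name and the list being walked are illustrative assumptions.
 */
#if 0
static void drm_bo_example_walk(struct drm_device *dev, struct list_head *head)
{
	struct drm_buffer_object *bo;

restart:
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(bo, head, lru) {
		atomic_inc(&bo->usage);         /* keep bo alive across the unlock */
		mutex_unlock(&dev->struct_mutex);

		mutex_lock(&bo->mutex);         /* locking order: bo->mutex first */
		/* ... operate on bo ... */
		mutex_unlock(&bo->mutex);

		mutex_lock(&dev->struct_mutex);
		drm_bo_usage_deref_locked(&bo);
		mutex_unlock(&dev->struct_mutex);
		goto restart;                   /* the list may have changed */
	}
	mutex_unlock(&dev->struct_mutex);
}
#endif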
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
56
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(struct drm_buffer_object * bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
92 static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
93 {
94 #ifdef DRM_ODD_MM_COMPAT
95         int ret;
96
97         if (!bo->map_list.map)
98                 return 0;
99
100         ret = drm_bo_lock_kmm(bo);
101         if (ret)
102                 return ret;
103         drm_bo_unmap_virtual(bo);
104         if (old_is_pci)
105                 drm_bo_finish_unmap(bo);
106 #else
107         if (!bo->map_list.map)
108                 return 0;
109
110         drm_bo_unmap_virtual(bo);
111 #endif
112         return 0;
113 }
114
115 static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
116 {
117 #ifdef DRM_ODD_MM_COMPAT
118         int ret;
119
120         if (!bo->map_list.map)
121                 return;
122
123         ret = drm_bo_remap_bound(bo);
124         if (ret) {
125                 DRM_ERROR("Failed to remap a bound buffer object.\n"
126                           "\tThis might cause a sigbus later.\n");
127         }
128         drm_bo_unlock_kmm(bo);
129 #endif
130 }
131
132 /*
133  * Call bo->mutex locked.
134  */
135
136 static int drm_bo_add_ttm(struct drm_buffer_object * bo)
137 {
138         struct drm_device *dev = bo->dev;
139         int ret = 0;
140         bo->ttm = NULL;
141
142         DRM_ASSERT_LOCKED(&bo->mutex);
143
144         switch (bo->type) {
145         case drm_bo_type_dc:
146         case drm_bo_type_kernel:
147                 bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
148                 if (!bo->ttm)
149                         ret = -ENOMEM;
150                 break;
151         case drm_bo_type_user:
152                 break;
153         default:
154                 DRM_ERROR("Illegal buffer object type\n");
155                 ret = -EINVAL;
156                 break;
157         }
158
159         return ret;
160 }
161
162 static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
163                                   struct drm_bo_mem_reg * mem,
164                                   int evict, int no_wait)
165 {
166         struct drm_device *dev = bo->dev;
167         struct drm_buffer_manager *bm = &dev->bm;
168         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
169         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
170         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
171         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
172         int ret = 0;
173
174         if (old_is_pci || new_is_pci ||
175             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
176                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
177         if (ret)
178                 return ret;
179
180         /*
181          * Create and bind a ttm if required.
182          */
183
184         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
185                 ret = drm_bo_add_ttm(bo);
186                 if (ret)
187                         goto out_err;
188
189                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
190                         ret = drm_bind_ttm(bo->ttm, mem);
191                         if (ret)
192                                 goto out_err;
193                 }
194         }
195
196         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
197
198                 struct drm_bo_mem_reg *old_mem = &bo->mem;
199                 uint64_t save_flags = old_mem->flags;
200                 uint64_t save_mask = old_mem->mask;
201
202                 *old_mem = *mem;
203                 mem->mm_node = NULL;
204                 old_mem->mask = save_mask;
205                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
206
207         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
208                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
209
210                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
211
212         } else if (dev->driver->bo_driver->move) {
213                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
214
215         } else {
216
217                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
218
219         }
220
221         if (ret)
222                 goto out_err;
223
224         if (old_is_pci || new_is_pci)
225                 drm_bo_vm_post_move(bo);
226
227         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
228                 ret =
229                     dev->driver->bo_driver->invalidate_caches(dev,
230                                                               bo->mem.flags);
231                 if (ret)
232                         DRM_ERROR("Cannot flush read caches\n");
233         }
234
235         DRM_FLAG_MASKED(bo->priv_flags,
236                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
237                         _DRM_BO_FLAG_EVICTED);
238
239         if (bo->mem.mm_node)
240                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
241                         bm->man[bo->mem.mem_type].gpu_offset;
242
243
244         return 0;
245
246       out_err:
247         if (old_is_pci || new_is_pci)
248                 drm_bo_vm_post_move(bo);
249
250         new_man = &bm->man[bo->mem.mem_type];
251         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
252                 drm_ttm_unbind(bo->ttm);
253                 drm_destroy_ttm(bo->ttm);
254                 bo->ttm = NULL;
255         }
256
257         return ret;
258 }
259
260 /*
261  * Call bo->mutex locked.
262  * Wait until the buffer is idle.
263  */
264
265 int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
266                 int no_wait)
267 {
268         int ret;
269
270         DRM_ASSERT_LOCKED(&bo->mutex);
271
272         if (bo->fence) {
273                 if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
274                         drm_fence_usage_deref_unlocked(&bo->fence);
275                         return 0;
276                 }
277                 if (no_wait) {
278                         return -EBUSY;
279                 }
280                 ret =
281                     drm_fence_object_wait(bo->fence, lazy, ignore_signals,
282                                           bo->fence_type);
283                 if (ret)
284                         return ret;
285
286                 drm_fence_usage_deref_unlocked(&bo->fence);
287         }
288         return 0;
289 }
290 EXPORT_SYMBOL(drm_bo_wait);
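/*
 * Editorial usage sketch (not part of the original file): a typical
 * blocking idle-wait before CPU access.  The helper name is hypothetical;
 * as noted above, drm_bo_wait() itself expects bo->mutex to be held.
 */
#if 0
static int driver_wait_idle_example(struct drm_buffer_object *bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait(bo, 0 /* lazy */, 0 /* ignore_signals */, 0 /* no_wait */);
	mutex_unlock(&bo->mutex);
	return ret;
}
#endif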
291
292 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
293 {
294         struct drm_device *dev = bo->dev;
295         struct drm_buffer_manager *bm = &dev->bm;
296
297         if (bo->fence) {
298                 if (bm->nice_mode) {
299                         unsigned long _end = jiffies + 3 * DRM_HZ;
300                         int ret;
301                         do {
302                                 ret = drm_bo_wait(bo, 0, 1, 0);
303                                 if (ret && allow_errors)
304                                         return ret;
305
306                         } while (ret && !time_after_eq(jiffies, _end));
307
308                         if (bo->fence) {
309                                 bm->nice_mode = 0;
310                                 DRM_ERROR("Detected GPU lockup or "
311                                           "fence driver was taken down. "
312                                           "Evicting buffer.\n");
313                         }
314                 }
315                 if (bo->fence)
316                         drm_fence_usage_deref_unlocked(&bo->fence);
317         }
318         return 0;
319 }
320
321 /*
322  * Call dev->struct_mutex locked.
323  * Attempts to remove all private references to a buffer by expiring its
324  * fence object and removing it from lru lists and memory managers.
325  */
326
327 static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
328 {
329         struct drm_device *dev = bo->dev;
330         struct drm_buffer_manager *bm = &dev->bm;
331
332         DRM_ASSERT_LOCKED(&dev->struct_mutex);
333
334         atomic_inc(&bo->usage);
335         mutex_unlock(&dev->struct_mutex);
336         mutex_lock(&bo->mutex);
337
338         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
339
340         if (bo->fence && drm_fence_object_signaled(bo->fence,
341                                                    bo->fence_type, 0))
342                 drm_fence_usage_deref_unlocked(&bo->fence);
343
344         if (bo->fence && remove_all)
345                 (void)drm_bo_expire_fence(bo, 0);
346
347         mutex_lock(&dev->struct_mutex);
348
349         if (!atomic_dec_and_test(&bo->usage)) {
350                 goto out;
351         }
352
353         if (!bo->fence) {
354                 list_del_init(&bo->lru);
355                 if (bo->mem.mm_node) {
356                         drm_mm_put_block(bo->mem.mm_node);
357                         if (bo->pinned_node == bo->mem.mm_node)
358                                 bo->pinned_node = NULL;
359                         bo->mem.mm_node = NULL;
360                 }
361                 list_del_init(&bo->pinned_lru);
362                 if (bo->pinned_node) {
363                         drm_mm_put_block(bo->pinned_node);
364                         bo->pinned_node = NULL;
365                 }
366                 list_del_init(&bo->ddestroy);
367                 mutex_unlock(&bo->mutex);
368                 drm_bo_destroy_locked(bo);
369                 return;
370         }
371
372         if (list_empty(&bo->ddestroy)) {
373                 drm_fence_object_flush(bo->fence, bo->fence_type);
374                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
375                 schedule_delayed_work(&bm->wq,
376                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
377         }
378
379       out:
380         mutex_unlock(&bo->mutex);
381         return;
382 }
383
384 /*
385  * Verify that refcount is 0 and that there are no internal references
386  * to the buffer object. Then destroy it.
387  */
388
389 static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
390 {
391         struct drm_device *dev = bo->dev;
392         struct drm_buffer_manager *bm = &dev->bm;
393
394         DRM_ASSERT_LOCKED(&dev->struct_mutex);
395
396         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
397             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
398             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
399                 if (bo->fence != NULL) {
400                         DRM_ERROR("Fence was non-zero.\n");
401                         drm_bo_cleanup_refs(bo, 0);
402                         return;
403                 }
404
405 #ifdef DRM_ODD_MM_COMPAT
406                 BUG_ON(!list_empty(&bo->vma_list));
407                 BUG_ON(!list_empty(&bo->p_mm_list));
408 #endif
409
410                 if (bo->ttm) {
411                         drm_ttm_unbind(bo->ttm);
412                         drm_destroy_ttm(bo->ttm);
413                         bo->ttm = NULL;
414                 }
415
416                 atomic_dec(&bm->count);
417
418                 //              BUG_ON(!list_empty(&bo->base.list));
419                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
420
421                 return;
422         }
423
424         /*
425          * Some stuff is still trying to reference the buffer object.
426          * Get rid of those references.
427          */
428
429         drm_bo_cleanup_refs(bo, 0);
430
431         return;
432 }
433
434 /*
435  * Call dev->struct_mutex locked.
436  */
437
438 static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
439 {
440         struct drm_buffer_manager *bm = &dev->bm;
441
442         struct drm_buffer_object *entry, *nentry;
443         struct list_head *list, *next;
444
445         list_for_each_safe(list, next, &bm->ddestroy) {
446                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
447
448                 nentry = NULL;
449                 if (next != &bm->ddestroy) {
450                         nentry = list_entry(next, struct drm_buffer_object,
451                                             ddestroy);
452                         atomic_inc(&nentry->usage);
453                 }
454
455                 drm_bo_cleanup_refs(entry, remove_all);
456
457                 if (nentry) {
458                         atomic_dec(&nentry->usage);
459                 }
460         }
461 }
462
463 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
464 static void drm_bo_delayed_workqueue(void *data)
465 #else
466 static void drm_bo_delayed_workqueue(struct work_struct *work)
467 #endif
468 {
469 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
470         struct drm_device *dev = (struct drm_device *) data;
471         struct drm_buffer_manager *bm = &dev->bm;
472 #else
473         struct drm_buffer_manager *bm =
474             container_of(work, struct drm_buffer_manager, wq.work);
475         struct drm_device *dev = container_of(bm, struct drm_device, bm);
476 #endif
477
478         DRM_DEBUG("Delayed delete Worker\n");
479
480         mutex_lock(&dev->struct_mutex);
481         if (!bm->initialized) {
482                 mutex_unlock(&dev->struct_mutex);
483                 return;
484         }
485         drm_bo_delayed_delete(dev, 0);
486         if (bm->initialized && !list_empty(&bm->ddestroy)) {
487                 schedule_delayed_work(&bm->wq,
488                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
489         }
490         mutex_unlock(&dev->struct_mutex);
491 }
492
493 void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
494 {
495         struct drm_buffer_object *tmp_bo = *bo;
496         *bo = NULL;
497
498         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
499
500         if (atomic_dec_and_test(&tmp_bo->usage)) {
501                 drm_bo_destroy_locked(tmp_bo);
502         }
503 }
504 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
505
506 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
507                                      struct drm_user_object * uo)
508 {
509         struct drm_buffer_object *bo =
510             drm_user_object_entry(uo, struct drm_buffer_object, base);
511
512         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
513
514         drm_bo_takedown_vm_locked(bo);
515         drm_bo_usage_deref_locked(&bo);
516 }
517
518 void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
519 {
520         struct drm_buffer_object *tmp_bo = *bo;
521         struct drm_device *dev = tmp_bo->dev;
522
523         *bo = NULL;
524         if (atomic_dec_and_test(&tmp_bo->usage)) {
525                 mutex_lock(&dev->struct_mutex);
526                 if (atomic_read(&tmp_bo->usage) == 0)
527                         drm_bo_destroy_locked(tmp_bo);
528                 mutex_unlock(&dev->struct_mutex);
529         }
530 }
531 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
532
533 void drm_putback_buffer_objects(struct drm_device *dev)
534 {
535         struct drm_buffer_manager *bm = &dev->bm;
536         struct list_head *list = &bm->unfenced;
537         struct drm_buffer_object *entry, *next;
538
539         mutex_lock(&dev->struct_mutex);
540         list_for_each_entry_safe(entry, next, list, lru) {
541                 atomic_inc(&entry->usage);
542                 mutex_unlock(&dev->struct_mutex);
543
544                 mutex_lock(&entry->mutex);
545                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
546                 mutex_lock(&dev->struct_mutex);
547
548                 list_del_init(&entry->lru);
549                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
550                 DRM_WAKEUP(&entry->event_queue);
551
552                 /*
553                  * FIXME: Might want to put back on head of list
554                  * instead of tail here.
555                  */
556
557                 drm_bo_add_to_lru(entry);
558                 mutex_unlock(&entry->mutex);
559                 drm_bo_usage_deref_locked(&entry);
560         }
561         mutex_unlock(&dev->struct_mutex);
562 }
563 EXPORT_SYMBOL(drm_putback_buffer_objects);
564
565
566 /*
567  * Note. The caller has to register (if applicable)
568  * and deregister fence object usage.
569  */
570
571 int drm_fence_buffer_objects(struct drm_device *dev,
572                              struct list_head *list,
573                              uint32_t fence_flags,
574                              struct drm_fence_object * fence,
575                              struct drm_fence_object ** used_fence)
576 {
577         struct drm_buffer_manager *bm = &dev->bm;
578         struct drm_buffer_object *entry;
579         uint32_t fence_type = 0;
580         uint32_t fence_class = ~0;
581         int count = 0;
582         int ret = 0;
583         struct list_head *l;
584
585         mutex_lock(&dev->struct_mutex);
586
587         if (!list)
588                 list = &bm->unfenced;
589
590         if (fence)
591                 fence_class = fence->fence_class;
592
593         list_for_each_entry(entry, list, lru) {
594                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
595                 fence_type |= entry->new_fence_type;
596                 if (fence_class == ~0)
597                         fence_class = entry->new_fence_class;
598                 else if (entry->new_fence_class != fence_class) {
599                                   DRM_ERROR("Mismatched fence classes on unfenced list: "
600                                   "%d and %d.\n",
601                                   fence_class,
602                                   entry->new_fence_class);
603                         ret = -EINVAL;
604                         goto out;
605                 }
606                 count++;
607         }
608
609         if (!count) {
610                 ret = -EINVAL;
611                 goto out;
612         }
613
614         if (fence) {
615                 if ((fence_type & fence->type) != fence_type ||
616                     (fence->fence_class != fence_class)) {
617                         DRM_ERROR("Given fence doesn't match buffers "
618                                   "on unfenced list.\n");
619                         ret = -EINVAL;
620                         goto out;
621                 }
622         } else {
623                 mutex_unlock(&dev->struct_mutex);
624                 ret = drm_fence_object_create(dev, fence_class, fence_type,
625                                               fence_flags | DRM_FENCE_FLAG_EMIT,
626                                               &fence);
627                 mutex_lock(&dev->struct_mutex);
628                 if (ret)
629                         goto out;
630         }
631
632         count = 0;
633         l = list->next;
634         while (l != list) {
635                 prefetch(l->next);
636                 entry = list_entry(l, struct drm_buffer_object, lru);
637                 atomic_inc(&entry->usage);
638                 mutex_unlock(&dev->struct_mutex);
639                 mutex_lock(&entry->mutex);
640                 mutex_lock(&dev->struct_mutex);
641                 list_del_init(l);
642                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
643                         count++;
644                         if (entry->fence)
645                                 drm_fence_usage_deref_locked(&entry->fence);
646                         entry->fence = drm_fence_reference_locked(fence);
647                         entry->fence_class = entry->new_fence_class;
648                         entry->fence_type = entry->new_fence_type;
649                         DRM_FLAG_MASKED(entry->priv_flags, 0,
650                                         _DRM_BO_FLAG_UNFENCED);
651                         DRM_WAKEUP(&entry->event_queue);
652                         drm_bo_add_to_lru(entry);
653                 }
654                 mutex_unlock(&entry->mutex);
655                 drm_bo_usage_deref_locked(&entry);
656                 l = list->next;
657         }
658         DRM_DEBUG("Fenced %d buffers\n", count);
659       out:
660         mutex_unlock(&dev->struct_mutex);
661         *used_fence = fence;
662         return ret;
663 }
664 EXPORT_SYMBOL(drm_fence_buffer_objects);
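/*
 * Editorial usage sketch (not part of the original file): fencing the
 * buffers left on the unfenced list after validation and command
 * submission.  list = NULL selects bm->unfenced and fence = NULL asks the
 * function to create and emit a fence; the error handling shown is an
 * assumption.
 */
#if 0
static int driver_fence_unfenced_example(struct drm_device *dev)
{
	struct drm_fence_object *used_fence = NULL;
	int ret;

	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &used_fence);
	if (ret) {
		/* Put the buffers back on their normal lru lists instead. */
		drm_putback_buffer_objects(dev);
		return ret;
	}

	/* Deregister fence object usage, as required of the caller above. */
	drm_fence_usage_deref_unlocked(&used_fence);
	return 0;
}
#endif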
665
666 /*
667  * bo->mutex locked
668  */
669
670 static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
671                         int no_wait)
672 {
673         int ret = 0;
674         struct drm_device *dev = bo->dev;
675         struct drm_bo_mem_reg evict_mem;
676
677         /*
678          * Someone might have modified the buffer before we took the buffer mutex.
679          */
680
681         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
682                 goto out;
683         if (bo->mem.mem_type != mem_type)
684                 goto out;
685
686         ret = drm_bo_wait(bo, 0, 0, no_wait);
687
688         if (ret && ret != -EAGAIN) {
689                 DRM_ERROR("Failed to expire fence before "
690                           "buffer eviction.\n");
691                 goto out;
692         }
693
694         evict_mem = bo->mem;
695         evict_mem.mm_node = NULL;
696
698         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
699         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
700
701         if (ret) {
702                 if (ret != -EAGAIN)
703                         DRM_ERROR("Failed to find memory space for "
704                                   "buffer 0x%p eviction.\n", bo);
705                 goto out;
706         }
707
708         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
709
710         if (ret) {
711                 if (ret != -EAGAIN)
712                         DRM_ERROR("Buffer eviction failed\n");
713                 goto out;
714         }
715
716         mutex_lock(&dev->struct_mutex);
717         if (evict_mem.mm_node) {
718                 if (evict_mem.mm_node != bo->pinned_node)
719                         drm_mm_put_block(evict_mem.mm_node);
720                 evict_mem.mm_node = NULL;
721         }
722         list_del(&bo->lru);
723         drm_bo_add_to_lru(bo);
724         mutex_unlock(&dev->struct_mutex);
725
726         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
727                         _DRM_BO_FLAG_EVICTED);
728
729       out:
730         return ret;
731 }
732
733 /**
734  * Repeatedly evict memory from the LRU for @mem_type until we create enough
735  * space, or we've evicted everything and there isn't enough space.
736  */
737 static int drm_bo_mem_force_space(struct drm_device * dev,
738                                   struct drm_bo_mem_reg * mem,
739                                   uint32_t mem_type, int no_wait)
740 {
741         struct drm_mm_node *node;
742         struct drm_buffer_manager *bm = &dev->bm;
743         struct drm_buffer_object *entry;
744         struct drm_mem_type_manager *man = &bm->man[mem_type];
745         struct list_head *lru;
746         unsigned long num_pages = mem->num_pages;
747         int ret;
748
749         mutex_lock(&dev->struct_mutex);
750         do {
751                 node = drm_mm_search_free(&man->manager, num_pages,
752                                           mem->page_alignment, 1);
753                 if (node)
754                         break;
755
756                 lru = &man->lru;
757                 if (lru->next == lru)
758                         break;
759
760                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
761                 atomic_inc(&entry->usage);
762                 mutex_unlock(&dev->struct_mutex);
763                 mutex_lock(&entry->mutex);
764                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
765
766                 ret = drm_bo_evict(entry, mem_type, no_wait);
767                 mutex_unlock(&entry->mutex);
768                 drm_bo_usage_deref_unlocked(&entry);
769                 if (ret)
770                         return ret;
771                 mutex_lock(&dev->struct_mutex);
772         } while (1);
773
774         if (!node) {
775                 mutex_unlock(&dev->struct_mutex);
776                 return -ENOMEM;
777         }
778
779         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
780         mutex_unlock(&dev->struct_mutex);
781         mem->mm_node = node;
782         mem->mem_type = mem_type;
783         return 0;
784 }
785
786 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
787                                 uint32_t mem_type,
788                                 uint32_t mask, uint32_t * res_mask)
789 {
790         uint32_t cur_flags = drm_bo_type_flags(mem_type);
791         uint32_t flag_diff;
792
793         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
794                 cur_flags |= DRM_BO_FLAG_CACHED;
795         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
796                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
797         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
798                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
799
800         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
801                 return 0;
802
803         if (mem_type == DRM_BO_MEM_LOCAL) {
804                 *res_mask = cur_flags;
805                 return 1;
806         }
807
808         flag_diff = (mask ^ cur_flags);
809         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
810             (!(mask & DRM_BO_FLAG_CACHED) ||
811              (mask & DRM_BO_FLAG_FORCE_CACHING)))
812                 return 0;
813
814         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
815             ((mask & DRM_BO_FLAG_MAPPABLE) ||
816              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
817                 return 0;
818
819         *res_mask = cur_flags;
820         return 1;
821 }
822
823 /**
824  * Creates space for memory region @mem according to its type.
825  *
826  * This function first searches for free space in compatible memory types in
827  * the priority order defined by the driver.  If free space isn't found, then
828  * drm_bo_mem_force_space is attempted in priority order to evict and find
829  * space.
830  */
831 int drm_bo_mem_space(struct drm_buffer_object * bo,
832                      struct drm_bo_mem_reg * mem, int no_wait)
833 {
834         struct drm_device *dev = bo->dev;
835         struct drm_buffer_manager *bm = &dev->bm;
836         struct drm_mem_type_manager *man;
837
838         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
839         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
840         uint32_t i;
841         uint32_t mem_type = DRM_BO_MEM_LOCAL;
842         uint32_t cur_flags;
843         int type_found = 0;
844         int type_ok = 0;
845         int has_eagain = 0;
846         struct drm_mm_node *node = NULL;
847         int ret;
848
849         mem->mm_node = NULL;
850         for (i = 0; i < num_prios; ++i) {
851                 mem_type = prios[i];
852                 man = &bm->man[mem_type];
853
854                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
855                                                &cur_flags);
856
857                 if (!type_ok)
858                         continue;
859
860                 if (mem_type == DRM_BO_MEM_LOCAL)
861                         break;
862
863                 if ((mem_type == bo->pinned_mem_type) &&
864                     (bo->pinned_node != NULL)) {
865                         node = bo->pinned_node;
866                         break;
867                 }
868
869                 mutex_lock(&dev->struct_mutex);
870                 if (man->has_type && man->use_type) {
871                         type_found = 1;
872                         node = drm_mm_search_free(&man->manager, mem->num_pages,
873                                                   mem->page_alignment, 1);
874                         if (node)
875                                 node = drm_mm_get_block(node, mem->num_pages,
876                                                         mem->page_alignment);
877                 }
878                 mutex_unlock(&dev->struct_mutex);
879                 if (node)
880                         break;
881         }
882
883         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
884                 mem->mm_node = node;
885                 mem->mem_type = mem_type;
886                 mem->flags = cur_flags;
887                 return 0;
888         }
889
890         if (!type_found)
891                 return -EINVAL;
892
893         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
894         prios = dev->driver->bo_driver->mem_busy_prio;
895
896         for (i = 0; i < num_prios; ++i) {
897                 mem_type = prios[i];
898                 man = &bm->man[mem_type];
899
900                 if (!man->has_type)
901                         continue;
902
903                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
904                         continue;
905
906                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
907
908                 if (ret == 0) {
909                         mem->flags = cur_flags;
910                         return 0;
911                 }
912
913                 if (ret == -EAGAIN)
914                         has_eagain = 1;
915         }
916
917         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
918         return ret;
919 }
920
921 EXPORT_SYMBOL(drm_bo_mem_space);
922
923 static int drm_bo_new_mask(struct drm_buffer_object * bo,
924                            uint64_t new_mask, uint32_t hint)
925 {
926         uint32_t new_props;
927
928         if (bo->type == drm_bo_type_user) {
929                 DRM_ERROR("User buffers are not supported yet\n");
930                 return -EINVAL;
931         }
932
933         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
934                 DRM_ERROR
935                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
936                      "processes\n");
937                 return -EPERM;
938         }
939
940         if ((new_mask & DRM_BO_FLAG_NO_MOVE)) {
941                 DRM_ERROR
942                         ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
943                 return -EPERM;
944         }
945
946         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
947                                 DRM_BO_FLAG_READ);
948
949         if (!new_props) {
950                 DRM_ERROR("Invalid buffer object rwx properties\n");
951                 return -EINVAL;
952         }
953
954         bo->mem.mask = new_mask;
955         return 0;
956 }
957
958 /*
959  * Call dev->struct_mutex locked.
960  */
961
962 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
963                                               uint32_t handle, int check_owner)
964 {
965         struct drm_user_object *uo;
966         struct drm_buffer_object *bo;
967
968         uo = drm_lookup_user_object(file_priv, handle);
969
970         if (!uo || (uo->type != drm_buffer_type)) {
971                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
972                 return NULL;
973         }
974
975         if (check_owner && file_priv != uo->owner) {
976                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
977                         return NULL;
978         }
979
980         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
981         atomic_inc(&bo->usage);
982         return bo;
983 }
984 EXPORT_SYMBOL(drm_lookup_buffer_object);
985
986 /*
987  * Call bo->mutex locked.
988  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
989  * Unlike the drm_bo_busy function, this doesn't do any fence flushing.
990  */
991
992 static int drm_bo_quick_busy(struct drm_buffer_object * bo)
993 {
994         struct drm_fence_object *fence = bo->fence;
995
996         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
997         if (fence) {
998                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
999                         drm_fence_usage_deref_unlocked(&bo->fence);
1000                         return 0;
1001                 }
1002                 return 1;
1003         }
1004         return 0;
1005 }
1006
1007 /*
1008  * Call bo->mutex locked.
1009  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1010  */
1011
1012 static int drm_bo_busy(struct drm_buffer_object * bo)
1013 {
1014         struct drm_fence_object *fence = bo->fence;
1015
1016         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1017         if (fence) {
1018                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1019                         drm_fence_usage_deref_unlocked(&bo->fence);
1020                         return 0;
1021                 }
1022                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1023                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1024                         drm_fence_usage_deref_unlocked(&bo->fence);
1025                         return 0;
1026                 }
1027                 return 1;
1028         }
1029         return 0;
1030 }
1031
1032 static int drm_bo_read_cached(struct drm_buffer_object * bo)
1033 {
1034         int ret = 0;
1035
1036         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1037         if (bo->mem.mm_node)
1038                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1039         return ret;
1040 }
1041
1042 /*
1043  * Wait until a buffer is unmapped.
1044  */
1045
1046 static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
1047 {
1048         int ret = 0;
1049
1050         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1051                 return -EBUSY;
1052
1053         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1054                     atomic_read(&bo->mapped) == -1);
1055
1056         if (ret == -EINTR)
1057                 ret = -EAGAIN;
1058
1059         return ret;
1060 }
1061
1062 static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
1063 {
1064         int ret;
1065
1066         mutex_lock(&bo->mutex);
1067         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1068         mutex_unlock(&bo->mutex);
1069         return ret;
1070 }
1071
1072 /*
1073  * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
1074  * Until then, we cannot really do anything with it except delete it.
1075  * The unfenced list is a PITA, and the operations
1076  * 1) validating
1077  * 2) submitting commands
1078  * 3) fencing
1079  * should really be one atomic operation.
1080  * We now "solve" this problem by keeping
1081  * the buffer "unfenced" after validating, but before fencing.
1082  */
1083
1084 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
1085                                 int eagain_if_wait)
1086 {
1087         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1088
1089         if (ret && no_wait)
1090                 return -EBUSY;
1091         else if (!ret)
1092                 return 0;
1093
1094         ret = 0;
1095         mutex_unlock(&bo->mutex);
1096         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1097                     !drm_bo_check_unfenced(bo));
1098         mutex_lock(&bo->mutex);
1099         if (ret == -EINTR)
1100                 return -EAGAIN;
1101         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1102         if (ret) {
1103                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1104                 return -EBUSY;
1105         }
1106         if (eagain_if_wait)
1107                 return -EAGAIN;
1108
1109         return 0;
1110 }
1111
1112 /*
1113  * Fill in the ioctl reply argument with buffer info.
1114  * Bo locked.
1115  */
1116
1117 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
1118                                 struct drm_bo_info_rep *rep)
1119 {
1120         if (!rep)
1121                 return;
1122
1123         rep->handle = bo->base.hash.key;
1124         rep->flags = bo->mem.flags;
1125         rep->size = bo->num_pages * PAGE_SIZE;
1126         rep->offset = bo->offset;
1127         rep->arg_handle = bo->map_list.user_token;
1128         rep->mask = bo->mem.mask;
1129         rep->buffer_start = bo->buffer_start;
1130         rep->fence_flags = bo->fence_type;
1131         rep->rep_flags = 0;
1132         rep->page_alignment = bo->mem.page_alignment;
1133
1134         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1135                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1136                                 DRM_BO_REP_BUSY);
1137         }
1138 }
1139
1140 /*
1141  * Wait for buffer idle and register that we've mapped the buffer.
1142  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1143  * so that if the client dies, the mapping is automatically
1144  * unregistered.
1145  */
1146
1147 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1148                                  uint32_t map_flags, unsigned hint,
1149                                  struct drm_bo_info_rep *rep)
1150 {
1151         struct drm_buffer_object *bo;
1152         struct drm_device *dev = file_priv->head->dev;
1153         int ret = 0;
1154         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1155
1156         mutex_lock(&dev->struct_mutex);
1157         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1158         mutex_unlock(&dev->struct_mutex);
1159
1160         if (!bo)
1161                 return -EINVAL;
1162
1163         mutex_lock(&bo->mutex);
1164         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1165         if (ret)
1166                 goto out;
1167
1168         /*
1169          * If this returns true, we are currently unmapped.
1170          * We need to do this test, because unmapping can
1171          * be done without the bo->mutex held.
1172          */
1173
1174         while (1) {
1175                 if (atomic_inc_and_test(&bo->mapped)) {
1176                         if (no_wait && drm_bo_busy(bo)) {
1177                                 atomic_dec(&bo->mapped);
1178                                 ret = -EBUSY;
1179                                 goto out;
1180                         }
1181                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1182                         if (ret) {
1183                                 atomic_dec(&bo->mapped);
1184                                 goto out;
1185                         }
1186
1187                         if ((map_flags & DRM_BO_FLAG_READ) &&
1188                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1189                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1190                                 drm_bo_read_cached(bo);
1191                         }
1192                         break;
1193                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1194                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1195                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1196
1197                         /*
1198                          * We are already mapped with different flags.
1199                          * We need to wait for unmap.
1200                          */
1201
1202                         ret = drm_bo_wait_unmapped(bo, no_wait);
1203                         if (ret)
1204                                 goto out;
1205
1206                         continue;
1207                 }
1208                 break;
1209         }
1210
1211         mutex_lock(&dev->struct_mutex);
1212         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1213         mutex_unlock(&dev->struct_mutex);
1214         if (ret) {
1215                 if (atomic_add_negative(-1, &bo->mapped))
1216                         DRM_WAKEUP(&bo->event_queue);
1217
1218         } else
1219                 drm_bo_fill_rep_arg(bo, rep);
1220       out:
1221         mutex_unlock(&bo->mutex);
1222         drm_bo_usage_deref_unlocked(&bo);
1223         return ret;
1224 }
1225
1226 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1227 {
1228         struct drm_device *dev = file_priv->head->dev;
1229         struct drm_buffer_object *bo;
1230         struct drm_ref_object *ro;
1231         int ret = 0;
1232
1233         mutex_lock(&dev->struct_mutex);
1234
1235         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1236         if (!bo) {
1237                 ret = -EINVAL;
1238                 goto out;
1239         }
1240
1241         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1242         if (!ro) {
1243                 ret = -EINVAL;
1244                 goto out;
1245         }
1246
1247         drm_remove_ref_object(file_priv, ro);
1248         drm_bo_usage_deref_locked(&bo);
1249       out:
1250         mutex_unlock(&dev->struct_mutex);
1251         return ret;
1252 }
1253
1254 /*
1255  * Call struct-sem locked.
1256  */
1257
1258 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1259                                          struct drm_user_object * uo,
1260                                          enum drm_ref_type action)
1261 {
1262         struct drm_buffer_object *bo =
1263             drm_user_object_entry(uo, struct drm_buffer_object, base);
1264
1265         /*
1266          * We DON'T want to take bo->mutex here, because the path that waits
1267          * for the buffer to become unmapped holds it while waiting.
1268          */
1269
1270         BUG_ON(action != _DRM_REF_TYPE1);
1271
1272         if (atomic_add_negative(-1, &bo->mapped))
1273                 DRM_WAKEUP(&bo->event_queue);
1274 }
1275
1276 /*
1277  * bo->mutex locked.
1278  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1279  */
1280
1281 int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
1282                        int no_wait, int move_unfenced)
1283 {
1284         struct drm_device *dev = bo->dev;
1285         struct drm_buffer_manager *bm = &dev->bm;
1286         int ret = 0;
1287         struct drm_bo_mem_reg mem;
1288         /*
1289          * Flush outstanding fences.
1290          */
1291
1292         drm_bo_busy(bo);
1293
1294         /*
1295          * Wait for outstanding fences.
1296          */
1297
1298         ret = drm_bo_wait(bo, 0, 0, no_wait);
1299         if (ret)
1300                 return ret;
1301
1302         mem.num_pages = bo->num_pages;
1303         mem.size = mem.num_pages << PAGE_SHIFT;
1304         mem.mask = new_mem_flags;
1305         mem.page_alignment = bo->mem.page_alignment;
1306
1307         mutex_lock(&bm->evict_mutex);
1308         mutex_lock(&dev->struct_mutex);
1309         list_del(&bo->lru);
1310         list_add_tail(&bo->lru, &bm->unfenced);
1311         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1312                         _DRM_BO_FLAG_UNFENCED);
1313         mutex_unlock(&dev->struct_mutex);
1314
1315         /*
1316          * Determine where to move the buffer.
1317          */
1318         ret = drm_bo_mem_space(bo, &mem, no_wait);
1319         if (ret)
1320                 goto out_unlock;
1321
1322         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1323
1324  out_unlock:
1325         if (ret || !move_unfenced) {
1326                 mutex_lock(&dev->struct_mutex);
1327                 if (mem.mm_node) {
1328                         if (mem.mm_node != bo->pinned_node)
1329                                 drm_mm_put_block(mem.mm_node);
1330                         mem.mm_node = NULL;
1331                 }
1332                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1333                 DRM_WAKEUP(&bo->event_queue);
1334                 list_del(&bo->lru);
1335                 drm_bo_add_to_lru(bo);
1336                 mutex_unlock(&dev->struct_mutex);
1337         }
1338
1339         mutex_unlock(&bm->evict_mutex);
1340         return ret;
1341 }
1342
1343 static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
1344 {
1345         uint32_t flag_diff = (mem->mask ^ mem->flags);
1346
1347         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1348                 return 0;
1349         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1350             (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
1351              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1352           return 0;
1353         }
1354         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1355             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1356              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1357                 return 0;
1358         return 1;
1359 }
1360
1361 /*
1362  * bo locked.
1363  */
1364
1365 static int drm_buffer_object_validate(struct drm_buffer_object * bo,
1366                                       uint32_t fence_class,
1367                                       int move_unfenced, int no_wait)
1368 {
1369         struct drm_device *dev = bo->dev;
1370         struct drm_buffer_manager *bm = &dev->bm;
1371         struct drm_bo_driver *driver = dev->driver->bo_driver;
1372         uint32_t ftype;
1373         int ret;
1374
1375         DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1376                   (unsigned long long) bo->mem.mask,
1377                   (unsigned long long) bo->mem.flags);
1378
1379         ret = driver->fence_type(bo, &fence_class, &ftype);
1380
1381         if (ret) {
1382                 DRM_ERROR("Driver did not support given buffer permissions\n");
1383                 return ret;
1384         }
1385
1386         /*
1387          * We're switching command submission mechanism,
1388          * or cannot simply rely on the hardware serializing for us.
1389          *
1390          * Wait for buffer idle.
1391          */
1392
1393         if ((fence_class != bo->fence_class) ||
1394             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1395
1396                 ret = drm_bo_wait(bo, 0, 0, no_wait);
1397
1398                 if (ret)
1399                         return ret;
1400
1401         }
1402
1403         bo->new_fence_class = fence_class;
1404         bo->new_fence_type = ftype;
1405
1406         ret = drm_bo_wait_unmapped(bo, no_wait);
1407         if (ret) {
1408                 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1409                 return ret;
1410         }
1411
1412         /*
1413          * Check whether we need to move buffer.
1414          */
1415
1416         if (!drm_bo_mem_compat(&bo->mem)) {
1417                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1418                                          move_unfenced);
1419                 if (ret) {
1420                         if (ret != -EAGAIN)
1421                                 DRM_ERROR("Failed moving buffer.\n");
1422                         return ret;
1423                 }
1424         }
1425
1426         /*
1427          * Pinned buffers.
1428          */
1429
1430         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1431                 bo->pinned_mem_type = bo->mem.mem_type;
1432                 mutex_lock(&dev->struct_mutex);
1433                 list_del_init(&bo->pinned_lru);
1434                 drm_bo_add_to_pinned_lru(bo);
1435
1436                 if (bo->pinned_node != bo->mem.mm_node) {
1437                         if (bo->pinned_node != NULL)
1438                                 drm_mm_put_block(bo->pinned_node);
1439                         bo->pinned_node = bo->mem.mm_node;
1440                 }
1441
1442                 mutex_unlock(&dev->struct_mutex);
1443
1444         } else if (bo->pinned_node != NULL) {
1445
1446                 mutex_lock(&dev->struct_mutex);
1447
1448                 if (bo->pinned_node != bo->mem.mm_node)
1449                         drm_mm_put_block(bo->pinned_node);
1450
1451                 list_del_init(&bo->pinned_lru);
1452                 bo->pinned_node = NULL;
1453                 mutex_unlock(&dev->struct_mutex);
1454
1455         }
1456
1457         /*
1458          * We might need to add a TTM.
1459          */
1460
1461         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1462                 ret = drm_bo_add_ttm(bo);
1463                 if (ret)
1464                         return ret;
1465         }
1466         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1467
1468         /*
1469          * Finally, adjust lru to be sure.
1470          */
1471
1472         mutex_lock(&dev->struct_mutex);
1473         list_del(&bo->lru);
1474         if (move_unfenced) {
1475                 list_add_tail(&bo->lru, &bm->unfenced);
1476                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1477                                 _DRM_BO_FLAG_UNFENCED);
1478         } else {
1479                 drm_bo_add_to_lru(bo);
1480                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1481                         DRM_WAKEUP(&bo->event_queue);
1482                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1483                                         _DRM_BO_FLAG_UNFENCED);
1484                 }
1485         }
1486         mutex_unlock(&dev->struct_mutex);
1487
1488         return 0;
1489 }
1490
1491 int drm_bo_do_validate(struct drm_buffer_object *bo,
1492                        uint64_t flags, uint64_t mask, uint32_t hint,
1493                        uint32_t fence_class,
1494                        int no_wait,
1495                        struct drm_bo_info_rep *rep)
1496 {
1497         int ret;
1498
1499         mutex_lock(&bo->mutex);
1500         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1501
1502         if (ret)
1503                 goto out;
1504
1505
1506         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1507         ret = drm_bo_new_mask(bo, flags, hint);
1508         if (ret)
1509                 goto out;
1510
1511         ret = drm_buffer_object_validate(bo,
1512                                          fence_class,
1513                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1514                                          no_wait);
1515 out:
1516         if (rep)
1517                 drm_bo_fill_rep_arg(bo, rep);
1518
1519         mutex_unlock(&bo->mutex);
1520         return ret;
1521 }
1522 EXPORT_SYMBOL(drm_bo_do_validate);
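/*
 * Editorial usage sketch (not part of the original file): validating a
 * buffer for read/write access without changing its memory placement.
 * The flag/hint combination is an assumption for illustration only.
 */
#if 0
static int driver_validate_example(struct drm_buffer_object *bo)
{
	uint64_t flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;

	return drm_bo_do_validate(bo, flags, flags,
				  DRM_BO_HINT_DONT_FENCE,
				  0,    /* fence_class */
				  0,    /* no_wait */
				  NULL  /* rep */);
}
#endif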
1523
1524
1525 int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
1526                            uint32_t fence_class,
1527                            uint64_t flags, uint64_t mask, uint32_t hint,
1528                            struct drm_bo_info_rep * rep,
1529                            struct drm_buffer_object **bo_rep)
1530 {
1531         struct drm_device *dev = file_priv->head->dev;
1532         struct drm_buffer_object *bo;
1533         int ret;
1534         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1535
1536         mutex_lock(&dev->struct_mutex);
1537         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1538         mutex_unlock(&dev->struct_mutex);
1539
1540         if (!bo) {
1541                 return -EINVAL;
1542         }
1543         
1544         /*
1545          * Only allow creator to change shared buffer mask.
1546          */
1547
1548         if (bo->base.owner != file_priv) 
1549                 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1550
1551                 
1552         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
1553                                  no_wait, rep);
1554
1555         if (!ret && bo_rep)
1556                 *bo_rep = bo;
1557         else
1558                 drm_bo_usage_deref_unlocked(&bo);
1559
1560         return ret;
1561 }
1562 EXPORT_SYMBOL(drm_bo_handle_validate);
1563
1564 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1565                               struct drm_bo_info_rep *rep)
1566 {
1567         struct drm_device *dev = file_priv->head->dev;
1568         struct drm_buffer_object *bo;
1569
1570         mutex_lock(&dev->struct_mutex);
1571         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1572         mutex_unlock(&dev->struct_mutex);
1573
1574         if (!bo) {
1575                 return -EINVAL;
1576         }
1577         mutex_lock(&bo->mutex);
1578         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1579                 (void)drm_bo_busy(bo);
1580         drm_bo_fill_rep_arg(bo, rep);
1581         mutex_unlock(&bo->mutex);
1582         drm_bo_usage_deref_unlocked(&bo);
1583         return 0;
1584 }
1585
1586 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1587                               uint32_t hint,
1588                               struct drm_bo_info_rep *rep)
1589 {
1590         struct drm_device *dev = file_priv->head->dev;
1591         struct drm_buffer_object *bo;
1592         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1593         int ret;
1594
1595         mutex_lock(&dev->struct_mutex);
1596         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1597         mutex_unlock(&dev->struct_mutex);
1598
1599         if (!bo) {
1600                 return -EINVAL;
1601         }
1602
1603         mutex_lock(&bo->mutex);
1604         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1605         if (ret)
1606                 goto out;
1607         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1608         if (ret)
1609                 goto out;
1610
1611         drm_bo_fill_rep_arg(bo, rep);
1612
1613       out:
1614         mutex_unlock(&bo->mutex);
1615         drm_bo_usage_deref_unlocked(&bo);
1616         return ret;
1617 }
1618
1619 int drm_buffer_object_create(struct drm_device *dev,
1620                              unsigned long size,
1621                              enum drm_bo_type type,
1622                              uint64_t mask,
1623                              uint32_t hint,
1624                              uint32_t page_alignment,
1625                              unsigned long buffer_start,
1626                              struct drm_buffer_object ** buf_obj)
1627 {
1628         struct drm_buffer_manager *bm = &dev->bm;
1629         struct drm_buffer_object *bo;
1630         int ret = 0;
1631         unsigned long num_pages;
1632
1633         if (buffer_start & ~PAGE_MASK) {
1634                 DRM_ERROR("Invalid buffer object start.\n");
1635                 return -EINVAL;
1636         }
1637         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1638         if (num_pages == 0) {
1639                 DRM_ERROR("Illegal buffer object size.\n");
1640                 return -EINVAL;
1641         }
1642
1643         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1644
1645         if (!bo)
1646                 return -ENOMEM;
1647
1648         mutex_init(&bo->mutex);
1649         mutex_lock(&bo->mutex);
1650
1651         atomic_set(&bo->usage, 1);
1652         atomic_set(&bo->mapped, -1);
1653         DRM_INIT_WAITQUEUE(&bo->event_queue);
1654         INIT_LIST_HEAD(&bo->lru);
1655         INIT_LIST_HEAD(&bo->pinned_lru);
1656         INIT_LIST_HEAD(&bo->ddestroy);
1657 #ifdef DRM_ODD_MM_COMPAT
1658         INIT_LIST_HEAD(&bo->p_mm_list);
1659         INIT_LIST_HEAD(&bo->vma_list);
1660 #endif
1661         bo->dev = dev;
1662         if (buffer_start != 0)
1663                 bo->type = drm_bo_type_user;
1664         else
1665                 bo->type = type;
1666         bo->num_pages = num_pages;
1667         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1668         bo->mem.num_pages = bo->num_pages;
1669         bo->mem.mm_node = NULL;
1670         bo->mem.page_alignment = page_alignment;
1671         bo->buffer_start = buffer_start;
1672         bo->priv_flags = 0;
1673         bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | 
1674                 DRM_BO_FLAG_MAPPABLE;
1675         bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1676                 DRM_BO_FLAG_MAPPABLE;
1677         atomic_inc(&bm->count);
1678         ret = drm_bo_new_mask(bo, mask, hint);
1679
1680         if (ret)
1681                 goto out_err;
1682
1683         if (bo->type == drm_bo_type_dc) {
1684                 mutex_lock(&dev->struct_mutex);
1685                 ret = drm_bo_setup_vm_locked(bo);
1686                 mutex_unlock(&dev->struct_mutex);
1687                 if (ret)
1688                         goto out_err;
1689         }
1690
1691         ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1692         if (ret)
1693                 goto out_err;
1694
1695         mutex_unlock(&bo->mutex);
1696         *buf_obj = bo;
1697         return 0;
1698
1699       out_err:
1700         mutex_unlock(&bo->mutex);
1701
1702         drm_bo_usage_deref_unlocked(&bo);
1703         return ret;
1704 }
1705 EXPORT_SYMBOL(drm_buffer_object_create);
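
/*
 * A minimal creation sketch (illustrative only): allocate a one-page,
 * readable and writable buffer in local memory and drop the reference
 * again. The flag combination is an example, not a requirement of the
 * interface.
 *
 *	struct drm_buffer_object *bo;
 *	int ret;
 *
 *	ret = drm_buffer_object_create(dev, PAGE_SIZE, drm_bo_type_dc,
 *				       DRM_BO_FLAG_MEM_LOCAL |
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
 *				       0, 0, 0, &bo);
 *	if (!ret)
 *		drm_bo_usage_deref_unlocked(&bo);
 *
 * The three zero arguments are hint, page_alignment and buffer_start; a
 * non-zero buffer_start turns the object into a user buffer regardless of
 * the type passed in.
 */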
1706
1707
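/*
 * Wrap a freshly created buffer object in a user object so that it can be
 * looked up by handle through this drm_file, optionally making the handle
 * shareable with other clients.
 */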
1708 static int drm_bo_add_user_object(struct drm_file *file_priv,
1709                                   struct drm_buffer_object *bo, int shareable)
1710 {
1711         struct drm_device *dev = file_priv->head->dev;
1712         int ret;
1713
1714         mutex_lock(&dev->struct_mutex);
1715         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1716         if (ret)
1717                 goto out;
1718
1719         bo->base.remove = drm_bo_base_deref_locked;
1720         bo->base.type = drm_buffer_type;
1721         bo->base.ref_struct_locked = NULL;
1722         bo->base.unref = drm_buffer_user_object_unmap;
1723
1724       out:
1725         mutex_unlock(&dev->struct_mutex);
1726         return ret;
1727 }
1728
1729 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1730 {
1731         struct drm_bo_create_arg *arg = data;
1732         struct drm_bo_create_req *req = &arg->d.req;
1733         struct drm_bo_info_rep *rep = &arg->d.rep;
1734         struct drm_buffer_object *entry;
1735         int ret = 0;
1736
1737         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1738             (int)(req->size / 1024), req->page_alignment * 4);
1739
1740         if (!dev->bm.initialized) {
1741                 DRM_ERROR("Buffer object manager is not initialized.\n");
1742                 return -EINVAL;
1743         }
1744
1745         ret = drm_buffer_object_create(file_priv->head->dev,
1746                                        req->size, drm_bo_type_dc, req->mask,
1747                                        req->hint, req->page_alignment,
1748                                        req->buffer_start, &entry);
1749         if (ret)
1750                 goto out;
1751
1752         ret = drm_bo_add_user_object(file_priv, entry,
1753                                      req->mask & DRM_BO_FLAG_SHAREABLE);
1754         if (ret) {
1755                 drm_bo_usage_deref_unlocked(&entry);
1756                 goto out;
1757         }
1758
1759         mutex_lock(&entry->mutex);
1760         drm_bo_fill_rep_arg(entry, rep);
1761         mutex_unlock(&entry->mutex);
1762
1763 out:
1764         return ret;
1765 }
1766
1767 int drm_bo_setstatus_ioctl(struct drm_device *dev, 
1768                            void *data, struct drm_file *file_priv)
1769 {
1770         struct drm_bo_map_wait_idle_arg *arg = data;
1771         struct drm_bo_info_req *req = &arg->d.req;
1772         struct drm_bo_info_rep *rep = &arg->d.rep;
1773         int ret;
1774         if (!dev->bm.initialized) {
1775                 DRM_ERROR("Buffer object manager is not initialized.\n");
1776                 return -EINVAL;
1777         }
1778
1779         ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
1780                                      req->flags,
1781                                      req->mask,
1782                                      req->hint | DRM_BO_HINT_DONT_FENCE,
1783                                      rep, NULL);
1784
1785         if (ret)
1786                 return ret;
1787
1788         return 0;
1789 }
1790
1791 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1792 {
1793         struct drm_bo_map_wait_idle_arg *arg = data;
1794         struct drm_bo_info_req *req = &arg->d.req;
1795         struct drm_bo_info_rep *rep = &arg->d.rep;
1796         int ret;
1797         if (!dev->bm.initialized) {
1798                 DRM_ERROR("Buffer object manager is not initialized.\n");
1799                 return -EINVAL;
1800         }
1801
1802         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1803                                     req->hint, rep);
1804         if (ret)
1805                 return ret;
1806
1807         return 0;
1808 }
1809
1810 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1811 {
1812         struct drm_bo_handle_arg *arg = data;
1813         int ret;
1814         if (!dev->bm.initialized) {
1815                 DRM_ERROR("Buffer object manager is not initialized.\n");
1816                 return -EINVAL;
1817         }
1818
1819         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1820         return ret;
1821 }
1822
1823
1824 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1825 {
1826         struct drm_bo_reference_info_arg *arg = data;
1827         struct drm_bo_handle_arg *req = &arg->d.req;
1828         struct drm_bo_info_rep *rep = &arg->d.rep;
1829         struct drm_user_object *uo;
1830         int ret;
1831
1832         if (!dev->bm.initialized) {
1833                 DRM_ERROR("Buffer object manager is not initialized.\n");
1834                 return -EINVAL;
1835         }
1836
1837         ret = drm_user_object_ref(file_priv, req->handle,
1838                                   drm_buffer_type, &uo);
1839         if (ret)
1840                 return ret;
1841
1842         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1843         if (ret)
1844                 return ret;
1845
1846         return 0;
1847 }
1848
1849 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1850 {
1851         struct drm_bo_handle_arg *arg = data;
1852         int ret = 0;
1853
1854         if (!dev->bm.initialized) {
1855                 DRM_ERROR("Buffer object manager is not initialized.\n");
1856                 return -EINVAL;
1857         }
1858
1859         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1860         return ret;
1861 }
1862
1863 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1864 {
1865         struct drm_bo_reference_info_arg *arg = data;
1866         struct drm_bo_handle_arg *req = &arg->d.req;
1867         struct drm_bo_info_rep *rep = &arg->d.rep;
1868         int ret;
1869
1870         if (!dev->bm.initialized) {
1871                 DRM_ERROR("Buffer object manager is not initialized.\n");
1872                 return -EINVAL;
1873         }
1874
1875         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1876         if (ret)
1877                 return ret;
1878
1879         return 0;
1880 }
1881
1882 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1883 {
1884         struct drm_bo_map_wait_idle_arg *arg = data;
1885         struct drm_bo_info_req *req = &arg->d.req;
1886         struct drm_bo_info_rep *rep = &arg->d.rep;
1887         int ret;
1888         if (!dev->bm.initialized) {
1889                 DRM_ERROR("Buffer object manager is not initialized.\n");
1890                 return -EINVAL;
1891         }
1892
1893         ret = drm_bo_handle_wait(file_priv, req->handle,
1894                                  req->hint, rep);
1895         if (ret)
1896                 return ret;
1897
1898         return 0;
1899 }
1900
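/*
 * Remove a single buffer from a memory type that is being cleaned or
 * locked down: expire or wait for its fence, optionally release its pinned
 * memory node, and evict it if it still lives in the affected memory type.
 * Eviction errors are only propagated when allow_errors is set.
 */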
1901 static int drm_bo_leave_list(struct drm_buffer_object * bo,
1902                              uint32_t mem_type,
1903                              int free_pinned, int allow_errors)
1904 {
1905         struct drm_device *dev = bo->dev;
1906         int ret = 0;
1907
1908         mutex_lock(&bo->mutex);
1909
1910         ret = drm_bo_expire_fence(bo, allow_errors);
1911         if (ret)
1912                 goto out;
1913
1914         if (free_pinned) {
1915                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1916                 mutex_lock(&dev->struct_mutex);
1917                 list_del_init(&bo->pinned_lru);
1918                 if (bo->pinned_node == bo->mem.mm_node)
1919                         bo->pinned_node = NULL;
1920                 if (bo->pinned_node != NULL) {
1921                         drm_mm_put_block(bo->pinned_node);
1922                         bo->pinned_node = NULL;
1923                 }
1924                 mutex_unlock(&dev->struct_mutex);
1925         }
1926
1927         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1928                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer is present at "
1929                           "cleanup. Removing the flag and evicting.\n");
1930                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1931                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1932         }
1933
1934         if (bo->mem.mem_type == mem_type)
1935                 ret = drm_bo_evict(bo, mem_type, 0);
1936
1937         if (ret) {
1938                 if (allow_errors) {
1939                         goto out;
1940                 } else {
1941                         ret = 0;
1942                         DRM_ERROR("Cleanup eviction failed\n");
1943                 }
1944         }
1945
1946       out:
1947         mutex_unlock(&bo->mutex);
1948         return ret;
1949 }
1950
1951
1952 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
1953                                          int pinned_list)
1954 {
1955         if (pinned_list)
1956                 return list_entry(list, struct drm_buffer_object, pinned_lru);
1957         else
1958                 return list_entry(list, struct drm_buffer_object, lru);
1959 }
1960
1961 /*
1962  * dev->struct_mutex locked.
1963  */
1964
1965 static int drm_bo_force_list_clean(struct drm_device * dev,
1966                                    struct list_head *head,
1967                                    unsigned mem_type,
1968                                    int free_pinned,
1969                                    int allow_errors,
1970                                    int pinned_list)
1971 {
1972         struct list_head *list, *next, *prev;
1973         struct drm_buffer_object *entry, *nentry;
1974         int ret;
1975         int do_restart;
1976
1977         /*
1978          * The list traversal is a bit odd here, because an item may
1979          * disappear from the list when we release the struct_mutex or
1980          * when we decrease the usage count. Also we're not guaranteed
1981          * to drain pinned lists, so we can't always restart.
1982          */
1983
1984 restart:
1985         nentry = NULL;
1986         list_for_each_safe(list, next, head) {
1987                 prev = list->prev;
1988
1989                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
1990                 atomic_inc(&entry->usage);
1991                 if (nentry) {
1992                         atomic_dec(&nentry->usage);
1993                         nentry = NULL;
1994                 }
1995
1996                 /*
1997                  * Protect the next item from destruction, so we can check
1998                  * its list pointers later on.
1999                  */
2000
2001                 if (next != head) {
2002                         nentry = drm_bo_entry(next, pinned_list);
2003                         atomic_inc(&nentry->usage);
2004                 }
2005                 mutex_unlock(&dev->struct_mutex);
2006
2007                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2008                                         allow_errors);
2009                 mutex_lock(&dev->struct_mutex);
2010
2011                 drm_bo_usage_deref_locked(&entry);
2012                 if (ret)
2013                         return ret;
2014
2015                 /*
2016                  * Has the next item disappeared from the list?
2017                  */
2018
2019                 do_restart = ((next->prev != list) && (next->prev != prev));
2020
2021                 if (nentry != NULL && do_restart)
2022                         drm_bo_usage_deref_locked(&nentry);
2023
2024                 if (do_restart)
2025                         goto restart;
2026         }
2027         return 0;
2028 }
2029
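/*
 * Take down a memory manager type: evict all buffers from it, release
 * pinned regions, and tear down the underlying range manager. Returns
 * -EBUSY if the manager could not be emptied.
 */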
2030 int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
2031 {
2032         struct drm_buffer_manager *bm = &dev->bm;
2033         struct drm_mem_type_manager *man = &bm->man[mem_type];
2034         int ret = -EINVAL;
2035
2036         if (mem_type >= DRM_BO_MEM_TYPES) {
2037                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2038                 return ret;
2039         }
2040
2041         if (!man->has_type) {
2042                 DRM_ERROR("Trying to take down uninitialized "
2043                           "memory manager type %u\n", mem_type);
2044                 return ret;
2045         }
2046         man->use_type = 0;
2047         man->has_type = 0;
2048
2049         ret = 0;
2050         if (mem_type > 0) {
2051                 BUG_ON(!list_empty(&bm->unfenced));
2052                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2053                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2054
2055                 if (drm_mm_clean(&man->manager)) {
2056                         drm_mm_takedown(&man->manager);
2057                 } else {
2058                         ret = -EBUSY;
2059                 }
2060         }
2061
2062         return ret;
2063 }
2064 EXPORT_SYMBOL(drm_bo_clean_mm);
2065
2066 /**
2067  * Evict all buffers of a particular mem_type, but leave memory manager
2068  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2069  * point since we have the hardware lock.
2070  */
2071
2072 static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
2073 {
2074         int ret;
2075         struct drm_buffer_manager *bm = &dev->bm;
2076         struct drm_mem_type_manager *man = &bm->man[mem_type];
2077
2078         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2079                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2080                 return -EINVAL;
2081         }
2082
2083         if (!man->has_type) {
2084                 DRM_ERROR("Memory type %u has not been initialized.\n",
2085                           mem_type);
2086                 return 0;
2087         }
2088
2089         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2090         if (ret)
2091                 return ret;
2092         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2093
2094         return ret;
2095 }
2096
2097 int drm_bo_init_mm(struct drm_device * dev,
2098                    unsigned type,
2099                    unsigned long p_offset, unsigned long p_size)
2100 {
2101         struct drm_buffer_manager *bm = &dev->bm;
2102         int ret = -EINVAL;
2103         struct drm_mem_type_manager *man;
2104
2105         if (type >= DRM_BO_MEM_TYPES) {
2106                 DRM_ERROR("Illegal memory type %d\n", type);
2107                 return ret;
2108         }
2109
2110         man = &bm->man[type];
2111         if (man->has_type) {
2112                 DRM_ERROR("Memory manager already initialized for type %d\n",
2113                           type);
2114                 return ret;
2115         }
2116
2117         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2118         if (ret)
2119                 return ret;
2120
2121         ret = 0;
2122         if (type != DRM_BO_MEM_LOCAL) {
2123                 if (!p_size) {
2124                         DRM_ERROR("Zero size memory manager type %d\n", type);
2125                         return -EINVAL;
2126                 }
2127                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2128                 if (ret)
2129                         return ret;
2130         }
2131         man->has_type = 1;
2132         man->use_type = 1;
2133
2134         INIT_LIST_HEAD(&man->lru);
2135         INIT_LIST_HEAD(&man->pinned);
2136
2137         return 0;
2138 }
2139 EXPORT_SYMBOL(drm_bo_init_mm);
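
/*
 * Illustrative only: a driver would typically bring up its fixed apertures
 * from its own initialization path, with offset and size given in pages.
 * The dev_priv->vram_size field below is made up for the example.
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM,
 *			     0, dev_priv->vram_size >> PAGE_SHIFT);
 *
 * DRM_BO_MEM_LOCAL is set up by drm_bo_driver_init() itself, and other
 * types may also be initialized from user space via drm_mm_init_ioctl().
 */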
2140
2141 /*
2142  * This is called from lastclose, so we don't need to bother about
2143  * any clients still running when we set the initialized flag to zero.
2144  */
2145
2146 int drm_bo_driver_finish(struct drm_device * dev)
2147 {
2148         struct drm_buffer_manager *bm = &dev->bm;
2149         int ret = 0;
2150         unsigned i = DRM_BO_MEM_TYPES;
2151         struct drm_mem_type_manager *man;
2152
2153         mutex_lock(&dev->bm.init_mutex);
2154         mutex_lock(&dev->struct_mutex);
2155
2156         if (!bm->initialized)
2157                 goto out;
2158         bm->initialized = 0;
2159
2160         while (i--) {
2161                 man = &bm->man[i];
2162                 if (man->has_type) {
2163                         man->use_type = 0;
2164                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2165                                 ret = -EBUSY;
2166                                 DRM_ERROR("DRM memory manager type %d "
2167                                           "is not clean.\n", i);
2168                         }
2169                         man->has_type = 0;
2170                 }
2171         }
2172         mutex_unlock(&dev->struct_mutex);
2173
2174         if (!cancel_delayed_work(&bm->wq)) {
2175                 flush_scheduled_work();
2176         }
2177         mutex_lock(&dev->struct_mutex);
2178         drm_bo_delayed_delete(dev, 1);
2179         if (list_empty(&bm->ddestroy)) {
2180                 DRM_DEBUG("Delayed destroy list was clean\n");
2181         }
2182         if (list_empty(&bm->man[0].lru)) {
2183                 DRM_DEBUG("Swap list was clean\n");
2184         }
2185         if (list_empty(&bm->man[0].pinned)) {
2186                 DRM_DEBUG("NO_MOVE list was clean\n");
2187         }
2188         if (list_empty(&bm->unfenced)) {
2189                 DRM_DEBUG("Unfenced list was clean\n");
2190         }
2191       out:
2192         mutex_unlock(&dev->struct_mutex);
2193         mutex_unlock(&dev->bm.init_mutex);
2194         return ret;
2195 }
2196
2197 int drm_bo_driver_init(struct drm_device * dev)
2198 {
2199         struct drm_bo_driver *driver = dev->driver->bo_driver;
2200         struct drm_buffer_manager *bm = &dev->bm;
2201         int ret = -EINVAL;
2202
2203         mutex_lock(&dev->bm.init_mutex);
2204         mutex_lock(&dev->struct_mutex);
2205         if (!driver)
2206                 goto out_unlock;
2207
2208         /*
2209          * Initialize the system memory buffer type.
2210          * Other types need to be driver / IOCTL initialized.
2211          */
2212         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2213         if (ret)
2214                 goto out_unlock;
2215
2216 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2217         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2218 #else
2219         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2220 #endif
2221         bm->initialized = 1;
2222         bm->nice_mode = 1;
2223         atomic_set(&bm->count, 0);
2224         bm->cur_pages = 0;
2225         INIT_LIST_HEAD(&bm->unfenced);
2226         INIT_LIST_HEAD(&bm->ddestroy);
2227       out_unlock:
2228         mutex_unlock(&dev->struct_mutex);
2229         mutex_unlock(&dev->bm.init_mutex);
2230         return ret;
2231 }
2232
2233 EXPORT_SYMBOL(drm_bo_driver_init);
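
/*
 * A driver enabling the buffer object interface would typically call
 * drm_bo_driver_init() once from its load hook, before any buffer object
 * ioctls can succeed. A rough sketch (the hook name is illustrative):
 *
 *	static int foo_driver_load(struct drm_device *dev, unsigned long flags)
 *	{
 *		...
 *		return drm_bo_driver_init(dev);
 *	}
 */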
2234
2235 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2236 {
2237         struct drm_mm_init_arg *arg = data;
2238         struct drm_buffer_manager *bm = &dev->bm;
2239         struct drm_bo_driver *driver = dev->driver->bo_driver;
2240         int ret;
2241
2242         if (!driver) {
2243                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2244                 return -EINVAL;
2245         }
2246
2247         ret = -EINVAL;
2248         if (arg->magic != DRM_BO_INIT_MAGIC) {
2249                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2250                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2251                 return -EINVAL;
2252         }
2253         if (arg->major != DRM_BO_INIT_MAJOR) {
2254                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2255                           "\tversions don't match. Got %d, expected %d.\n",
2256                           arg->major, DRM_BO_INIT_MAJOR);
2257                 return -EINVAL;
2258         }
2259         if (arg->minor > DRM_BO_INIT_MINOR) {
2260                 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2261                           "\tlibdrm buffer object interface version is %d.%d.\n"
2262                           "\tkernel DRM buffer object interface version is %d.%d.\n",
2263                           arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2264                 return -EINVAL;
2265         }
2266
2267         mutex_lock(&dev->bm.init_mutex);
2268         mutex_lock(&dev->struct_mutex);
2269         if (!bm->initialized) {
2270                 DRM_ERROR("DRM memory manager was not initialized.\n");
2271                 goto out;
2272         }
2273         if (arg->mem_type == 0) {
2274                 DRM_ERROR("System memory buffers already initialized.\n");
2275                 goto out;
2276         }
2277         ret = drm_bo_init_mm(dev, arg->mem_type,
2278                              arg->p_offset, arg->p_size);
2279
2280 out:
2281         mutex_unlock(&dev->struct_mutex);
2282         mutex_unlock(&dev->bm.init_mutex);
2283         if (ret)
2284                 return ret;
2285
2286         return 0;
2287 }
2288
2289 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2290 {
2291         struct drm_mm_type_arg *arg = data;
2292         struct drm_buffer_manager *bm = &dev->bm;
2293         struct drm_bo_driver *driver = dev->driver->bo_driver;
2294         int ret;
2295
2296         if (!driver) {
2297                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2298                 return -EINVAL;
2299         }
2300
2301         LOCK_TEST_WITH_RETURN(dev, file_priv);
2302         mutex_lock(&dev->bm.init_mutex);
2303         mutex_lock(&dev->struct_mutex);
2304         ret = -EINVAL;
2305         if (!bm->initialized) {
2306                 DRM_ERROR("DRM memory manager was not initialized\n");
2307                 goto out;
2308         }
2309         if (arg->mem_type == 0) {
2310                 DRM_ERROR("No takedown for System memory buffers.\n");
2311                 goto out;
2312         }
2313         ret = 0;
2314         if (drm_bo_clean_mm(dev, arg->mem_type)) {
2315                 DRM_ERROR("Memory manager type %d not clean. "
2316                           "Delaying takedown\n", arg->mem_type);
2317         }
2318 out:
2319         mutex_unlock(&dev->struct_mutex);
2320         mutex_unlock(&dev->bm.init_mutex);
2321         if (ret)
2322                 return ret;
2323
2324         return 0;
2325 }
2326
2327 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2328 {
2329         struct drm_mm_type_arg *arg = data;
2330         struct drm_bo_driver *driver = dev->driver->bo_driver;
2331         int ret;
2332
2333         if (!driver) {
2334                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2335                 return -EINVAL;
2336         }
2337
2338         LOCK_TEST_WITH_RETURN(dev, file_priv);
2339         mutex_lock(&dev->bm.init_mutex);
2340         mutex_lock(&dev->struct_mutex);
2341         ret = drm_bo_lock_mm(dev, arg->mem_type);
2342         mutex_unlock(&dev->struct_mutex);
2343         mutex_unlock(&dev->bm.init_mutex);
2344         if (ret)
2345                 return ret;
2346
2347         return 0;
2348 }
2349
2350 int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2351 {
2352         struct drm_bo_driver *driver = dev->driver->bo_driver;
2353         int ret;
2354
2355         if (!driver) {
2356                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2357                 return -EINVAL;
2358         }
2359
2360         LOCK_TEST_WITH_RETURN(dev, file_priv);
2361         mutex_lock(&dev->bm.init_mutex);
2362         mutex_lock(&dev->struct_mutex);
2363         ret = 0;
2364
2365         mutex_unlock(&dev->struct_mutex);
2366         mutex_unlock(&dev->bm.init_mutex);
2367         if (ret)
2368                 return ret;
2369
2370         return 0;
2371 }
2372
2373 /*
2374  * buffer object vm functions.
2375  */
2376
2377 int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
2378 {
2379         struct drm_buffer_manager *bm = &dev->bm;
2380         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2381
2382         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2383                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2384                         return 0;
2385
2386                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2387                         return 0;
2388
2389                 if (mem->flags & DRM_BO_FLAG_CACHED)
2390                         return 0;
2391         }
2392         return 1;
2393 }
2394
2395 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2396
2397 /**
2398  * Get the PCI offset for the buffer object memory.
2399  *
2400  * \param mem The buffer object memory region.
2401  * \param bus_base On return the base of the PCI region.
2402  * \param bus_offset On return the byte offset into the PCI region.
2403  * \param bus_size On return the byte size of the buffer object, or zero if
2404  *     the buffer object memory is not accessible through a PCI region.
2405  * \return Failure indication.
2406  *
2407  * Returns -EINVAL if the buffer object is currently not mappable.
2408  * Otherwise returns zero.
2409  */
2410
2411 int drm_bo_pci_offset(struct drm_device *dev,
2412                       struct drm_bo_mem_reg *mem,
2413                       unsigned long *bus_base,
2414                       unsigned long *bus_offset, unsigned long *bus_size)
2415 {
2416         struct drm_buffer_manager *bm = &dev->bm;
2417         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2418
2419         *bus_size = 0;
2420         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2421                 return -EINVAL;
2422
2423         if (drm_mem_reg_is_pci(dev, mem)) {
2424                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2425                 *bus_size = mem->num_pages << PAGE_SHIFT;
2426                 *bus_base = man->io_offset;
2427         }
2428
2429         return 0;
2430 }
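
/*
 * A caller mapping the region would typically combine the returned values
 * roughly as below (a sketch; mapping of non-PCI, cached memory is handled
 * elsewhere):
 *
 *	if (!drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
 *			       &bus_size) && bus_size != 0)
 *		virtual = ioremap(bus_base + bus_offset, bus_size);
 */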
2431
2432 /**
2433  * Kill all user-space virtual mappings of this buffer object.
2434  *
2435  * \param bo The buffer object.
2436  *
2437  * Call with bo->mutex held.
2438  */
2439
2440 void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
2441 {
2442         struct drm_device *dev = bo->dev;
2443         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2444         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2445
2446         if (!dev->dev_mapping)
2447                 return;
2448
2449         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2450 }
2451
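/*
 * Release the mmap offset of a buffer object: remove the map hash entry,
 * give back the file offset node and the fake map, and drop the usage
 * reference taken when the mapping was set up. Call with dev->struct_mutex
 * held.
 */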
2452 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
2453 {
2454         struct drm_map_list *list = &bo->map_list;
2455         drm_local_map_t *map;
2456         struct drm_device *dev = bo->dev;
2457
2458         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2459         if (list->user_token) {
2460                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2461                 list->user_token = 0;
2462         }
2463         if (list->file_offset_node) {
2464                 drm_mm_put_block(list->file_offset_node);
2465                 list->file_offset_node = NULL;
2466         }
2467
2468         map = list->map;
2469         if (!map)
2470                 return;
2471
2472         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2473         list->map = NULL;
2474         list->user_token = 0ULL;
2475         drm_bo_usage_deref_locked(&bo);
2476 }
2477
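/*
 * Set up the user-space mmap offset of a buffer object: allocate a fake
 * _DRM_TTM map, reserve a range in the device offset manager and hash it
 * so that the mmap code can find the object. Takes a usage reference that
 * is dropped again in drm_bo_takedown_vm_locked(). Call with
 * dev->struct_mutex held.
 */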
2478 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
2479 {
2480         struct drm_map_list *list = &bo->map_list;
2481         drm_local_map_t *map;
2482         struct drm_device *dev = bo->dev;
2483
2484         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2485         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2486         if (!list->map)
2487                 return -ENOMEM;
2488
2489         map = list->map;
2490         map->offset = 0;
2491         map->type = _DRM_TTM;
2492         map->flags = _DRM_REMOVABLE;
2493         map->size = bo->mem.num_pages * PAGE_SIZE;
2494         atomic_inc(&bo->usage);
2495         map->handle = (void *)bo;
2496
2497         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2498                                                     bo->mem.num_pages, 0, 0);
2499
2500         if (!list->file_offset_node) {
2501                 drm_bo_takedown_vm_locked(bo);
2502                 return -ENOMEM;
2503         }
2504
2505         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2506                                                   bo->mem.num_pages, 0);
2507
2508         list->hash.key = list->file_offset_node->start;
2509         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2510                 drm_bo_takedown_vm_locked(bo);
2511                 return -ENOMEM;
2512         }
2513
2514         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2515
2516         return 0;
2517 }