1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44  * both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted (see the sketch below).
49  *
50  */
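/*
 * A typical traversal under these rules (see for example drm_bo_cleanup_refs()
 * and drm_bo_mem_force_space() below) looks roughly like:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	entry = list_entry(list->next, struct drm_buffer_object, lru);
 *	atomic_inc(&entry->usage);	    (keep the buffer alive)
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&entry->mutex);
 *	... operate on the buffer ...
 *	mutex_unlock(&entry->mutex);
 *	drm_bo_usage_deref_unlocked(&entry);
 *	mutex_lock(&dev->struct_mutex);	    (retake, restart traversal)
 */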
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
56
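/*
 * Map a memory type index (DRM_BO_MEM_*) to its buffer flag bit. This
 * assumes the memory-type flags occupy bits 24 and up of the flag word,
 * so e.g. DRM_BO_MEM_LOCAL (index 0) corresponds to bit 24.
 */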
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(struct drm_buffer_object * bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
92 static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
93 {
94 #ifdef DRM_ODD_MM_COMPAT
95         int ret;
96
97         if (!bo->map_list.map)
98                 return 0;
99
100         ret = drm_bo_lock_kmm(bo);
101         if (ret)
102                 return ret;
103         drm_bo_unmap_virtual(bo);
104         if (old_is_pci)
105                 drm_bo_finish_unmap(bo);
106 #else
107         if (!bo->map_list.map)
108                 return 0;
109
110         drm_bo_unmap_virtual(bo);
111 #endif
112         return 0;
113 }
114
115 static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
116 {
117 #ifdef DRM_ODD_MM_COMPAT
118         int ret;
119
120         if (!bo->map_list.map)
121                 return;
122
123         ret = drm_bo_remap_bound(bo);
124         if (ret) {
125                 DRM_ERROR("Failed to remap a bound buffer object.\n"
126                           "\tThis might cause a sigbus later.\n");
127         }
128         drm_bo_unlock_kmm(bo);
129 #endif
130 }
131
132 /*
133  * Call bo->mutex locked.
134  */
135
136 static int drm_bo_add_ttm(struct drm_buffer_object * bo)
137 {
138         struct drm_device *dev = bo->dev;
139         int ret = 0;
140         bo->ttm = NULL;
141
142         DRM_ASSERT_LOCKED(&bo->mutex);
143
144         switch (bo->type) {
145         case drm_bo_type_dc:
146                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
147                 if (!bo->ttm)
148                         ret = -ENOMEM;
149                 break;
150         case drm_bo_type_kernel:
151                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
152                 if (!bo->ttm)
153                         ret = -ENOMEM;
154                 break;
155         case drm_bo_type_user:
156         case drm_bo_type_fake:
157                 break;
158         default:
159                 DRM_ERROR("Illegal buffer object type\n");
160                 ret = -EINVAL;
161                 break;
162         }
163
164         return ret;
165 }
166
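/*
 * Move the buffer into the region described by @mem. A ttm is created
 * (and, for non-local memory, bound) when the destination type is not
 * FIXED; the contents are then transferred with drm_bo_move_ttm(), the
 * driver's move() hook if it provides one, or drm_bo_move_memcpy() as the
 * fallback. Called with bo->mutex held.
 */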
167 static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
168                                   struct drm_bo_mem_reg * mem,
169                                   int evict, int no_wait)
170 {
171         struct drm_device *dev = bo->dev;
172         struct drm_buffer_manager *bm = &dev->bm;
173         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
174         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
175         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
176         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
177         int ret = 0;
178
179         if (old_is_pci || new_is_pci)
180                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
181         if (ret)
182                 return ret;
183
184         /*
185          * Create and bind a ttm if required.
186          */
187
188         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
189                 ret = drm_bo_add_ttm(bo);
190                 if (ret)
191                         goto out_err;
192
193                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
194                         ret = drm_bind_ttm(bo->ttm, new_man->flags &
195                                            DRM_BO_FLAG_CACHED,
196                                            mem->mm_node->start);
197                         if (ret)
198                                 goto out_err;
199                 }
200         }
201
202         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
203
204                 struct drm_bo_mem_reg *old_mem = &bo->mem;
205                 uint64_t save_flags = old_mem->flags;
206                 uint64_t save_mask = old_mem->mask;
207
208                 *old_mem = *mem;
209                 mem->mm_node = NULL;
210                 old_mem->mask = save_mask;
211                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
212
213         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
214                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
215
216                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
217
218         } else if (dev->driver->bo_driver->move) {
219                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
220
221         } else {
222
223                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
224
225         }
226
227         if (ret)
228                 goto out_err;
229
230         if (old_is_pci || new_is_pci)
231                 drm_bo_vm_post_move(bo);
232
233         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
234                 ret =
235                     dev->driver->bo_driver->invalidate_caches(dev,
236                                                               bo->mem.flags);
237                 if (ret)
238                         DRM_ERROR("Cannot flush read caches\n");
239         }
240
241         DRM_FLAG_MASKED(bo->priv_flags,
242                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
243                         _DRM_BO_FLAG_EVICTED);
244
245         if (bo->mem.mm_node)
246                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
247
248         return 0;
249
250       out_err:
251         if (old_is_pci || new_is_pci)
252                 drm_bo_vm_post_move(bo);
253
254         new_man = &bm->man[bo->mem.mem_type];
255         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
256                 drm_ttm_unbind(bo->ttm);
257                 drm_destroy_ttm(bo->ttm);
258                 bo->ttm = NULL;
259         }
260
261         return ret;
262 }
263
264 /*
265  * Call bo->mutex locked.
266  * Wait until the buffer is idle.
267  */
268
269 int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
270                 int no_wait)
271 {
272         int ret;
273
274         DRM_ASSERT_LOCKED(&bo->mutex);
275
276         if (bo->fence) {
277                 if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
278                         drm_fence_usage_deref_unlocked(&bo->fence);
279                         return 0;
280                 }
281                 if (no_wait) {
282                         return -EBUSY;
283                 }
284                 ret =
285                     drm_fence_object_wait(bo->fence, lazy, ignore_signals,
286                                           bo->fence_type);
287                 if (ret)
288                         return ret;
289
290                 drm_fence_usage_deref_unlocked(&bo->fence);
291         }
292         return 0;
293 }
294
295 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
296 {
297         struct drm_device *dev = bo->dev;
298         struct drm_buffer_manager *bm = &dev->bm;
299
300         if (bo->fence) {
301                 if (bm->nice_mode) {
302                         unsigned long _end = jiffies + 3 * DRM_HZ;
303                         int ret;
304                         do {
305                                 ret = drm_bo_wait(bo, 0, 1, 0);
306                                 if (ret && allow_errors)
307                                         return ret;
308
309                         } while (ret && !time_after_eq(jiffies, _end));
310
311                         if (bo->fence) {
312                                 bm->nice_mode = 0;
313                                 DRM_ERROR("Detected GPU lockup or "
314                                           "fence driver was taken down. "
315                                           "Evicting buffer.\n");
316                         }
317                 }
318                 if (bo->fence)
319                         drm_fence_usage_deref_unlocked(&bo->fence);
320         }
321         return 0;
322 }
323
324 /*
325  * Call dev->struct_mutex locked.
326  * Attempts to remove all private references to a buffer by expiring its
327  * fence object and removing it from lru lists and memory managers.
328  */
329
330 static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
331 {
332         struct drm_device *dev = bo->dev;
333         struct drm_buffer_manager *bm = &dev->bm;
334
335         DRM_ASSERT_LOCKED(&dev->struct_mutex);
336
337         atomic_inc(&bo->usage);
338         mutex_unlock(&dev->struct_mutex);
339         mutex_lock(&bo->mutex);
340
341         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
342
343         if (bo->fence && drm_fence_object_signaled(bo->fence,
344                                                    bo->fence_type, 0))
345                 drm_fence_usage_deref_unlocked(&bo->fence);
346
347         if (bo->fence && remove_all)
348                 (void)drm_bo_expire_fence(bo, 0);
349
350         mutex_lock(&dev->struct_mutex);
351
352         if (!atomic_dec_and_test(&bo->usage)) {
353                 goto out;
354         }
355
356         if (!bo->fence) {
357                 list_del_init(&bo->lru);
358                 if (bo->mem.mm_node) {
359                         drm_mm_put_block(bo->mem.mm_node);
360                         if (bo->pinned_node == bo->mem.mm_node)
361                                 bo->pinned_node = NULL;
362                         bo->mem.mm_node = NULL;
363                 }
364                 list_del_init(&bo->pinned_lru);
365                 if (bo->pinned_node) {
366                         drm_mm_put_block(bo->pinned_node);
367                         bo->pinned_node = NULL;
368                 }
369                 list_del_init(&bo->ddestroy);
370                 mutex_unlock(&bo->mutex);
371                 drm_bo_destroy_locked(bo);
372                 return;
373         }
374
375         if (list_empty(&bo->ddestroy)) {
376                 drm_fence_object_flush(bo->fence, bo->fence_type);
377                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
378                 schedule_delayed_work(&bm->wq,
379                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
380         }
381
382       out:
383         mutex_unlock(&bo->mutex);
384         return;
385 }
386
387 /*
388  * Verify that refcount is 0 and that there are no internal references
389  * to the buffer object. Then destroy it.
390  */
391
392 static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
393 {
394         struct drm_device *dev = bo->dev;
395         struct drm_buffer_manager *bm = &dev->bm;
396
397         DRM_ASSERT_LOCKED(&dev->struct_mutex);
398
399         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
400             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
401             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
402                 if (bo->fence != NULL) {
403                         DRM_ERROR("Fence was non-zero.\n");
404                         drm_bo_cleanup_refs(bo, 0);
405                         return;
406                 }
407
408 #ifdef DRM_ODD_MM_COMPAT
409                 BUG_ON(!list_empty(&bo->vma_list));
410                 BUG_ON(!list_empty(&bo->p_mm_list));
411 #endif
412
413                 if (bo->ttm) {
414                         drm_ttm_unbind(bo->ttm);
415                         drm_destroy_ttm(bo->ttm);
416                         bo->ttm = NULL;
417                 }
418
419                 atomic_dec(&bm->count);
420
421                 BUG_ON(!list_empty(&bo->base.list));
422                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
423
424                 return;
425         }
426
427         /*
428          * Some stuff is still trying to reference the buffer object.
429          * Get rid of those references.
430          */
431
432         drm_bo_cleanup_refs(bo, 0);
433
434         return;
435 }
436
437 /*
438  * Call dev->struct_mutex locked.
439  */
440
441 static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
442 {
443         struct drm_buffer_manager *bm = &dev->bm;
444
445         struct drm_buffer_object *entry, *nentry;
446         struct list_head *list, *next;
447
448         list_for_each_safe(list, next, &bm->ddestroy) {
449                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
450
451                 nentry = NULL;
452                 if (next != &bm->ddestroy) {
453                         nentry = list_entry(next, struct drm_buffer_object,
454                                             ddestroy);
455                         atomic_inc(&nentry->usage);
456                 }
457
458                 drm_bo_cleanup_refs(entry, remove_all);
459
460                 if (nentry) {
461                         atomic_dec(&nentry->usage);
462                 }
463         }
464 }
465
466 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
467 static void drm_bo_delayed_workqueue(void *data)
468 #else
469 static void drm_bo_delayed_workqueue(struct work_struct *work)
470 #endif
471 {
472 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
473         struct drm_device *dev = (struct drm_device *) data;
474         struct drm_buffer_manager *bm = &dev->bm;
475 #else
476         struct drm_buffer_manager *bm =
477             container_of(work, struct drm_buffer_manager, wq.work);
478         struct drm_device *dev = container_of(bm, struct drm_device, bm);
479 #endif
480
481         DRM_DEBUG("Delayed delete Worker\n");
482
483         mutex_lock(&dev->struct_mutex);
484         if (!bm->initialized) {
485                 mutex_unlock(&dev->struct_mutex);
486                 return;
487         }
488         drm_bo_delayed_delete(dev, 0);
489         if (bm->initialized && !list_empty(&bm->ddestroy)) {
490                 schedule_delayed_work(&bm->wq,
491                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
492         }
493         mutex_unlock(&dev->struct_mutex);
494 }
495
496 void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
497 {
498         struct drm_buffer_object *tmp_bo = *bo;
499         *bo = NULL;
500
501         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
502
503         if (atomic_dec_and_test(&tmp_bo->usage)) {
504                 drm_bo_destroy_locked(tmp_bo);
505         }
506 }
507
508 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
509                                      struct drm_user_object * uo)
510 {
511         struct drm_buffer_object *bo =
512             drm_user_object_entry(uo, struct drm_buffer_object, base);
513
514         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
515
516         drm_bo_takedown_vm_locked(bo);
517         drm_bo_usage_deref_locked(&bo);
518 }
519
520 static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
521 {
522         struct drm_buffer_object *tmp_bo = *bo;
523         struct drm_device *dev = tmp_bo->dev;
524
525         *bo = NULL;
526         if (atomic_dec_and_test(&tmp_bo->usage)) {
527                 mutex_lock(&dev->struct_mutex);
528                 if (atomic_read(&tmp_bo->usage) == 0)
529                         drm_bo_destroy_locked(tmp_bo);
530                 mutex_unlock(&dev->struct_mutex);
531         }
532 }
533
534 /*
535  * Note. The caller has to register (if applicable)
536  * and deregister fence object usage.
537  */
538
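/*
 * Fence all unfenced buffers on @list (or on bm->unfenced when @list is
 * NULL) with @fence. If @fence is NULL, a new fence object covering the
 * combined fence types of the buffers is created and emitted. Fenced
 * buffers lose _DRM_BO_FLAG_UNFENCED and go back onto their LRU lists;
 * the fence actually used is returned in @used_fence.
 */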
539 int drm_fence_buffer_objects(struct drm_file * file_priv,
540                              struct list_head *list,
541                              uint32_t fence_flags,
542                              struct drm_fence_object * fence,
543                              struct drm_fence_object ** used_fence)
544 {
545         struct drm_device *dev = file_priv->head->dev;
546         struct drm_buffer_manager *bm = &dev->bm;
547
548         struct drm_buffer_object *entry;
549         uint32_t fence_type = 0;
550         int count = 0;
551         int ret = 0;
552         struct list_head *l;
553         LIST_HEAD(f_list);
554
555         mutex_lock(&dev->struct_mutex);
556
557         if (!list)
558                 list = &bm->unfenced;
559
560         list_for_each_entry(entry, list, lru) {
561                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
562                 fence_type |= entry->fence_type;
563                 if (entry->fence_class != 0) {
564                         DRM_ERROR("Fence class %d is not implemented yet.\n",
565                                   entry->fence_class);
566                         ret = -EINVAL;
567                         goto out;
568                 }
569                 count++;
570         }
571
572         if (!count) {
573                 ret = -EINVAL;
574                 goto out;
575         }
576
577         /*
578          * Transfer to a local list before we release the dev->struct_mutex.
579          * This is so we don't get any new unfenced objects while fencing
580          * the ones we already have.
581          */
582
583         list_splice_init(list, &f_list);
584
585         if (fence) {
586                 if ((fence_type & fence->type) != fence_type) {
587                         DRM_ERROR("Given fence doesn't match buffers "
588                                   "on unfenced list.\n");
589                         ret = -EINVAL;
590                         goto out;
591                 }
592         } else {
593                 mutex_unlock(&dev->struct_mutex);
594                 ret = drm_fence_object_create(dev, 0, fence_type,
595                                               fence_flags | DRM_FENCE_FLAG_EMIT,
596                                               &fence);
597                 mutex_lock(&dev->struct_mutex);
598                 if (ret)
599                         goto out;
600         }
601
602         count = 0;
603         l = f_list.next;
604         while (l != &f_list) {
605                 prefetch(l->next);
606                 entry = list_entry(l, struct drm_buffer_object, lru);
607                 atomic_inc(&entry->usage);
608                 mutex_unlock(&dev->struct_mutex);
609                 mutex_lock(&entry->mutex);
610                 mutex_lock(&dev->struct_mutex);
611                 list_del_init(l);
612                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
613                         count++;
614                         if (entry->fence)
615                                 drm_fence_usage_deref_locked(&entry->fence);
616                         entry->fence = drm_fence_reference_locked(fence);
617                         DRM_FLAG_MASKED(entry->priv_flags, 0,
618                                         _DRM_BO_FLAG_UNFENCED);
619                         DRM_WAKEUP(&entry->event_queue);
620                         drm_bo_add_to_lru(entry);
621                 }
622                 mutex_unlock(&entry->mutex);
623                 drm_bo_usage_deref_locked(&entry);
624                 l = f_list.next;
625         }
626         DRM_DEBUG("Fenced %d buffers\n", count);
627       out:
628         mutex_unlock(&dev->struct_mutex);
629         *used_fence = fence;
630         return ret;
631 }
632
633 EXPORT_SYMBOL(drm_fence_buffer_objects);
634
635 /*
636  * bo->mutex locked
637  */
638
639 static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
640                         int no_wait)
641 {
642         int ret = 0;
643         struct drm_device *dev = bo->dev;
644         struct drm_bo_mem_reg evict_mem;
645
646         /*
647          * Someone might have modified the buffer before we took the buffer mutex.
648          */
649
650         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
651                 goto out;
652         if (bo->mem.mem_type != mem_type)
653                 goto out;
654
655         ret = drm_bo_wait(bo, 0, 0, no_wait);
656
657         if (ret && ret != -EAGAIN) {
658                 DRM_ERROR("Failed to expire fence before "
659                           "buffer eviction.\n");
660                 goto out;
661         }
662
663         evict_mem = bo->mem;
664         evict_mem.mm_node = NULL;
665
666         if (bo->type == drm_bo_type_fake) {
667                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
668                 bo->mem.mm_node = NULL;
669                 goto out1;
670         }
671
672         evict_mem = bo->mem;
673         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
674         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
675
676         if (ret) {
677                 if (ret != -EAGAIN)
678                         DRM_ERROR("Failed to find memory space for "
679                                   "buffer 0x%p eviction.\n", bo);
680                 goto out;
681         }
682
683         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
684
685         if (ret) {
686                 if (ret != -EAGAIN)
687                         DRM_ERROR("Buffer eviction failed\n");
688                 goto out;
689         }
690
691       out1:
692         mutex_lock(&dev->struct_mutex);
693         if (evict_mem.mm_node) {
694                 if (evict_mem.mm_node != bo->pinned_node)
695                         drm_mm_put_block(evict_mem.mm_node);
696                 evict_mem.mm_node = NULL;
697         }
698         list_del(&bo->lru);
699         drm_bo_add_to_lru(bo);
700         mutex_unlock(&dev->struct_mutex);
701
702         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
703                         _DRM_BO_FLAG_EVICTED);
704
705       out:
706         return ret;
707 }
708
709 static int drm_bo_mem_force_space(struct drm_device * dev,
710                                   struct drm_bo_mem_reg * mem,
711                                   uint32_t mem_type, int no_wait)
712 {
713         struct drm_mm_node *node;
714         struct drm_buffer_manager *bm = &dev->bm;
715         struct drm_buffer_object *entry;
716         struct drm_mem_type_manager *man = &bm->man[mem_type];
717         struct list_head *lru;
718         unsigned long num_pages = mem->num_pages;
719         int ret;
720
721         mutex_lock(&dev->struct_mutex);
722         do {
723                 node = drm_mm_search_free(&man->manager, num_pages,
724                                           mem->page_alignment, 1);
725                 if (node)
726                         break;
727
728                 lru = &man->lru;
729                 if (lru->next == lru)
730                         break;
731
732                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
733                 atomic_inc(&entry->usage);
734                 mutex_unlock(&dev->struct_mutex);
735                 mutex_lock(&entry->mutex);
736                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
737
738                 ret = drm_bo_evict(entry, mem_type, no_wait);
739                 mutex_unlock(&entry->mutex);
740                 drm_bo_usage_deref_unlocked(&entry);
741                 if (ret)
742                         return ret;
743                 mutex_lock(&dev->struct_mutex);
744         } while (1);
745
746         if (!node) {
747                 mutex_unlock(&dev->struct_mutex);
748                 return -ENOMEM;
749         }
750
751         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
752         mutex_unlock(&dev->struct_mutex);
753         mem->mm_node = node;
754         mem->mem_type = mem_type;
755         return 0;
756 }
757
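/*
 * Check whether the memory type @mem_type can satisfy the placement mask.
 * On success, *res_mask is set to the flags the placement would actually
 * get: the memory-type bit plus CACHED / MAPPABLE as supported by the
 * manager.
 */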
758 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
759                                 uint32_t mem_type,
760                                 uint32_t mask, uint32_t * res_mask)
761 {
762         uint32_t cur_flags = drm_bo_type_flags(mem_type);
763         uint32_t flag_diff;
764
765         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
766                 cur_flags |= DRM_BO_FLAG_CACHED;
767         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
768                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
769         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
770                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
771
772         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
773                 return 0;
774
775         if (mem_type == DRM_BO_MEM_LOCAL) {
776                 *res_mask = cur_flags;
777                 return 1;
778         }
779
780         flag_diff = (mask ^ cur_flags);
781         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
782             (!(mask & DRM_BO_FLAG_CACHED) ||
783              (mask & DRM_BO_FLAG_FORCE_CACHING)))
784                 return 0;
785
786         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
787             ((mask & DRM_BO_FLAG_MAPPABLE) ||
788              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
789                 return 0;
790
791         *res_mask = cur_flags;
792         return 1;
793 }
794
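/*
 * Find space for @mem. The first pass walks the driver's mem_type_prio
 * list looking for a compatible memory type with free space (a pinned
 * buffer simply reuses its pinned node); if that fails, a second pass over
 * mem_busy_prio tries to evict LRU buffers via drm_bo_mem_force_space().
 * Returns -EINVAL when no usable compatible type is found, -EAGAIN when an
 * eviction attempt reported it, and -ENOMEM otherwise.
 */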
795 int drm_bo_mem_space(struct drm_buffer_object * bo,
796                      struct drm_bo_mem_reg * mem, int no_wait)
797 {
798         struct drm_device *dev = bo->dev;
799         struct drm_buffer_manager *bm = &dev->bm;
800         struct drm_mem_type_manager *man;
801
802         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
803         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
804         uint32_t i;
805         uint32_t mem_type = DRM_BO_MEM_LOCAL;
806         uint32_t cur_flags;
807         int type_found = 0;
808         int type_ok = 0;
809         int has_eagain = 0;
810         struct drm_mm_node *node = NULL;
811         int ret;
812
813         mem->mm_node = NULL;
814         for (i = 0; i < num_prios; ++i) {
815                 mem_type = prios[i];
816                 man = &bm->man[mem_type];
817
818                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
819                                                &cur_flags);
820
821                 if (!type_ok)
822                         continue;
823
824                 if (mem_type == DRM_BO_MEM_LOCAL)
825                         break;
826
827                 if ((mem_type == bo->pinned_mem_type) &&
828                     (bo->pinned_node != NULL)) {
829                         node = bo->pinned_node;
830                         break;
831                 }
832
833                 mutex_lock(&dev->struct_mutex);
834                 if (man->has_type && man->use_type) {
835                         type_found = 1;
836                         node = drm_mm_search_free(&man->manager, mem->num_pages,
837                                                   mem->page_alignment, 1);
838                         if (node)
839                                 node = drm_mm_get_block(node, mem->num_pages,
840                                                         mem->page_alignment);
841                 }
842                 mutex_unlock(&dev->struct_mutex);
843                 if (node)
844                         break;
845         }
846
847         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
848                 mem->mm_node = node;
849                 mem->mem_type = mem_type;
850                 mem->flags = cur_flags;
851                 return 0;
852         }
853
854         if (!type_found)
855                 return -EINVAL;
856
857         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
858         prios = dev->driver->bo_driver->mem_busy_prio;
859
860         for (i = 0; i < num_prios; ++i) {
861                 mem_type = prios[i];
862                 man = &bm->man[mem_type];
863
864                 if (!man->has_type)
865                         continue;
866
867                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
868                         continue;
869
870                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
871
872                 if (ret == 0) {
873                         mem->flags = cur_flags;
874                         return 0;
875                 }
876
877                 if (ret == -EAGAIN)
878                         has_eagain = 1;
879         }
880
881         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
882         return ret;
883 }
884
885 EXPORT_SYMBOL(drm_bo_mem_space);
886
887 static int drm_bo_new_mask(struct drm_buffer_object * bo,
888                            uint64_t new_mask, uint32_t hint)
889 {
890         uint32_t new_props;
891
892         if (bo->type == drm_bo_type_user) {
893                 DRM_ERROR("User buffers are not supported yet\n");
894                 return -EINVAL;
895         }
896         if (bo->type == drm_bo_type_fake &&
897             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
898                 DRM_ERROR("Fake buffers must be pinned.\n");
899                 return -EINVAL;
900         }
901
902         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
903                 DRM_ERROR
904                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
905                      "processes\n");
906                 return -EPERM;
907         }
908
909         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
910                                 DRM_BO_FLAG_READ);
911
912         if (!new_props) {
913                 DRM_ERROR("Invalid buffer object rwx properties\n");
914                 return -EINVAL;
915         }
916
917         bo->mem.mask = new_mask;
918         return 0;
919 }
920
921 /*
922  * Call dev->struct_mutex locked.
923  */
924
925 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
926                                               uint32_t handle, int check_owner)
927 {
928         struct drm_user_object *uo;
929         struct drm_buffer_object *bo;
930
931         uo = drm_lookup_user_object(file_priv, handle);
932
933         if (!uo || (uo->type != drm_buffer_type)) {
934                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
935                 return NULL;
936         }
937
938         if (check_owner && file_priv != uo->owner) {
939                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
940                         return NULL;
941         }
942
943         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
944         atomic_inc(&bo->usage);
945         return bo;
946 }
947
948 /*
949  * Call bo->mutex locked.
950  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
951  * Unlike the drm_bo_busy function, it doesn't do any fence flushing.
952  */
953
954 static int drm_bo_quick_busy(struct drm_buffer_object * bo)
955 {
956         struct drm_fence_object *fence = bo->fence;
957
958         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
959         if (fence) {
960                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
961                         drm_fence_usage_deref_unlocked(&bo->fence);
962                         return 0;
963                 }
964                 return 1;
965         }
966         return 0;
967 }
968
969 /*
970  * Call bo->mutex locked.
971  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
972  */
973
974 static int drm_bo_busy(struct drm_buffer_object * bo)
975 {
976         struct drm_fence_object *fence = bo->fence;
977
978         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
979         if (fence) {
980                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
981                         drm_fence_usage_deref_unlocked(&bo->fence);
982                         return 0;
983                 }
984                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
985                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
986                         drm_fence_usage_deref_unlocked(&bo->fence);
987                         return 0;
988                 }
989                 return 1;
990         }
991         return 0;
992 }
993
994 static int drm_bo_read_cached(struct drm_buffer_object * bo)
995 {
996         int ret = 0;
997
998         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
999         if (bo->mem.mm_node)
1000                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1001         return ret;
1002 }
1003
1004 /*
1005  * Wait until a buffer is unmapped.
1006  */
1007
1008 static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
1009 {
1010         int ret = 0;
1011
1012         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1013                 return -EBUSY;
1014
1015         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1016                     atomic_read(&bo->mapped) == -1);
1017
1018         if (ret == -EINTR)
1019                 ret = -EAGAIN;
1020
1021         return ret;
1022 }
1023
1024 static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
1025 {
1026         int ret;
1027
1028         mutex_lock(&bo->mutex);
1029         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1030         mutex_unlock(&bo->mutex);
1031         return ret;
1032 }
1033
1034 /*
1035  * Wait until a buffer that is scheduled to be fenced moves off the unfenced list.
1036  * Until then, we cannot really do anything with it except delete it.
1037  * The unfenced list is a PITA, and the operations of
1038  * 1) validating,
1039  * 2) submitting commands, and
1040  * 3) fencing
1041  * should really be one atomic operation.
1042  * We now "solve" this problem by keeping
1043  * the buffer "unfenced" after validating, but before fencing.
1044  */
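/*
 * In practice the flow is therefore roughly: validate the buffers (which
 * parks them on bm->unfenced with _DRM_BO_FLAG_UNFENCED set), submit the
 * commands that use them, and finally call drm_fence_buffer_objects() to
 * attach a fence and put them back on their LRU lists. Other paths use
 * drm_bo_wait_unfenced() below to block until that last step has happened.
 */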
1045
1046 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
1047                                 int eagain_if_wait)
1048 {
1049         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1050
1051         if (ret && no_wait)
1052                 return -EBUSY;
1053         else if (!ret)
1054                 return 0;
1055
1056         ret = 0;
1057         mutex_unlock(&bo->mutex);
1058         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1059                     !drm_bo_check_unfenced(bo));
1060         mutex_lock(&bo->mutex);
1061         if (ret == -EINTR)
1062                 return -EAGAIN;
1063         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1064         if (ret) {
1065                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1066                 return -EBUSY;
1067         }
1068         if (eagain_if_wait)
1069                 return -EAGAIN;
1070
1071         return 0;
1072 }
1073
1074 /*
1075  * Fill in the ioctl reply argument with buffer info.
1076  * Bo locked.
1077  */
1078
1079 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
1080                                 struct drm_bo_info_rep *rep)
1081 {
1082         rep->handle = bo->base.hash.key;
1083         rep->flags = bo->mem.flags;
1084         rep->size = bo->mem.num_pages * PAGE_SIZE;
1085         rep->offset = bo->offset;
1086         rep->arg_handle = bo->map_list.user_token;
1087         rep->mask = bo->mem.mask;
1088         rep->buffer_start = bo->buffer_start;
1089         rep->fence_flags = bo->fence_type;
1090         rep->rep_flags = 0;
1091         rep->page_alignment = bo->mem.page_alignment;
1092
1093         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1094                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1095                                 DRM_BO_REP_BUSY);
1096         }
1097 }
1098
1099 /*
1100  * Wait for buffer idle and register that we've mapped the buffer.
1101  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1102  * so that if the client dies, the mapping is automatically
1103  * unregistered.
1104  */
1105
1106 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1107                                  uint32_t map_flags, unsigned hint,
1108                                  struct drm_bo_info_rep *rep)
1109 {
1110         struct drm_buffer_object *bo;
1111         struct drm_device *dev = file_priv->head->dev;
1112         int ret = 0;
1113         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1114
1115         mutex_lock(&dev->struct_mutex);
1116         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1117         mutex_unlock(&dev->struct_mutex);
1118
1119         if (!bo)
1120                 return -EINVAL;
1121
1122         mutex_lock(&bo->mutex);
1123         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1124                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1125                 if (ret)
1126                         goto out;
1127         }
1128
1129         /*
1130          * If this returns true, we are currently unmapped.
1131          * We need to do this test, because unmapping can
1132          * be done without the bo->mutex held.
1133          */
1134
1135         while (1) {
1136                 if (atomic_inc_and_test(&bo->mapped)) {
1137                         if (no_wait && drm_bo_busy(bo)) {
1138                                 atomic_dec(&bo->mapped);
1139                                 ret = -EBUSY;
1140                                 goto out;
1141                         }
1142                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1143                         if (ret) {
1144                                 atomic_dec(&bo->mapped);
1145                                 goto out;
1146                         }
1147
1148                         if ((map_flags & DRM_BO_FLAG_READ) &&
1149                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1150                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1151                                 drm_bo_read_cached(bo);
1152                         }
1153                         break;
1154                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1155                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1156                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1157
1158                         /*
1159                          * We are already mapped with different flags.
1160                          * We need to wait for unmap.
1161                          */
1162
1163                         ret = drm_bo_wait_unmapped(bo, no_wait);
1164                         if (ret)
1165                                 goto out;
1166
1167                         continue;
1168                 }
1169                 break;
1170         }
1171
1172         mutex_lock(&dev->struct_mutex);
1173         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1174         mutex_unlock(&dev->struct_mutex);
1175         if (ret) {
1176                 if (atomic_add_negative(-1, &bo->mapped))
1177                         DRM_WAKEUP(&bo->event_queue);
1178
1179         } else
1180                 drm_bo_fill_rep_arg(bo, rep);
1181       out:
1182         mutex_unlock(&bo->mutex);
1183         drm_bo_usage_deref_unlocked(&bo);
1184         return ret;
1185 }
1186
1187 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1188 {
1189         struct drm_device *dev = file_priv->head->dev;
1190         struct drm_buffer_object *bo;
1191         struct drm_ref_object *ro;
1192         int ret = 0;
1193
1194         mutex_lock(&dev->struct_mutex);
1195
1196         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1197         if (!bo) {
1198                 ret = -EINVAL;
1199                 goto out;
1200         }
1201
1202         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1203         if (!ro) {
1204                 ret = -EINVAL;
1205                 goto out;
1206         }
1207
1208         drm_remove_ref_object(file_priv, ro);
1209         drm_bo_usage_deref_locked(&bo);
1210       out:
1211         mutex_unlock(&dev->struct_mutex);
1212         return ret;
1213 }
1214
1215 /*
1216  * Call dev->struct_mutex locked.
1217  */
1218
1219 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1220                                          struct drm_user_object * uo,
1221                                          enum drm_ref_type action)
1222 {
1223         struct drm_buffer_object *bo =
1224             drm_user_object_entry(uo, struct drm_buffer_object, base);
1225
1226         /*
1227          * We DON'T want to take the bo->mutex here, because the waiter holds
1228          * it while waiting for the buffer to become unmapped.
1229          */
1230
1231         BUG_ON(action != _DRM_REF_TYPE1);
1232
1233         if (atomic_add_negative(-1, &bo->mapped))
1234                 DRM_WAKEUP(&bo->event_queue);
1235 }
1236
1237 /*
1238  * bo->mutex locked.
1239  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1240  */
1241
1242 int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
1243                        int no_wait, int move_unfenced)
1244 {
1245         struct drm_device *dev = bo->dev;
1246         struct drm_buffer_manager *bm = &dev->bm;
1247         int ret = 0;
1248         struct drm_bo_mem_reg mem;
1249         /*
1250          * Flush outstanding fences.
1251          */
1252
1253         drm_bo_busy(bo);
1254
1255         /*
1256          * Wait for outstanding fences.
1257          */
1258
1259         ret = drm_bo_wait(bo, 0, 0, no_wait);
1260         if (ret)
1261                 return ret;
1262
1263         mem.num_pages = bo->mem.num_pages;
1264         mem.size = mem.num_pages << PAGE_SHIFT;
1265         mem.mask = new_mem_flags;
1266         mem.page_alignment = bo->mem.page_alignment;
1267
1268         mutex_lock(&bm->evict_mutex);
1269         mutex_lock(&dev->struct_mutex);
1270         list_del(&bo->lru);
1271         list_add_tail(&bo->lru, &bm->unfenced);
1272         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1273                         _DRM_BO_FLAG_UNFENCED);
1274         mutex_unlock(&dev->struct_mutex);
1275
1276         /*
1277          * Determine where to move the buffer.
1278          */
1279         ret = drm_bo_mem_space(bo, &mem, no_wait);
1280         if (ret)
1281                 goto out_unlock;
1282
1283         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1284
1285  out_unlock:
1286         if (ret || !move_unfenced) {
1287                 mutex_lock(&dev->struct_mutex);
1288                 if (mem.mm_node) {
1289                         if (mem.mm_node != bo->pinned_node)
1290                                 drm_mm_put_block(mem.mm_node);
1291                         mem.mm_node = NULL;
1292                 }
1293                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1294                 DRM_WAKEUP(&bo->event_queue);
1295                 list_del(&bo->lru);
1296                 drm_bo_add_to_lru(bo);
1297                 mutex_unlock(&dev->struct_mutex);
1298         }
1299
1300         mutex_unlock(&bm->evict_mutex);
1301         return ret;
1302 }
1303
1304 static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
1305 {
1306         uint32_t flag_diff = (mem->mask ^ mem->flags);
1307
1308         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1309                 return 0;
1310         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1311             (!(mem->mask & DRM_BO_FLAG_CACHED) ||
1312              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1313           return 0;
1314         }
1315         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1316             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1317              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1318                 return 0;
1319         return 1;
1320 }
1321
1322 static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
1323 {
1324         struct drm_buffer_manager *bm = &dev->bm;
1325         struct drm_mem_type_manager *man;
1326         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1327         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1328         uint32_t i;
1329         int type_ok = 0;
1330         uint32_t mem_type = 0;
1331         uint32_t cur_flags;
1332
1333         if (drm_bo_mem_compat(mem))
1334                 return 0;
1335
1336         BUG_ON(mem->mm_node);
1337
1338         for (i = 0; i < num_prios; ++i) {
1339                 mem_type = prios[i];
1340                 man = &bm->man[mem_type];
1341                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1342                                                &cur_flags);
1343                 if (type_ok)
1344                         break;
1345         }
1346
1347         if (type_ok) {
1348                 mem->mm_node = NULL;
1349                 mem->mem_type = mem_type;
1350                 mem->flags = cur_flags;
1351                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1352                 return 0;
1353         }
1354
1355         DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
1356                   (unsigned long long) mem->mask);
1357         return -EINVAL;
1358 }
1359
1360 /*
1361  * bo locked.
1362  */
1363
1364 static int drm_buffer_object_validate(struct drm_buffer_object * bo,
1365                                       uint32_t fence_class,
1366                                       int move_unfenced, int no_wait)
1367 {
1368         struct drm_device *dev = bo->dev;
1369         struct drm_buffer_manager *bm = &dev->bm;
1370         struct drm_bo_driver *driver = dev->driver->bo_driver;
1371         uint32_t ftype;
1372         int ret;
1373
1374         DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1375                   (unsigned long long) bo->mem.mask,
1376                   (unsigned long long) bo->mem.flags);
1377
1378         ret = driver->fence_type(bo, &ftype);
1379
1380         if (ret) {
1381                 DRM_ERROR("Driver did not support given buffer permissions\n");
1382                 return ret;
1383         }
1384
1385         /*
1386          * We're switching command submission mechanism,
1387          * or cannot simply rely on the hardware serializing for us.
1388          *
1389          * Wait for buffer idle.
1390          */
1391
1392         if ((fence_class != bo->fence_class) ||
1393             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1394
1395                 ret = drm_bo_wait(bo, 0, 0, no_wait);
1396
1397                 if (ret)
1398                         return ret;
1399
1400         }
1401
1402         bo->fence_class = fence_class;
1403         bo->fence_type = ftype;
1404         ret = drm_bo_wait_unmapped(bo, no_wait);
1405         if (ret)
1406                 return ret;
1407
1408         if (bo->type == drm_bo_type_fake) {
1409                 ret = drm_bo_check_fake(dev, &bo->mem);
1410                 if (ret)
1411                         return ret;
1412         }
1413
1414         /*
1415          * Check whether we need to move buffer.
1416          */
1417
1418         if (!drm_bo_mem_compat(&bo->mem)) {
1419                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1420                                          move_unfenced);
1421                 if (ret) {
1422                         if (ret != -EAGAIN)
1423                                 DRM_ERROR("Failed moving buffer.\n");
1424                         return ret;
1425                 }
1426         }
1427
1428         /*
1429          * Pinned buffers.
1430          */
1431
1432         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1433                 bo->pinned_mem_type = bo->mem.mem_type;
1434                 mutex_lock(&dev->struct_mutex);
1435                 list_del_init(&bo->pinned_lru);
1436                 drm_bo_add_to_pinned_lru(bo);
1437
1438                 if (bo->pinned_node != bo->mem.mm_node) {
1439                         if (bo->pinned_node != NULL)
1440                                 drm_mm_put_block(bo->pinned_node);
1441                         bo->pinned_node = bo->mem.mm_node;
1442                 }
1443
1444                 mutex_unlock(&dev->struct_mutex);
1445
1446         } else if (bo->pinned_node != NULL) {
1447
1448                 mutex_lock(&dev->struct_mutex);
1449
1450                 if (bo->pinned_node != bo->mem.mm_node)
1451                         drm_mm_put_block(bo->pinned_node);
1452
1453                 list_del_init(&bo->pinned_lru);
1454                 bo->pinned_node = NULL;
1455                 mutex_unlock(&dev->struct_mutex);
1456
1457         }
1458
1459         /*
1460          * We might need to add a TTM.
1461          */
1462
1463         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1464                 ret = drm_bo_add_ttm(bo);
1465                 if (ret)
1466                         return ret;
1467         }
1468         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1469
1470         /*
1471          * Finally, adjust lru to be sure.
1472          */
1473
1474         mutex_lock(&dev->struct_mutex);
1475         list_del(&bo->lru);
1476         if (move_unfenced) {
1477                 list_add_tail(&bo->lru, &bm->unfenced);
1478                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1479                                 _DRM_BO_FLAG_UNFENCED);
1480         } else {
1481                 drm_bo_add_to_lru(bo);
1482                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1483                         DRM_WAKEUP(&bo->event_queue);
1484                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1485                                         _DRM_BO_FLAG_UNFENCED);
1486                 }
1487         }
1488         mutex_unlock(&dev->struct_mutex);
1489
1490         return 0;
1491 }
1492
1493 static int drm_bo_handle_validate(struct drm_file *file_priv,
1494                                   uint32_t handle,
1495                                   uint32_t fence_class,
1496                                   uint64_t flags, uint64_t mask, uint32_t hint,
1497                                   struct drm_bo_info_rep *rep)
1498 {
1499         struct drm_device *dev = file_priv->head->dev;
1500         struct drm_buffer_object *bo;
1501         int ret;
1502         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1503
1504         mutex_lock(&dev->struct_mutex);
1505         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1506         mutex_unlock(&dev->struct_mutex);
1507         if (!bo) {
1508                 return -EINVAL;
1509         }
1510
1511         mutex_lock(&bo->mutex);
1512         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1513
1514         if (ret)
1515                 goto out;
1516
1517         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1518         ret = drm_bo_new_mask(bo, flags, hint);
1519         if (ret)
1520                 goto out;
1521
1522         ret =
1523             drm_buffer_object_validate(bo, fence_class,
1524                                        !(hint & DRM_BO_HINT_DONT_FENCE),
1525                                        no_wait);
1526         drm_bo_fill_rep_arg(bo, rep);
1527
1528       out:
1529
1530         mutex_unlock(&bo->mutex);
1531
1532         drm_bo_usage_deref_unlocked(&bo);
1533         return ret;
1534 }
1535
1536 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1537                               struct drm_bo_info_rep *rep)
1538 {
1539         struct drm_device *dev = file_priv->head->dev;
1540         struct drm_buffer_object *bo;
1541
1542         mutex_lock(&dev->struct_mutex);
1543         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1544         mutex_unlock(&dev->struct_mutex);
1545
1546         if (!bo) {
1547                 return -EINVAL;
1548         }
1549         mutex_lock(&bo->mutex);
1550         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1551                 (void)drm_bo_busy(bo);
1552         drm_bo_fill_rep_arg(bo, rep);
1553         mutex_unlock(&bo->mutex);
1554         drm_bo_usage_deref_unlocked(&bo);
1555         return 0;
1556 }
1557
1558 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1559                               uint32_t hint,
1560                               struct drm_bo_info_rep *rep)
1561 {
1562         struct drm_device *dev = file_priv->head->dev;
1563         struct drm_buffer_object *bo;
1564         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1565         int ret;
1566
1567         mutex_lock(&dev->struct_mutex);
1568         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1569         mutex_unlock(&dev->struct_mutex);
1570
1571         if (!bo) {
1572                 return -EINVAL;
1573         }
1574
1575         mutex_lock(&bo->mutex);
1576         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1577         if (ret)
1578                 goto out;
1579         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1580         if (ret)
1581                 goto out;
1582
1583         drm_bo_fill_rep_arg(bo, rep);
1584
1585       out:
1586         mutex_unlock(&bo->mutex);
1587         drm_bo_usage_deref_unlocked(&bo);
1588         return ret;
1589 }
1590
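/*
 * Create and validate a new buffer object of @size bytes (rounded up to
 * whole pages). For drm_bo_type_fake buffers, @buffer_start is taken as
 * the fixed device offset; otherwise it is recorded in bo->buffer_start.
 * On success the new object is returned in @buf_obj with one usage
 * reference held.
 */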
1591 int drm_buffer_object_create(struct drm_device *dev,
1592                              unsigned long size,
1593                              enum drm_bo_type type,
1594                              uint64_t mask,
1595                              uint32_t hint,
1596                              uint32_t page_alignment,
1597                              unsigned long buffer_start,
1598                              struct drm_buffer_object ** buf_obj)
1599 {
1600         struct drm_buffer_manager *bm = &dev->bm;
1601         struct drm_buffer_object *bo;
1602         int ret = 0;
1603         unsigned long num_pages;
1604
1605         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1606                 DRM_ERROR("Invalid buffer object start.\n");
1607                 return -EINVAL;
1608         }
1609         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1610         if (num_pages == 0) {
1611                 DRM_ERROR("Illegal buffer object size.\n");
1612                 return -EINVAL;
1613         }
1614
1615         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1616
1617         if (!bo)
1618                 return -ENOMEM;
1619
1620         mutex_init(&bo->mutex);
1621         mutex_lock(&bo->mutex);
1622
1623         atomic_set(&bo->usage, 1);
1624         atomic_set(&bo->mapped, -1);
1625         DRM_INIT_WAITQUEUE(&bo->event_queue);
1626         INIT_LIST_HEAD(&bo->lru);
1627         INIT_LIST_HEAD(&bo->pinned_lru);
1628         INIT_LIST_HEAD(&bo->ddestroy);
1629 #ifdef DRM_ODD_MM_COMPAT
1630         INIT_LIST_HEAD(&bo->p_mm_list);
1631         INIT_LIST_HEAD(&bo->vma_list);
1632 #endif
1633         bo->dev = dev;
1634         bo->type = type;
1635         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1636         bo->mem.num_pages = num_pages;
1637         bo->mem.mm_node = NULL;
1638         bo->mem.page_alignment = page_alignment;
1639         if (bo->type == drm_bo_type_fake) {
1640                 bo->offset = buffer_start;
1641                 bo->buffer_start = 0;
1642         } else {
1643                 bo->buffer_start = buffer_start;
1644         }
1645         bo->priv_flags = 0;
1646         bo->mem.flags = 0ULL;
1647         bo->mem.mask = 0ULL;
1648         atomic_inc(&bm->count);
1649         ret = drm_bo_new_mask(bo, mask, hint);
1650
1651         if (ret)
1652                 goto out_err;
1653
1654         if (bo->type == drm_bo_type_dc) {
1655                 mutex_lock(&dev->struct_mutex);
1656                 ret = drm_bo_setup_vm_locked(bo);
1657                 mutex_unlock(&dev->struct_mutex);
1658                 if (ret)
1659                         goto out_err;
1660         }
1661         ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1662         if (ret)
1663                 goto out_err;
1664
1665         mutex_unlock(&bo->mutex);
1666         *buf_obj = bo;
1667         return 0;
1668
1669       out_err:
1670         mutex_unlock(&bo->mutex);
1671
1672         drm_bo_usage_deref_unlocked(&bo);
1673         return ret;
1674 }
1675
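     /*
      * Illustrative sketch only (not part of the original file): a
      * driver-internal caller wanting a page-aligned, CPU read/write buffer
      * in local memory might use drm_buffer_object_create() roughly like
      * this. The size and flag choices below are assumptions for the
      * example, not requirements.
      *
      *     struct drm_buffer_object *bo;
      *     int ret;
      *
      *     ret = drm_buffer_object_create(dev, 16 * PAGE_SIZE, drm_bo_type_dc,
      *                                    DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
      *                                    DRM_BO_FLAG_MEM_LOCAL,
      *                                    DRM_BO_HINT_DONT_BLOCK, 0, 0, &bo);
      *     if (ret)
      *             return ret;
      *     ...
      *     drm_bo_usage_deref_unlocked(&bo);
      */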
1676 static int drm_bo_add_user_object(struct drm_file *file_priv,
1677                                   struct drm_buffer_object *bo,
1678                                   int shareable)
1679 {
1680         struct drm_device *dev = file_priv->head->dev;
1681         int ret;
1682
1683         mutex_lock(&dev->struct_mutex);
1684         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1685         if (ret)
1686                 goto out;
1687
1688         bo->base.remove = drm_bo_base_deref_locked;
1689         bo->base.type = drm_buffer_type;
1690         bo->base.ref_struct_locked = NULL;
1691         bo->base.unref = drm_buffer_user_object_unmap;
1692
1693       out:
1694         mutex_unlock(&dev->struct_mutex);
1695         return ret;
1696 }
1697
1698 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
1699 {
1700         LOCK_TEST_WITH_RETURN(dev, file_priv);
1701         return 0;
1702 }
1703
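     /*
      * The operation ioctl walks a user-space chain of struct drm_bo_op_arg.
      * The first argument arrives as the ioctl data itself; whenever an
      * argument's "next" field is non-zero it is the user-space address of
      * the next argument, which is copied in with copy_from_user(). Results
      * go back through the "rep" union member, and "handled" marks arguments
      * already processed so that a restarted ioctl can skip them.
      */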
1704 int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1705 {
1706         struct drm_bo_op_arg curarg;
1707         struct drm_bo_op_arg *arg = data;
1708         struct drm_bo_op_req *req = &arg->d.req;
1709         struct drm_bo_info_rep rep = {0}; /* zeroed so error paths never copy stack garbage back to user space */
1710         unsigned long next = 0;
1711         void __user *curuserarg = NULL;
1712         int ret;
1713
1714         if (!dev->bm.initialized) {
1715                 DRM_ERROR("Buffer object manager is not initialized.\n");
1716                 return -EINVAL;
1717         }
1718
1719         do {
1720                 if (next != 0) {
1721                         curuserarg = (void __user *)next;
1722                         if (copy_from_user(&curarg, curuserarg,
1723                                            sizeof(curarg)) != 0)
1724                                 return -EFAULT;
1725                         arg = &curarg;
1726                 }
1727
1728                 if (arg->handled) {
1729                         next = arg->next;
1730                         continue;
1731                 }
1732                 req = &arg->d.req;
1733                 ret = 0;
1734                 switch (req->op) {
1735                 case drm_bo_validate:
1736                         ret = drm_bo_lock_test(dev, file_priv);
1737                         if (ret)
1738                                 break;
1739                         ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
1740                                                      req->bo_req.fence_class,
1741                                                      req->bo_req.flags,
1742                                                      req->bo_req.mask,
1743                                                      req->bo_req.hint,
1744                                                      &rep);
1745                         break;
1746                 case drm_bo_fence:
1747                         ret = -EINVAL;
1748                         DRM_ERROR("Function is not implemented yet.\n");
1749                         break;
1750                 case drm_bo_ref_fence:
1751                         ret = -EINVAL;
1752                         DRM_ERROR("Function is not implemented yet.\n");
1753                         break;
1754                 default:
1755                         ret = -EINVAL;
1756                 }
1757                 next = arg->next;
1758
1759                 /*
1760                  * If a signal interrupted us, return -EAGAIN so the ioctl can be restarted.
1761                  */
1762
1763                 if (ret == -EAGAIN)
1764                         return -EAGAIN;
1765
1766                 arg->handled = 1;
1767                 arg->d.rep.ret = ret;
1768                 arg->d.rep.bo_info = rep;
1769                 if (arg != data) {
1770                         if (copy_to_user(curuserarg, &curarg,
1771                                          sizeof(curarg)) != 0)
1772                                 return -EFAULT;
1773                 }
1774         } while (next != 0);
1775         return 0;
1776 }
1777
1778 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1779 {
1780         struct drm_bo_create_arg *arg = data;
1781         struct drm_bo_create_req *req = &arg->d.req;
1782         struct drm_bo_info_rep *rep = &arg->d.rep;
1783         struct drm_buffer_object *entry;
1784         int ret = 0;
1785
1786         if (!dev->bm.initialized) {
1787                 DRM_ERROR("Buffer object manager is not initialized.\n");
1788                 return -EINVAL;
1789         }
1790
1791         ret = drm_bo_lock_test(dev, file_priv);
1792         if (ret)
1793                 goto out;
1794
1795         ret = drm_buffer_object_create(file_priv->head->dev,
1796                                        req->size, req->type, req->mask,
1797                                        req->hint, req->page_alignment,
1798                                        req->buffer_start, &entry);
1799         if (ret)
1800                 goto out;
1801
1802         ret = drm_bo_add_user_object(file_priv, entry,
1803                                      req->mask & DRM_BO_FLAG_SHAREABLE);
1804         if (ret) {
1805                 drm_bo_usage_deref_unlocked(&entry);
1806                 goto out;
1807         }
1808
1809         mutex_lock(&entry->mutex);
1810         drm_bo_fill_rep_arg(entry, rep);
1811         mutex_unlock(&entry->mutex);
1812
1813 out:
1814         return ret;
1815 }
1816
1817
1818 int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1819 {
1820         struct drm_bo_handle_arg *arg = data;
1821         struct drm_user_object *uo;
1822         int ret = 0;
1823
1824         if (!dev->bm.initialized) {
1825                 DRM_ERROR("Buffer object manager is not initialized.\n");
1826                 return -EINVAL;
1827         }
1828
1829         mutex_lock(&dev->struct_mutex);
1830         uo = drm_lookup_user_object(file_priv, arg->handle);
1831         if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
1832                 mutex_unlock(&dev->struct_mutex);
1833                 return -EINVAL;
1834         }
1835         ret = drm_remove_user_object(file_priv, uo);
1836         mutex_unlock(&dev->struct_mutex);
1837
1838         return ret;
1839 }
1840
1841 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1842 {
1843         struct drm_bo_map_wait_idle_arg *arg = data;
1844         struct drm_bo_info_req *req = &arg->d.req;
1845         struct drm_bo_info_rep *rep = &arg->d.rep;
1846         int ret;
1847         if (!dev->bm.initialized) {
1848                 DRM_ERROR("Buffer object manager is not initialized.\n");
1849                 return -EINVAL;
1850         }
1851
1852         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1853                                     req->hint, rep);
1854         if (ret)
1855                 return ret;
1856
1857         return 0;
1858 }
1859
1860 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1861 {
1862         struct drm_bo_handle_arg *arg = data;
1863         int ret;
1864         if (!dev->bm.initialized) {
1865                 DRM_ERROR("Buffer object manager is not initialized.\n");
1866                 return -EINVAL;
1867         }
1868
1869         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1870         return ret;
1871 }
1872
1873
1874 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1875 {
1876         struct drm_bo_reference_info_arg *arg = data;
1877         struct drm_bo_handle_arg *req = &arg->d.req;
1878         struct drm_bo_info_rep *rep = &arg->d.rep;
1879         struct drm_user_object *uo;
1880         int ret;
1881
1882         if (!dev->bm.initialized) {
1883                 DRM_ERROR("Buffer object manager is not initialized.\n");
1884                 return -EINVAL;
1885         }
1886
1887         ret = drm_user_object_ref(file_priv, req->handle,
1888                                   drm_buffer_type, &uo);
1889         if (ret)
1890                 return ret;
1891
1892         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1893         if (ret)
1894                 return ret;
1895
1896         return 0;
1897 }
1898
1899 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1900 {
1901         struct drm_bo_handle_arg *arg = data;
1902         int ret = 0;
1903
1904         if (!dev->bm.initialized) {
1905                 DRM_ERROR("Buffer object manager is not initialized.\n");
1906                 return -EINVAL;
1907         }
1908
1909         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1910         return ret;
1911 }
1912
1913 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1914 {
1915         struct drm_bo_reference_info_arg *arg = data;
1916         struct drm_bo_handle_arg *req = &arg->d.req;
1917         struct drm_bo_info_rep *rep = &arg->d.rep;
1918         int ret;
1919
1920         if (!dev->bm.initialized) {
1921                 DRM_ERROR("Buffer object manager is not initialized.\n");
1922                 return -EINVAL;
1923         }
1924
1925         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1926         if (ret)
1927                 return ret;
1928
1929         return 0;
1930 }
1931
1932 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1933 {
1934         struct drm_bo_map_wait_idle_arg *arg = data;
1935         struct drm_bo_info_req *req = &arg->d.req;
1936         struct drm_bo_info_rep *rep = &arg->d.rep;
1937         int ret;
1938         if (!dev->bm.initialized) {
1939                 DRM_ERROR("Buffer object manager is not initialized.\n");
1940                 return -EINVAL;
1941         }
1942
1943         ret = drm_bo_handle_wait(file_priv, req->handle,
1944                                  req->hint, rep);
1945         if (ret)
1946                 return ret;
1947
1948         return 0;
1949 }
1950
1951
1952
1953 /**
1954  * Clean the unfenced list and put the buffers back on the regular LRU.
1955  * This is part of the memory manager cleanup and should only be
1956  * called with the DRI lock held.
1957  * Call with dev->struct_mutex locked.
1958  */
1959
1960 static void drm_bo_clean_unfenced(struct drm_device *dev)
1961 {
1962         struct drm_buffer_manager *bm  = &dev->bm;
1963         struct list_head *head, *list;
1964         struct drm_buffer_object *entry;
1965
1966         head = &bm->unfenced;
1967
1968         list = head->next;
1969         while(list != head) {
1970                 prefetch(list->next);
1971                 entry = list_entry(list, struct drm_buffer_object, lru);
1972
1973                 atomic_inc(&entry->usage);
1974                 mutex_unlock(&dev->struct_mutex);
1975                 mutex_lock(&entry->mutex);
1976                 mutex_lock(&dev->struct_mutex);
1977
1978                 list_del(&entry->lru);
1979                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1980                 drm_bo_add_to_lru(entry);
1981                 mutex_unlock(&entry->mutex);
1982                 list = head->next;
1983         }
1984 }
1985
1986 static int drm_bo_leave_list(struct drm_buffer_object * bo,
1987                              uint32_t mem_type,
1988                              int free_pinned, int allow_errors)
1989 {
1990         struct drm_device *dev = bo->dev;
1991         int ret = 0;
1992
1993         mutex_lock(&bo->mutex);
1994
1995         ret = drm_bo_expire_fence(bo, allow_errors);
1996         if (ret)
1997                 goto out;
1998
1999         if (free_pinned) {
2000                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2001                 mutex_lock(&dev->struct_mutex);
2002                 list_del_init(&bo->pinned_lru);
2003                 if (bo->pinned_node == bo->mem.mm_node)
2004                         bo->pinned_node = NULL;
2005                 if (bo->pinned_node != NULL) {
2006                         drm_mm_put_block(bo->pinned_node);
2007                         bo->pinned_node = NULL;
2008                 }
2009                 mutex_unlock(&dev->struct_mutex);
2010         }
2011
2012         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2013                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer is still present at "
2014                           "cleanup. Removing the flag and evicting.\n");
2015                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2016                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
2017         }
2018
2019         if (bo->mem.mem_type == mem_type)
2020                 ret = drm_bo_evict(bo, mem_type, 0);
2021
2022         if (ret) {
2023                 if (allow_errors) {
2024                         goto out;
2025                 } else {
2026                         ret = 0;
2027                         DRM_ERROR("Cleanup eviction failed\n");
2028                 }
2029         }
2030
2031       out:
2032         mutex_unlock(&bo->mutex);
2033         return ret;
2034 }
2035
2036
2037 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2038                                          int pinned_list)
2039 {
2040         if (pinned_list)
2041                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2042         else
2043                 return list_entry(list, struct drm_buffer_object, lru);
2044 }
2045
2046 /*
2047  * dev->struct_mutex locked.
2048  */
2049
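     /*
      * Force every buffer on "head" off the given memory type.
      * "free_pinned" also releases any pinned memory node, "allow_errors"
      * propagates eviction failures to the caller instead of merely logging
      * them, and "pinned_list" selects whether entries are linked through
      * their pinned_lru or their regular lru list head.
      */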
2050 static int drm_bo_force_list_clean(struct drm_device * dev,
2051                                    struct list_head *head,
2052                                    unsigned mem_type,
2053                                    int free_pinned,
2054                                    int allow_errors,
2055                                    int pinned_list)
2056 {
2057         struct list_head *list, *next, *prev;
2058         struct drm_buffer_object *entry, *nentry;
2059         int ret;
2060         int do_restart;
2061
2062         /*
2063          * The list traversal is a bit odd here, because an item may
2064          * disappear from the list when we release the struct_mutex or
2065          * when we decrease the usage count. Also we're not guaranteed
2066          * to drain pinned lists, so we can't always restart.
2067          */
2068
2069 restart:
2070         nentry = NULL;
2071         list_for_each_safe(list, next, head) {
2072                 prev = list->prev;
2073
2074                 entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
2075                 atomic_inc(&entry->usage);
2076                 if (nentry) {
2077                         atomic_dec(&nentry->usage);
2078                         nentry = NULL;
2079                 }
2080
2081                 /*
2082                  * Protect the next item from destruction, so we can check
2083                  * its list pointers later on.
2084                  */
2085
2086                 if (next != head) {
2087                         nentry = drm_bo_entry(next, pinned_list);
2088                         atomic_inc(&nentry->usage);
2089                 }
2090                 mutex_unlock(&dev->struct_mutex);
2091
2092                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2093                                         allow_errors);
2094                 mutex_lock(&dev->struct_mutex);
2095
2096                 drm_bo_usage_deref_locked(&entry);
2097                 if (ret)
2098                         return ret;
2099
2100                 /*
2101                  * Has the next item disappeared from the list?
2102                  */
2103
2104                 do_restart = ((next->prev != list) && (next->prev != prev));
2105
2106                 if (nentry != NULL && do_restart)
2107                         drm_bo_usage_deref_locked(&nentry);
2108
2109                 if (do_restart)
2110                         goto restart;
2111         }
2112         return 0;
2113 }
2114
2115 int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
2116 {
2117         struct drm_buffer_manager *bm = &dev->bm;
2118         struct drm_mem_type_manager *man = &bm->man[mem_type];
2119         int ret = -EINVAL;
2120
2121         if (mem_type >= DRM_BO_MEM_TYPES) {
2122                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2123                 return ret;
2124         }
2125
2126         if (!man->has_type) {
2127                 DRM_ERROR("Trying to take down uninitialized "
2128                           "memory manager type\n");
2129                 return ret;
2130         }
2131         man->use_type = 0;
2132         man->has_type = 0;
2133
2134         ret = 0;
2135         if (mem_type > 0) {
2136
2137                 drm_bo_clean_unfenced(dev);
2138                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2139                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2140
2141                 if (drm_mm_clean(&man->manager)) {
2142                         drm_mm_takedown(&man->manager);
2143                 } else {
2144                         ret = -EBUSY;
2145                 }
2146         }
2147
2148         return ret;
2149 }
2150
2151 /**
2152  * Evict all buffers of a particular mem_type, but leave memory manager
2153  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2154  * point since we have the hardware lock.
2155  */
2156
2157 static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
2158 {
2159         int ret;
2160         struct drm_buffer_manager *bm = &dev->bm;
2161         struct drm_mem_type_manager *man = &bm->man[mem_type];
2162
2163         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2164                 DRM_ERROR("Illegal memory manager type %u.\n", mem_type);
2165                 return -EINVAL;
2166         }
2167
2168         if (!man->has_type) {
2169                 DRM_ERROR("Memory type %u has not been initialized.\n",
2170                           mem_type);
2171                 return 0;
2172         }
2173
2174         drm_bo_clean_unfenced(dev);
2175         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2176         if (ret)
2177                 return ret;
2178         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2179
2180         return ret;
2181 }
2182
2183 int drm_bo_init_mm(struct drm_device * dev,
2184                    unsigned type,
2185                    unsigned long p_offset, unsigned long p_size)
2186 {
2187         struct drm_buffer_manager *bm = &dev->bm;
2188         int ret = -EINVAL;
2189         struct drm_mem_type_manager *man;
2190
2191         if (type >= DRM_BO_MEM_TYPES) {
2192                 DRM_ERROR("Illegal memory type %d\n", type);
2193                 return ret;
2194         }
2195
2196         man = &bm->man[type];
2197         if (man->has_type) {
2198                 DRM_ERROR("Memory manager already initialized for type %d\n",
2199                           type);
2200                 return ret;
2201         }
2202
2203         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2204         if (ret)
2205                 return ret;
2206
2207         ret = 0;
2208         if (type != DRM_BO_MEM_LOCAL) {
2209                 if (!p_size) {
2210                         DRM_ERROR("Zero size memory manager type %d\n", type);
2211                         return ret;
2212                 }
2213                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2214                 if (ret)
2215                         return ret;
2216         }
2217         man->has_type = 1;
2218         man->use_type = 1;
2219
2220         INIT_LIST_HEAD(&man->lru);
2221         INIT_LIST_HEAD(&man->pinned);
2222
2223         return 0;
2224 }
2225 EXPORT_SYMBOL(drm_bo_init_mm);
2226
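     /*
      * Sketch (assumption, not taken from this file): a driver would
      * typically initialize its non-local memory types either from its own
      * setup code or via the memory-manager init ioctl below, for example
      *
      *     ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, aperture_pages);
      *
      * where aperture_pages is a hypothetical driver-specific size in pages.
      */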
2227 /*
2228  * This is called from lastclose, so we don't need to bother about
2229  * any clients still running when we set the initialized flag to zero.
2230  */
2231
2232 int drm_bo_driver_finish(struct drm_device * dev)
2233 {
2234         struct drm_buffer_manager *bm = &dev->bm;
2235         int ret = 0;
2236         unsigned i = DRM_BO_MEM_TYPES;
2237         struct drm_mem_type_manager *man;
2238
2239         mutex_lock(&dev->bm.init_mutex);
2240         mutex_lock(&dev->struct_mutex);
2241
2242         if (!bm->initialized)
2243                 goto out;
2244         bm->initialized = 0;
2245
2246         while (i--) {
2247                 man = &bm->man[i];
2248                 if (man->has_type) {
2249                         man->use_type = 0;
2250                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2251                                 ret = -EBUSY;
2252                                 DRM_ERROR("DRM memory manager type %d "
2253                                           "is not clean.\n", i);
2254                         }
2255                         man->has_type = 0;
2256                 }
2257         }
2258         mutex_unlock(&dev->struct_mutex);
2259
2260         if (!cancel_delayed_work(&bm->wq)) {
2261                 flush_scheduled_work();
2262         }
2263         mutex_lock(&dev->struct_mutex);
2264         drm_bo_delayed_delete(dev, 1);
2265         if (list_empty(&bm->ddestroy)) {
2266                 DRM_DEBUG("Delayed destroy list was clean\n");
2267         }
2268         if (list_empty(&bm->man[0].lru)) {
2269                 DRM_DEBUG("Swap list was clean\n");
2270         }
2271         if (list_empty(&bm->man[0].pinned)) {
2272                 DRM_DEBUG("NO_MOVE list was clean\n");
2273         }
2274         if (list_empty(&bm->unfenced)) {
2275                 DRM_DEBUG("Unfenced list was clean\n");
2276         }
2277       out:
2278         mutex_unlock(&dev->struct_mutex);
2279         mutex_unlock(&dev->bm.init_mutex);
2280         return ret;
2281 }
2282
2283 int drm_bo_driver_init(struct drm_device * dev)
2284 {
2285         struct drm_bo_driver *driver = dev->driver->bo_driver;
2286         struct drm_buffer_manager *bm = &dev->bm;
2287         int ret = -EINVAL;
2288
2289         mutex_lock(&dev->bm.init_mutex);
2290         mutex_lock(&dev->struct_mutex);
2291         if (!driver)
2292                 goto out_unlock;
2293
2294         /*
2295          * Initialize the system memory buffer type.
2296          * Other types need to be driver / IOCTL initialized.
2297          */
2298
2299         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2300         if (ret)
2301                 goto out_unlock;
2302
2303 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2304         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2305 #else
2306         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2307 #endif
2308         bm->initialized = 1;
2309         bm->nice_mode = 1;
2310         atomic_set(&bm->count, 0);
2311         bm->cur_pages = 0;
2312         INIT_LIST_HEAD(&bm->unfenced);
2313         INIT_LIST_HEAD(&bm->ddestroy);
2314       out_unlock:
2315         mutex_unlock(&dev->struct_mutex);
2316         mutex_unlock(&dev->bm.init_mutex);
2317         return ret;
2318 }
2319
2320 EXPORT_SYMBOL(drm_bo_driver_init);
2321
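     /*
      * Note (assumption, not stated in this file): a driver using the buffer
      * object manager is expected to call drm_bo_driver_init() once during
      * its load/firstopen path, after dev->driver->bo_driver has been set up,
      * with drm_bo_driver_finish() undoing it from lastclose as noted above.
      */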
2322 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2323 {
2324         struct drm_mm_init_arg *arg = data;
2325         struct drm_buffer_manager *bm = &dev->bm;
2326         struct drm_bo_driver *driver = dev->driver->bo_driver;
2327         int ret;
2328
2329         if (!driver) {
2330                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2331                 return -EINVAL;
2332         }
2333
2334         ret = -EINVAL;
2335         if (arg->magic != DRM_BO_INIT_MAGIC) {
2336                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2337                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2338                 return -EINVAL;
2339         }
2340         if (arg->major != DRM_BO_INIT_MAJOR) {
2341                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2342                           "\tversions don't match. Got %d, expected %d.\n",
2343                           arg->major, DRM_BO_INIT_MAJOR);
2344                 return -EINVAL;
2345         }
2346         if (arg->minor > DRM_BO_INIT_MINOR) {
2347                 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2348                           "\tlibdrm buffer object interface version is %d.%d.\n"
2349                           "\tkernel DRM buffer object interface version is %d.%d.\n",
2350                           arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2351                 return -EINVAL;
2352         }
2353
2354         mutex_lock(&dev->bm.init_mutex);
2355         mutex_lock(&dev->struct_mutex);
2356         if (!bm->initialized) {
2357                 DRM_ERROR("DRM memory manager was not initialized.\n");
2358                 goto out;
2359         }
2360         if (arg->mem_type == 0) {
2361                 DRM_ERROR("System memory buffers already initialized.\n");
2362                 goto out;
2363         }
2364         ret = drm_bo_init_mm(dev, arg->mem_type,
2365                              arg->p_offset, arg->p_size);
2366
2367 out:
2368         mutex_unlock(&dev->struct_mutex);
2369         mutex_unlock(&dev->bm.init_mutex);
2370         if (ret)
2371                 return ret;
2372
2373         return 0;
2374 }
2375
2376 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2377 {
2378         struct drm_mm_type_arg *arg = data;
2379         struct drm_buffer_manager *bm = &dev->bm;
2380         struct drm_bo_driver *driver = dev->driver->bo_driver;
2381         int ret;
2382
2383         if (!driver) {
2384                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2385                 return -EINVAL;
2386         }
2387
2388         LOCK_TEST_WITH_RETURN(dev, file_priv);
2389         mutex_lock(&dev->bm.init_mutex);
2390         mutex_lock(&dev->struct_mutex);
2391         ret = -EINVAL;
2392         if (!bm->initialized) {
2393                 DRM_ERROR("DRM memory manager was not initialized\n");
2394                 goto out;
2395         }
2396         if (arg->mem_type == 0) {
2397                 DRM_ERROR("No takedown for System memory buffers.\n");
2398                 goto out;
2399         }
2400         ret = 0;
2401         if (drm_bo_clean_mm(dev, arg->mem_type)) {
2402                 DRM_ERROR("Memory manager type %d not clean. "
2403                           "Delaying takedown\n", arg->mem_type);
2404         }
2405 out:
2406         mutex_unlock(&dev->struct_mutex);
2407         mutex_unlock(&dev->bm.init_mutex);
2408         if (ret)
2409                 return ret;
2410
2411         return 0;
2412 }
2413
2414 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2415 {
2416         struct drm_mm_type_arg *arg = data;
2417         struct drm_bo_driver *driver = dev->driver->bo_driver;
2418         int ret;
2419
2420         if (!driver) {
2421                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2422                 return -EINVAL;
2423         }
2424
2425         LOCK_TEST_WITH_RETURN(dev, file_priv);
2426         mutex_lock(&dev->bm.init_mutex);
2427         mutex_lock(&dev->struct_mutex);
2428         ret = drm_bo_lock_mm(dev, arg->mem_type);
2429         mutex_unlock(&dev->struct_mutex);
2430         mutex_unlock(&dev->bm.init_mutex);
2431         if (ret)
2432                 return ret;
2433
2434         return 0;
2435 }
2436
2437 int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2438 {
2439         struct drm_bo_driver *driver = dev->driver->bo_driver;
2440         int ret;
2441
2442         if (!driver) {
2443                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2444                 return -EINVAL;
2445         }
2446
2447         LOCK_TEST_WITH_RETURN(dev, file_priv);
2448         mutex_lock(&dev->bm.init_mutex);
2449         mutex_lock(&dev->struct_mutex);
2450         ret = 0;
2451
2452         mutex_unlock(&dev->struct_mutex);
2453         mutex_unlock(&dev->bm.init_mutex);
2454         if (ret)
2455                 return ret;
2456
2457         return 0;
2458 }
2459
2460 /*
2461  * buffer object vm functions.
2462  */
2463
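     /*
      * drm_mem_reg_is_pci - report whether CPU access to a memory region goes
      * through a PCI aperture. Fixed regions (e.g. VRAM) always do; regions
      * that are local, CMA-backed or cached are reached as ordinary system
      * pages and do not.
      */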
2464 int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
2465 {
2466         struct drm_buffer_manager *bm = &dev->bm;
2467         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2468
2469         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2470                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2471                         return 0;
2472
2473                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2474                         return 0;
2475
2476                 if (mem->flags & DRM_BO_FLAG_CACHED)
2477                         return 0;
2478         }
2479         return 1;
2480 }
2481
2482 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2483
2484 /**
2485  * Get the PCI offset for the buffer object memory.
2486  * \param dev The DRM device.
2487  * \param mem The buffer object memory region to query.
2488  * \param bus_base On return the base of the PCI region.
2489  * \param bus_offset On return the byte offset into the PCI region.
2490  * \param bus_size On return the byte size of the buffer object or zero if
2491  *     the buffer object memory is not accessible through a PCI region.
2492  * \return Failure indication.
2493  *
2494  * Returns -EINVAL if the buffer object is currently not mappable.
2495  * Otherwise returns zero.
2496  */
2497
2498 int drm_bo_pci_offset(struct drm_device *dev,
2499                       struct drm_bo_mem_reg *mem,
2500                       unsigned long *bus_base,
2501                       unsigned long *bus_offset, unsigned long *bus_size)
2502 {
2503         struct drm_buffer_manager *bm = &dev->bm;
2504         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2505
2506         *bus_size = 0;
2507         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2508                 return -EINVAL;
2509
2510         if (drm_mem_reg_is_pci(dev, mem)) {
2511                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2512                 *bus_size = mem->num_pages << PAGE_SHIFT;
2513                 *bus_base = man->io_offset;
2514         }
2515
2516         return 0;
2517 }
2518
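     /*
      * Illustrative sketch (assumption): a caller needing a CPU mapping of a
      * PCI-accessible region would typically combine the three outputs, e.g.
      *
      *     if (!drm_bo_pci_offset(dev, &bo->mem, &base, &offset, &size) && size)
      *             virtual = ioremap(base + offset, size);
      *
      * where base, offset, size and virtual are hypothetical local variables.
      */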
2519 /**
2520  * Kill all user-space virtual mappings of this buffer object.
2521  *
2522  * \param bo The buffer object.
2523  *
2524  * Call with bo->mutex held.
2525  */
2526
2527 void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
2528 {
2529         struct drm_device *dev = bo->dev;
2530         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2531         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2532
2533         if (!dev->dev_mapping)
2534                 return;
2535
2536         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2537 }
2538
2539 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
2540 {
2541         struct drm_map_list *list = &bo->map_list;
2542         drm_local_map_t *map;
2543         struct drm_device *dev = bo->dev;
2544
2545         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2546         if (list->user_token) {
2547                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2548                 list->user_token = 0;
2549         }
2550         if (list->file_offset_node) {
2551                 drm_mm_put_block(list->file_offset_node);
2552                 list->file_offset_node = NULL;
2553         }
2554
2555         map = list->map;
2556         if (!map)
2557                 return;
2558
2559         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2560         list->map = NULL;
2561         list->user_token = 0ULL;
2562         drm_bo_usage_deref_locked(&bo);
2563 }
2564
2565 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
2566 {
2567         struct drm_map_list *list = &bo->map_list;
2568         drm_local_map_t *map;
2569         struct drm_device *dev = bo->dev;
2570
2571         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2572         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2573         if (!list->map)
2574                 return -ENOMEM;
2575
2576         map = list->map;
2577         map->offset = 0;
2578         map->type = _DRM_TTM;
2579         map->flags = _DRM_REMOVABLE;
2580         map->size = bo->mem.num_pages * PAGE_SIZE;
2581         atomic_inc(&bo->usage);
2582         map->handle = (void *)bo;
2583
2584         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2585                                                     bo->mem.num_pages, 0, 0);
2586
2587         if (!list->file_offset_node) {
2588                 drm_bo_takedown_vm_locked(bo);
2589                 return -ENOMEM;
2590         }
2591
2592         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2593                                                   bo->mem.num_pages, 0);
2594
2595         list->hash.key = list->file_offset_node->start;
2596         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2597                 drm_bo_takedown_vm_locked(bo);
2598                 return -ENOMEM;
2599         }
2600
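             /*
              * The hash key doubles as the buffer's fake file offset:
              * user space passes user_token as the mmap() offset, and the
              * DRM mmap path looks the object up again via dev->map_hash.
              */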
2601         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2602
2603         return 0;
2604 }