Fix some obvious bugs.
[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44  * both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
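/*
 * A minimal sketch, not part of the driver and not built (#if 0), of the
 * restart pattern described above. It assumes a caller that wants to visit
 * every buffer on a manager's lru list while honouring the
 * bo->mutex-before-dev->struct_mutex locking order. Each iteration is
 * expected to unlink the buffer from the lru (as eviction does), which is
 * what lets the restart from the list head terminate.
 */
#if 0
static void drm_bo_example_walk_lru(drm_device_t * dev,
                                    drm_mem_type_manager_t * man)
{
        drm_buffer_object_t *entry;

        mutex_lock(&dev->struct_mutex);
        while (!list_empty(&man->lru)) {
                entry = list_entry(man->lru.next, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);      /* Keep the bo alive. */
                mutex_unlock(&dev->struct_mutex);

                mutex_lock(&entry->mutex);      /* bo->mutex first, ... */
                mutex_lock(&dev->struct_mutex); /* ... then dev->struct_mutex. */

                /* Operate on the buffer here; unlink it so we make progress. */
                list_del_init(&entry->lru);

                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(entry);
        }
        mutex_unlock(&dev->struct_mutex);
}
#endif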
51
52 static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
53 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
54 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
55 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
56
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
67 {
68         drm_mem_type_manager_t *man;
69
70         man = &bo->dev->bm.man[bo->pinned_mem_type];
71         list_add_tail(&bo->pinned_lru, &man->pinned);
72 }
73
74 void drm_bo_add_to_lru(drm_buffer_object_t * bo)
75 {
76         drm_mem_type_manager_t *man;
77
78         if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
79             || bo->mem.mem_type != bo->pinned_mem_type) {
80                 man = &bo->dev->bm.man[bo->mem.mem_type];
81                 list_add_tail(&bo->lru, &man->lru);
82         } else {
83                 INIT_LIST_HEAD(&bo->lru);
84         }
85 }
86
87 static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
88 {
89 #ifdef DRM_ODD_MM_COMPAT
90         int ret;
91
92         if (!bo->map_list.map)
93                 return 0;
94
95         ret = drm_bo_lock_kmm(bo);
96         if (ret)
97                 return ret;
98         drm_bo_unmap_virtual(bo);
99         if (old_is_pci)
100                 drm_bo_finish_unmap(bo);
101 #else
102         if (!bo->map_list.map)
103                 return 0;
104
105         drm_bo_unmap_virtual(bo);
106 #endif
107         return 0;
108 }
109
110 static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
111 {
112 #ifdef DRM_ODD_MM_COMPAT
113         int ret;
114
115         if (!bo->map_list.map)
116                 return;
117
118         ret = drm_bo_remap_bound(bo);
119         if (ret) {
120                 DRM_ERROR("Failed to remap a bound buffer object.\n"
121                           "\tThis might cause a sigbus later.\n");
122         }
123         drm_bo_unlock_kmm(bo);
124 #endif
125 }
126
127 /*
128  * Call bo->mutex locked.
129  */
130
131 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
132 {
133         drm_device_t *dev = bo->dev;
134         int ret = 0;
135         bo->ttm = NULL;
136
137         switch (bo->type) {
138         case drm_bo_type_dc:
139                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
140                 if (!bo->ttm)
141                         ret = -ENOMEM;
142                 break;
143         case drm_bo_type_kernel:
144                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
145                 if (!bo->ttm)
146                         ret = -ENOMEM;
147                 break;
148         case drm_bo_type_user:
149         case drm_bo_type_fake:
150                 break;
151         default:
152                 DRM_ERROR("Illegal buffer object type\n");
153                 ret = -EINVAL;
154                 break;
155         }
156
157         return ret;
158 }
159
160 static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
161                                   drm_bo_mem_reg_t * mem,
162                                   int evict, int no_wait)
163 {
164         drm_device_t *dev = bo->dev;
165         drm_buffer_manager_t *bm = &dev->bm;
166         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
167         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
168         drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
169         drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
170         int ret = 0;
171
172         if (old_is_pci || new_is_pci)
173                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
174         if (ret)
175                 return ret;
176
177         /*
178          * Create and bind a ttm if required.
179          */
180
181         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
182                 ret = drm_bo_add_ttm(bo);
183                 if (ret)
184                         goto out_err;
185
186                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
187                         ret = drm_bind_ttm(bo->ttm, new_man->flags &
188                                            DRM_BO_FLAG_CACHED,
189                                            mem->mm_node->start);
190                         if (ret)
191                                 goto out_err;
192                 }
193         }
194
195         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
196
197                 drm_bo_mem_reg_t *old_mem = &bo->mem;
198                 uint64_t save_flags = old_mem->flags;
199                 uint64_t save_mask = old_mem->mask;
200
201                 *old_mem = *mem;
202                 mem->mm_node = NULL;
203                 old_mem->mask = save_mask;
204                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
205
206         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
207                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
208
209                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
210
211         } else if (dev->driver->bo_driver->move) {
212                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
213
214         } else {
215
216                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
217
218         }
219
220         if (ret)
221                 goto out_err;
222
223         if (old_is_pci || new_is_pci)
224                 drm_bo_vm_post_move(bo);
225
226         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
227                 ret =
228                     dev->driver->bo_driver->invalidate_caches(dev,
229                                                               bo->mem.flags);
230                 if (ret)
231                         DRM_ERROR("Cannot flush read caches\n");
232         }
233
234         DRM_FLAG_MASKED(bo->priv_flags,
235                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
236                         _DRM_BO_FLAG_EVICTED);
237
238         if (bo->mem.mm_node)
239                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
240
241         return 0;
242
243       out_err:
244         if (old_is_pci || new_is_pci)
245                 drm_bo_vm_post_move(bo);
246
247         new_man = &bm->man[bo->mem.mem_type];
248         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
249                 drm_ttm_unbind(bo->ttm);
250                 drm_destroy_ttm(bo->ttm);
251                 bo->ttm = NULL;
252         }
253
254         return ret;
255 }
256
257 /*
258  * Call bo->mutex locked.
259  * Wait until the buffer is idle.
260  */
261
262 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
263                 int no_wait)
264 {
265
266         drm_fence_object_t *fence = bo->fence;
267         int ret;
268
269         if (fence) {
270                 drm_device_t *dev = bo->dev;
271                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
272                         drm_fence_usage_deref_unlocked(dev, fence);
273                         bo->fence = NULL;
274                         return 0;
275                 }
276                 if (no_wait) {
277                         return -EBUSY;
278                 }
279                 ret =
280                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
281                                           bo->fence_type);
282                 if (ret)
283                         return ret;
284
285                 drm_fence_usage_deref_unlocked(dev, fence);
286                 bo->fence = NULL;
287
288         }
289         return 0;
290 }
291
292 static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
293 {
294         drm_device_t *dev = bo->dev;
295         drm_buffer_manager_t *bm = &dev->bm;
296
297         if (bo->fence) {
298                 if (bm->nice_mode) {
299                         unsigned long _end = jiffies + 3 * DRM_HZ;
300                         int ret;
301                         do {
302                                 ret = drm_bo_wait(bo, 0, 1, 0);
303                                 if (ret && allow_errors)
304                                         return ret;
305
306                         } while (ret && !time_after_eq(jiffies, _end));
307
308                         if (bo->fence) {
309                                 bm->nice_mode = 0;
310                                 DRM_ERROR("Detected GPU lockup or "
311                                           "fence driver was taken down. "
312                                           "Evicting buffer.\n");
313                         }
314                 }
315                 if (bo->fence) {
316                         drm_fence_usage_deref_unlocked(dev, bo->fence);
317                         bo->fence = NULL;
318                 }
319         }
320         return 0;
321 }
322
323 /*
324  * Call dev->struct_mutex locked.
325  * Attempts to remove all private references to a buffer by expiring its
326  * fence object and removing from lru lists and memory managers.
327  */
328
329 static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
330 {
331         drm_device_t *dev = bo->dev;
332         drm_buffer_manager_t *bm = &dev->bm;
333
334         atomic_inc(&bo->usage);
335         mutex_unlock(&dev->struct_mutex);
336         mutex_lock(&bo->mutex);
337
338         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
339
340         if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
341                 drm_fence_usage_deref_locked(dev, bo->fence);
342                 bo->fence = NULL;
343         }
344
345         if (bo->fence && remove_all)
346                 (void)drm_bo_expire_fence(bo, 0);
347
348         mutex_lock(&dev->struct_mutex);
349
350         if (!atomic_dec_and_test(&bo->usage)) {
351                 goto out;
352         }
353
354         if (!bo->fence) {
355                 list_del_init(&bo->lru);
356                 if (bo->mem.mm_node) {
357                         drm_mm_put_block(bo->mem.mm_node);
358                         if (bo->pinned_node == bo->mem.mm_node)
359                                 bo->pinned_node = NULL;
360                         bo->mem.mm_node = NULL;
361                 }
362                 list_del_init(&bo->pinned_lru);
363                 if (bo->pinned_node) {
364                         drm_mm_put_block(bo->pinned_node);
365                         bo->pinned_node = NULL;
366                 }
367                 list_del_init(&bo->ddestroy);
368                 mutex_unlock(&bo->mutex);
369                 drm_bo_destroy_locked(bo);
370                 return;
371         }
372
373         if (list_empty(&bo->ddestroy)) {
374                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
375                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
376                 schedule_delayed_work(&bm->wq,
377                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
378         }
379
380       out:
381         mutex_unlock(&bo->mutex);
382         return;
383 }
384
385 /*
386  * Verify that refcount is 0 and that there are no internal references
387  * to the buffer object. Then destroy it.
388  */
389
390 static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
391 {
392         drm_device_t *dev = bo->dev;
393         drm_buffer_manager_t *bm = &dev->bm;
394
395         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
396             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
397             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
398                 if (bo->fence != NULL) {
399                         DRM_ERROR("Fence was non-zero.\n");
400                         drm_bo_cleanup_refs(bo, 0);
401                         return;
402                 }
403
404 #ifdef DRM_ODD_MM_COMPAT
405                 BUG_ON(!list_empty(&bo->vma_list));
406                 BUG_ON(!list_empty(&bo->p_mm_list));
407 #endif
408
409                 if (bo->ttm) {
410                         drm_ttm_unbind(bo->ttm);
411                         drm_destroy_ttm(bo->ttm);
412                         bo->ttm = NULL;
413                 }
414
415                 atomic_dec(&bm->count);
416
417                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
418
419                 return;
420         }
421
422         /*
423          * Some stuff is still trying to reference the buffer object.
424          * Get rid of those references.
425          */
426
427         drm_bo_cleanup_refs(bo, 0);
428
429         return;
430 }
431
432 /*
433  * Call dev->struct_mutex locked.
434  */
435
436 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
437 {
438         drm_buffer_manager_t *bm = &dev->bm;
439
440         drm_buffer_object_t *entry, *nentry;
441         struct list_head *list, *next;
442
443         list_for_each_safe(list, next, &bm->ddestroy) {
444                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
445
446                 nentry = NULL;
447                 if (next != &bm->ddestroy) {
448                         nentry = list_entry(next, drm_buffer_object_t,
449                                             ddestroy);
450                         atomic_inc(&nentry->usage);
451                 }
452
453                 drm_bo_cleanup_refs(entry, remove_all);
454
455                 if (nentry) {
456                         atomic_dec(&nentry->usage);
457                 }
458         }
459 }
460
461 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
462 static void drm_bo_delayed_workqueue(void *data)
463 #else
464 static void drm_bo_delayed_workqueue(struct work_struct *work)
465 #endif
466 {
467 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
468         drm_device_t *dev = (drm_device_t *) data;
469         drm_buffer_manager_t *bm = &dev->bm;
470 #else
471         drm_buffer_manager_t *bm =
472             container_of(work, drm_buffer_manager_t, wq.work);
473         drm_device_t *dev = container_of(bm, drm_device_t, bm);
474 #endif
475
476         DRM_DEBUG("Delayed delete Worker\n");
477
478         mutex_lock(&dev->struct_mutex);
479         if (!bm->initialized) {
480                 mutex_unlock(&dev->struct_mutex);
481                 return;
482         }
483         drm_bo_delayed_delete(dev, 0);
484         if (bm->initialized && !list_empty(&bm->ddestroy)) {
485                 schedule_delayed_work(&bm->wq,
486                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
487         }
488         mutex_unlock(&dev->struct_mutex);
489 }
490
491 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
492 {
493         if (atomic_dec_and_test(&bo->usage)) {
494                 drm_bo_destroy_locked(bo);
495         }
496 }
497
498 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
499 {
500         drm_buffer_object_t *bo =
501             drm_user_object_entry(uo, drm_buffer_object_t, base);
502
503         drm_bo_takedown_vm_locked(bo);
504         drm_bo_usage_deref_locked(bo);
505 }
506
507 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
508 {
509         drm_device_t *dev = bo->dev;
510
511         if (atomic_dec_and_test(&bo->usage)) {
512                 mutex_lock(&dev->struct_mutex);
513                 if (atomic_read(&bo->usage) == 0)
514                         drm_bo_destroy_locked(bo);
515                 mutex_unlock(&dev->struct_mutex);
516         }
517 }
518
519 /*
520  * Note. The caller has to register (if applicable)
521  * and deregister fence object usage.
522  */
523
524 int drm_fence_buffer_objects(drm_file_t * priv,
525                              struct list_head *list,
526                              uint32_t fence_flags,
527                              drm_fence_object_t * fence,
528                              drm_fence_object_t ** used_fence)
529 {
530         drm_device_t *dev = priv->head->dev;
531         drm_buffer_manager_t *bm = &dev->bm;
532
533         drm_buffer_object_t *entry;
534         uint32_t fence_type = 0;
535         int count = 0;
536         int ret = 0;
537         struct list_head *l;
538         LIST_HEAD(f_list);
539
540         mutex_lock(&dev->struct_mutex);
541
542         if (!list)
543                 list = &bm->unfenced;
544
545         list_for_each_entry(entry, list, lru) {
546                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
547                 fence_type |= entry->fence_type;
548                 if (entry->fence_class != 0) {
549                         DRM_ERROR("Fence class %d is not implemented yet.\n",
550                                   entry->fence_class);
551                         ret = -EINVAL;
552                         goto out;
553                 }
554                 count++;
555         }
556
557         if (!count) {
558                 ret = -EINVAL;
559                 goto out;
560         }
561
562         /*
563          * Transfer to a local list before we release the dev->struct_mutex;
564          * This is so we don't get any new unfenced objects while fencing
565  * the ones we already have.
566          */
567
568         list_splice_init(list, &f_list);
569
570         if (fence) {
571                 if ((fence_type & fence->type) != fence_type) {
572                         DRM_ERROR("Given fence doesn't match buffers "
573                                   "on unfenced list.\n");
574                         ret = -EINVAL;
575                         goto out;
576                 }
577         } else {
578                 mutex_unlock(&dev->struct_mutex);
579                 ret = drm_fence_object_create(dev, 0, fence_type,
580                                               fence_flags | DRM_FENCE_FLAG_EMIT,
581                                               &fence);
582                 mutex_lock(&dev->struct_mutex);
583                 if (ret)
584                         goto out;
585         }
586
587         count = 0;
588         l = f_list.next;
589         while (l != &f_list) {
590                 prefetch(l->next);
591                 entry = list_entry(l, drm_buffer_object_t, lru);
592                 atomic_inc(&entry->usage);
593                 mutex_unlock(&dev->struct_mutex);
594                 mutex_lock(&entry->mutex);
595                 mutex_lock(&dev->struct_mutex);
596                 list_del_init(l);
597                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
598                         count++;
599                         if (entry->fence)
600                                 drm_fence_usage_deref_locked(dev, entry->fence);
601                         entry->fence = fence;
602                         DRM_FLAG_MASKED(entry->priv_flags, 0,
603                                         _DRM_BO_FLAG_UNFENCED);
604                         DRM_WAKEUP(&entry->event_queue);
605                         drm_bo_add_to_lru(entry);
606                 }
607                 mutex_unlock(&entry->mutex);
608                 drm_bo_usage_deref_locked(entry);
609                 l = f_list.next;
610         }
611         atomic_add(count, &fence->usage);
612         DRM_DEBUG("Fenced %d buffers\n", count);
613       out:
614         mutex_unlock(&dev->struct_mutex);
615         *used_fence = fence;
616         return ret;
617 }
618
619 EXPORT_SYMBOL(drm_fence_buffer_objects);
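/*
 * Hedged usage sketch, not built (#if 0): how a caller might fence
 * everything currently on the unfenced list and then drop its own
 * reference to the returned fence, per the note above. Error paths are
 * simplified for illustration.
 */
#if 0
static int drm_bo_example_fence_unfenced(drm_file_t * priv)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence = NULL;
        int ret;

        /*
         * A NULL list means "fence the bm->unfenced list"; a NULL fence asks
         * drm_fence_buffer_objects() to create and emit one for us.
         */
        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
        if (ret)
                return ret;

        /* ... hand the fence to whoever needs it ... */

        /* The caller is responsible for dropping its fence usage. */
        drm_fence_usage_deref_unlocked(dev, fence);
        return 0;
}
#endif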
620
621 /*
622  * bo->mutex locked
623  */
624
625 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
626                         int no_wait)
627 {
628         int ret = 0;
629         drm_device_t *dev = bo->dev;
630         drm_bo_mem_reg_t evict_mem;
631
632         /*
633          * Someone might have modified the buffer before we took the buffer mutex.
634          */
635
636         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
637                 goto out;
638         if (bo->mem.mem_type != mem_type)
639                 goto out;
640
641         ret = drm_bo_wait(bo, 0, 0, no_wait);
642
643         if (ret && ret != -EAGAIN) {
644                 DRM_ERROR("Failed to expire fence before "
645                           "buffer eviction.\n");
646                 goto out;
647         }
648
649         evict_mem = bo->mem;
650         evict_mem.mm_node = NULL;
651
652         if (bo->type == drm_bo_type_fake) {
653                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
654                 bo->mem.mm_node = NULL;
655                 goto out1;
656         }
657
658         evict_mem = bo->mem;
659         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
660         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
661
662         if (ret) {
663                 if (ret != -EAGAIN)
664                         DRM_ERROR("Failed to find memory space for "
665                                   "buffer 0x%p eviction.\n", bo);
666                 goto out;
667         }
668
669         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
670
671         if (ret) {
672                 if (ret != -EAGAIN)
673                         DRM_ERROR("Buffer eviction failed\n");
674                 goto out;
675         }
676
677       out1:
678         mutex_lock(&dev->struct_mutex);
679         if (evict_mem.mm_node) {
680                 if (evict_mem.mm_node != bo->pinned_node)
681                         drm_mm_put_block(evict_mem.mm_node);
682                 evict_mem.mm_node = NULL;
683         }
684         list_del(&bo->lru);
685         drm_bo_add_to_lru(bo);
686         mutex_unlock(&dev->struct_mutex);
687
688         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
689                         _DRM_BO_FLAG_EVICTED);
690
691       out:
692         return ret;
693 }
694
695 static int drm_bo_mem_force_space(drm_device_t * dev,
696                                   drm_bo_mem_reg_t * mem,
697                                   uint32_t mem_type, int no_wait)
698 {
699         drm_mm_node_t *node;
700         drm_buffer_manager_t *bm = &dev->bm;
701         drm_buffer_object_t *entry;
702         drm_mem_type_manager_t *man = &bm->man[mem_type];
703         struct list_head *lru;
704         unsigned long num_pages = mem->num_pages;
705         int ret;
706
707         mutex_lock(&dev->struct_mutex);
708         do {
709                 node = drm_mm_search_free(&man->manager, num_pages,
710                                           mem->page_alignment, 1);
711                 if (node)
712                         break;
713
714                 lru = &man->lru;
715                 if (lru->next == lru)
716                         break;
717
718                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
719                 atomic_inc(&entry->usage);
720                 mutex_unlock(&dev->struct_mutex);
721                 mutex_lock(&entry->mutex);
722                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
723
724                 ret = drm_bo_evict(entry, mem_type, no_wait);
725                 mutex_unlock(&entry->mutex);
726                 drm_bo_usage_deref_unlocked(entry);
727                 if (ret)
728                         return ret;
729                 mutex_lock(&dev->struct_mutex);
730         } while (1);
731
732         if (!node) {
733                 mutex_unlock(&dev->struct_mutex);
734                 return -ENOMEM;
735         }
736
737         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
738         mutex_unlock(&dev->struct_mutex);
739         mem->mm_node = node;
740         mem->mem_type = mem_type;
741         return 0;
742 }
743
744 static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
745                                 uint32_t mem_type,
746                                 uint32_t mask, uint32_t * res_mask)
747 {
748         uint32_t cur_flags = drm_bo_type_flags(mem_type);
749         uint32_t flag_diff;
750
751         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
752                 cur_flags |= DRM_BO_FLAG_CACHED;
753         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
754                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
755         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
756                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
757
758         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
759                 return 0;
760
761         if (mem_type == DRM_BO_MEM_LOCAL) {
762                 *res_mask = cur_flags;
763                 return 1;
764         }
765
766         flag_diff = (mask ^ cur_flags);
767         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
768             (!(mask & DRM_BO_FLAG_CACHED) ||
769              (mask & DRM_BO_FLAG_FORCE_CACHING)))
770                 return 0;
771
772         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
773             ((mask & DRM_BO_FLAG_MAPPABLE) ||
774              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
775                 return 0;
776
777         *res_mask = cur_flags;
778         return 1;
779 }
780
781 int drm_bo_mem_space(drm_buffer_object_t * bo,
782                      drm_bo_mem_reg_t * mem, int no_wait)
783 {
784         drm_device_t *dev = bo->dev;
785         drm_buffer_manager_t *bm = &dev->bm;
786         drm_mem_type_manager_t *man;
787
788         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
789         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
790         uint32_t i;
791         uint32_t mem_type = DRM_BO_MEM_LOCAL;
792         uint32_t cur_flags;
793         int type_found = 0;
794         int type_ok = 0;
795         int has_eagain = 0;
796         drm_mm_node_t *node = NULL;
797         int ret;
798
799         mem->mm_node = NULL;
800         for (i = 0; i < num_prios; ++i) {
801                 mem_type = prios[i];
802                 man = &bm->man[mem_type];
803
804                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
805                                                &cur_flags);
806
807                 if (!type_ok)
808                         continue;
809
810                 if (mem_type == DRM_BO_MEM_LOCAL)
811                         break;
812
813                 if ((mem_type == bo->pinned_mem_type) &&
814                     (bo->pinned_node != NULL)) {
815                         node = bo->pinned_node;
816                         break;
817                 }
818
819                 mutex_lock(&dev->struct_mutex);
820                 if (man->has_type && man->use_type) {
821                         type_found = 1;
822                         node = drm_mm_search_free(&man->manager, mem->num_pages,
823                                                   mem->page_alignment, 1);
824                         if (node)
825                                 node = drm_mm_get_block(node, mem->num_pages,
826                                                         mem->page_alignment);
827                 }
828                 mutex_unlock(&dev->struct_mutex);
829                 if (node)
830                         break;
831         }
832
833         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
834                 mem->mm_node = node;
835                 mem->mem_type = mem_type;
836                 mem->flags = cur_flags;
837                 return 0;
838         }
839
840         if (!type_found)
841                 return -EINVAL;
842
843         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
844         prios = dev->driver->bo_driver->mem_busy_prio;
845
846         for (i = 0; i < num_prios; ++i) {
847                 mem_type = prios[i];
848                 man = &bm->man[mem_type];
849
850                 if (!man->has_type)
851                         continue;
852
853                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
854                         continue;
855
856                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
857
858                 if (ret == 0) {
859                         mem->flags = cur_flags;
860                         return 0;
861                 }
862
863                 if (ret == -EAGAIN)
864                         has_eagain = 1;
865         }
866
867         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
868         return ret;
869 }
870
871 EXPORT_SYMBOL(drm_bo_mem_space);
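/*
 * Illustrative sketch, not built (#if 0), of the driver-side tables that
 * steer drm_bo_mem_space(): the first-choice priority list and the "busy"
 * list used when eviction becomes necessary. DRM_BO_MEM_VRAM and the
 * example_* names are assumptions made for this example only; a real
 * driver fills these drm_bo_driver_t fields to match its hardware.
 */
#if 0
static uint32_t example_mem_prios[] = {
        DRM_BO_MEM_VRAM,        /* assumed memory type, for illustration */
        DRM_BO_MEM_TT,
        DRM_BO_MEM_LOCAL,
};
static uint32_t example_busy_prios[] = {
        DRM_BO_MEM_TT,
        DRM_BO_MEM_LOCAL,
};

static drm_bo_driver_t example_bo_driver = {
        .mem_type_prio = example_mem_prios,
        .mem_busy_prio = example_busy_prios,
        .num_mem_type_prio = ARRAY_SIZE(example_mem_prios),
        .num_mem_busy_prio = ARRAY_SIZE(example_busy_prios),
        /* .fence_type, .evict_mask, .move, ... omitted here */
};
#endif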
872
873 static int drm_bo_new_mask(drm_buffer_object_t * bo,
874                            uint64_t new_mask, uint32_t hint)
875 {
876         uint32_t new_props;
877
878         if (bo->type == drm_bo_type_user) {
879                 DRM_ERROR("User buffers are not supported yet\n");
880                 return -EINVAL;
881         }
882         if (bo->type == drm_bo_type_fake &&
883             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
884                 DRM_ERROR("Fake buffers must be pinned.\n");
885                 return -EINVAL;
886         }
887
888         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
889                 DRM_ERROR
890                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
891                      "processes\n");
892                 return -EPERM;
893         }
894
895         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
896                                 DRM_BO_FLAG_READ);
897
898         if (!new_props) {
899                 DRM_ERROR("Invalid buffer object rwx properties\n");
900                 return -EINVAL;
901         }
902
903         bo->mem.mask = new_mask;
904         return 0;
905 }
906
907 /*
908  * Call dev->struct_mutex locked.
909  */
910
911 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
912                                               uint32_t handle, int check_owner)
913 {
914         drm_user_object_t *uo;
915         drm_buffer_object_t *bo;
916
917         uo = drm_lookup_user_object(priv, handle);
918
919         if (!uo || (uo->type != drm_buffer_type)) {
920                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
921                 return NULL;
922         }
923
924         if (check_owner && priv != uo->owner) {
925                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
926                         return NULL;
927         }
928
929         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
930         atomic_inc(&bo->usage);
931         return bo;
932 }
933
934 /*
935  * Call bo->mutex locked.
936  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
937  * Unlike drm_bo_busy(), this function does not flush fences.
938  */
939
940 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
941 {
942         drm_fence_object_t *fence = bo->fence;
943
944         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
945         if (fence) {
946                 drm_device_t *dev = bo->dev;
947                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
948                         drm_fence_usage_deref_unlocked(dev, fence);
949                         bo->fence = NULL;
950                         return 0;
951                 }
952                 return 1;
953         }
954         return 0;
955 }
956
957 /*
958  * Call bo->mutex locked.
959  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
960  */
961
962 static int drm_bo_busy(drm_buffer_object_t * bo)
963 {
964         drm_fence_object_t *fence = bo->fence;
965
966         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
967         if (fence) {
968                 drm_device_t *dev = bo->dev;
969                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
970                         drm_fence_usage_deref_unlocked(dev, fence);
971                         bo->fence = NULL;
972                         return 0;
973                 }
974                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
975                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
976                         drm_fence_usage_deref_unlocked(dev, fence);
977                         bo->fence = NULL;
978                         return 0;
979                 }
980                 return 1;
981         }
982         return 0;
983 }
984
985 static int drm_bo_read_cached(drm_buffer_object_t * bo)
986 {
987         int ret = 0;
988
989         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
990         if (bo->mem.mm_node)
991                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
992         return ret;
993 }
994
995 /*
996  * Wait until a buffer is unmapped.
997  */
998
999 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
1000 {
1001         int ret = 0;
1002
1003         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1004                 return -EBUSY;
1005
1006         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1007                     atomic_read(&bo->mapped) == -1);
1008
1009         if (ret == -EINTR)
1010                 ret = -EAGAIN;
1011
1012         return ret;
1013 }
1014
1015 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
1016 {
1017         int ret;
1018
1019         mutex_lock(&bo->mutex);
1020         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1021         mutex_unlock(&bo->mutex);
1022         return ret;
1023 }
1024
1025 /*
1026  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1027  * Until then, we cannot really do anything with it except delete it.
1028  * The unfenced list is a PITA, and the operations
1029  * 1) validating
1030  * 2) submitting commands
1031  * 3) fencing
1032  * should really be one atomic operation.
1033  * We currently "solve" this by keeping the buffer "unfenced" after
1034  * validating, but before fencing.
1035  */
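/*
 * Hedged sketch, not built (#if 0), of the ordering a driver's command
 * submission path is expected to follow given the window described above.
 * example_validate_buffers() and example_emit_commands() are hypothetical
 * driver-specific helpers; only drm_fence_buffer_objects() is the real
 * entry point that moves the buffers off the unfenced list again.
 */
#if 0
static int example_submit(drm_file_t * priv)
{
        drm_fence_object_t *fence = NULL;
        int ret;

        /* 1) Validate: buffers are placed and left on bm->unfenced. */
        ret = example_validate_buffers(priv);
        if (ret)
                return ret;

        /* 2) Submit: emit the commands that reference those buffers. */
        ret = example_emit_commands(priv);
        if (ret)
                return ret;

        /*
         * 3) Fence: attach a fence and move the buffers back to their lru
         * lists. Dropping the returned fence reference is the caller's
         * job, as in the sketch after drm_fence_buffer_objects() above.
         */
        return drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
}
#endif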
1036
1037 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
1038                                 int eagain_if_wait)
1039 {
1040         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1041
1042         if (ret && no_wait)
1043                 return -EBUSY;
1044         else if (!ret)
1045                 return 0;
1046
1047         ret = 0;
1048         mutex_unlock(&bo->mutex);
1049         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1050                     !drm_bo_check_unfenced(bo));
1051         mutex_lock(&bo->mutex);
1052         if (ret == -EINTR)
1053                 return -EAGAIN;
1054         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1055         if (ret) {
1056                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1057                 return -EBUSY;
1058         }
1059         if (eagain_if_wait)
1060                 return -EAGAIN;
1061
1062         return 0;
1063 }
1064
1065 /*
1066  * Fill in the ioctl reply argument with buffer info.
1067  * Bo locked.
1068  */
1069
1070 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
1071                                 struct drm_bo_info_rep *rep)
1072 {
1073         rep->handle = bo->base.hash.key;
1074         rep->flags = bo->mem.flags;
1075         rep->size = bo->mem.num_pages * PAGE_SIZE;
1076         rep->offset = bo->offset;
1077         rep->arg_handle = bo->map_list.user_token;
1078         rep->mask = bo->mem.mask;
1079         rep->buffer_start = bo->buffer_start;
1080         rep->fence_flags = bo->fence_type;
1081         rep->rep_flags = 0;
1082         rep->page_alignment = bo->mem.page_alignment;
1083
1084         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1085                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1086                                 DRM_BO_REP_BUSY);
1087         }
1088 }
1089
1090 /*
1091  * Wait for buffer idle and register that we've mapped the buffer.
1092  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1093  * so that if the client dies, the mapping is automatically
1094  * unregistered.
1095  */
1096
1097 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1098                                  uint32_t map_flags, unsigned hint,
1099                                  struct drm_bo_info_rep *rep)
1100 {
1101         drm_buffer_object_t *bo;
1102         drm_device_t *dev = priv->head->dev;
1103         int ret = 0;
1104         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1105
1106         mutex_lock(&dev->struct_mutex);
1107         bo = drm_lookup_buffer_object(priv, handle, 1);
1108         mutex_unlock(&dev->struct_mutex);
1109
1110         if (!bo)
1111                 return -EINVAL;
1112
1113         mutex_lock(&bo->mutex);
1114         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1115                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1116                 if (ret)
1117                         goto out;
1118         }
1119
1120         /*
1121          * If atomic_inc_and_test() below returns true, the buffer was
1122          * unmapped until now. We need this test because unmapping can
1123          * be done without the bo->mutex held.
1124          */
1125
1126         while (1) {
1127                 if (atomic_inc_and_test(&bo->mapped)) {
1128                         if (no_wait && drm_bo_busy(bo)) {
1129                                 atomic_dec(&bo->mapped);
1130                                 ret = -EBUSY;
1131                                 goto out;
1132                         }
1133                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1134                         if (ret) {
1135                                 atomic_dec(&bo->mapped);
1136                                 goto out;
1137                         }
1138
1139                         if ((map_flags & DRM_BO_FLAG_READ) &&
1140                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1141                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1142                                 drm_bo_read_cached(bo);
1143                         }
1144                         break;
1145                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1146                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1147                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1148
1149                         /*
1150                          * We are already mapped with different flags;
1151                          * we need to wait for unmap.
1152                          */
1153
1154                         ret = drm_bo_wait_unmapped(bo, no_wait);
1155                         if (ret)
1156                                 goto out;
1157
1158                         continue;
1159                 }
1160                 break;
1161         }
1162
1163         mutex_lock(&dev->struct_mutex);
1164         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1165         mutex_unlock(&dev->struct_mutex);
1166         if (ret) {
1167                 if (atomic_add_negative(-1, &bo->mapped))
1168                         DRM_WAKEUP(&bo->event_queue);
1169
1170         } else
1171                 drm_bo_fill_rep_arg(bo, rep);
1172       out:
1173         mutex_unlock(&bo->mutex);
1174         drm_bo_usage_deref_unlocked(bo);
1175         return ret;
1176 }
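/*
 * The mapped-count convention used above, shown in isolation as a sketch
 * (not built, #if 0): bo->mapped starts at -1 meaning "unmapped"; the
 * first mapper brings it to 0, and the last unmapper returns it to -1,
 * which is when waiters on bo->event_queue are woken.
 */
#if 0
static void example_map_refcount(drm_buffer_object_t * bo)
{
        if (atomic_inc_and_test(&bo->mapped)) {
                /* -1 -> 0: first mapper; this is where idle waits happen. */
        }

        /* ... the buffer is mapped and used here ... */

        if (atomic_add_negative(-1, &bo->mapped)) {
                /* back to -1: last unmapper; wake anyone waiting for unmap. */
                DRM_WAKEUP(&bo->event_queue);
        }
}
#endif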
1177
1178 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1179 {
1180         drm_device_t *dev = priv->head->dev;
1181         drm_buffer_object_t *bo;
1182         drm_ref_object_t *ro;
1183         int ret = 0;
1184
1185         mutex_lock(&dev->struct_mutex);
1186
1187         bo = drm_lookup_buffer_object(priv, handle, 1);
1188         if (!bo) {
1189                 ret = -EINVAL;
1190                 goto out;
1191         }
1192
1193         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1194         if (!ro) {
1195                 ret = -EINVAL;
1196                 goto out;
1197         }
1198
1199         drm_remove_ref_object(priv, ro);
1200         drm_bo_usage_deref_locked(bo);
1201       out:
1202         mutex_unlock(&dev->struct_mutex);
1203         return ret;
1204 }
1205
1206 /*
1207  * Call dev->struct_mutex locked.
1208  */
1209
1210 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1211                                          drm_user_object_t * uo,
1212                                          drm_ref_t action)
1213 {
1214         drm_buffer_object_t *bo =
1215             drm_user_object_entry(uo, drm_buffer_object_t, base);
1216
1217         /*
1218          * We DON'T want to take the bo->lock here, because we want to
1219          * hold it when we wait for unmapped buffer.
1220          */
1221
1222         BUG_ON(action != _DRM_REF_TYPE1);
1223
1224         if (atomic_add_negative(-1, &bo->mapped))
1225                 DRM_WAKEUP(&bo->event_queue);
1226 }
1227
1228 /*
1229  * bo->mutex locked.
1230  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1231  */
1232
1233 int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1234                        int no_wait, int move_unfenced)
1235 {
1236         drm_device_t *dev = bo->dev;
1237         drm_buffer_manager_t *bm = &dev->bm;
1238         int ret = 0;
1239         drm_bo_mem_reg_t mem;
1240         /*
1241          * Flush outstanding fences.
1242          */
1243
1244         drm_bo_busy(bo);
1245
1246         /*
1247          * Wait for outstanding fences.
1248          */
1249
1250         ret = drm_bo_wait(bo, 0, 0, no_wait);
1251         if (ret)
1252                 return ret;
1253
1254         mem.num_pages = bo->mem.num_pages;
1255         mem.size = mem.num_pages << PAGE_SHIFT;
1256         mem.mask = new_mem_flags;
1257         mem.page_alignment = bo->mem.page_alignment;
1258
1259         mutex_lock(&bm->evict_mutex);
1260         mutex_lock(&dev->struct_mutex);
1261         list_del(&bo->lru);
1262         list_add_tail(&bo->lru, &bm->unfenced);
1263         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1264                         _DRM_BO_FLAG_UNFENCED);
1265         mutex_unlock(&dev->struct_mutex);
1266
1267         /*
1268          * Determine where to move the buffer.
1269          */
1270         ret = drm_bo_mem_space(bo, &mem, no_wait);
1271         if (ret)
1272                 goto out_unlock;
1273
1274         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1275
1276  out_unlock:
1277         if (ret || !move_unfenced) {
1278                 mutex_lock(&dev->struct_mutex);
1279                 if (mem.mm_node) {
1280                         if (mem.mm_node != bo->pinned_node)
1281                                 drm_mm_put_block(mem.mm_node);
1282                         mem.mm_node = NULL;
1283                 }
1284                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1285                 DRM_WAKEUP(&bo->event_queue);
1286                 list_del(&bo->lru);
1287                 drm_bo_add_to_lru(bo);
1288                 mutex_unlock(&dev->struct_mutex);
1289         }
1290
1291         mutex_unlock(&bm->evict_mutex);
1292         return ret;
1293 }
1294
1295 static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
1296 {
1297         uint32_t flag_diff = (mem->mask ^ mem->flags);
1298
1299         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1300                 return 0;
1301         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1302             (!(mem->mask & DRM_BO_FLAG_CACHED) ||
1303              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1304                 return 0;
1305         }
1306         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1307             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1308              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1309                 return 0;
1310         return 1;
1311 }
1312
1313 static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
1314 {
1315         drm_buffer_manager_t *bm = &dev->bm;
1316         drm_mem_type_manager_t *man;
1317         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1318         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1319         uint32_t i;
1320         int type_ok = 0;
1321         uint32_t mem_type = 0;
1322         uint32_t cur_flags;
1323
1324         if (drm_bo_mem_compat(mem))
1325                 return 0;
1326
1327         BUG_ON(mem->mm_node);
1328
1329         for (i = 0; i < num_prios; ++i) {
1330                 mem_type = prios[i];
1331                 man = &bm->man[mem_type];
1332                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1333                                                &cur_flags);
1334                 if (type_ok)
1335                         break;
1336         }
1337
1338         if (type_ok) {
1339                 mem->mm_node = NULL;
1340                 mem->mem_type = mem_type;
1341                 mem->flags = cur_flags;
1342                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1343                 return 0;
1344         }
1345
1346         DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
1347                   (unsigned long long) mem->mask);
1348         return -EINVAL;
1349 }
1350
1351 /*
1352  * bo locked.
1353  */
1354
1355 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1356                                       uint32_t fence_class,
1357                                       int move_unfenced, int no_wait)
1358 {
1359         drm_device_t *dev = bo->dev;
1360         drm_buffer_manager_t *bm = &dev->bm;
1361         drm_bo_driver_t *driver = dev->driver->bo_driver;
1362         uint32_t ftype;
1363         int ret;
1364
1365         DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1366                   (unsigned long long) bo->mem.mask,
1367                   (unsigned long long) bo->mem.flags);
1368
1369         ret = driver->fence_type(bo, &ftype);
1370
1371         if (ret) {
1372                 DRM_ERROR("Driver did not support given buffer permissions\n");
1373                 return ret;
1374         }
1375
1376         /*
1377          * We're switching command submission mechanism,
1378          * or cannot simply rely on the hardware serializing for us.
1379          *
1380          * Wait for buffer idle.
1381          */
1382
1383         if ((fence_class != bo->fence_class) ||
1384             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1385
1386                 ret = drm_bo_wait(bo, 0, 0, no_wait);
1387
1388                 if (ret)
1389                         return ret;
1390
1391         }
1392
1393         bo->fence_class = fence_class;
1394         bo->fence_type = ftype;
1395         ret = drm_bo_wait_unmapped(bo, no_wait);
1396         if (ret)
1397                 return ret;
1398
1399         if (bo->type == drm_bo_type_fake) {
1400                 ret = drm_bo_check_fake(dev, &bo->mem);
1401                 if (ret)
1402                         return ret;
1403         }
1404
1405         /*
1406          * Check whether we need to move buffer.
1407          */
1408
1409         if (!drm_bo_mem_compat(&bo->mem)) {
1410                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1411                                          move_unfenced);
1412                 if (ret) {
1413                         if (ret != -EAGAIN)
1414                                 DRM_ERROR("Failed moving buffer.\n");
1415                         return ret;
1416                 }
1417         }
1418
1419         /*
1420          * Pinned buffers.
1421          */
1422
1423         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1424                 bo->pinned_mem_type = bo->mem.mem_type;
1425                 mutex_lock(&dev->struct_mutex);
1426                 list_del_init(&bo->pinned_lru);
1427                 drm_bo_add_to_pinned_lru(bo);
1428
1429                 if (bo->pinned_node != bo->mem.mm_node) {
1430                         if (bo->pinned_node != NULL)
1431                                 drm_mm_put_block(bo->pinned_node);
1432                         bo->pinned_node = bo->mem.mm_node;
1433                 }
1434
1435                 mutex_unlock(&dev->struct_mutex);
1436
1437         } else if (bo->pinned_node != NULL) {
1438
1439                 mutex_lock(&dev->struct_mutex);
1440
1441                 if (bo->pinned_node != bo->mem.mm_node)
1442                         drm_mm_put_block(bo->pinned_node);
1443
1444                 list_del_init(&bo->pinned_lru);
1445                 bo->pinned_node = NULL;
1446                 mutex_unlock(&dev->struct_mutex);
1447
1448         }
1449
1450         /*
1451          * We might need to add a TTM.
1452          */
1453
1454         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1455                 ret = drm_bo_add_ttm(bo);
1456                 if (ret)
1457                         return ret;
1458         }
1459         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1460
1461         /*
1462          * Finally, adjust lru to be sure.
1463          */
1464
1465         mutex_lock(&dev->struct_mutex);
1466         list_del(&bo->lru);
1467         if (move_unfenced) {
1468                 list_add_tail(&bo->lru, &bm->unfenced);
1469                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1470                                 _DRM_BO_FLAG_UNFENCED);
1471         } else {
1472                 drm_bo_add_to_lru(bo);
1473                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1474                         DRM_WAKEUP(&bo->event_queue);
1475                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1476                                         _DRM_BO_FLAG_UNFENCED);
1477                 }
1478         }
1479         mutex_unlock(&dev->struct_mutex);
1480
1481         return 0;
1482 }
1483
1484 static int drm_bo_handle_validate(drm_file_t * priv,
1485                                   uint32_t handle,
1486                                   uint32_t fence_class,
1487                                   uint64_t flags, uint64_t mask, uint32_t hint,
1488                                   struct drm_bo_info_rep *rep)
1489 {
1490         drm_buffer_object_t *bo;
1491         int ret;
1492         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1493
1494         bo = drm_lookup_buffer_object(priv, handle, 1);
1495         if (!bo) {
1496                 return -EINVAL;
1497         }
1498
1499         mutex_lock(&bo->mutex);
1500         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1501
1502         if (ret)
1503                 goto out;
1504
1505         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1506         ret = drm_bo_new_mask(bo, flags, hint);
1507         if (ret)
1508                 goto out;
1509
1510         ret =
1511             drm_buffer_object_validate(bo, fence_class,
1512                                        !(hint & DRM_BO_HINT_DONT_FENCE),
1513                                        no_wait);
1514         drm_bo_fill_rep_arg(bo, rep);
1515
1516       out:
1517
1518         mutex_unlock(&bo->mutex);
1519
1520         drm_bo_usage_deref_unlocked(bo);
1521         return ret;
1522 }
1523
1524 static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle,
1525                               struct drm_bo_info_rep *rep)
1526 {
1527         drm_buffer_object_t *bo;
1528
1529         bo = drm_lookup_buffer_object(priv, handle, 1);
1530         if (!bo) {
1531                 return -EINVAL;
1532         }
1533         mutex_lock(&bo->mutex);
1534         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1535                 (void)drm_bo_busy(bo);
1536         drm_bo_fill_rep_arg(bo, rep);
1537         mutex_unlock(&bo->mutex);
1538         drm_bo_usage_deref_unlocked(bo);
1539         return 0;
1540 }
1541
1542 static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle,
1543                               uint32_t hint,
1544                               struct drm_bo_info_rep *rep)
1545 {
1546         drm_buffer_object_t *bo;
1547         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1548         int ret;
1549
1550         bo = drm_lookup_buffer_object(priv, handle, 1);
1551         if (!bo) {
1552                 return -EINVAL;
1553         }
1554
1555         mutex_lock(&bo->mutex);
1556         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1557         if (ret)
1558                 goto out;
1559         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1560         if (ret)
1561                 goto out;
1562
1563         drm_bo_fill_rep_arg(bo, rep);
1564
1565       out:
1566         mutex_unlock(&bo->mutex);
1567         drm_bo_usage_deref_unlocked(bo);
1568         return ret;
1569 }
1570
1571 int drm_buffer_object_create(drm_device_t *dev,
1572                              unsigned long size,
1573                              drm_bo_type_t type,
1574                              uint64_t mask,
1575                              uint32_t hint,
1576                              uint32_t page_alignment,
1577                              unsigned long buffer_start,
1578                              drm_buffer_object_t ** buf_obj)
1579 {
1580         drm_buffer_manager_t *bm = &dev->bm;
1581         drm_buffer_object_t *bo;
1582         int ret = 0;
1583         unsigned long num_pages;
1584
1585         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1586                 DRM_ERROR("Invalid buffer object start.\n");
1587                 return -EINVAL;
1588         }
1589         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1590         if (num_pages == 0) {
1591                 DRM_ERROR("Illegal buffer object size.\n");
1592                 return -EINVAL;
1593         }
1594
1595         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1596
1597         if (!bo)
1598                 return -ENOMEM;
1599
1600         mutex_init(&bo->mutex);
1601         mutex_lock(&bo->mutex);
1602
1603         atomic_set(&bo->usage, 1);
1604         atomic_set(&bo->mapped, -1);
1605         DRM_INIT_WAITQUEUE(&bo->event_queue);
1606         INIT_LIST_HEAD(&bo->lru);
1607         INIT_LIST_HEAD(&bo->pinned_lru);
1608         INIT_LIST_HEAD(&bo->ddestroy);
1609 #ifdef DRM_ODD_MM_COMPAT
1610         INIT_LIST_HEAD(&bo->p_mm_list);
1611         INIT_LIST_HEAD(&bo->vma_list);
1612 #endif
1613         bo->dev = dev;
1614         bo->type = type;
1615         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1616         bo->mem.num_pages = num_pages;
1617         bo->mem.mm_node = NULL;
1618         bo->mem.page_alignment = page_alignment;
1619         if (bo->type == drm_bo_type_fake) {
1620                 bo->offset = buffer_start;
1621                 bo->buffer_start = 0;
1622         } else {
1623                 bo->buffer_start = buffer_start;
1624         }
1625         bo->priv_flags = 0;
1626         bo->mem.flags = 0ULL;
1627         bo->mem.mask = 0ULL;
1628         atomic_inc(&bm->count);
1629         ret = drm_bo_new_mask(bo, mask, hint);
1630
1631         if (ret)
1632                 goto out_err;
1633
1634         if (bo->type == drm_bo_type_dc) {
1635                 mutex_lock(&dev->struct_mutex);
1636                 ret = drm_bo_setup_vm_locked(bo);
1637                 mutex_unlock(&dev->struct_mutex);
1638                 if (ret)
1639                         goto out_err;
1640         }
1641         ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1642         if (ret)
1643                 goto out_err;
1644
1645         mutex_unlock(&bo->mutex);
1646         *buf_obj = bo;
1647         return 0;
1648
1649       out_err:
1650         mutex_unlock(&bo->mutex);
1651
1652         drm_bo_usage_deref_unlocked(bo);
1653         return ret;
1654 }
1655
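/*
 * Register the buffer object in the file's user object hash so that
 * user space can refer to it by handle. Takes dev->struct_mutex.
 */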
1656 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1657                                   int shareable)
1658 {
1659         drm_device_t *dev = priv->head->dev;
1660         int ret;
1661
1662         mutex_lock(&dev->struct_mutex);
1663         ret = drm_add_user_object(priv, &bo->base, shareable);
1664         if (ret)
1665                 goto out;
1666
1667         bo->base.remove = drm_bo_base_deref_locked;
1668         bo->base.type = drm_buffer_type;
1669         bo->base.ref_struct_locked = NULL;
1670         bo->base.unref = drm_buffer_user_object_unmap;
1671
1672       out:
1673         mutex_unlock(&dev->struct_mutex);
1674         return ret;
1675 }
1676
1677 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1678 {
1679         LOCK_TEST_WITH_RETURN(dev, filp);
1680         return 0;
1681 }
1682
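/*
 * Ioctl entry point for buffer object operations. Walks the user-space
 * chain of drm_bo_op_arg structures, dispatching each unhandled request
 * and writing the result back before following arg.next.
 */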
1683 int drm_bo_op_ioctl(DRM_IOCTL_ARGS)
1684 {
1685         DRM_DEVICE;
1686         struct drm_bo_op_arg arg;
1687         struct drm_bo_op_req *req = &arg.d.req;
1688         struct drm_bo_info_rep rep;
1689         unsigned long next;
1690         int ret;
1691
1692         if (!dev->bm.initialized) {
1693                 DRM_ERROR("Buffer object manager is not initialized.\n");
1694                 return -EINVAL;
1695         }
1696
1697         do {
1698                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1699
1700                 if (arg.handled) {
1701                         data = arg.next;
1702                         continue;
1703                 }
1704
1705                 ret = 0;
1706                 switch (req->op) {
1707                 case drm_bo_validate:
1708                         ret = drm_bo_lock_test(dev, filp);
1709                         if (ret)
1710                                 break;
1711                         ret = drm_bo_handle_validate(priv, req->bo_req.handle,
1712                                                      req->bo_req.fence_class,
1713                                                      req->bo_req.flags,
1714                                                      req->bo_req.mask,
1715                                                      req->bo_req.hint,
1716                                                      &rep);
1717                         break;
1718                 case drm_bo_fence:
1719                         ret = -EINVAL;
1720                         DRM_ERROR("Function is not implemented yet.\n");
1721                         break;
1722                 case drm_bo_ref_fence:
1723                         ret = -EINVAL;
1724                         DRM_ERROR("Function is not implemented yet.\n");
1725                         break;
1726                 default:
1727                         ret = -EINVAL;
1728                 }
1729                 next = arg.next;
1730
1731                 /*
1732                  * A signal interrupted us. Make sure the ioctl is restartable.
1733                  */
1734
1735                 if (ret == -EAGAIN)
1736                         return -EAGAIN;
1737
1738                 arg.handled = 1;
1739                 arg.d.rep.ret = ret;
1740                 arg.d.rep.bo_info = rep;
1741                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1742                 data = next;
1743         } while (data);
1744         return 0;
1745 }
1746
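/*
 * Ioctl entry point for buffer object creation: create the object,
 * add a user object handle for it and return the object info.
 */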
1747 int drm_bo_create_ioctl(DRM_IOCTL_ARGS)
1748 {
1749         DRM_DEVICE;
1750         struct drm_bo_create_arg arg;
1751         struct drm_bo_create_req *req = &arg.d.req;
1752         struct drm_bo_info_rep *rep = &arg.d.rep;
1753         drm_buffer_object_t *entry;
1754         int ret = 0;
1755
1756         if (!dev->bm.initialized) {
1757                 DRM_ERROR("Buffer object manager is not initialized.\n");
1758                 return -EINVAL;
1759         }
1760
1761         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1762         
1763         ret = drm_bo_lock_test(dev, filp);
1764         if (ret)
1765                 goto out;
1766
1767         ret = drm_buffer_object_create(priv->head->dev,
1768                                        req->size, req->type, req->mask,
1769                                        req->hint, req->page_alignment,
1770                                        req->buffer_start, &entry);
1771         if (ret)
1772                 goto out;
1773         
1774         ret = drm_bo_add_user_object(priv, entry,
1775                                      req->mask & DRM_BO_FLAG_SHAREABLE);
1776         if (ret) {
1777                 drm_bo_usage_deref_unlocked(entry);
1778                 goto out;
1779         }
1780         
1781         mutex_lock(&entry->mutex);
1782         drm_bo_fill_rep_arg(entry, rep);
1783         mutex_unlock(&entry->mutex);
1784
1785         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1786 out:
1787         return ret;
1788 }
1789
1790
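/*
 * Ioctl entry point for dropping the caller's handle to a buffer
 * object. Only the owner of the user object may destroy it here.
 */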
1791 int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS)
1792 {
1793         DRM_DEVICE;
1794         struct drm_bo_handle_arg arg;
1795         drm_user_object_t *uo;
1796         int ret = 0;
1797
1798         if (!dev->bm.initialized) {
1799                 DRM_ERROR("Buffer object manager is not initialized.\n");
1800                 return -EINVAL;
1801         }
1802
1803         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1804
1805         mutex_lock(&dev->struct_mutex);
1806         uo = drm_lookup_user_object(priv, arg.handle);
1807         if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) {
1808                 mutex_unlock(&dev->struct_mutex);
1809                 return -EINVAL;
1810         }
1811         ret = drm_remove_user_object(priv, uo);
1812         mutex_unlock(&dev->struct_mutex);
1813         
1814         return ret;
1815 }
1816
1817 int drm_bo_map_ioctl(DRM_IOCTL_ARGS)
1818 {
1819         DRM_DEVICE;
1820         struct drm_bo_map_wait_idle_arg arg;
1821         struct drm_bo_info_req *req = &arg.d.req;
1822         struct drm_bo_info_rep *rep = &arg.d.rep;
1823         int ret;
1824         if (!dev->bm.initialized) {
1825                 DRM_ERROR("Buffer object manager is not initialized.\n");
1826                 return -EINVAL;
1827         }
1828
1829         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1830
1831         ret = drm_buffer_object_map(priv, req->handle, req->mask,
1832                                     req->hint, rep);
1833         if (ret)
1834                 return ret;
1835
1836         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1837         return 0;
1838 }
1839
1840 int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS)
1841 {
1842         DRM_DEVICE;
1843         struct drm_bo_handle_arg arg;
1844         int ret;
1845         if (!dev->bm.initialized) {
1846                 DRM_ERROR("Buffer object manager is not initialized.\n");
1847                 return -EINVAL;
1848         }
1849
1850         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1851
1852         ret = drm_buffer_object_unmap(priv, arg.handle);
1853         return ret;
1854 }
1855
1856
1857 int drm_bo_reference_ioctl(DRM_IOCTL_ARGS)
1858 {
1859         DRM_DEVICE;
1860         struct drm_bo_reference_info_arg arg;
1861         struct drm_bo_handle_arg *req = &arg.d.req;
1862         struct drm_bo_info_rep *rep = &arg.d.rep;
1863         drm_user_object_t *uo;
1864         int ret;
1865
1866         if (!dev->bm.initialized) {
1867                 DRM_ERROR("Buffer object manager is not initialized.\n");
1868                 return -EINVAL;
1869         }
1870
1871         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1872
1873         ret = drm_user_object_ref(priv, req->handle,
1874                                   drm_buffer_type, &uo);
1875         if (ret)
1876                 return ret;
1877         
1878         ret = drm_bo_handle_info(priv, req->handle, rep);
1879         if (ret)
1880                 return ret;
1881
1882         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1883         return 0;
1884 }
1885
1886 int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS)
1887 {
1888         DRM_DEVICE;
1889         struct drm_bo_handle_arg arg;
1890         int ret = 0;
1891
1892         if (!dev->bm.initialized) {
1893                 DRM_ERROR("Buffer object manager is not initialized.\n");
1894                 return -EINVAL;
1895         }
1896
1897         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1898
1899         ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type);
1900         return ret;
1901 }
1902
1903 int drm_bo_info_ioctl(DRM_IOCTL_ARGS)
1904 {
1905         DRM_DEVICE;
1906         struct drm_bo_reference_info_arg arg;
1907         struct drm_bo_handle_arg *req = &arg.d.req;
1908         struct drm_bo_info_rep *rep = &arg.d.rep;
1909         int ret;
1910
1911         if (!dev->bm.initialized) {
1912                 DRM_ERROR("Buffer object manager is not initialized.\n");
1913                 return -EINVAL;
1914         }
1915
1916         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1917
1918         ret = drm_bo_handle_info(priv, req->handle, rep);
1919         if (ret)
1920                 return ret;
1921         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1922         return 0;
1923 }
1924
1925 int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS)
1926 {
1927         DRM_DEVICE;
1928         struct drm_bo_map_wait_idle_arg arg;
1929         struct drm_bo_info_req *req = &arg.d.req;
1930         struct drm_bo_info_rep *rep = &arg.d.rep;
1931         int ret;
1932         if (!dev->bm.initialized) {
1933                 DRM_ERROR("Buffer object manager is not initialized.\n");
1934                 return -EINVAL;
1935         }
1936
1937         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1938
1939         ret = drm_bo_handle_wait(priv, req->handle,
1940                                  req->hint, rep);
1941         if (ret)
1942                 return ret;
1943
1944         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1945         return 0;
1946 }
1947
1948
1949
1950 /**
1951  * Clean the unfenced list and put its buffers back on the regular LRU.
1952  * This is part of the memory manager cleanup and should only be
1953  * called with the DRI lock held.
1954  * Call with dev->struct_mutex held.
1955  */
1956
1957 static void drm_bo_clean_unfenced(drm_device_t *dev)
1958 {
1959         drm_buffer_manager_t *bm  = &dev->bm;
1960         struct list_head *head, *list;
1961         drm_buffer_object_t *entry;
1962
1963         head = &bm->unfenced;
1964
1965         list = head->next;
1966         while(list != head) {
1967                 prefetch(list->next);
1968                 entry = list_entry(list, drm_buffer_object_t, lru);
1969
1970                 atomic_inc(&entry->usage);
1971                 mutex_unlock(&dev->struct_mutex);
1972                 mutex_lock(&entry->mutex);
1973                 mutex_lock(&dev->struct_mutex);
1974
1975                 list_del(&entry->lru);
1976                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1977                 drm_bo_add_to_lru(entry);
1978                 mutex_unlock(&entry->mutex);
1979                 list = head->next;
1980         }
1981 }
1982
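/*
 * Detach a buffer object from the lists of memory type @mem_type at
 * memory manager cleanup time: expire its fence, optionally release a
 * pinned region, and evict the buffer if it still resides in @mem_type.
 */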
1983 static int drm_bo_leave_list(drm_buffer_object_t * bo,
1984                              uint32_t mem_type,
1985                              int free_pinned, int allow_errors)
1986 {
1987         drm_device_t *dev = bo->dev;
1988         int ret = 0;
1989
1990         mutex_lock(&bo->mutex);
1991
1992         ret = drm_bo_expire_fence(bo, allow_errors);
1993         if (ret)
1994                 goto out;
1995
1996         if (free_pinned) {
1997                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1998                 mutex_lock(&dev->struct_mutex);
1999                 list_del_init(&bo->pinned_lru);
2000                 if (bo->pinned_node == bo->mem.mm_node)
2001                         bo->pinned_node = NULL;
2002                 if (bo->pinned_node != NULL) {
2003                         drm_mm_put_block(bo->pinned_node);
2004                         bo->pinned_node = NULL;
2005                 }
2006                 mutex_unlock(&dev->struct_mutex);
2007         }
2008
2009         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2010                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer was present at "
2011                           "cleanup. Removing flag and evicting.\n");
2012                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2013                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
2014         }
2015
2016         if (bo->mem.mem_type == mem_type)
2017                 ret = drm_bo_evict(bo, mem_type, 0);
2018
2019         if (ret) {
2020                 if (allow_errors) {
2021                         goto out;
2022                 } else {
2023                         ret = 0;
2024                         DRM_ERROR("Cleanup eviction failed\n");
2025                 }
2026         }
2027
2028       out:
2029         mutex_unlock(&bo->mutex);
2030         return ret;
2031 }
2032
2033
2034 static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
2035                                          int pinned_list)
2036 {
2037         if (pinned_list)
2038                 return list_entry(list, drm_buffer_object_t, pinned_lru);
2039         else
2040                 return list_entry(list, drm_buffer_object_t, lru);
2041 }
2042
2043 /*
2044  * dev->struct_mutex locked.
2045  */
2046
2047 static int drm_bo_force_list_clean(drm_device_t * dev,
2048                                    struct list_head *head,
2049                                    unsigned mem_type,
2050                                    int free_pinned,
2051                                    int allow_errors,
2052                                    int pinned_list)
2053 {
2054         struct list_head *list, *next, *prev;
2055         drm_buffer_object_t *entry, *nentry;
2056         int ret;
2057         int do_restart;
2058
2059         /*
2060          * The list traversal is a bit odd here, because an item may
2061          * disappear from the list when we release the struct_mutex or
2062          * when we decrease the usage count. Also we're not guaranteed
2063          * to drain pinned lists, so we can't always restart.
2064          */
2065
2066 restart:
2067         nentry = NULL;
2068         list_for_each_safe(list, next, head) {
2069                 prev = list->prev;
2070
2071                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2072                 atomic_inc(&entry->usage);
2073                 if (nentry) {
2074                         atomic_dec(&nentry->usage);
2075                         nentry = NULL;
2076                 }
2077
2078                 /*
2079                  * Protect the next item from destruction, so we can check
2080                  * its list pointers later on.
2081                  */
2082
2083                 if (next != head) {
2084                         nentry = drm_bo_entry(next, pinned_list);
2085                         atomic_inc(&nentry->usage);
2086                 }
2087                 mutex_unlock(&dev->struct_mutex);
2088
2089                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2090                                         allow_errors);
2091                 mutex_lock(&dev->struct_mutex);
2092
2093                 drm_bo_usage_deref_locked(entry);
2094                 if (ret)
2095                         return ret;
2096
2097                 /*
2098                  * Has the next item disappeared from the list?
2099                  */
2100
2101                 do_restart = ((next->prev != list) && (next->prev != prev));
2102
2103                 if (nentry != NULL && do_restart) {
2104                         drm_bo_usage_deref_locked(nentry);
2105                         nentry = NULL;
2106                 }
2107
2108                 if (do_restart)
2109                         goto restart;
2110         }
2111         return 0;
2112 }
2113
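/*
 * Take down the memory manager for @mem_type: evict all buffers from
 * the LRU and pinned lists and tear down the underlying drm_mm range
 * manager if it is clean. Called with dev->struct_mutex held.
 */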
2114 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
2115 {
2116         drm_buffer_manager_t *bm = &dev->bm;
2117         drm_mem_type_manager_t *man = &bm->man[mem_type];
2118         int ret = -EINVAL;
2119
2120         if (mem_type >= DRM_BO_MEM_TYPES) {
2121                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2122                 return ret;
2123         }
2124
2125         if (!man->has_type) {
2126                 DRM_ERROR("Trying to take down uninitialized "
2127                           "memory manager type\n");
2128                 return ret;
2129         }
2130         man->use_type = 0;
2131         man->has_type = 0;
2132
2133         ret = 0;
2134         if (mem_type > 0) {
2135
2136                 drm_bo_clean_unfenced(dev);
2137                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2138                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2139
2140                 if (drm_mm_clean(&man->manager)) {
2141                         drm_mm_takedown(&man->manager);
2142                 } else {
2143                         ret = -EBUSY;
2144                 }
2145         }
2146
2147         return ret;
2148 }
2149
2150 /**
2151  * Evict all buffers of a particular mem_type, but leave memory manager
2152  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2153  * point since we have the hardware lock.
2154  */
2155
2156 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
2157 {
2158         int ret;
2159         drm_buffer_manager_t *bm = &dev->bm;
2160         drm_mem_type_manager_t *man = &bm->man[mem_type];
2161
2162         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2163                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2164                 return -EINVAL;
2165         }
2166
2167         if (!man->has_type) {
2168                 DRM_ERROR("Memory type %u has not been initialized.\n",
2169                           mem_type);
2170                 return 0;
2171         }
2172
2173         drm_bo_clean_unfenced(dev);
2174         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2175         if (ret)
2176                 return ret;
2177         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2178
2179         return ret;
2180 }
2181
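/*
 * Initialize the memory manager for memory type @type, backed by a
 * drm_mm range of @p_size pages starting at page offset @p_offset.
 * DRM_BO_MEM_LOCAL needs no range manager, so the size is ignored for
 * it. Callers hold dev->struct_mutex. A driver bringing up an aperture
 * type might do something like the following (illustrative sketch only;
 * the memory type and the aperture size are driver specific):
 *
 *     ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
 *                          aperture_size >> PAGE_SHIFT);
 */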
2182 int drm_bo_init_mm(drm_device_t * dev,
2183                    unsigned type,
2184                    unsigned long p_offset, unsigned long p_size)
2185 {
2186         drm_buffer_manager_t *bm = &dev->bm;
2187         int ret = -EINVAL;
2188         drm_mem_type_manager_t *man;
2189
2190         if (type >= DRM_BO_MEM_TYPES) {
2191                 DRM_ERROR("Illegal memory type %d\n", type);
2192                 return ret;
2193         }
2194
2195         man = &bm->man[type];
2196         if (man->has_type) {
2197                 DRM_ERROR("Memory manager already initialized for type %d\n",
2198                           type);
2199                 return ret;
2200         }
2201
2202         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2203         if (ret)
2204                 return ret;
2205
2206         ret = 0;
2207         if (type != DRM_BO_MEM_LOCAL) {
2208                 if (!p_size) {
2209                         DRM_ERROR("Zero size memory manager type %d\n", type);
2210                         return -EINVAL;
2211                 }
2212                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2213                 if (ret)
2214                         return ret;
2215         }
2216         man->has_type = 1;
2217         man->use_type = 1;
2218
2219         INIT_LIST_HEAD(&man->lru);
2220         INIT_LIST_HEAD(&man->pinned);
2221
2222         return 0;
2223 }
2224 EXPORT_SYMBOL(drm_bo_init_mm);
2225
2226 /*
2227  * This is called from lastclose, so we don't need to bother about
2228  * any clients still running when we set the initialized flag to zero.
2229  */
2230
2231 int drm_bo_driver_finish(drm_device_t * dev)
2232 {
2233         drm_buffer_manager_t *bm = &dev->bm;
2234         int ret = 0;
2235         unsigned i = DRM_BO_MEM_TYPES;
2236         drm_mem_type_manager_t *man;
2237
2238         mutex_lock(&dev->bm.init_mutex);
2239         mutex_lock(&dev->struct_mutex);
2240
2241         if (!bm->initialized)
2242                 goto out;
2243         bm->initialized = 0;
2244
2245         while (i--) {
2246                 man = &bm->man[i];
2247                 if (man->has_type) {
2248                         man->use_type = 0;
2249                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2250                                 ret = -EBUSY;
2251                                 DRM_ERROR("DRM memory manager type %d "
2252                                           "is not clean.\n", i);
2253                         }
2254                         man->has_type = 0;
2255                 }
2256         }
2257         mutex_unlock(&dev->struct_mutex);
2258
2259         if (!cancel_delayed_work(&bm->wq)) {
2260                 flush_scheduled_work();
2261         }
2262         mutex_lock(&dev->struct_mutex);
2263         drm_bo_delayed_delete(dev, 1);
2264         if (list_empty(&bm->ddestroy)) {
2265                 DRM_DEBUG("Delayed destroy list was clean\n");
2266         }
2267         if (list_empty(&bm->man[0].lru)) {
2268                 DRM_DEBUG("Swap list was clean\n");
2269         }
2270         if (list_empty(&bm->man[0].pinned)) {
2271                 DRM_DEBUG("NO_MOVE list was clean\n");
2272         }
2273         if (list_empty(&bm->unfenced)) {
2274                 DRM_DEBUG("Unfenced list was clean\n");
2275         }
2276       out:
2277         mutex_unlock(&dev->struct_mutex);
2278         mutex_unlock(&dev->bm.init_mutex);
2279         return ret;
2280 }
2281
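/*
 * One-time setup of the buffer manager at driver load: initialize the
 * system (local) memory type, the delayed-destroy workqueue and the
 * unfenced and delayed-destroy lists. Additional memory types are set
 * up later by the driver or through drm_mm_init_ioctl().
 */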
2282 int drm_bo_driver_init(drm_device_t * dev)
2283 {
2284         drm_bo_driver_t *driver = dev->driver->bo_driver;
2285         drm_buffer_manager_t *bm = &dev->bm;
2286         int ret = -EINVAL;
2287
2288         mutex_lock(&dev->bm.init_mutex);
2289         mutex_lock(&dev->struct_mutex);
2290         if (!driver)
2291                 goto out_unlock;
2292
2293         /*
2294          * Initialize the system memory buffer type.
2295          * Other types need to be driver / IOCTL initialized.
2296          */
2297
2298         ret = drm_bo_init_mm(dev, 0, 0, 0);
2299         if (ret)
2300                 goto out_unlock;
2301
2302 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2303         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2304 #else
2305         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2306 #endif
2307         bm->initialized = 1;
2308         bm->nice_mode = 1;
2309         atomic_set(&bm->count, 0);
2310         bm->cur_pages = 0;
2311         INIT_LIST_HEAD(&bm->unfenced);
2312         INIT_LIST_HEAD(&bm->ddestroy);
2313       out_unlock:
2314         mutex_unlock(&dev->struct_mutex);
2315         mutex_unlock(&dev->bm.init_mutex);
2316         return ret;
2317 }
2318
2319 EXPORT_SYMBOL(drm_bo_driver_init);
2320
2321 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
2322 {
2323         DRM_DEVICE;
2324         struct drm_mm_init_arg arg;
2325         drm_buffer_manager_t *bm = &dev->bm;
2326         drm_bo_driver_t *driver = dev->driver->bo_driver;
2327         int ret;
2328
2329         if (!driver) {
2330                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2331                 return -EINVAL;
2332         }
2333
2334         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2335         ret = -EINVAL;
2336         if (arg.magic != DRM_BO_INIT_MAGIC) {
2337                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2338                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2339                 return -EINVAL;
2340         }
2341         if (arg.major != DRM_BO_INIT_MAJOR) {
2342                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2343                           "\tversions don't match. Got %d, expected %d.\n",
2344                           arg.major, DRM_BO_INIT_MAJOR);
2345                 return -EINVAL;
2346         }
2347         if (arg.minor > DRM_BO_INIT_MINOR) {
2348                 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2349                           "\tlibdrm buffer object interface version is %d.%d.\n"
2350                           "\tkernel DRM buffer object interface version is %d.%d.\n",
2351                           arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2352                 return -EINVAL;
2353         }
2354
2355         mutex_lock(&dev->bm.init_mutex);
2356         mutex_lock(&dev->struct_mutex);
2357         if (!bm->initialized) {
2358                 DRM_ERROR("DRM memory manager was not initialized.\n");
2359                 goto out;
2360         }
2361         if (arg.mem_type == 0) {
2362                 DRM_ERROR("System memory buffers already initialized.\n");
2363                 goto out;
2364         }
2365         ret = drm_bo_init_mm(dev, arg.mem_type,
2366                              arg.p_offset, arg.p_size);
2367
2368 out:
2369         mutex_unlock(&dev->struct_mutex);
2370         mutex_unlock(&dev->bm.init_mutex);
2371         if (ret)
2372                 return ret;
2373
2374         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2375         return 0;
2376 }
2377
2378 int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS)
2379 {
2380         DRM_DEVICE;
2381         struct drm_mm_type_arg arg;
2382         drm_buffer_manager_t *bm = &dev->bm;
2383         drm_bo_driver_t *driver = dev->driver->bo_driver;
2384         int ret;
2385
2386         if (!driver) {
2387                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2388                 return -EINVAL;
2389         }
2390
2391         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2392
2393         LOCK_TEST_WITH_RETURN(dev, filp);
2394         mutex_lock(&dev->bm.init_mutex);
2395         mutex_lock(&dev->struct_mutex);
2396         ret = -EINVAL;
2397         if (!bm->initialized) {
2398                 DRM_ERROR("DRM memory manager was not initialized\n");
2399                 goto out;
2400         }
2401         if (arg.mem_type == 0) {
2402                 DRM_ERROR("No takedown for System memory buffers.\n");
2403                 goto out;
2404         }
2405         ret = 0;
2406         if (drm_bo_clean_mm(dev, arg.mem_type)) {
2407                 DRM_ERROR("Memory manager type %d not clean. "
2408                           "Delaying takedown\n", arg.mem_type);
2409         }
2410 out:
2411         mutex_unlock(&dev->struct_mutex);
2412         mutex_unlock(&dev->bm.init_mutex);
2413         if (ret)
2414                 return ret;
2415
2416         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2417         return 0;
2418 }
2419
2420 int drm_mm_lock_ioctl(DRM_IOCTL_ARGS)
2421 {
2422         DRM_DEVICE;
2423         struct drm_mm_type_arg arg;
2424         drm_bo_driver_t *driver = dev->driver->bo_driver;
2425         int ret;
2426
2427         if (!driver) {
2428                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2429                 return -EINVAL;
2430         }
2431
2432         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2433
2434         LOCK_TEST_WITH_RETURN(dev, filp);
2435         mutex_lock(&dev->bm.init_mutex);
2436         mutex_lock(&dev->struct_mutex);
2437         ret = drm_bo_lock_mm(dev, arg.mem_type);
2438         mutex_unlock(&dev->struct_mutex);
2439         mutex_unlock(&dev->bm.init_mutex);
2440         if (ret)
2441                 return ret;
2442
2443         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2444         return 0;
2445 }
2446
2447 int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS)
2448 {
2449         DRM_DEVICE;
2450         struct drm_mm_type_arg arg;
2451         drm_bo_driver_t *driver = dev->driver->bo_driver;
2452         int ret;
2453
2454         if (!driver) {
2455                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2456                 return -EINVAL;
2457         }
2458
2459         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2460         LOCK_TEST_WITH_RETURN(dev, filp);
2461         mutex_lock(&dev->bm.init_mutex);
2462         mutex_lock(&dev->struct_mutex);
2463         ret = 0;
2464
2465         mutex_unlock(&dev->struct_mutex);
2466         mutex_unlock(&dev->bm.init_mutex);
2467         if (ret)
2468                 return ret;
2469
2470         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2471         return 0;
2472 }
2473
2474 /*
2475  * buffer object vm functions.
2476  */
2477
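/*
 * Return 1 when the memory region must be accessed through a PCI
 * aperture: always for fixed memory types, and for non-local, non-CMA,
 * non-cached regions otherwise.
 */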
2478 int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
2479 {
2480         drm_buffer_manager_t *bm = &dev->bm;
2481         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2482
2483         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2484                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2485                         return 0;
2486
2487                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2488                         return 0;
2489
2490                 if (mem->flags & DRM_BO_FLAG_CACHED)
2491                         return 0;
2492         }
2493         return 1;
2494 }
2495
2496 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2497
2498 /**
2499  * Get the PCI offset for the buffer object memory.
2500  *
2501  * \param bo The buffer object.
2502  * \param bus_base On return the base of the PCI region
2503  * \param bus_offset On return the byte offset into the PCI region
2504  * \param bus_size On return the byte size of the buffer object or zero if
2505  *     the buffer object memory is not accessible through a PCI region.
2506  * \return Failure indication.
2507  *
2508  * Returns -EINVAL if the buffer object is currently not mappable.
2509  * Otherwise returns zero.
2510  */
2511
2512 int drm_bo_pci_offset(drm_device_t * dev,
2513                       drm_bo_mem_reg_t * mem,
2514                       unsigned long *bus_base,
2515                       unsigned long *bus_offset, unsigned long *bus_size)
2516 {
2517         drm_buffer_manager_t *bm = &dev->bm;
2518         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2519
2520         *bus_size = 0;
2521         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2522                 return -EINVAL;
2523
2524         if (drm_mem_reg_is_pci(dev, mem)) {
2525                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2526                 *bus_size = mem->num_pages << PAGE_SHIFT;
2527                 *bus_base = man->io_offset;
2528         }
2529
2530         return 0;
2531 }
2532
2533 /**
2534  * Kill all user-space virtual mappings of this buffer object.
2535  *
2536  * \param bo The buffer object.
2537  *
2538  * Call bo->mutex locked.
2539  */
2540
2541 void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
2542 {
2543         drm_device_t *dev = bo->dev;
2544         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2545         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2546
2547         if (!dev->dev_mapping)
2548                 return;
2549
2550         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2551 }
2552
2553 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
2554 {
2555         drm_map_list_t *list = &bo->map_list;
2556         drm_local_map_t *map;
2557         drm_device_t *dev = bo->dev;
2558
2559         if (list->user_token) {
2560                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2561                 list->user_token = 0;
2562         }
2563         if (list->file_offset_node) {
2564                 drm_mm_put_block(list->file_offset_node);
2565                 list->file_offset_node = NULL;
2566         }
2567
2568         map = list->map;
2569         if (!map)
2570                 return;
2571
2572         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2573         list->map = NULL;
2574         list->user_token = 0ULL;
2575         drm_bo_usage_deref_locked(bo);
2576 }
2577
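/*
 * Set up the pseudo-mmap region for a buffer object: allocate a file
 * offset from the device offset manager, hash it so the mmap path can
 * find the object, and publish the resulting user token.
 * Called with dev->struct_mutex held.
 */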
2578 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
2579 {
2580         drm_map_list_t *list = &bo->map_list;
2581         drm_local_map_t *map;
2582         drm_device_t *dev = bo->dev;
2583
2584         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2585         if (!list->map)
2586                 return -ENOMEM;
2587
2588         map = list->map;
2589         map->offset = 0;
2590         map->type = _DRM_TTM;
2591         map->flags = _DRM_REMOVABLE;
2592         map->size = bo->mem.num_pages * PAGE_SIZE;
2593         atomic_inc(&bo->usage);
2594         map->handle = (void *)bo;
2595
2596         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2597                                                     bo->mem.num_pages, 0, 0);
2598
2599         if (!list->file_offset_node) {
2600                 drm_bo_takedown_vm_locked(bo);
2601                 return -ENOMEM;
2602         }
2603
2604         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2605                                                   bo->mem.num_pages, 0);
2606
2607         list->hash.key = list->file_offset_node->start;
2608         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2609                 drm_bo_takedown_vm_locked(bo);
2610                 return -ENOMEM;
2611         }
2612
2613         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2614
2615         return 0;
2616 }