[platform/upstream/libdrm.git] / linux-core / drm_bo.c  (commit 1bdc6fef395a65b2e8628ab1f4d75380140b7931)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those we need
44  * both bo->mutex and dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
51
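/*
 * Illustrative sketch, not part of the original drm_bo.c: the locking order
 * described above, applied to moving a buffer to the tail of its LRU list.
 * The function name is hypothetical; the locks, fields and types are the
 * ones used throughout this file.
 */
static void drm_bo_example_touch_lru(drm_buffer_object_t * bo)
{
        drm_device_t *dev = bo->dev;
        drm_mem_type_manager_t *man;

        mutex_lock(&bo->mutex);               /* bo->mutex first ...         */
        mutex_lock(&dev->struct_mutex);       /* ... then dev->struct_mutex. */

        man = &dev->bm.man[bo->mem.mem_type];
        list_del(&bo->lru);
        /* drm_bo_add_to_lru() below additionally skips pinned buffers. */
        list_add_tail(&bo->lru, &man->lru);

        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&bo->mutex);
}
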
52 static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
53 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
54 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
55 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
56
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
67 {
68         drm_mem_type_manager_t *man;
69
70         man = &bo->dev->bm.man[bo->pinned_mem_type];
71         list_add_tail(&bo->pinned_lru, &man->pinned);
72 }
73
74 void drm_bo_add_to_lru(drm_buffer_object_t * bo)
75 {
76         drm_mem_type_manager_t *man;
77
78         if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
79                 man = &bo->dev->bm.man[bo->mem.mem_type];
80                 list_add_tail(&bo->lru, &man->lru);
81         } else {
82                 INIT_LIST_HEAD(&bo->lru);
83         }
84 }
85
86 static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
87 {
88 #ifdef DRM_ODD_MM_COMPAT
89         int ret;
90
91         ret = drm_bo_lock_kmm(bo);
92         if (ret)
93                 return ret;
94         drm_bo_unmap_virtual(bo);
95         if (old_is_pci)
96                 drm_bo_finish_unmap(bo);
97 #else
98         drm_bo_unmap_virtual(bo);
99 #endif
100         return 0;
101 }
102
103 static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
104 {
105 #ifdef DRM_ODD_MM_COMPAT
106         int ret;
107
108         ret = drm_bo_remap_bound(bo);
109         if (ret) {
110                 DRM_ERROR("Failed to remap a bound buffer object.\n"
111                           "\tThis might cause a sigbus later.\n");
112         }
113         drm_bo_unlock_kmm(bo);
114 #endif
115 }
116
117 /*
118  * Call bo->mutex locked.
119  */
120
121 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
122 {
123         drm_device_t *dev = bo->dev;
124         int ret = 0;
125         bo->ttm = NULL;
126
127         switch (bo->type) {
128         case drm_bo_type_dc:
129                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
130                 if (!bo->ttm)
131                         ret = -ENOMEM;
132                 break;
133         case drm_bo_type_user:
134         case drm_bo_type_fake:
135                 break;
136         default:
137                 DRM_ERROR("Illegal buffer object type\n");
138                 ret = -EINVAL;
139                 break;
140         }
141
142         return ret;
143 }
144
145 static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
146                                   drm_bo_mem_reg_t * mem,
147                                   int evict, int no_wait)
148 {
149         drm_device_t *dev = bo->dev;
150         drm_buffer_manager_t *bm = &dev->bm;
151         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
152         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
153         drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
154         drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
155         int ret = 0;
156
157         if (old_is_pci || new_is_pci)
158                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
159         if (ret)
160                 return ret;
161
162         /*
163          * Create and bind a ttm if required.
164          */
165
166         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
167                 ret = drm_bo_add_ttm(bo);
168                 if (ret)
169                         goto out_err;
170
171                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
172                         ret = drm_bind_ttm(bo->ttm, new_man->flags &
173                                            DRM_BO_FLAG_CACHED,
174                                            mem->mm_node->start);
175                         if (ret)
176                                 goto out_err;
177                 }
178         }
179
180         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
181
182                 drm_bo_mem_reg_t *old_mem = &bo->mem;
183                 uint32_t save_flags = old_mem->flags;
184                 uint32_t save_mask = old_mem->mask;
185
186                 *old_mem = *mem;
187                 mem->mm_node = NULL;
188                 old_mem->mask = save_mask;
189                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
190
191         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
192                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
193
194                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
195
196         } else if (dev->driver->bo_driver->move) {
197                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
198
199         } else {
200
201                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
202
203         }
204
205         if (ret)
206                 goto out_err;
207
208         if (old_is_pci || new_is_pci)
209                 drm_bo_vm_post_move(bo);
210
211         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
212                 ret =
213                     dev->driver->bo_driver->invalidate_caches(dev,
214                                                               bo->mem.flags);
215                 if (ret)
216                         DRM_ERROR("Cannot flush read caches\n");
217         }
218
219         DRM_FLAG_MASKED(bo->priv_flags,
220                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
221                         _DRM_BO_FLAG_EVICTED);
222
223         if (bo->mem.mm_node)
224                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
225
226         return 0;
227
228       out_err:
229         if (old_is_pci || new_is_pci)
230                 drm_bo_vm_post_move(bo);
231
232         new_man = &bm->man[bo->mem.mem_type];
233         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
234                 drm_ttm_unbind(bo->ttm);
235                 drm_destroy_ttm(bo->ttm);
236                 bo->ttm = NULL;
237         }
238
239         return ret;
240 }
241
242 /*
243  * Call bo->mutex locked.
244  * Wait until the buffer is idle.
245  */
246
247 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
248                 int no_wait)
249 {
250
251         drm_fence_object_t *fence = bo->fence;
252         int ret;
253
254         if (fence) {
255                 drm_device_t *dev = bo->dev;
256                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
257                         drm_fence_usage_deref_unlocked(dev, fence);
258                         bo->fence = NULL;
259                         return 0;
260                 }
261                 if (no_wait) {
262                         return -EBUSY;
263                 }
264                 ret =
265                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
266                                           bo->fence_type);
267                 if (ret)
268                         return ret;
269
270                 drm_fence_usage_deref_unlocked(dev, fence);
271                 bo->fence = NULL;
272
273         }
274         return 0;
275 }
276
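/*
 * Illustrative sketch, not part of the original drm_bo.c: how the three
 * flags of drm_bo_wait() are typically combined.  The wrapper name is
 * hypothetical; callers must hold bo->mutex, which the wrapper takes here.
 */
static int drm_bo_example_wait_idle(drm_buffer_object_t * bo, int no_wait)
{
        int ret;

        mutex_lock(&bo->mutex);
        /* lazy polling, honour signals, and block unless no_wait is set */
        ret = drm_bo_wait(bo, 1, 0, no_wait);
        mutex_unlock(&bo->mutex);

        return ret;
}
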
277 static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
278 {
279         drm_device_t *dev = bo->dev;
280         drm_buffer_manager_t *bm = &dev->bm;
281
282         if (bo->fence) {
283                 if (bm->nice_mode) {
284                         unsigned long _end = jiffies + 3 * DRM_HZ;
285                         int ret;
286                         do {
287                                 ret = drm_bo_wait(bo, 0, 1, 0);
288                                 if (ret && allow_errors)
289                                         return ret;
290
291                         } while (ret && !time_after_eq(jiffies, _end));
292
293                         if (bo->fence) {
294                                 bm->nice_mode = 0;
295                                 DRM_ERROR("Detected GPU lockup or "
296                                           "fence driver was taken down. "
297                                           "Evicting buffer.\n");
298                         }
299                 }
300                 if (bo->fence) {
301                         drm_fence_usage_deref_unlocked(dev, bo->fence);
302                         bo->fence = NULL;
303                 }
304         }
305         return 0;
306 }
307
308 /*
309  * Call dev->struct_mutex locked.
310  * Attempts to remove all private references to a buffer by expiring its
311  * fence object and removing it from LRU lists and memory managers.
312  */
313
314 static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
315 {
316         drm_device_t *dev = bo->dev;
317         drm_buffer_manager_t *bm = &dev->bm;
318
319         atomic_inc(&bo->usage);
320         mutex_unlock(&dev->struct_mutex);
321         mutex_lock(&bo->mutex);
322
323         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
324
325         if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
326                 drm_fence_usage_deref_locked(dev, bo->fence);
327                 bo->fence = NULL;
328         }
329
330         if (bo->fence && remove_all)
331                 (void)drm_bo_expire_fence(bo, 0);
332
333         mutex_lock(&dev->struct_mutex);
334
335         if (!atomic_dec_and_test(&bo->usage)) {
336                 goto out;
337         }
338
339         if (!bo->fence) {
340                 list_del_init(&bo->lru);
341                 if (bo->mem.mm_node) {
342                         drm_mm_put_block(bo->mem.mm_node);
343                         if (bo->pinned_node == bo->mem.mm_node)
344                                 bo->pinned_node = NULL;
345                         bo->mem.mm_node = NULL;
346                 }
347                 list_del_init(&bo->pinned_lru);
348                 if (bo->pinned_node) {
349                         drm_mm_put_block(bo->pinned_node);
350                         bo->pinned_node = NULL;
351                 }
352                 list_del_init(&bo->ddestroy);
353                 mutex_unlock(&bo->mutex);
354                 drm_bo_destroy_locked(bo);
355                 return;
356         }
357
358         if (list_empty(&bo->ddestroy)) {
359                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
360                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
361                 schedule_delayed_work(&bm->wq,
362                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
363         }
364
365       out:
366         mutex_unlock(&bo->mutex);
367         return;
368 }
369
370 /*
371  * Verify that refcount is 0 and that there are no internal references
372  * to the buffer object. Then destroy it.
373  */
374
375 static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
376 {
377         drm_device_t *dev = bo->dev;
378         drm_buffer_manager_t *bm = &dev->bm;
379
380         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
381             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
382             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
383                 if (bo->fence != NULL) {
384                         DRM_ERROR("Fence was non-zero.\n");
385                         drm_bo_cleanup_refs(bo, 0);
386                         return;
387                 }
388
389 #ifdef DRM_ODD_MM_COMPAT
390                 BUG_ON(!list_empty(&bo->vma_list));
391                 BUG_ON(!list_empty(&bo->p_mm_list));
392 #endif
393
394                 if (bo->ttm) {
395                         drm_ttm_unbind(bo->ttm);
396                         drm_destroy_ttm(bo->ttm);
397                         bo->ttm = NULL;
398                 }
399
400                 atomic_dec(&bm->count);
401
402                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
403
404                 return;
405         }
406
407         /*
408  * Something is still holding references to the buffer object.
409          * Get rid of those references.
410          */
411
412         drm_bo_cleanup_refs(bo, 0);
413
414         return;
415 }
416
417 /*
418  * Call dev->struct_mutex locked.
419  */
420
421 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
422 {
423         drm_buffer_manager_t *bm = &dev->bm;
424
425         drm_buffer_object_t *entry, *nentry;
426         struct list_head *list, *next;
427
428         list_for_each_safe(list, next, &bm->ddestroy) {
429                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
430
431                 nentry = NULL;
432                 if (next != &bm->ddestroy) {
433                         nentry = list_entry(next, drm_buffer_object_t,
434                                             ddestroy);
435                         atomic_inc(&nentry->usage);
436                 }
437
438                 drm_bo_cleanup_refs(entry, remove_all);
439
440                 if (nentry) {
441                         atomic_dec(&nentry->usage);
442                 }
443         }
444 }
445
446 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
447 static void drm_bo_delayed_workqueue(void *data)
448 #else
449 static void drm_bo_delayed_workqueue(struct work_struct *work)
450 #endif
451 {
452 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
453         drm_device_t *dev = (drm_device_t *) data;
454         drm_buffer_manager_t *bm = &dev->bm;
455 #else
456         drm_buffer_manager_t *bm =
457             container_of(work, drm_buffer_manager_t, wq.work);
458         drm_device_t *dev = container_of(bm, drm_device_t, bm);
459 #endif
460
461         DRM_DEBUG("Delayed delete Worker\n");
462
463         mutex_lock(&dev->struct_mutex);
464         if (!bm->initialized) {
465                 mutex_unlock(&dev->struct_mutex);
466                 return;
467         }
468         drm_bo_delayed_delete(dev, 0);
469         if (bm->initialized && !list_empty(&bm->ddestroy)) {
470                 schedule_delayed_work(&bm->wq,
471                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
472         }
473         mutex_unlock(&dev->struct_mutex);
474 }
475
476 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
477 {
478         if (atomic_dec_and_test(&bo->usage)) {
479                 drm_bo_destroy_locked(bo);
480         }
481 }
482
483 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
484 {
485         drm_buffer_object_t *bo =
486             drm_user_object_entry(uo, drm_buffer_object_t, base);
487
488         drm_bo_takedown_vm_locked(bo);
489         drm_bo_usage_deref_locked(bo);
490 }
491
492 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
493 {
494         drm_device_t *dev = bo->dev;
495
496         if (atomic_dec_and_test(&bo->usage)) {
497                 mutex_lock(&dev->struct_mutex);
498                 if (atomic_read(&bo->usage) == 0)
499                         drm_bo_destroy_locked(bo);
500                 mutex_unlock(&dev->struct_mutex);
501         }
502 }
503
504 /*
505  * Note. The caller has to register (if applicable)
506  * and deregister fence object usage.
507  */
508
509 int drm_fence_buffer_objects(drm_file_t * priv,
510                              struct list_head *list,
511                              uint32_t fence_flags,
512                              drm_fence_object_t * fence,
513                              drm_fence_object_t ** used_fence)
514 {
515         drm_device_t *dev = priv->head->dev;
516         drm_buffer_manager_t *bm = &dev->bm;
517
518         drm_buffer_object_t *entry;
519         uint32_t fence_type = 0;
520         int count = 0;
521         int ret = 0;
522         struct list_head *l;
523         LIST_HEAD(f_list);
524
525         mutex_lock(&dev->struct_mutex);
526
527         if (!list)
528                 list = &bm->unfenced;
529
530         list_for_each_entry(entry, list, lru) {
531                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
532                 fence_type |= entry->fence_type;
533                 if (entry->fence_class != 0) {
534                         DRM_ERROR("Fence class %d is not implemented yet.\n",
535                                   entry->fence_class);
536                         ret = -EINVAL;
537                         goto out;
538                 }
539                 count++;
540         }
541
542         if (!count) {
543                 ret = -EINVAL;
544                 goto out;
545         }
546
547         /*
548  * Transfer to a local list before we release dev->struct_mutex, so that we
549  * don't pick up any new unfenced objects while fencing the ones we
550  * already have.
551          */
552
553         list_splice_init(list, &f_list);
554
555         if (fence) {
556                 if ((fence_type & fence->type) != fence_type) {
557                         DRM_ERROR("Given fence doesn't match buffers "
558                                   "on unfenced list.\n");
559                         ret = -EINVAL;
560                         goto out;
561                 }
562         } else {
563                 mutex_unlock(&dev->struct_mutex);
564                 ret = drm_fence_object_create(dev, 0, fence_type,
565                                               fence_flags | DRM_FENCE_FLAG_EMIT,
566                                               &fence);
567                 mutex_lock(&dev->struct_mutex);
568                 if (ret)
569                         goto out;
570         }
571
572         count = 0;
573         l = f_list.next;
574         while (l != &f_list) {
575                 prefetch(l->next);
576                 entry = list_entry(l, drm_buffer_object_t, lru);
577                 atomic_inc(&entry->usage);
578                 mutex_unlock(&dev->struct_mutex);
579                 mutex_lock(&entry->mutex);
580                 mutex_lock(&dev->struct_mutex);
581                 list_del_init(l);
582                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
583                         count++;
584                         if (entry->fence)
585                                 drm_fence_usage_deref_locked(dev, entry->fence);
586                         entry->fence = fence;
587                         DRM_FLAG_MASKED(entry->priv_flags, 0,
588                                         _DRM_BO_FLAG_UNFENCED);
589                         DRM_WAKEUP(&entry->event_queue);
590                         drm_bo_add_to_lru(entry);
591                 }
592                 mutex_unlock(&entry->mutex);
593                 drm_bo_usage_deref_locked(entry);
594                 l = f_list.next;
595         }
596         atomic_add(count, &fence->usage);
597         DRM_DEBUG("Fenced %d buffers\n", count);
598       out:
599         mutex_unlock(&dev->struct_mutex);
600         *used_fence = fence;
601         return ret;
602 }
603
604 EXPORT_SYMBOL(drm_fence_buffer_objects);
605
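/*
 * Illustrative sketch, not part of the original drm_bo.c: one way a
 * driver's submission path might use drm_fence_buffer_objects().  Passing
 * list == NULL fences every buffer currently on bm->unfenced; passing
 * fence == NULL asks the function to create and emit a new fence.  The
 * wrapper name and the zero fence_flags are assumptions.
 */
static int drm_bo_example_fence_unfenced(drm_file_t * priv)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence = NULL;
        int ret;

        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
        if (ret)
                return ret;

        /*
         * The fence is now attached to the previously unfenced buffers;
         * drop the usage reference handed back to us (see the note above
         * about registering and deregistering fence usage).
         */
        drm_fence_usage_deref_unlocked(dev, fence);
        return 0;
}
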
606 /*
607  * bo->mutex locked
608  */
609
610 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
611                         int no_wait)
612 {
613         int ret = 0;
614         drm_device_t *dev = bo->dev;
615         drm_bo_mem_reg_t evict_mem;
616
617         /*
618          * Someone might have modified the buffer before we took the buffer mutex.
619          */
620
621         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
622                 goto out;
623         if (bo->mem.mem_type != mem_type)
624                 goto out;
625
626         ret = drm_bo_wait(bo, 0, 0, no_wait);
627
628         if (ret && ret != -EAGAIN) {
629                 DRM_ERROR("Failed to expire fence before "
630                           "buffer eviction.\n");
631                 goto out;
632         }
633
634         evict_mem = bo->mem;
635         evict_mem.mm_node = NULL;
636
637         if (bo->type == drm_bo_type_fake) {
638                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
639                 bo->mem.mm_node = NULL;
640                 goto out1;
641         }
642
643         evict_mem = bo->mem;
644         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
645         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
646
647         if (ret) {
648                 if (ret != -EAGAIN)
649                         DRM_ERROR("Failed to find memory space for "
650                                   "buffer 0x%p eviction.\n", bo);
651                 goto out;
652         }
653
654         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
655
656         if (ret) {
657                 if (ret != -EAGAIN)
658                         DRM_ERROR("Buffer eviction failed\n");
659                 goto out;
660         }
661
662       out1:
663         mutex_lock(&dev->struct_mutex);
664         if (evict_mem.mm_node) {
665                 if (evict_mem.mm_node != bo->pinned_node)
666                         drm_mm_put_block(evict_mem.mm_node);
667                 evict_mem.mm_node = NULL;
668         }
669         list_del(&bo->lru);
670         drm_bo_add_to_lru(bo);
671         mutex_unlock(&dev->struct_mutex);
672
673         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
674                         _DRM_BO_FLAG_EVICTED);
675
676       out:
677         return ret;
678 }
679
680 static int drm_bo_mem_force_space(drm_device_t * dev,
681                                   drm_bo_mem_reg_t * mem,
682                                   uint32_t mem_type, int no_wait)
683 {
684         drm_mm_node_t *node;
685         drm_buffer_manager_t *bm = &dev->bm;
686         drm_buffer_object_t *entry;
687         drm_mem_type_manager_t *man = &bm->man[mem_type];
688         struct list_head *lru;
689         unsigned long num_pages = mem->num_pages;
690         int ret;
691
692         mutex_lock(&dev->struct_mutex);
693         do {
694                 node = drm_mm_search_free(&man->manager, num_pages,
695                                           mem->page_alignment, 1);
696                 if (node)
697                         break;
698
699                 lru = &man->lru;
700                 if (lru->next == lru)
701                         break;
702
703                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
704                 atomic_inc(&entry->usage);
705                 mutex_unlock(&dev->struct_mutex);
706                 mutex_lock(&entry->mutex);
707                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
708
709                 ret = drm_bo_evict(entry, mem_type, no_wait);
710                 mutex_unlock(&entry->mutex);
711                 drm_bo_usage_deref_unlocked(entry);
712                 if (ret)
713                         return ret;
714                 mutex_lock(&dev->struct_mutex);
715         } while (1);
716
717         if (!node) {
718                 mutex_unlock(&dev->struct_mutex);
719                 return -ENOMEM;
720         }
721
722         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
723         mutex_unlock(&dev->struct_mutex);
724         mem->mm_node = node;
725         mem->mem_type = mem_type;
726         return 0;
727 }
728
729 static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
730                                 uint32_t mem_type,
731                                 uint32_t mask, uint32_t * res_mask)
732 {
733         uint32_t cur_flags = drm_bo_type_flags(mem_type);
734         uint32_t flag_diff;
735
736         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
737                 cur_flags |= DRM_BO_FLAG_CACHED;
738         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
739                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
740         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
741                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
742
743         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
744                 return 0;
745
746         if (mem_type == DRM_BO_MEM_LOCAL) {
747                 *res_mask = cur_flags;
748                 return 1;
749         }
750
751         flag_diff = (mask ^ cur_flags);
752         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
753             (!(mask & DRM_BO_FLAG_CACHED) ||
754              (mask & DRM_BO_FLAG_FORCE_CACHING)))
755                 return 0;
756
757         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
758             ((mask & DRM_BO_FLAG_MAPPABLE) ||
759              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
760                 return 0;
761
762         *res_mask = cur_flags;
763         return 1;
764 }
765
766 int drm_bo_mem_space(drm_buffer_object_t * bo,
767                      drm_bo_mem_reg_t * mem, int no_wait)
768 {
769         drm_device_t *dev = bo->dev;
770         drm_buffer_manager_t *bm = &dev->bm;
771         drm_mem_type_manager_t *man;
772
773         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
774         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
775         uint32_t i;
776         uint32_t mem_type = DRM_BO_MEM_LOCAL;
777         uint32_t cur_flags;
778         int type_found = 0;
779         int type_ok = 0;
780         int has_eagain = 0;
781         drm_mm_node_t *node = NULL;
782         int ret;
783
784         mem->mm_node = NULL;
785         for (i = 0; i < num_prios; ++i) {
786                 mem_type = prios[i];
787                 man = &bm->man[mem_type];
788
789                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
790                                                &cur_flags);
791
792                 if (!type_ok)
793                         continue;
794
795                 if (mem_type == DRM_BO_MEM_LOCAL)
796                         break;
797
798                 if ((mem_type == bo->pinned_mem_type) &&
799                     (bo->pinned_node != NULL)) {
800                         node = bo->pinned_node;
801                         break;
802                 }
803
804                 mutex_lock(&dev->struct_mutex);
805                 if (man->has_type && man->use_type) {
806                         type_found = 1;
807                         node = drm_mm_search_free(&man->manager, mem->num_pages,
808                                                   mem->page_alignment, 1);
809                         if (node)
810                                 node = drm_mm_get_block(node, mem->num_pages,
811                                                         mem->page_alignment);
812                 }
813                 mutex_unlock(&dev->struct_mutex);
814                 if (node)
815                         break;
816         }
817
818         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
819                 mem->mm_node = node;
820                 mem->mem_type = mem_type;
821                 mem->flags = cur_flags;
822                 return 0;
823         }
824
825         if (!type_found)
826                 return -EINVAL;
827
828         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
829         prios = dev->driver->bo_driver->mem_busy_prio;
830
831         for (i = 0; i < num_prios; ++i) {
832                 mem_type = prios[i];
833                 man = &bm->man[mem_type];
834
835                 if (!man->has_type)
836                         continue;
837
838                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
839                         continue;
840
841                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
842
843                 if (ret == 0) {
844                         mem->flags = cur_flags;
845                         return 0;
846                 }
847
848                 if (ret == -EAGAIN)
849                         has_eagain = 1;
850         }
851
852         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
853         return ret;
854 }
855
856 EXPORT_SYMBOL(drm_bo_mem_space);
857
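/*
 * Illustrative sketch, not part of the original drm_bo.c: the per-driver
 * placement priority arrays that drm_bo_mem_space() walks.  mem_type_prio
 * is tried first, against free space only; mem_busy_prio is used when
 * drm_bo_mem_force_space() has to evict.  The values below assume a
 * hypothetical TT-only driver and are not taken from any real driver.
 */
static uint32_t drm_bo_example_mem_type_prio[] = {
        DRM_BO_MEM_TT,          /* prefer bound (aperture) memory ... */
        DRM_BO_MEM_LOCAL,       /* ... then plain system pages.       */
};

static uint32_t drm_bo_example_mem_busy_prio[] = {
        DRM_BO_MEM_TT,
        DRM_BO_MEM_LOCAL,
};

/*
 * In the driver's drm_bo_driver_t this would be hooked up roughly as:
 *
 *      .mem_type_prio     = drm_bo_example_mem_type_prio,
 *      .num_mem_type_prio = ARRAY_SIZE(drm_bo_example_mem_type_prio),
 *      .mem_busy_prio     = drm_bo_example_mem_busy_prio,
 *      .num_mem_busy_prio = ARRAY_SIZE(drm_bo_example_mem_busy_prio),
 */
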
858 static int drm_bo_new_mask(drm_buffer_object_t * bo,
859                            uint32_t new_mask, uint32_t hint)
860 {
861         uint32_t new_props;
862
863         if (bo->type == drm_bo_type_user) {
864                 DRM_ERROR("User buffers are not supported yet\n");
865                 return -EINVAL;
866         }
867         if (bo->type == drm_bo_type_fake &&
868             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
869                 DRM_ERROR("Fake buffers must be pinned.\n");
870                 return -EINVAL;
871         }
872
873         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
874                 DRM_ERROR
875                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
876                      "processes\n");
877                 return -EPERM;
878         }
879
880         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
881                                 DRM_BO_FLAG_READ);
882
883         if (!new_props) {
884                 DRM_ERROR("Invalid buffer object rwx properties\n");
885                 return -EINVAL;
886         }
887
888         bo->mem.mask = new_mask;
889         return 0;
890 }
891
892 /*
893  * Call dev->struct_mutex locked.
894  */
895
896 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
897                                               uint32_t handle, int check_owner)
898 {
899         drm_user_object_t *uo;
900         drm_buffer_object_t *bo;
901
902         uo = drm_lookup_user_object(priv, handle);
903
904         if (!uo || (uo->type != drm_buffer_type)) {
905                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
906                 return NULL;
907         }
908
909         if (check_owner && priv != uo->owner) {
910                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
911                         return NULL;
912         }
913
914         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
915         atomic_inc(&bo->usage);
916         return bo;
917 }
918
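/*
 * Illustrative sketch, not part of the original drm_bo.c: the reference
 * pattern the ioctl helpers below all follow.  drm_lookup_buffer_object()
 * takes a usage reference under dev->struct_mutex; the caller drops it
 * again with drm_bo_usage_deref_unlocked().  The function name is made up.
 */
static int drm_bo_example_with_handle(drm_file_t * priv, uint32_t handle)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_object_t *bo;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        /* ... operate on the buffer object here ... */
        mutex_unlock(&bo->mutex);

        drm_bo_usage_deref_unlocked(bo);
        return 0;
}
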
919 /*
920  * Call bo->mutex locked.
921  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
922  * Unlike drm_bo_busy(), this does not flush fences.
923  */
924
925 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
926 {
927         drm_fence_object_t *fence = bo->fence;
928
929         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
930         if (fence) {
931                 drm_device_t *dev = bo->dev;
932                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
933                         drm_fence_usage_deref_unlocked(dev, fence);
934                         bo->fence = NULL;
935                         return 0;
936                 }
937                 return 1;
938         }
939         return 0;
940 }
941
942 /*
943  * Call bo->mutex locked.
944  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
945  */
946
947 static int drm_bo_busy(drm_buffer_object_t * bo)
948 {
949         drm_fence_object_t *fence = bo->fence;
950
951         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
952         if (fence) {
953                 drm_device_t *dev = bo->dev;
954                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
955                         drm_fence_usage_deref_unlocked(dev, fence);
956                         bo->fence = NULL;
957                         return 0;
958                 }
959                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
960                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
961                         drm_fence_usage_deref_unlocked(dev, fence);
962                         bo->fence = NULL;
963                         return 0;
964                 }
965                 return 1;
966         }
967         return 0;
968 }
969
970 static int drm_bo_read_cached(drm_buffer_object_t * bo)
971 {
972         int ret = 0;
973
974         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
975         if (bo->mem.mm_node)
976                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
977         return ret;
978 }
979
980 /*
981  * Wait until a buffer is unmapped.
982  */
983
984 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
985 {
986         int ret = 0;
987
988         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
989                 return -EBUSY;
990
991         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
992                     atomic_read(&bo->mapped) == -1);
993
994         if (ret == -EINTR)
995                 ret = -EAGAIN;
996
997         return ret;
998 }
999
1000 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
1001 {
1002         int ret;
1003
1004         mutex_lock(&bo->mutex);
1005         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1006         mutex_unlock(&bo->mutex);
1007         return ret;
1008 }
1009
1010 /*
1011  * Wait until a buffer scheduled to be fenced moves off the unfenced list.
1012  * Until then, we cannot really do anything with it except delete it.
1013  * The unfenced list is a PITA, and the operations
1014  * 1) validating
1015  * 2) submitting commands
1016  * 3) fencing
1017  * should really be one atomic operation.
1018  * We currently "solve" this problem by keeping the buffer "unfenced"
1019  * after validation but before fencing.
1020  */
1021
1022 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
1023                                 int eagain_if_wait)
1024 {
1025         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1026         unsigned long _end = jiffies + 3 * DRM_HZ;
1027
1028         if (ret && no_wait)
1029                 return -EBUSY;
1030         else if (!ret)
1031                 return 0;
1032
1033         do {
1034                 mutex_unlock(&bo->mutex);
1035                 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1036                             !drm_bo_check_unfenced(bo));
1037                 mutex_lock(&bo->mutex);
1038                 if (ret == -EINTR)
1039                         return -EAGAIN;
1040                 if (ret) {
1041                         DRM_ERROR
1042                             ("Error waiting for buffer to become fenced\n");
1043                         return ret;
1044                 }
1045                 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1046         } while (ret && !time_after_eq(jiffies, _end));
1047         if (ret) {
1048                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1049                 return ret;
1050         }
1051         if (eagain_if_wait)
1052                 return -EAGAIN;
1053
1054         return 0;
1055 }
1056
1057 /*
1058  * Fill in the ioctl reply argument with buffer info.
1059  * Bo locked.
1060  */
1061
1062 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
1063                                 drm_bo_arg_reply_t * rep)
1064 {
1065         rep->handle = bo->base.hash.key;
1066         rep->flags = bo->mem.flags;
1067         rep->size = bo->mem.num_pages * PAGE_SIZE;
1068         rep->offset = bo->offset;
1069         rep->arg_handle = bo->map_list.user_token;
1070         rep->mask = bo->mem.mask;
1071         rep->buffer_start = bo->buffer_start;
1072         rep->fence_flags = bo->fence_type;
1073         rep->rep_flags = 0;
1074         rep->page_alignment = bo->mem.page_alignment;
1075
1076         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1077                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1078                                 DRM_BO_REP_BUSY);
1079         }
1080 }
1081
1082 /*
1083  * Wait for buffer idle and register that we've mapped the buffer.
1084  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1085  * so that if the client dies, the mapping is automatically
1086  * unregistered.
1087  */
1088
1089 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1090                                  uint32_t map_flags, unsigned hint,
1091                                  drm_bo_arg_reply_t * rep)
1092 {
1093         drm_buffer_object_t *bo;
1094         drm_device_t *dev = priv->head->dev;
1095         int ret = 0;
1096         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1097
1098         mutex_lock(&dev->struct_mutex);
1099         bo = drm_lookup_buffer_object(priv, handle, 1);
1100         mutex_unlock(&dev->struct_mutex);
1101
1102         if (!bo)
1103                 return -EINVAL;
1104
1105         mutex_lock(&bo->mutex);
1106         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1107                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1108                 if (ret)
1109                         goto out;
1110         }
1111
1112         /*
1113          * If this returns true, we are currently unmapped.
1114          * We need to do this test, because unmapping can
1115          * be done without the bo->mutex held.
1116          */
1117
1118         while (1) {
1119                 if (atomic_inc_and_test(&bo->mapped)) {
1120                         if (no_wait && drm_bo_busy(bo)) {
1121                                 atomic_dec(&bo->mapped);
1122                                 ret = -EBUSY;
1123                                 goto out;
1124                         }
1125                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1126                         if (ret) {
1127                                 atomic_dec(&bo->mapped);
1128                                 goto out;
1129                         }
1130
1131                         if ((map_flags & DRM_BO_FLAG_READ) &&
1132                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1133                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1134                                 drm_bo_read_cached(bo);
1135                         }
1136                         break;
1137                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1138                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1139                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1140
1141                         /*
1142                          * We are already mapped with different flags;
1143                          * we need to wait for an unmap.
1144                          */
1145
1146                         ret = drm_bo_wait_unmapped(bo, no_wait);
1147                         if (ret)
1148                                 goto out;
1149
1150                         continue;
1151                 }
1152                 break;
1153         }
1154
1155         mutex_lock(&dev->struct_mutex);
1156         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1157         mutex_unlock(&dev->struct_mutex);
1158         if (ret) {
1159                 if (atomic_add_negative(-1, &bo->mapped))
1160                         DRM_WAKEUP(&bo->event_queue);
1161
1162         } else
1163                 drm_bo_fill_rep_arg(bo, rep);
1164       out:
1165         mutex_unlock(&bo->mutex);
1166         drm_bo_usage_deref_unlocked(bo);
1167         return ret;
1168 }
1169
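/*
 * Illustrative sketch, not part of the original drm_bo.c: the bo->mapped
 * counting protocol used by drm_buffer_object_map() above.  The counter
 * starts at -1 (unmapped), so atomic_inc_and_test() succeeds only for the
 * first mapper, and atomic_add_negative(-1, ...) is true only for the last
 * unmapper, which must wake anyone in drm_bo_wait_unmapped().  The helper
 * names are made up.
 */
static void drm_bo_example_map_get(drm_buffer_object_t * bo)
{
        if (atomic_inc_and_test(&bo->mapped)) {
                /* -1 -> 0: this is the first mapping of the buffer. */
        }
}

static void drm_bo_example_map_put(drm_buffer_object_t * bo)
{
        /* 0 -> -1: the buffer is unmapped again; wake up waiters. */
        if (atomic_add_negative(-1, &bo->mapped))
                DRM_WAKEUP(&bo->event_queue);
}
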
1170 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1171 {
1172         drm_device_t *dev = priv->head->dev;
1173         drm_buffer_object_t *bo;
1174         drm_ref_object_t *ro;
1175         int ret = 0;
1176
1177         mutex_lock(&dev->struct_mutex);
1178
1179         bo = drm_lookup_buffer_object(priv, handle, 1);
1180         if (!bo) {
1181                 ret = -EINVAL;
1182                 goto out;
1183         }
1184
1185         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1186         if (!ro) {
1187                 ret = -EINVAL;
1188                 goto out;
1189         }
1190
1191         drm_remove_ref_object(priv, ro);
1192         drm_bo_usage_deref_locked(bo);
1193       out:
1194         mutex_unlock(&dev->struct_mutex);
1195         return ret;
1196 }
1197
1198 /*
1199  * Call dev->struct_mutex locked.
1200  */
1201
1202 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1203                                          drm_user_object_t * uo,
1204                                          drm_ref_t action)
1205 {
1206         drm_buffer_object_t *bo =
1207             drm_user_object_entry(uo, drm_buffer_object_t, base);
1208
1209         /*
1210          * We DON'T want to take the bo->lock here, because we want to
1211          * hold it when we wait for unmapped buffer.
1212          */
1213
1214         BUG_ON(action != _DRM_REF_TYPE1);
1215
1216         if (atomic_add_negative(-1, &bo->mapped))
1217                 DRM_WAKEUP(&bo->event_queue);
1218 }
1219
1220 /*
1221  * bo->mutex locked.
1222  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1223  */
1224
1225 int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1226                        int no_wait, int move_unfenced)
1227 {
1228         drm_device_t *dev = bo->dev;
1229         drm_buffer_manager_t *bm = &dev->bm;
1230         int ret = 0;
1231         drm_bo_mem_reg_t mem;
1232         /*
1233          * Flush outstanding fences.
1234          */
1235
1236         drm_bo_busy(bo);
1237
1238         /*
1239          * Wait for outstanding fences.
1240          */
1241
1242         ret = drm_bo_wait(bo, 0, 0, no_wait);
1243         if (ret)
1244                 return ret;
1245
1246         mem.num_pages = bo->mem.num_pages;
1247         mem.size = mem.num_pages << PAGE_SHIFT;
1248         mem.mask = new_mem_flags;
1249         mem.page_alignment = bo->mem.page_alignment;
1250
1251         mutex_lock(&bm->evict_mutex);
1252         mutex_lock(&dev->struct_mutex);
1253         list_del(&bo->lru);
1254         list_add_tail(&bo->lru, &bm->unfenced);
1255         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1256                         _DRM_BO_FLAG_UNFENCED);
1257         mutex_unlock(&dev->struct_mutex);
1258
1259         /*
1260          * Determine where to move the buffer.
1261          */
1262         ret = drm_bo_mem_space(bo, &mem, no_wait);
1263         if (ret)
1264                 goto out_unlock;
1265
1266         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1267
1268  out_unlock:
1269         if (ret || !move_unfenced) {
1270                 mutex_lock(&dev->struct_mutex);
1271                 if (mem.mm_node) {
1272                         if (mem.mm_node != bo->pinned_node)
1273                                 drm_mm_put_block(mem.mm_node);
1274                         mem.mm_node = NULL;
1275                 }
1276                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1277                 DRM_WAKEUP(&bo->event_queue);
1278                 list_del(&bo->lru);
1279                 drm_bo_add_to_lru(bo);
1280                 mutex_unlock(&dev->struct_mutex);
1281         }
1282
1283         mutex_unlock(&bm->evict_mutex);
1284         return ret;
1285 }
1286
1287 static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
1288 {
1289         uint32_t flag_diff = (mem->mask ^ mem->flags);
1290
1291         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1292                 return 0;
1293         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1294             (!(mem->mask & DRM_BO_FLAG_CACHED) ||
1295              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1296           return 0;
1297         }
1298         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1299             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1300              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1301                 return 0;
1302         return 1;
1303 }
1304
1305 static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
1306 {
1307         drm_buffer_manager_t *bm = &dev->bm;
1308         drm_mem_type_manager_t *man;
1309         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1310         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1311         uint32_t i;
1312         int type_ok = 0;
1313         uint32_t mem_type = 0;
1314         uint32_t cur_flags;
1315
1316         if (drm_bo_mem_compat(mem))
1317                 return 0;
1318
1319         BUG_ON(mem->mm_node);
1320
1321         for (i = 0; i < num_prios; ++i) {
1322                 mem_type = prios[i];
1323                 man = &bm->man[mem_type];
1324                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1325                                                &cur_flags);
1326                 if (type_ok)
1327                         break;
1328         }
1329
1330         if (type_ok) {
1331                 mem->mm_node = NULL;
1332                 mem->mem_type = mem_type;
1333                 mem->flags = cur_flags;
1334                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1335                 return 0;
1336         }
1337
1338         DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
1339         return -EINVAL;
1340 }
1341
1342 /*
1343  * bo locked.
1344  */
1345
1346 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1347                                       int move_unfenced, int no_wait)
1348 {
1349         drm_device_t *dev = bo->dev;
1350         drm_buffer_manager_t *bm = &dev->bm;
1351         drm_bo_driver_t *driver = dev->driver->bo_driver;
1352         int ret;
1353
1354         DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
1355                   bo->mem.flags);
1356         ret =
1357             driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
1358         if (ret) {
1359                 DRM_ERROR("Driver did not support given buffer permissions\n");
1360                 return ret;
1361         }
1362
1363         ret = drm_bo_wait_unmapped(bo, no_wait);
1364         if (ret)
1365                 return ret;
1366
1367         if (bo->type == drm_bo_type_fake) {
1368                 ret = drm_bo_check_fake(dev, &bo->mem);
1369                 if (ret)
1370                         return ret;
1371         }
1372
1373         /*
1374          * Check whether we need to move buffer.
1375          */
1376
1377         if (!drm_bo_mem_compat(&bo->mem)) {
1378                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1379                                          move_unfenced);
1380                 if (ret) {
1381                         if (ret != -EAGAIN)
1382                                 DRM_ERROR("Failed moving buffer.\n");
1383                         return ret;
1384                 }
1385         }
1386
1387         /*
1388          * Pinned buffers.
1389          */
1390
1391         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1392                 bo->pinned_mem_type = bo->mem.mem_type;
1393                 mutex_lock(&dev->struct_mutex);
1394                 list_del_init(&bo->pinned_lru);
1395                 drm_bo_add_to_pinned_lru(bo);
1396
1397                 if (bo->pinned_node != bo->mem.mm_node) {
1398                         if (bo->pinned_node != NULL)
1399                                 drm_mm_put_block(bo->pinned_node);
1400                         bo->pinned_node = bo->mem.mm_node;
1401                 }
1402
1403                 mutex_unlock(&dev->struct_mutex);
1404
1405         } else if (bo->pinned_node != NULL) {
1406
1407                 mutex_lock(&dev->struct_mutex);
1408                 drm_mm_put_block(bo->pinned_node);
1409                 list_del_init(&bo->pinned_lru);
1410                 bo->pinned_node = NULL;
1411                 mutex_unlock(&dev->struct_mutex);
1412
1413         }
1414
1415         /*
1416          * We might need to add a TTM.
1417          */
1418
1419         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1420                 ret = drm_bo_add_ttm(bo);
1421                 if (ret)
1422                         return ret;
1423         }
1424         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1425
1426         /*
1427          * Finally, adjust lru to be sure.
1428          */
1429
1430         mutex_lock(&dev->struct_mutex);
1431         list_del(&bo->lru);
1432         if (move_unfenced) {
1433                 list_add_tail(&bo->lru, &bm->unfenced);
1434                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1435                                 _DRM_BO_FLAG_UNFENCED);
1436         } else {
1437                 drm_bo_add_to_lru(bo);
1438                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1439                         DRM_WAKEUP(&bo->event_queue);
1440                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1441                                         _DRM_BO_FLAG_UNFENCED);
1442                 }
1443         }
1444         mutex_unlock(&dev->struct_mutex);
1445
1446         return 0;
1447 }
1448
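/*
 * Illustrative sketch, not part of the original drm_bo.c: the
 * validate -> submit -> fence sequence that the unfenced-list comment
 * earlier in this file would like to be atomic.  Validating with
 * move_unfenced = 1 parks the buffer on bm->unfenced and sets
 * _DRM_BO_FLAG_UNFENCED; drm_fence_buffer_objects() later fences the whole
 * list in one go.  The wrapper name and the emit callback are hypothetical.
 */
static int drm_bo_example_validate_and_fence(drm_file_t * priv,
                                             drm_buffer_object_t * bo,
                                             int (*emit) (drm_buffer_object_t *))
{
        drm_fence_object_t *fence = NULL;
        int ret;

        /* 1) Validate: the buffer ends up on the unfenced list. */
        mutex_lock(&bo->mutex);
        ret = drm_buffer_object_validate(bo, 1, 0);
        mutex_unlock(&bo->mutex);
        if (ret)
                return ret;

        /* 2) Submit the commands that reference the buffer. */
        ret = emit(bo);
        if (ret)
                return ret;

        /* 3) Fence everything still on the unfenced list. */
        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
        if (ret)
                return ret;

        drm_fence_usage_deref_unlocked(priv->head->dev, fence);
        return 0;
}
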
1449 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1450                                   uint32_t flags, uint32_t mask, uint32_t hint,
1451                                   drm_bo_arg_reply_t * rep)
1452 {
1453         drm_buffer_object_t *bo;
1454         int ret;
1455         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1456
1457         bo = drm_lookup_buffer_object(priv, handle, 1);
1458         if (!bo) {
1459                 return -EINVAL;
1460         }
1461
1462         mutex_lock(&bo->mutex);
1463         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1464
1465         if (ret)
1466                 goto out;
1467
1468         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1469         ret = drm_bo_new_mask(bo, flags, hint);
1470         if (ret)
1471                 goto out;
1472
1473         ret =
1474             drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
1475                                        no_wait);
1476         drm_bo_fill_rep_arg(bo, rep);
1477
1478       out:
1479
1480         mutex_unlock(&bo->mutex);
1481
1482         drm_bo_usage_deref_unlocked(bo);
1483         return ret;
1484 }
1485
1486 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1487                               drm_bo_arg_reply_t * rep)
1488 {
1489         drm_buffer_object_t *bo;
1490
1491         bo = drm_lookup_buffer_object(priv, handle, 1);
1492         if (!bo) {
1493                 return -EINVAL;
1494         }
1495         mutex_lock(&bo->mutex);
1496         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1497                 (void)drm_bo_busy(bo);
1498         drm_bo_fill_rep_arg(bo, rep);
1499         mutex_unlock(&bo->mutex);
1500         drm_bo_usage_deref_unlocked(bo);
1501         return 0;
1502 }
1503
1504 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1505                               uint32_t hint, drm_bo_arg_reply_t * rep)
1506 {
1507         drm_buffer_object_t *bo;
1508         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1509         int ret;
1510
1511         bo = drm_lookup_buffer_object(priv, handle, 1);
1512         if (!bo) {
1513                 return -EINVAL;
1514         }
1515
1516         mutex_lock(&bo->mutex);
1517         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1518         if (ret)
1519                 goto out;
1520         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1521         if (ret)
1522                 goto out;
1523
1524         drm_bo_fill_rep_arg(bo, rep);
1525
1526       out:
1527         mutex_unlock(&bo->mutex);
1528         drm_bo_usage_deref_unlocked(bo);
1529         return ret;
1530 }
1531
1532 int drm_buffer_object_create(drm_file_t * priv,
1533                              unsigned long size,
1534                              drm_bo_type_t type,
1535                              uint32_t mask,
1536                              uint32_t hint,
1537                              uint32_t page_alignment,
1538                              unsigned long buffer_start,
1539                              drm_buffer_object_t ** buf_obj)
1540 {
1541         drm_device_t *dev = priv->head->dev;
1542         drm_buffer_manager_t *bm = &dev->bm;
1543         drm_buffer_object_t *bo;
1544         int ret = 0;
1545         unsigned long num_pages;
1546
1547         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1548                 DRM_ERROR("Invalid buffer object start.\n");
1549                 return -EINVAL;
1550         }
1551         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1552         if (num_pages == 0) {
1553                 DRM_ERROR("Illegal buffer object size.\n");
1554                 return -EINVAL;
1555         }
1556
1557         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1558
1559         if (!bo)
1560                 return -ENOMEM;
1561
1562         mutex_init(&bo->mutex);
1563         mutex_lock(&bo->mutex);
1564
1565         atomic_set(&bo->usage, 1);
1566         atomic_set(&bo->mapped, -1);
1567         DRM_INIT_WAITQUEUE(&bo->event_queue);
1568         INIT_LIST_HEAD(&bo->lru);
1569         INIT_LIST_HEAD(&bo->pinned_lru);
1570         INIT_LIST_HEAD(&bo->ddestroy);
1571 #ifdef DRM_ODD_MM_COMPAT
1572         INIT_LIST_HEAD(&bo->p_mm_list);
1573         INIT_LIST_HEAD(&bo->vma_list);
1574 #endif
1575         bo->dev = dev;
1576         bo->type = type;
1577         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1578         bo->mem.num_pages = num_pages;
1579         bo->mem.mm_node = NULL;
1580         bo->mem.page_alignment = page_alignment;
1581         if (bo->type == drm_bo_type_fake) {
1582                 bo->offset = buffer_start;
1583                 bo->buffer_start = 0;
1584         } else {
1585                 bo->buffer_start = buffer_start;
1586         }
1587         bo->priv_flags = 0;
1588         bo->mem.flags = 0;
1589         bo->mem.mask = 0;
1590         atomic_inc(&bm->count);
1591         ret = drm_bo_new_mask(bo, mask, hint);
1592
1593         if (ret)
1594                 goto out_err;
1595
1596         if (bo->type == drm_bo_type_dc) {
1597                 mutex_lock(&dev->struct_mutex);
1598                 ret = drm_bo_setup_vm_locked(bo);
1599                 mutex_unlock(&dev->struct_mutex);
1600                 if (ret)
1601                         goto out_err;
1602         }
1603         ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1604         if (ret)
1605                 goto out_err;
1606
1607         mutex_unlock(&bo->mutex);
1608         *buf_obj = bo;
1609         return 0;
1610
1611       out_err:
1612         mutex_unlock(&bo->mutex);
1613
1614         drm_bo_usage_deref_unlocked(bo);
1615         return ret;
1616 }
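
/*
 * Example (hypothetical in-kernel caller; the mask below is only a
 * stand-in for a driver's real placement/access flags): creating a
 * shareable, 16-page buffer object could look like
 *
 *      drm_buffer_object_t *bo;
 *      int ret;
 *
 *      ret = drm_buffer_object_create(priv, 16 * PAGE_SIZE, drm_bo_type_dc,
 *                                     DRM_BO_FLAG_SHAREABLE, 0, 0, 0, &bo);
 *      if (ret)
 *              return ret;
 *
 * On success the caller holds a usage reference on *buf_obj and releases
 * it with drm_bo_usage_deref_unlocked() when done.
 */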
1617
1618 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1619                                   int shareable)
1620 {
1621         drm_device_t *dev = priv->head->dev;
1622         int ret;
1623
1624         mutex_lock(&dev->struct_mutex);
1625         ret = drm_add_user_object(priv, &bo->base, shareable);
1626         if (ret)
1627                 goto out;
1628
1629         bo->base.remove = drm_bo_base_deref_locked;
1630         bo->base.type = drm_buffer_type;
1631         bo->base.ref_struct_locked = NULL;
1632         bo->base.unref = drm_buffer_user_object_unmap;
1633
1634       out:
1635         mutex_unlock(&dev->struct_mutex);
1636         return ret;
1637 }
1638
1639 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1640 {
1641         LOCK_TEST_WITH_RETURN(dev, filp);
1642         return 0;
1643 }
1644
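/*
 * The buffer object ioctl operates on a user-space chain of drm_bo_arg_t
 * blocks linked through arg.next.  Each block is copied in, dispatched on
 * req->op, marked handled and copied back before the next block is
 * processed; blocks already marked handled are skipped, which makes the
 * ioctl restartable after a signal (-EAGAIN).
 */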
1645 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1646 {
1647         DRM_DEVICE;
1648         drm_bo_arg_t arg;
1649         drm_bo_arg_request_t *req = &arg.d.req;
1650         drm_bo_arg_reply_t rep;
1651         unsigned long next;
1652         drm_user_object_t *uo;
1653         drm_buffer_object_t *entry;
1654
1655         if (!dev->bm.initialized) {
1656                 DRM_ERROR("Buffer object manager is not initialized.\n");
1657                 return -EINVAL;
1658         }
1659
1660         do {
1661                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1662
1663                 if (arg.handled) {
1664                         data = arg.next;
1665                         continue;
1666                 }
1667
1668                 rep.ret = 0;
1669                 switch (req->op) {
1670                 case drm_bo_create:
1671                         rep.ret =
1672                             drm_buffer_object_create(priv, req->size,
1673                                                      req->type,
1674                                                      req->mask,
1675                                                      req->hint,
1676                                                      req->page_alignment,
1677                                                      req->buffer_start, &entry);
1678                         if (rep.ret)
1679                                 break;
1680
1681                         rep.ret =
1682                             drm_bo_add_user_object(priv, entry,
1683                                                    req->mask &
1684                                                    DRM_BO_FLAG_SHAREABLE);
1685
1686                         if (rep.ret) {
1687                                 drm_bo_usage_deref_unlocked(entry);
1688                                 break;
1689                         }
1690
1691
1692                         mutex_lock(&entry->mutex);
1693                         drm_bo_fill_rep_arg(entry, &rep);
1694                         mutex_unlock(&entry->mutex);
1695                         break;
1696                 case drm_bo_unmap:
1697                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1698                         break;
1699                 case drm_bo_map:
1700                         rep.ret = drm_buffer_object_map(priv, req->handle,
1701                                                         req->mask,
1702                                                         req->hint, &rep);
1703                         break;
1704                 case drm_bo_destroy:
1705                         mutex_lock(&dev->struct_mutex);
1706                         uo = drm_lookup_user_object(priv, req->handle);
1707                         if (!uo || (uo->type != drm_buffer_type)
1708                             || uo->owner != priv) {
1709                                 mutex_unlock(&dev->struct_mutex);
1710                                 rep.ret = -EINVAL;
1711                                 break;
1712                         }
1713                         rep.ret = drm_remove_user_object(priv, uo);
1714                         mutex_unlock(&dev->struct_mutex);
1715                         break;
1716                 case drm_bo_reference:
1717                         rep.ret = drm_user_object_ref(priv, req->handle,
1718                                                       drm_buffer_type, &uo);
1719                         if (rep.ret)
1720                                 break;
1721                         mutex_lock(&dev->struct_mutex);
1722                         uo = drm_lookup_user_object(priv, req->handle);
1723                         entry =
1724                             drm_user_object_entry(uo, drm_buffer_object_t,
1725                                                   base);
1726                         atomic_dec(&entry->usage);
1727                         mutex_unlock(&dev->struct_mutex);
1728                         mutex_lock(&entry->mutex);
1729                         drm_bo_fill_rep_arg(entry, &rep);
1730                         mutex_unlock(&entry->mutex);
1731                         break;
1732                 case drm_bo_unreference:
1733                         rep.ret = drm_user_object_unref(priv, req->handle,
1734                                                         drm_buffer_type);
1735                         break;
1736                 case drm_bo_validate:
1737                         rep.ret = drm_bo_lock_test(dev, filp);
1738
1739                         if (rep.ret)
1740                                 break;
1741                         rep.ret =
1742                             drm_bo_handle_validate(priv, req->handle, req->mask,
1743                                                    req->arg_handle, req->hint,
1744                                                    &rep);
1745                         break;
1746                 case drm_bo_fence:
1747                         rep.ret = drm_bo_lock_test(dev, filp);
1748                         if (rep.ret)
1749                                 break;
1750                         break;  /* not implemented yet */
1751                 case drm_bo_info:
1752                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1753                         break;
1754                 case drm_bo_wait_idle:
1755                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1756                                                      req->hint, &rep);
1757                         break;
1758                 case drm_bo_ref_fence:
1759                         DRM_ERROR("Function is not implemented yet.\n");
1760                         /* fall through */
1761                 default:
1762                         rep.ret = -EINVAL;
1763                 }
1764                 next = arg.next;
1765
1766                 /*
1767                  * A signal interrupted us. Make sure the ioctl is restartable.
1768                  */
1769
1770                 if (rep.ret == -EAGAIN)
1771                         return -EAGAIN;
1772
1773                 arg.handled = 1;
1774                 arg.d.rep = rep;
1775                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1776                 data = next;
1777         } while (data);
1778         return 0;
1779 }
1780
1781 /**
1782  * Clean the unfenced list and put its entries back on the regular LRU.
1783  * This is part of the memory manager cleanup and should only be
1784  * called with the DRI lock held.
1785  * Call dev->struct_mutex locked.
1786  */
1787
1788 static void drm_bo_clean_unfenced(drm_device_t *dev)
1789 {
1790         drm_buffer_manager_t *bm  = &dev->bm;
1791         struct list_head *head, *list;
1792         drm_buffer_object_t *entry;
1793
1794         head = &bm->unfenced;
1795
1796         list = head->next;
1797         while(list != head) {
1798                 prefetch(list->next);
1799                 entry = list_entry(list, drm_buffer_object_t, lru);
1800
1801                 atomic_inc(&entry->usage);
1802                 mutex_unlock(&dev->struct_mutex);
1803                 mutex_lock(&entry->mutex);
1804                 mutex_lock(&dev->struct_mutex);
1805
1806                 list_del(&entry->lru);
1807                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1808                 drm_bo_add_to_lru(entry);
1809                 mutex_unlock(&entry->mutex);
1810                 list = head->next;
1811         }
1812 }
1813
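/*
 * Detach a single buffer object from the lists of memory type @mem_type:
 * wait for (or expire) its fence, optionally free its pinned region, and
 * evict it if it still resides in that memory type.  Eviction errors are
 * only propagated when @allow_errors is set; otherwise they are logged
 * and ignored.
 */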
1814 static int drm_bo_leave_list(drm_buffer_object_t * bo,
1815                              uint32_t mem_type,
1816                              int free_pinned, int allow_errors)
1817 {
1818         drm_device_t *dev = bo->dev;
1819         int ret = 0;
1820
1821         mutex_lock(&bo->mutex);
1822
1823         ret = drm_bo_expire_fence(bo, allow_errors);
1824         if (ret)
1825                 goto out;
1826
1827         if (free_pinned) {
1828                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1829                 mutex_lock(&dev->struct_mutex);
1830                 list_del_init(&bo->pinned_lru);
1831                 if (bo->pinned_node == bo->mem.mm_node)
1832                         bo->pinned_node = NULL;
1833                 if (bo->pinned_node != NULL) {
1834                         drm_mm_put_block(bo->pinned_node);
1835                         bo->pinned_node = NULL;
1836                 }
1837                 mutex_unlock(&dev->struct_mutex);
1838         }
1839
1840         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1841                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer was present at "
1842                           "cleanup. Removing flag and evicting.\n");
1843                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1844                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1845         }
1846
1847         if (bo->mem.mem_type == mem_type)
1848                 ret = drm_bo_evict(bo, mem_type, 0);
1849
1850         if (ret) {
1851                 if (allow_errors) {
1852                         goto out;
1853                 } else {
1854                         ret = 0;
1855                         DRM_ERROR("Cleanup eviction failed\n");
1856                 }
1857         }
1858
1859       out:
1860         mutex_unlock(&bo->mutex);
1861         return ret;
1862 }
1863
1864
1865 static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
1866                                          int pinned_list)
1867 {
1868         if (pinned_list)
1869                 return list_entry(list, drm_buffer_object_t, pinned_lru);
1870         else
1871                 return list_entry(list, drm_buffer_object_t, lru);
1872 }
1873
1874 /*
1875  * dev->struct_mutex locked.
1876  */
1877
1878 static int drm_bo_force_list_clean(drm_device_t * dev,
1879                                    struct list_head *head,
1880                                    unsigned mem_type,
1881                                    int free_pinned,
1882                                    int allow_errors,
1883                                    int pinned_list)
1884 {
1885         struct list_head *list, *next, *prev;
1886         drm_buffer_object_t *entry, *nentry;
1887         int ret;
1888         int do_restart;
1889
1890         /*
1891          * The list traversal is a bit odd here, because an item may
1892          * disappear from the list when we release the struct_mutex or
1893          * when we decrease the usage count. Also we're not guaranteed
1894          * to drain pinned lists, so we can't always restart.
1895          */
1896
1897 restart:
1898         nentry = NULL;
1899         list_for_each_safe(list, next, head) {
1900                 prev = list->prev;
1901
1902                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
1903                 atomic_inc(&entry->usage);
1904                 if (nentry) {
1905                         atomic_dec(&nentry->usage);
1906                         nentry = NULL;
1907                 }
1908
1909                 /*
1910                  * Protect the next item from destruction, so we can check
1911                  * its list pointers later on.
1912                  */
1913
1914                 if (next != head) {
1915                         nentry = drm_bo_entry(next, pinned_list);
1916                         atomic_inc(&nentry->usage);
1917                 }
1918                 mutex_unlock(&dev->struct_mutex);
1919
1920                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
1921                                         allow_errors);
1922                 mutex_lock(&dev->struct_mutex);
1923
1924                 drm_bo_usage_deref_locked(entry);
1925                 if (ret)
1926                         return ret;
1927
1928                 /*
1929                  * Has the next item disappeared from the list?
1930                  */
1931
1932                 do_restart = ((next->prev != list) && (next->prev != prev));
1933
1934                 if (nentry != NULL && do_restart) {
1935                         drm_bo_usage_deref_locked(nentry);
1936                         nentry = NULL;
1937                 }
1938
1939                 if (do_restart)
1940                         goto restart;
1941         }
1942         return 0;
1943 }
1944
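/*
 * Take down the memory manager for one memory type: mark the type
 * unusable, force all buffers off its LRU and pinned lists and, for
 * non-local types, tear down the underlying range allocator.  Returns
 * -EBUSY if the allocator still has blocks outstanding.
 */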
1945 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1946 {
1947         drm_buffer_manager_t *bm = &dev->bm;
1948         drm_mem_type_manager_t *man = &bm->man[mem_type];
1949         int ret = -EINVAL;
1950
1951         if (mem_type >= DRM_BO_MEM_TYPES) {
1952                 DRM_ERROR("Illegal memory type %u\n", mem_type);
1953                 return ret;
1954         }
1955
1956         if (!man->has_type) {
1957                 DRM_ERROR("Trying to take down uninitialized "
1958                           "memory manager type\n");
1959                 return ret;
1960         }
1961         man->use_type = 0;
1962         man->has_type = 0;
1963
1964         ret = 0;
1965         if (mem_type > 0) {
1966
1967                 drm_bo_clean_unfenced(dev);
1968                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
1969                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
1970
1971                 if (drm_mm_clean(&man->manager)) {
1972                         drm_mm_takedown(&man->manager);
1973                 } else {
1974                         ret = -EBUSY;
1975                 }
1976         }
1977
1978         return ret;
1979 }
1980
1981 /**
1982  * Evict all buffers of a particular mem_type, but leave memory manager
1983  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
1984  * point since we have the hardware lock.
1985  */
1986
1987 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1988 {
1989         int ret;
1990         drm_buffer_manager_t *bm = &dev->bm;
1991         drm_mem_type_manager_t *man = &bm->man[mem_type];
1992
1993         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1994                 DRM_ERROR("Illegal memory manager memory type %u\n", mem_type);
1995                 return -EINVAL;
1996         }
1997
1998         drm_bo_clean_unfenced(dev);
1999         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2000         if (ret)
2001                 return ret;
2002         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2003
2004         return ret;
2005 }
2006
2007 int drm_bo_init_mm(drm_device_t * dev,
2008                    unsigned type,
2009                    unsigned long p_offset, unsigned long p_size)
2010 {
2011         drm_buffer_manager_t *bm = &dev->bm;
2012         int ret = -EINVAL;
2013         drm_mem_type_manager_t *man;
2014
2015         if (type >= DRM_BO_MEM_TYPES) {
2016                 DRM_ERROR("Illegal memory type %u\n", type);
2017                 return ret;
2018         }
2019
2020         man = &bm->man[type];
2021         if (man->has_type) {
2022                 DRM_ERROR("Memory manager already initialized for type %d\n",
2023                           type);
2024                 return ret;
2025         }
2026
2027         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2028         if (ret)
2029                 return ret;
2030
2031         ret = 0;
2032         if (type != DRM_BO_MEM_LOCAL) {
2033                 if (!p_size) {
2034                         DRM_ERROR("Zero size memory manager type %d\n", type);
2035                         return -EINVAL;
2036                 }
2037                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2038                 if (ret)
2039                         return ret;
2040         }
2041         man->has_type = 1;
2042         man->use_type = 1;
2043
2044         INIT_LIST_HEAD(&man->lru);
2045         INIT_LIST_HEAD(&man->pinned);
2046
2047         return 0;
2048 }
2049 EXPORT_SYMBOL(drm_bo_init_mm);
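
/*
 * Example (hypothetical driver code, not part of this file): a driver
 * bringing up a translation-table memory type from its own init path
 * might do
 *
 *      ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_size);
 *      if (ret)
 *              return ret;
 *
 * where tt_size is a placeholder in whatever units the driver's
 * init_mem_type() hook and the mm allocator expect for that type.
 */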
2050
2051 /*
2052  * This is called from lastclose, so we don't need to bother about
2053  * any clients still running when we set the initialized flag to zero.
2054  */
2055
2056 int drm_bo_driver_finish(drm_device_t * dev)
2057 {
2058         drm_buffer_manager_t *bm = &dev->bm;
2059         int ret = 0;
2060         unsigned i = DRM_BO_MEM_TYPES;
2061         drm_mem_type_manager_t *man;
2062
2063         mutex_lock(&dev->bm.init_mutex);
2064         mutex_lock(&dev->struct_mutex);
2065
2066         if (!bm->initialized)
2067                 goto out;
2068         bm->initialized = 0;
2069
2070         while (i--) {
2071                 man = &bm->man[i];
2072                 if (man->has_type) {
2073                         man->use_type = 0;
2074                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2075                                 ret = -EBUSY;
2076                                 DRM_ERROR("DRM memory manager type %d "
2077                                           "is not clean.\n", i);
2078                         }
2079                         man->has_type = 0;
2080                 }
2081         }
2082         mutex_unlock(&dev->struct_mutex);
2083
2084         if (!cancel_delayed_work(&bm->wq)) {
2085                 flush_scheduled_work();
2086         }
2087         mutex_lock(&dev->struct_mutex);
2088         drm_bo_delayed_delete(dev, 1);
2089         if (list_empty(&bm->ddestroy)) {
2090                 DRM_DEBUG("Delayed destroy list was clean\n");
2091         }
2092         if (list_empty(&bm->man[0].lru)) {
2093                 DRM_DEBUG("Swap list was clean\n");
2094         }
2095         if (list_empty(&bm->man[0].pinned)) {
2096                 DRM_DEBUG("NO_MOVE list was clean\n");
2097         }
2098         if (list_empty(&bm->unfenced)) {
2099                 DRM_DEBUG("Unfenced list was clean\n");
2100         }
2101       out:
2102         mutex_unlock(&dev->struct_mutex);
2103         mutex_unlock(&dev->bm.init_mutex);
2104         return ret;
2105 }
2106
2107 int drm_bo_driver_init(drm_device_t * dev)
2108 {
2109         drm_bo_driver_t *driver = dev->driver->bo_driver;
2110         drm_buffer_manager_t *bm = &dev->bm;
2111         int ret = -EINVAL;
2112
2113         mutex_lock(&dev->bm.init_mutex);
2114         mutex_lock(&dev->struct_mutex);
2115         if (!driver)
2116                 goto out_unlock;
2117
2118         /*
2119          * Initialize the system memory buffer type.
2120          * Other types need to be driver / IOCTL initialized.
2121          */
2122
2123         ret = drm_bo_init_mm(dev, 0, 0, 0);
2124         if (ret)
2125                 goto out_unlock;
2126
2127 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2128         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2129 #else
2130         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2131 #endif
2132         bm->initialized = 1;
2133         bm->nice_mode = 1;
2134         atomic_set(&bm->count, 0);
2135         bm->cur_pages = 0;
2136         INIT_LIST_HEAD(&bm->unfenced);
2137         INIT_LIST_HEAD(&bm->ddestroy);
2138       out_unlock:
2139         mutex_unlock(&dev->struct_mutex);
2140         mutex_unlock(&dev->bm.init_mutex);
2141         return ret;
2142 }
2143
2144 EXPORT_SYMBOL(drm_bo_driver_init);
2145
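/*
 * drm_mm_init_ioctl() multiplexes memory manager control from user space:
 * mm_init brings up a non-local memory type via drm_bo_init_mm(),
 * mm_takedown tears it down again with drm_bo_clean_mm(), mm_lock evicts
 * all buffers of a type through drm_bo_lock_mm(), and mm_unlock is
 * currently a no-op.  The system memory type (0) can neither be
 * initialized nor taken down here.
 */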
2146 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
2147 {
2148         DRM_DEVICE;
2149
2150         int ret = 0;
2151         drm_mm_init_arg_t arg;
2152         drm_buffer_manager_t *bm = &dev->bm;
2153         drm_bo_driver_t *driver = dev->driver->bo_driver;
2154
2155         if (!driver) {
2156                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2157                 return -EINVAL;
2158         }
2159
2160         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2161
2162         switch (arg.req.op) {
2163         case mm_init:
2164                 ret = -EINVAL;
2165                 mutex_lock(&dev->bm.init_mutex);
2166                 mutex_lock(&dev->struct_mutex);
2167                 if (!bm->initialized) {
2168                         DRM_ERROR("DRM memory manager was not initialized.\n");
2169                         break;
2170                 }
2171                 if (arg.req.mem_type == 0) {
2172                         DRM_ERROR
2173                             ("System memory buffers already initialized.\n");
2174                         break;
2175                 }
2176                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2177                                      arg.req.p_offset, arg.req.p_size);
2178                 break;
2179         case mm_takedown:
2180                 LOCK_TEST_WITH_RETURN(dev, filp);
2181                 mutex_lock(&dev->bm.init_mutex);
2182                 mutex_lock(&dev->struct_mutex);
2183                 ret = -EINVAL;
2184                 if (!bm->initialized) {
2185                         DRM_ERROR("DRM memory manager was not initialized\n");
2186                         break;
2187                 }
2188                 if (arg.req.mem_type == 0) {
2189                         DRM_ERROR("No takedown for System memory buffers.\n");
2190                         break;
2191                 }
2192                 ret = 0;
2193                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2194                         DRM_ERROR("Memory manager type %d not clean. "
2195                                   "Delaying takedown\n", arg.req.mem_type);
2196                 }
2197                 break;
2198         case mm_lock:
2199                 LOCK_TEST_WITH_RETURN(dev, filp);
2200                 mutex_lock(&dev->bm.init_mutex);
2201                 mutex_lock(&dev->struct_mutex);
2202                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2203                 break;
2204         case mm_unlock:
2205                 LOCK_TEST_WITH_RETURN(dev, filp);
2206                 mutex_lock(&dev->bm.init_mutex);
2207                 mutex_lock(&dev->struct_mutex);
2208                 ret = 0;
2209                 break;
2210         default:
2211                 DRM_ERROR("Function not implemented yet\n");
2212                 return -EINVAL;
2213         }
2214
2215         mutex_unlock(&dev->struct_mutex);
2216         mutex_unlock(&dev->bm.init_mutex);
2217         if (ret)
2218                 return ret;
2219
2220         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2221         return 0;
2222 }
2223
2224 /*
2225  * buffer object vm functions.
2226  */
2227
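/*
 * Decide whether a memory region must be mapped as PCI memory.
 * Fixed-aperture memory types always are; system-page backed regions
 * (local memory, CMA-style types, or cached mappings) are not.
 */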
2228 int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
2229 {
2230         drm_buffer_manager_t *bm = &dev->bm;
2231         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2232
2233         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2234                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2235                         return 0;
2236
2237                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2238                         return 0;
2239
2240                 if (mem->flags & DRM_BO_FLAG_CACHED)
2241                         return 0;
2242         }
2243         return 1;
2244 }
2245
2246 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2247
2248 /**
2249  * Get the PCI offset for the buffer object memory.
2250  *
2251  * \param mem The memory region of the buffer object.
2252  * \param bus_base On return the base of the PCI region
2253  * \param bus_offset On return the byte offset into the PCI region
2254  * \param bus_size On return the byte size of the buffer object or zero if
2255  *     the buffer object memory is not accessible through a PCI region.
2256  * \return Failure indication.
2257  *
2258  * Returns -EINVAL if the buffer object is currently not mappable.
2259  * Otherwise returns zero.
2260  */
2261
2262 int drm_bo_pci_offset(drm_device_t * dev,
2263                       drm_bo_mem_reg_t * mem,
2264                       unsigned long *bus_base,
2265                       unsigned long *bus_offset, unsigned long *bus_size)
2266 {
2267         drm_buffer_manager_t *bm = &dev->bm;
2268         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2269
2270         *bus_size = 0;
2271         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2272                 return -EINVAL;
2273
2274         if (drm_mem_reg_is_pci(dev, mem)) {
2275                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2276                 *bus_size = mem->num_pages << PAGE_SHIFT;
2277                 *bus_base = man->io_offset;
2278         }
2279
2280         return 0;
2281 }
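
/*
 * Example (illustrative only): a caller that needs a kernel mapping of a
 * PCI-accessible buffer could combine this with ioremap():
 *
 *      unsigned long bus_base, bus_offset, bus_size;
 *      void __iomem *virt = NULL;
 *
 *      if (!drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
 *                             &bus_size) && bus_size != 0)
 *              virt = ioremap(bus_base + bus_offset, bus_size);
 */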
2282
2283 /**
2284  * Kill all user-space virtual mappings of this buffer object.
2285  *
2286  * \param bo The buffer object.
2287  *
2288  * Call bo->mutex locked.
2289  */
2290
2291 void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
2292 {
2293         drm_device_t *dev = bo->dev;
2294         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2295         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2296
2297         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2298 }
2299
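/*
 * Release the fake mmap offset of a buffer object: remove the map hash
 * entry, return the offset range to the offset manager, free the map and
 * drop the usage reference it held on the buffer object.  Called with
 * dev->struct_mutex held.
 */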
2300 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
2301 {
2302         drm_map_list_t *list = &bo->map_list;
2303         drm_local_map_t *map;
2304         drm_device_t *dev = bo->dev;
2305
2306         if (list->user_token) {
2307                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2308                 list->user_token = 0;
2309         }
2310         if (list->file_offset_node) {
2311                 drm_mm_put_block(list->file_offset_node);
2312                 list->file_offset_node = NULL;
2313         }
2314
2315         map = list->map;
2316         if (!map)
2317                 return;
2318
2319         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2320         list->map = NULL;
2321         list->user_token = 0ULL;
2322         drm_bo_usage_deref_locked(bo);
2323 }
2324
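/*
 * Set up the fake mmap offset for a buffer object: allocate a map
 * structure referencing the object, reserve a page range in the device's
 * offset manager and hash it so that user space can mmap() the object at
 * the returned user_token.  Called with dev->struct_mutex held.
 */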
2325 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
2326 {
2327         drm_map_list_t *list = &bo->map_list;
2328         drm_local_map_t *map;
2329         drm_device_t *dev = bo->dev;
2330
2331         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2332         if (!list->map)
2333                 return -ENOMEM;
2334
2335         map = list->map;
2336         map->offset = 0;
2337         map->type = _DRM_TTM;
2338         map->flags = _DRM_REMOVABLE;
2339         map->size = bo->mem.num_pages * PAGE_SIZE;
2340         atomic_inc(&bo->usage);
2341         map->handle = (void *)bo;
2342
2343         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2344                                                     bo->mem.num_pages, 0, 0);
2345
2346         if (!list->file_offset_node) {
2347                 drm_bo_takedown_vm_locked(bo);
2348                 return -ENOMEM;
2349         }
2350
2351         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2352                                                   bo->mem.num_pages, 0);
2353
2354         list->hash.key = list->file_offset_node->start;
2355         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2356                 drm_bo_takedown_vm_locked(bo);
2357                 return -ENOMEM;
2358         }
2359
2360         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2361
2362         return 0;
2363 }