Commit: Indentation fixes.
File: linux-core/drm_bo.c (platform/upstream/libdrm.git)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those we need
44  * both bo->mutex and dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
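/*
 * Editor's illustrative sketch (not part of the original file): because the
 * locking order is bo->mutex before dev->struct_mutex, a list walk protected
 * by dev->struct_mutex must drop that mutex before taking bo->mutex and then
 * restart the traversal.  A hypothetical helper following that pattern could
 * look like this (a real user would also have to make forward progress, for
 * example by removing entries from the list as it goes):
 */
#if 0
static void drm_bo_example_walk_lru(drm_device_t * dev, struct list_head *lru)
{
        drm_buffer_object_t *entry;

        mutex_lock(&dev->struct_mutex);
restart:
        list_for_each_entry(entry, lru, lru) {
                atomic_inc(&entry->usage);      /* keep the entry alive */
                mutex_unlock(&dev->struct_mutex);

                mutex_lock(&entry->mutex);      /* bo->mutex taken first */
                /* ... operate on the buffer object ... */
                mutex_unlock(&entry->mutex);

                mutex_lock(&dev->struct_mutex);
                drm_bo_usage_deref_locked(entry);
                goto restart;                   /* the list may have changed */
        }
        mutex_unlock(&dev->struct_mutex);
}
#endif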
51
52 static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
53 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
54 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
55 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
56
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
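/*
 * Editor's note (illustrative; assumes the DRM_BO_MEM_* enumeration in drm.h
 * starts at zero): this maps a memory type onto its DRM_BO_FLAG_MEM_* bit in
 * the flags word, e.g.
 *
 *      drm_bo_type_flags(DRM_BO_MEM_LOCAL) == (1 << 24)
 *      drm_bo_type_flags(DRM_BO_MEM_TT)    == (1 << 25)
 */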
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
67 {
68         drm_mem_type_manager_t *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(drm_buffer_object_t * bo)
78 {
79         drm_mem_type_manager_t *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
92 static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
93 {
94 #ifdef DRM_ODD_MM_COMPAT
95         int ret;
96
97         if (!bo->map_list.map)
98                 return 0;
99
100         ret = drm_bo_lock_kmm(bo);
101         if (ret)
102                 return ret;
103         drm_bo_unmap_virtual(bo);
104         if (old_is_pci)
105                 drm_bo_finish_unmap(bo);
106 #else
107         if (!bo->map_list.map)
108                 return 0;
109
110         drm_bo_unmap_virtual(bo);
111 #endif
112         return 0;
113 }
114
115 static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
116 {
117 #ifdef DRM_ODD_MM_COMPAT
118         int ret;
119
120         if (!bo->map_list.map)
121                 return;
122
123         ret = drm_bo_remap_bound(bo);
124         if (ret) {
125                 DRM_ERROR("Failed to remap a bound buffer object.\n"
126                           "\tThis might cause a sigbus later.\n");
127         }
128         drm_bo_unlock_kmm(bo);
129 #endif
130 }
131
132 /*
133  * Call bo->mutex locked.
134  */
135
136 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
137 {
138         drm_device_t *dev = bo->dev;
139         int ret = 0;
140         bo->ttm = NULL;
141
142         DRM_ASSERT_LOCKED(&bo->mutex);
143
144         switch (bo->type) {
145         case drm_bo_type_dc:
146                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
147                 if (!bo->ttm)
148                         ret = -ENOMEM;
149                 break;
150         case drm_bo_type_kernel:
151                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
152                 if (!bo->ttm)
153                         ret = -ENOMEM;
154                 break;
155         case drm_bo_type_user:
156         case drm_bo_type_fake:
157                 break;
158         default:
159                 DRM_ERROR("Illegal buffer object type\n");
160                 ret = -EINVAL;
161                 break;
162         }
163
164         return ret;
165 }
166
167 static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
168                                   drm_bo_mem_reg_t * mem,
169                                   int evict, int no_wait)
170 {
171         drm_device_t *dev = bo->dev;
172         drm_buffer_manager_t *bm = &dev->bm;
173         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
174         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
175         drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
176         drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
177         int ret = 0;
178
179         if (old_is_pci || new_is_pci)
180                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
181         if (ret)
182                 return ret;
183
184         /*
185          * Create and bind a ttm if required.
186          */
187
188         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
189                 ret = drm_bo_add_ttm(bo);
190                 if (ret)
191                         goto out_err;
192
193                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
194                         ret = drm_bind_ttm(bo->ttm, new_man->flags &
195                                            DRM_BO_FLAG_CACHED,
196                                            mem->mm_node->start);
197                         if (ret)
198                                 goto out_err;
199                 }
200         }
201
202         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
203
204                 drm_bo_mem_reg_t *old_mem = &bo->mem;
205                 uint32_t save_flags = old_mem->flags;
206                 uint32_t save_mask = old_mem->mask;
207
208                 *old_mem = *mem;
209                 mem->mm_node = NULL;
210                 old_mem->mask = save_mask;
211                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
212
213         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
214                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
215
216                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
217
218         } else if (dev->driver->bo_driver->move) {
219                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
220
221         } else {
222
223                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
224
225         }
226
227         if (ret)
228                 goto out_err;
229
230         if (old_is_pci || new_is_pci)
231                 drm_bo_vm_post_move(bo);
232
233         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
234                 ret =
235                     dev->driver->bo_driver->invalidate_caches(dev,
236                                                               bo->mem.flags);
237                 if (ret)
238                         DRM_ERROR("Cannot flush read caches\n");
239         }
240
241         DRM_FLAG_MASKED(bo->priv_flags,
242                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
243                         _DRM_BO_FLAG_EVICTED);
244
245         if (bo->mem.mm_node)
246                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
247
248         return 0;
249
250       out_err:
251         if (old_is_pci || new_is_pci)
252                 drm_bo_vm_post_move(bo);
253
254         new_man = &bm->man[bo->mem.mem_type];
255         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
256                 drm_ttm_unbind(bo->ttm);
257                 drm_destroy_ttm(bo->ttm);
258                 bo->ttm = NULL;
259         }
260
261         return ret;
262 }
263
264 /*
265  * Call bo->mutex locked.
266  * Wait until the buffer is idle.
267  */
268
269 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
270                 int no_wait)
271 {
272
273         drm_fence_object_t *fence = bo->fence;
274         int ret;
275
276         DRM_ASSERT_LOCKED(&bo->mutex);
277
278         if (fence) {
279                 drm_device_t *dev = bo->dev;
280                 if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
281                         drm_fence_usage_deref_unlocked(dev, fence);
282                         bo->fence = NULL;
283                         return 0;
284                 }
285                 if (no_wait) {
286                         return -EBUSY;
287                 }
288                 ret =
289                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
290                                           bo->fence_type);
291                 if (ret)
292                         return ret;
293
294                 drm_fence_usage_deref_unlocked(dev, fence);
295                 bo->fence = NULL;
296
297         }
298         return 0;
299 }
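/*
 * Editor's usage sketch (hypothetical helper, not in the original file):
 * first poll the buffer without sleeping, then fall back to a lazy,
 * signal-interruptible wait.  Like drm_bo_wait() itself, this must be
 * called with bo->mutex held.
 */
#if 0
static int drm_bo_example_wait_idle(drm_buffer_object_t * bo)
{
        int ret;

        ret = drm_bo_wait(bo, 0, 0, 1);         /* no_wait: -EBUSY if fenced */
        if (ret == -EBUSY)
                ret = drm_bo_wait(bo, 1, 0, 0); /* lazy wait, allow signals */
        return ret;
}
#endif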
300
301 static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
302 {
303         drm_device_t *dev = bo->dev;
304         drm_buffer_manager_t *bm = &dev->bm;
305
306         if (bo->fence) {
307                 if (bm->nice_mode) {
308                         unsigned long _end = jiffies + 3 * DRM_HZ;
309                         int ret;
310                         do {
311                                 ret = drm_bo_wait(bo, 0, 1, 0);
312                                 if (ret && allow_errors)
313                                         return ret;
314
315                         } while (ret && !time_after_eq(jiffies, _end));
316
317                         if (bo->fence) {
318                                 bm->nice_mode = 0;
319                                 DRM_ERROR("Detected GPU lockup or "
320                                           "fence driver was taken down. "
321                                           "Evicting buffer.\n");
322                         }
323                 }
324                 if (bo->fence) {
325                         drm_fence_usage_deref_unlocked(dev, bo->fence);
326                         bo->fence = NULL;
327                 }
328         }
329         return 0;
330 }
331
332 /*
333  * Call dev->struct_mutex locked.
334  * Attempts to remove all private references to a buffer by expiring its
335  * fence object and removing from lru lists and memory managers.
336  */
337
338 static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
339 {
340         drm_device_t *dev = bo->dev;
341         drm_buffer_manager_t *bm = &dev->bm;
342
343         DRM_ASSERT_LOCKED(&dev->struct_mutex);
344
345         atomic_inc(&bo->usage);
346         mutex_unlock(&dev->struct_mutex);
347         mutex_lock(&bo->mutex);
348
349         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
350
351         if (bo->fence && drm_fence_object_signaled(dev, bo->fence, 
352                                                    bo->fence_type, 0)) {
353                 drm_fence_usage_deref_unlocked(dev, bo->fence);
354                 bo->fence = NULL;
355         }
356
357         if (bo->fence && remove_all)
358                 (void)drm_bo_expire_fence(bo, 0);
359
360         mutex_lock(&dev->struct_mutex);
361
362         if (!atomic_dec_and_test(&bo->usage)) {
363                 goto out;
364         }
365
366         if (!bo->fence) {
367                 list_del_init(&bo->lru);
368                 if (bo->mem.mm_node) {
369                         drm_mm_put_block(bo->mem.mm_node);
370                         if (bo->pinned_node == bo->mem.mm_node)
371                                 bo->pinned_node = NULL;
372                         bo->mem.mm_node = NULL;
373                 }
374                 list_del_init(&bo->pinned_lru);
375                 if (bo->pinned_node) {
376                         drm_mm_put_block(bo->pinned_node);
377                         bo->pinned_node = NULL;
378                 }
379                 list_del_init(&bo->ddestroy);
380                 mutex_unlock(&bo->mutex);
381                 drm_bo_destroy_locked(bo);
382                 return;
383         }
384
385         if (list_empty(&bo->ddestroy)) {
386                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
387                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
388                 schedule_delayed_work(&bm->wq,
389                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
390         }
391
392       out:
393         mutex_unlock(&bo->mutex);
394         return;
395 }
396
397 /*
398  * Verify that refcount is 0 and that there are no internal references
399  * to the buffer object. Then destroy it.
400  */
401
402 static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
403 {
404         drm_device_t *dev = bo->dev;
405         drm_buffer_manager_t *bm = &dev->bm;
406
407         DRM_ASSERT_LOCKED(&dev->struct_mutex);
408
409         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
410             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
411             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
412                 if (bo->fence != NULL) {
413                         DRM_ERROR("Fence was non-zero.\n");
414                         drm_bo_cleanup_refs(bo, 0);
415                         return;
416                 }
417
418 #ifdef DRM_ODD_MM_COMPAT
419                 BUG_ON(!list_empty(&bo->vma_list));
420                 BUG_ON(!list_empty(&bo->p_mm_list));
421 #endif
422
423                 if (bo->ttm) {
424                         drm_ttm_unbind(bo->ttm);
425                         drm_destroy_ttm(bo->ttm);
426                         bo->ttm = NULL;
427                 }
428
429                 atomic_dec(&bm->count);
430
431                 BUG_ON(!list_empty(&bo->base.list));
432                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
433
434                 return;
435         }
436
437         /*
438          * Some stuff is still trying to reference the buffer object.
439          * Get rid of those references.
440          */
441
442         drm_bo_cleanup_refs(bo, 0);
443
444         return;
445 }
446
447 /*
448  * Call dev->struct_mutex locked.
449  */
450
451 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
452 {
453         drm_buffer_manager_t *bm = &dev->bm;
454
455         drm_buffer_object_t *entry, *nentry;
456         struct list_head *list, *next;
457
458         list_for_each_safe(list, next, &bm->ddestroy) {
459                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
460
461                 nentry = NULL;
462                 if (next != &bm->ddestroy) {
463                         nentry = list_entry(next, drm_buffer_object_t,
464                                             ddestroy);
465                         atomic_inc(&nentry->usage);
466                 }
467
468                 drm_bo_cleanup_refs(entry, remove_all);
469
470                 if (nentry) {
471                         atomic_dec(&nentry->usage);
472                 }
473         }
474 }
475
476 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
477 static void drm_bo_delayed_workqueue(void *data)
478 #else
479 static void drm_bo_delayed_workqueue(struct work_struct *work)
480 #endif
481 {
482 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
483         drm_device_t *dev = (drm_device_t *) data;
484         drm_buffer_manager_t *bm = &dev->bm;
485 #else
486         drm_buffer_manager_t *bm =
487             container_of(work, drm_buffer_manager_t, wq.work);
488         drm_device_t *dev = container_of(bm, drm_device_t, bm);
489 #endif
490
491         DRM_DEBUG("Delayed delete Worker\n");
492
493         mutex_lock(&dev->struct_mutex);
494         if (!bm->initialized) {
495                 mutex_unlock(&dev->struct_mutex);
496                 return;
497         }
498         drm_bo_delayed_delete(dev, 0);
499         if (bm->initialized && !list_empty(&bm->ddestroy)) {
500                 schedule_delayed_work(&bm->wq,
501                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
502         }
503         mutex_unlock(&dev->struct_mutex);
504 }
505
506 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
507 {
508         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
509
510         if (atomic_dec_and_test(&bo->usage)) {
511                 drm_bo_destroy_locked(bo);
512         }
513 }
514
515 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
516 {
517         drm_buffer_object_t *bo =
518             drm_user_object_entry(uo, drm_buffer_object_t, base);
519
520         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
521
522         drm_bo_takedown_vm_locked(bo);
523         drm_bo_usage_deref_locked(bo);
524 }
525
526 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
527 {
528         drm_device_t *dev = bo->dev;
529
530         if (atomic_dec_and_test(&bo->usage)) {
531                 mutex_lock(&dev->struct_mutex);
532                 if (atomic_read(&bo->usage) == 0)
533                         drm_bo_destroy_locked(bo);
534                 mutex_unlock(&dev->struct_mutex);
535         }
536 }
537
538 /*
539  * Note. The caller has to register (if applicable)
540  * and deregister fence object usage.
541  */
542
543 int drm_fence_buffer_objects(drm_file_t * priv,
544                              struct list_head *list,
545                              uint32_t fence_flags,
546                              drm_fence_object_t * fence,
547                              drm_fence_object_t ** used_fence)
548 {
549         drm_device_t *dev = priv->head->dev;
550         drm_buffer_manager_t *bm = &dev->bm;
551
552         drm_buffer_object_t *entry;
553         uint32_t fence_type = 0;
554         int count = 0;
555         int ret = 0;
556         struct list_head *l;
557         LIST_HEAD(f_list);
558
559         mutex_lock(&dev->struct_mutex);
560
561         if (!list)
562                 list = &bm->unfenced;
563
564         list_for_each_entry(entry, list, lru) {
565                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
566                 fence_type |= entry->fence_type;
567                 if (entry->fence_class != 0) {
568                         DRM_ERROR("Fence class %d is not implemented yet.\n",
569                                   entry->fence_class);
570                         ret = -EINVAL;
571                         goto out;
572                 }
573                 count++;
574         }
575
576         if (!count) {
577                 ret = -EINVAL;
578                 goto out;
579         }
580
581         /*
582          * Transfer to a local list before we release the dev->struct_mutex;
583          * This is so we don't get any new unfenced objects while fencing
584          * the ones we already have.
585          */
586
587         list_splice_init(list, &f_list);
588
589         if (fence) {
590                 if ((fence_type & fence->type) != fence_type) {
591                         DRM_ERROR("Given fence doesn't match buffers "
592                                   "on unfenced list.\n");
593                         ret = -EINVAL;
594                         goto out;
595                 }
596         } else {
597                 mutex_unlock(&dev->struct_mutex);
598                 ret = drm_fence_object_create(dev, 0, fence_type,
599                                               fence_flags | DRM_FENCE_FLAG_EMIT,
600                                               &fence);
601                 mutex_lock(&dev->struct_mutex);
602                 if (ret)
603                         goto out;
604         }
605
606         count = 0;
607         l = f_list.next;
608         while (l != &f_list) {
609                 prefetch(l->next);
610                 entry = list_entry(l, drm_buffer_object_t, lru);
611                 atomic_inc(&entry->usage);
612                 mutex_unlock(&dev->struct_mutex);
613                 mutex_lock(&entry->mutex);
614                 mutex_lock(&dev->struct_mutex);
615                 list_del_init(l);
616                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
617                         count++;
618                         if (entry->fence)
619                                 drm_fence_usage_deref_locked(dev, entry->fence);
620                         entry->fence = fence;
621                         atomic_inc(&fence->usage);
622                         DRM_FLAG_MASKED(entry->priv_flags, 0,
623                                         _DRM_BO_FLAG_UNFENCED);
624                         DRM_WAKEUP(&entry->event_queue);
625                         drm_bo_add_to_lru(entry);
626                 }
627                 mutex_unlock(&entry->mutex);
628                 drm_bo_usage_deref_locked(entry);
629                 l = f_list.next;
630         }
631         DRM_DEBUG("Fenced %d buffers\n", count);
632       out:
633         mutex_unlock(&dev->struct_mutex);
634         *used_fence = fence;
635         return ret;
636 }
637
638 EXPORT_SYMBOL(drm_fence_buffer_objects);
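/*
 * Editor's usage sketch (hypothetical driver code): fence everything that is
 * currently on the unfenced list after submitting a batch.  Passing a NULL
 * list selects bm->unfenced, and passing a NULL fence asks the function to
 * create and emit one; per the note above, the caller owns the reference
 * returned in *used_fence and must drop it when done.
 */
#if 0
static int drm_bo_example_fence_unfenced(drm_file_t * priv,
                                         uint32_t fence_flags)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence = NULL;
        int ret;

        ret = drm_fence_buffer_objects(priv, NULL, fence_flags, NULL, &fence);
        if (ret)
                return ret;

        /* ... hand the fence to user space or wait on it ... */
        drm_fence_usage_deref_unlocked(dev, fence);
        return 0;
}
#endif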
639
640 /*
641  * bo->mutex locked
642  */
643
644 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
645                         int no_wait)
646 {
647         int ret = 0;
648         drm_device_t *dev = bo->dev;
649         drm_bo_mem_reg_t evict_mem;
650
651         /*
652          * Someone might have modified the buffer before we took the buffer mutex.
653          */
654
655         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
656                 goto out;
657         if (bo->mem.mem_type != mem_type)
658                 goto out;
659
660         ret = drm_bo_wait(bo, 0, 0, no_wait);
661
662         if (ret && ret != -EAGAIN) {
663                 DRM_ERROR("Failed to expire fence before "
664                           "buffer eviction.\n");
665                 goto out;
666         }
667
668         evict_mem = bo->mem;
669         evict_mem.mm_node = NULL;
670
671         if (bo->type == drm_bo_type_fake) {
672                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
673                 bo->mem.mm_node = NULL;
674                 goto out1;
675         }
676
677         evict_mem = bo->mem;
678         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
679         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
680
681         if (ret) {
682                 if (ret != -EAGAIN)
683                         DRM_ERROR("Failed to find memory space for "
684                                   "buffer 0x%p eviction.\n", bo);
685                 goto out;
686         }
687
688         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
689
690         if (ret) {
691                 if (ret != -EAGAIN)
692                         DRM_ERROR("Buffer eviction failed\n");
693                 goto out;
694         }
695
696       out1:
697         mutex_lock(&dev->struct_mutex);
698         if (evict_mem.mm_node) {
699                 if (evict_mem.mm_node != bo->pinned_node)
700                         drm_mm_put_block(evict_mem.mm_node);
701                 evict_mem.mm_node = NULL;
702         }
703         list_del(&bo->lru);
704         drm_bo_add_to_lru(bo);
705         mutex_unlock(&dev->struct_mutex);
706
707         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
708                         _DRM_BO_FLAG_EVICTED);
709
710       out:
711         return ret;
712 }
713
714 static int drm_bo_mem_force_space(drm_device_t * dev,
715                                   drm_bo_mem_reg_t * mem,
716                                   uint32_t mem_type, int no_wait)
717 {
718         drm_mm_node_t *node;
719         drm_buffer_manager_t *bm = &dev->bm;
720         drm_buffer_object_t *entry;
721         drm_mem_type_manager_t *man = &bm->man[mem_type];
722         struct list_head *lru;
723         unsigned long num_pages = mem->num_pages;
724         int ret;
725
726         mutex_lock(&dev->struct_mutex);
727         do {
728                 node = drm_mm_search_free(&man->manager, num_pages,
729                                           mem->page_alignment, 1);
730                 if (node)
731                         break;
732
733                 lru = &man->lru;
734                 if (lru->next == lru)
735                         break;
736
737                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
738                 atomic_inc(&entry->usage);
739                 mutex_unlock(&dev->struct_mutex);
740                 mutex_lock(&entry->mutex);
741                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
742
743                 ret = drm_bo_evict(entry, mem_type, no_wait);
744                 mutex_unlock(&entry->mutex);
745                 drm_bo_usage_deref_unlocked(entry);
746                 if (ret)
747                         return ret;
748                 mutex_lock(&dev->struct_mutex);
749         } while (1);
750
751         if (!node) {
752                 mutex_unlock(&dev->struct_mutex);
753                 return -ENOMEM;
754         }
755
756         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
757         mutex_unlock(&dev->struct_mutex);
758         mem->mm_node = node;
759         mem->mem_type = mem_type;
760         return 0;
761 }
762
763 static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
764                                 uint32_t mem_type,
765                                 uint32_t mask, uint32_t * res_mask)
766 {
767         uint32_t cur_flags = drm_bo_type_flags(mem_type);
768         uint32_t flag_diff;
769
770         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
771                 cur_flags |= DRM_BO_FLAG_CACHED;
772         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
773                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
774         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
775                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
776
777         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
778                 return 0;
779
780         if (mem_type == DRM_BO_MEM_LOCAL) {
781                 *res_mask = cur_flags;
782                 return 1;
783         }
784
785         flag_diff = (mask ^ cur_flags);
786         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
787             (!(mask & DRM_BO_FLAG_CACHED) ||
788              (mask & DRM_BO_FLAG_FORCE_CACHING)))
789                 return 0;
790
791         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
792             ((mask & DRM_BO_FLAG_MAPPABLE) ||
793              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
794                 return 0;
795
796         *res_mask = cur_flags;
797         return 1;
798 }
799
800 int drm_bo_mem_space(drm_buffer_object_t * bo,
801                      drm_bo_mem_reg_t * mem, int no_wait)
802 {
803         drm_device_t *dev = bo->dev;
804         drm_buffer_manager_t *bm = &dev->bm;
805         drm_mem_type_manager_t *man;
806
807         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
808         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
809         uint32_t i;
810         uint32_t mem_type = DRM_BO_MEM_LOCAL;
811         uint32_t cur_flags;
812         int type_found = 0;
813         int type_ok = 0;
814         int has_eagain = 0;
815         drm_mm_node_t *node = NULL;
816         int ret;
817
818         mem->mm_node = NULL;
819         for (i = 0; i < num_prios; ++i) {
820                 mem_type = prios[i];
821                 man = &bm->man[mem_type];
822
823                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
824                                                &cur_flags);
825
826                 if (!type_ok)
827                         continue;
828
829                 if (mem_type == DRM_BO_MEM_LOCAL)
830                         break;
831
832                 if ((mem_type == bo->pinned_mem_type) &&
833                     (bo->pinned_node != NULL)) {
834                         node = bo->pinned_node;
835                         break;
836                 }
837
838                 mutex_lock(&dev->struct_mutex);
839                 if (man->has_type && man->use_type) {
840                         type_found = 1;
841                         node = drm_mm_search_free(&man->manager, mem->num_pages,
842                                                   mem->page_alignment, 1);
843                         if (node)
844                                 node = drm_mm_get_block(node, mem->num_pages,
845                                                         mem->page_alignment);
846                 }
847                 mutex_unlock(&dev->struct_mutex);
848                 if (node)
849                         break;
850         }
851
852         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
853                 mem->mm_node = node;
854                 mem->mem_type = mem_type;
855                 mem->flags = cur_flags;
856                 return 0;
857         }
858
859         if (!type_found)
860                 return -EINVAL;
861
862         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
863         prios = dev->driver->bo_driver->mem_busy_prio;
864
865         for (i = 0; i < num_prios; ++i) {
866                 mem_type = prios[i];
867                 man = &bm->man[mem_type];
868
869                 if (!man->has_type)
870                         continue;
871
872                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
873                         continue;
874
875                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
876
877                 if (ret == 0) {
878                         mem->flags = cur_flags;
879                         return 0;
880                 }
881
882                 if (ret == -EAGAIN)
883                         has_eagain = 1;
884         }
885
886         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
887         return ret;
888 }
889
890 EXPORT_SYMBOL(drm_bo_mem_space);
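/*
 * Editor's sketch of the driver-side placement tables consumed above
 * (hypothetical values; DRM_BO_MEM_VRAM is assumed from drm.h): place
 * buffers preferably in VRAM, then TT, then local memory, but when space
 * has to be forced, evict from TT before touching VRAM.
 */
#if 0
static uint32_t example_mem_type_prio[] = {
        DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
};
static uint32_t example_mem_busy_prio[] = {
        DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL
};

static drm_bo_driver_t example_bo_driver = {
        .mem_type_prio = example_mem_type_prio,
        .mem_busy_prio = example_mem_busy_prio,
        .num_mem_type_prio = ARRAY_SIZE(example_mem_type_prio),
        .num_mem_busy_prio = ARRAY_SIZE(example_mem_busy_prio),
        /* .fence_type, .evict_mask, .move, ... filled in by a real driver */
};
#endif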
891
892 static int drm_bo_new_mask(drm_buffer_object_t * bo,
893                            uint32_t new_mask, uint32_t hint)
894 {
895         uint32_t new_props;
896
897         if (bo->type == drm_bo_type_user) {
898                 DRM_ERROR("User buffers are not supported yet\n");
899                 return -EINVAL;
900         }
901         if (bo->type == drm_bo_type_fake &&
902             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
903                 DRM_ERROR("Fake buffers must be pinned.\n");
904                 return -EINVAL;
905         }
906
907         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
908                 DRM_ERROR
909                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
910                      "processes\n");
911                 return -EPERM;
912         }
913
914         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
915                                 DRM_BO_FLAG_READ);
916
917         if (!new_props) {
918                 DRM_ERROR("Invalid buffer object rwx properties\n");
919                 return -EINVAL;
920         }
921
922         bo->mem.mask = new_mask;
923         return 0;
924 }
925
926 /*
927  * Call dev->struct_mutex locked.
928  */
929
930 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
931                                               uint32_t handle, int check_owner)
932 {
933         drm_user_object_t *uo;
934         drm_buffer_object_t *bo;
935
936         uo = drm_lookup_user_object(priv, handle);
937
938         if (!uo || (uo->type != drm_buffer_type)) {
939                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
940                 return NULL;
941         }
942
943         if (check_owner && priv != uo->owner) {
944                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
945                         return NULL;
946         }
947
948         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
949         atomic_inc(&bo->usage);
950         return bo;
951 }
952
953 /*
954  * Call bo->mutex locked.
955  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
956  * Unlike drm_bo_busy(), it does not do any fence flushing.
957  */
958
959 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
960 {
961         drm_fence_object_t *fence = bo->fence;
962
963         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
964         if (fence) {
965                 drm_device_t *dev = bo->dev;
966                 if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
967                         drm_fence_usage_deref_unlocked(dev, fence);
968                         bo->fence = NULL;
969                         return 0;
970                 }
971                 return 1;
972         }
973         return 0;
974 }
975
976 /*
977  * Call bo->mutex locked.
978  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
979  */
980
981 static int drm_bo_busy(drm_buffer_object_t * bo)
982 {
983         drm_fence_object_t *fence = bo->fence;
984
985         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
986         if (fence) {
987                 drm_device_t *dev = bo->dev;
988                 if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
989                         drm_fence_usage_deref_unlocked(dev, fence);
990                         bo->fence = NULL;
991                         return 0;
992                 }
993                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
994                 if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
995                         drm_fence_usage_deref_unlocked(dev, fence);
996                         bo->fence = NULL;
997                         return 0;
998                 }
999                 return 1;
1000         }
1001         return 0;
1002 }
1003
1004 static int drm_bo_read_cached(drm_buffer_object_t * bo)
1005 {
1006         int ret = 0;
1007
1008         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1009         if (bo->mem.mm_node)
1010                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1011         return ret;
1012 }
1013
1014 /*
1015  * Wait until a buffer is unmapped.
1016  */
1017
1018 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
1019 {
1020         int ret = 0;
1021
1022         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1023                 return -EBUSY;
1024
1025         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1026                     atomic_read(&bo->mapped) == -1);
1027
1028         if (ret == -EINTR)
1029                 ret = -EAGAIN;
1030
1031         return ret;
1032 }
1033
1034 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
1035 {
1036         int ret;
1037
1038         mutex_lock(&bo->mutex);
1039         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1040         mutex_unlock(&bo->mutex);
1041         return ret;
1042 }
1043
1044 /*
1045  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1046  * Until then, we cannot really do anything with it except delete it.
1047  * The unfenced list is a PITA, and the operations
1048  * 1) validating
1049  * 2) submitting commands
1050  * 3) fencing
1051  * should really be a single atomic operation.
1052  * We now "solve" this problem by keeping
1053  * the buffer "unfenced" after validating, but before fencing.
1054  */
1055
1056 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
1057                                 int eagain_if_wait)
1058 {
1059         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1060
1061         if (ret && no_wait)
1062                 return -EBUSY;
1063         else if (!ret)
1064                 return 0;
1065
1066         ret = 0;
1067         mutex_unlock(&bo->mutex);
1068         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1069                     !drm_bo_check_unfenced(bo));
1070         mutex_lock(&bo->mutex);
1071         if (ret == -EINTR)
1072                 return -EAGAIN;
1073         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1074         if (ret) {
1075                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1076                 return -EBUSY;
1077         }
1078         if (eagain_if_wait)
1079                 return -EAGAIN;
1080
1081         return 0;
1082 }
1083
1084 /*
1085  * Fill in the ioctl reply argument with buffer info.
1086  * Bo locked.
1087  */
1088
1089 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
1090                                 drm_bo_arg_reply_t * rep)
1091 {
1092         rep->handle = bo->base.hash.key;
1093         rep->flags = bo->mem.flags;
1094         rep->size = bo->mem.num_pages * PAGE_SIZE;
1095         rep->offset = bo->offset;
1096         rep->arg_handle = bo->map_list.user_token;
1097         rep->mask = bo->mem.mask;
1098         rep->buffer_start = bo->buffer_start;
1099         rep->fence_flags = bo->fence_type;
1100         rep->rep_flags = 0;
1101         rep->page_alignment = bo->mem.page_alignment;
1102
1103         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1104                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1105                                 DRM_BO_REP_BUSY);
1106         }
1107 }
1108
1109 /*
1110  * Wait for buffer idle and register that we've mapped the buffer.
1111  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1112  * so that if the client dies, the mapping is automatically
1113  * unregistered.
1114  */
1115
1116 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1117                                  uint32_t map_flags, unsigned hint,
1118                                  drm_bo_arg_reply_t * rep)
1119 {
1120         drm_buffer_object_t *bo;
1121         drm_device_t *dev = priv->head->dev;
1122         int ret = 0;
1123         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1124
1125         mutex_lock(&dev->struct_mutex);
1126         bo = drm_lookup_buffer_object(priv, handle, 1);
1127         mutex_unlock(&dev->struct_mutex);
1128
1129         if (!bo)
1130                 return -EINVAL;
1131
1132         mutex_lock(&bo->mutex);
1133         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1134                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1135                 if (ret)
1136                         goto out;
1137         }
1138
1139         /*
1140          * If atomic_inc_and_test() returns true, the buffer was unmapped until now.
1141          * We need to do this test, because unmapping can
1142          * be done without the bo->mutex held.
1143          */
1144
1145         while (1) {
1146                 if (atomic_inc_and_test(&bo->mapped)) {
1147                         if (no_wait && drm_bo_busy(bo)) {
1148                                 atomic_dec(&bo->mapped);
1149                                 ret = -EBUSY;
1150                                 goto out;
1151                         }
1152                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1153                         if (ret) {
1154                                 atomic_dec(&bo->mapped);
1155                                 goto out;
1156                         }
1157
1158                         if ((map_flags & DRM_BO_FLAG_READ) &&
1159                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1160                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1161                                 drm_bo_read_cached(bo);
1162                         }
1163                         break;
1164                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1165                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1166                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1167
1168                         /*
1169                          * The buffer is already mapped with different flags;
1170                          * we need to wait for it to be unmapped.
1171                          */
1172
1173                         ret = drm_bo_wait_unmapped(bo, no_wait);
1174                         if (ret)
1175                                 goto out;
1176
1177                         continue;
1178                 }
1179                 break;
1180         }
1181
1182         mutex_lock(&dev->struct_mutex);
1183         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1184         mutex_unlock(&dev->struct_mutex);
1185         if (ret) {
1186                 if (atomic_add_negative(-1, &bo->mapped))
1187                         DRM_WAKEUP(&bo->event_queue);
1188
1189         } else
1190                 drm_bo_fill_rep_arg(bo, rep);
1191       out:
1192         mutex_unlock(&bo->mutex);
1193         drm_bo_usage_deref_unlocked(bo);
1194         return ret;
1195 }
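/*
 * Editor's usage sketch (hypothetical helper): map a buffer for CPU reads
 * without blocking, then drop the mapping reference again.  The reply
 * argument is only needed here to pick up the map handle / buffer info.
 */
#if 0
static int drm_bo_example_map_for_read(drm_file_t * priv, uint32_t handle)
{
        drm_bo_arg_reply_t rep;
        int ret;

        ret = drm_buffer_object_map(priv, handle, DRM_BO_FLAG_READ,
                                    DRM_BO_HINT_DONT_BLOCK, &rep);
        if (ret)
                return ret;

        /* ... access the buffer contents ... */

        return drm_buffer_object_unmap(priv, handle);
}
#endif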
1196
1197 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1198 {
1199         drm_device_t *dev = priv->head->dev;
1200         drm_buffer_object_t *bo;
1201         drm_ref_object_t *ro;
1202         int ret = 0;
1203
1204         mutex_lock(&dev->struct_mutex);
1205
1206         bo = drm_lookup_buffer_object(priv, handle, 1);
1207         if (!bo) {
1208                 ret = -EINVAL;
1209                 goto out;
1210         }
1211
1212         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1213         if (!ro) {
1214                 ret = -EINVAL;
1215                 goto out;
1216         }
1217
1218         drm_remove_ref_object(priv, ro);
1219         drm_bo_usage_deref_locked(bo);
1220       out:
1221         mutex_unlock(&dev->struct_mutex);
1222         return ret;
1223 }
1224
1225 /*
1226  * Call dev->struct_mutex locked.
1227  */
1228
1229 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1230                                          drm_user_object_t * uo,
1231                                          drm_ref_t action)
1232 {
1233         drm_buffer_object_t *bo =
1234             drm_user_object_entry(uo, drm_buffer_object_t, base);
1235
1236         /*
1237          * We DON'T want to take bo->mutex here, because callers hold it
1238          * while waiting for the buffer to become unmapped, which would deadlock.
1239          */
1240
1241         BUG_ON(action != _DRM_REF_TYPE1);
1242
1243         if (atomic_add_negative(-1, &bo->mapped))
1244                 DRM_WAKEUP(&bo->event_queue);
1245 }
1246
1247 /*
1248  * bo->mutex locked.
1249  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1250  */
1251
1252 int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1253                        int no_wait, int move_unfenced)
1254 {
1255         drm_device_t *dev = bo->dev;
1256         drm_buffer_manager_t *bm = &dev->bm;
1257         int ret = 0;
1258         drm_bo_mem_reg_t mem;
1259         /*
1260          * Flush outstanding fences.
1261          */
1262
1263         drm_bo_busy(bo);
1264
1265         /*
1266          * Wait for outstanding fences.
1267          */
1268
1269         ret = drm_bo_wait(bo, 0, 0, no_wait);
1270         if (ret)
1271                 return ret;
1272
1273         mem.num_pages = bo->mem.num_pages;
1274         mem.size = mem.num_pages << PAGE_SHIFT;
1275         mem.mask = new_mem_flags;
1276         mem.page_alignment = bo->mem.page_alignment;
1277
1278         mutex_lock(&bm->evict_mutex);
1279         mutex_lock(&dev->struct_mutex);
1280         list_del(&bo->lru);
1281         list_add_tail(&bo->lru, &bm->unfenced);
1282         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1283                         _DRM_BO_FLAG_UNFENCED);
1284         mutex_unlock(&dev->struct_mutex);
1285
1286         /*
1287          * Determine where to move the buffer.
1288          */
1289         ret = drm_bo_mem_space(bo, &mem, no_wait);
1290         if (ret)
1291                 goto out_unlock;
1292
1293         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1294
1295  out_unlock:
1296         if (ret || !move_unfenced) {
1297                 mutex_lock(&dev->struct_mutex);
1298                 if (mem.mm_node) {
1299                         if (mem.mm_node != bo->pinned_node)
1300                                 drm_mm_put_block(mem.mm_node);
1301                         mem.mm_node = NULL;
1302                 }
1303                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1304                 DRM_WAKEUP(&bo->event_queue);
1305                 list_del(&bo->lru);
1306                 drm_bo_add_to_lru(bo);
1307                 mutex_unlock(&dev->struct_mutex);
1308         }
1309
1310         mutex_unlock(&bm->evict_mutex);
1311         return ret;
1312 }
1313
1314 static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
1315 {
1316         uint32_t flag_diff = (mem->mask ^ mem->flags);
1317
1318         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1319                 return 0;
1320         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1321             (!(mem->mask & DRM_BO_FLAG_CACHED) ||
1322              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1323           return 0;
1324         }
1325         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1326             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1327              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1328                 return 0;
1329         return 1;
1330 }
1331
1332 static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
1333 {
1334         drm_buffer_manager_t *bm = &dev->bm;
1335         drm_mem_type_manager_t *man;
1336         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1337         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1338         uint32_t i;
1339         int type_ok = 0;
1340         uint32_t mem_type = 0;
1341         uint32_t cur_flags;
1342
1343         if (drm_bo_mem_compat(mem))
1344                 return 0;
1345
1346         BUG_ON(mem->mm_node);
1347
1348         for (i = 0; i < num_prios; ++i) {
1349                 mem_type = prios[i];
1350                 man = &bm->man[mem_type];
1351                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1352                                                &cur_flags);
1353                 if (type_ok)
1354                         break;
1355         }
1356
1357         if (type_ok) {
1358                 mem->mm_node = NULL;
1359                 mem->mem_type = mem_type;
1360                 mem->flags = cur_flags;
1361                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1362                 return 0;
1363         }
1364
1365         DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
1366         return -EINVAL;
1367 }
1368
1369 /*
1370  * bo locked.
1371  */
1372
1373 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1374                                       int move_unfenced, int no_wait)
1375 {
1376         drm_device_t *dev = bo->dev;
1377         drm_buffer_manager_t *bm = &dev->bm;
1378         drm_bo_driver_t *driver = dev->driver->bo_driver;
1379         int ret;
1380
1381         DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
1382                   bo->mem.flags);
1383         ret =
1384             driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
1385         if (ret) {
1386                 DRM_ERROR("Driver did not support given buffer permissions\n");
1387                 return ret;
1388         }
1389
1390         ret = drm_bo_wait_unmapped(bo, no_wait);
1391         if (ret)
1392                 return ret;
1393
1394         if (bo->type == drm_bo_type_fake) {
1395                 ret = drm_bo_check_fake(dev, &bo->mem);
1396                 if (ret)
1397                         return ret;
1398         }
1399
1400         /*
1401          * Check whether we need to move buffer.
1402          */
1403
1404         if (!drm_bo_mem_compat(&bo->mem)) {
1405                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1406                                          move_unfenced);
1407                 if (ret) {
1408                         if (ret != -EAGAIN)
1409                                 DRM_ERROR("Failed moving buffer.\n");
1410                         return ret;
1411                 }
1412         }
1413
1414         /*
1415          * Pinned buffers.
1416          */
1417
1418         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1419                 bo->pinned_mem_type = bo->mem.mem_type;
1420                 mutex_lock(&dev->struct_mutex);
1421                 list_del_init(&bo->pinned_lru);
1422                 drm_bo_add_to_pinned_lru(bo);
1423
1424                 if (bo->pinned_node != bo->mem.mm_node) {
1425                         if (bo->pinned_node != NULL)
1426                                 drm_mm_put_block(bo->pinned_node);
1427                         bo->pinned_node = bo->mem.mm_node;
1428                 }
1429
1430                 mutex_unlock(&dev->struct_mutex);
1431
1432         } else if (bo->pinned_node != NULL) {
1433
1434                 mutex_lock(&dev->struct_mutex);
1435
1436                 if (bo->pinned_node != bo->mem.mm_node)
1437                         drm_mm_put_block(bo->pinned_node);
1438
1439                 list_del_init(&bo->pinned_lru);
1440                 bo->pinned_node = NULL;
1441                 mutex_unlock(&dev->struct_mutex);
1442
1443         }
1444
1445         /*
1446          * We might need to add a TTM.
1447          */
1448
1449         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1450                 ret = drm_bo_add_ttm(bo);
1451                 if (ret)
1452                         return ret;
1453         }
1454         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1455
1456         /*
1457          * Finally, adjust lru to be sure.
1458          */
1459
1460         mutex_lock(&dev->struct_mutex);
1461         list_del(&bo->lru);
1462         if (move_unfenced) {
1463                 list_add_tail(&bo->lru, &bm->unfenced);
1464                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1465                                 _DRM_BO_FLAG_UNFENCED);
1466         } else {
1467                 drm_bo_add_to_lru(bo);
1468                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1469                         DRM_WAKEUP(&bo->event_queue);
1470                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1471                                         _DRM_BO_FLAG_UNFENCED);
1472                 }
1473         }
1474         mutex_unlock(&dev->struct_mutex);
1475
1476         return 0;
1477 }
1478
1479 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1480                                   uint32_t flags, uint32_t mask, uint32_t hint,
1481                                   drm_bo_arg_reply_t * rep)
1482 {
1483         struct drm_device *dev = priv->head->dev;
1484         drm_buffer_object_t *bo;
1485         int ret;
1486         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1487
1488         mutex_lock(&dev->struct_mutex);
1489         bo = drm_lookup_buffer_object(priv, handle, 1);
1490         mutex_unlock(&dev->struct_mutex);
1491         if (!bo) {
1492                 return -EINVAL;
1493         }
1494
1495         mutex_lock(&bo->mutex);
1496         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1497
1498         if (ret)
1499                 goto out;
1500
1501         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1502         ret = drm_bo_new_mask(bo, flags, hint);
1503         if (ret)
1504                 goto out;
1505
1506         ret =
1507             drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
1508                                        no_wait);
1509         drm_bo_fill_rep_arg(bo, rep);
1510
1511       out:
1512
1513         mutex_unlock(&bo->mutex);
1514
1515         drm_bo_usage_deref_unlocked(bo);
1516         return ret;
1517 }
1518
1519 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1520                               drm_bo_arg_reply_t * rep)
1521 {
1522         struct drm_device *dev = priv->head->dev;
1523         drm_buffer_object_t *bo;
1524
1525         mutex_lock(&dev->struct_mutex);
1526         bo = drm_lookup_buffer_object(priv, handle, 1);
1527         mutex_unlock(&dev->struct_mutex);
1528
1529         if (!bo) {
1530                 return -EINVAL;
1531         }
1532         mutex_lock(&bo->mutex);
1533         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1534                 (void)drm_bo_busy(bo);
1535         drm_bo_fill_rep_arg(bo, rep);
1536         mutex_unlock(&bo->mutex);
1537         drm_bo_usage_deref_unlocked(bo);
1538         return 0;
1539 }
1540
1541 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1542                               uint32_t hint, drm_bo_arg_reply_t * rep)
1543 {
1544         struct drm_device *dev = priv->head->dev;
1545         drm_buffer_object_t *bo;
1546         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1547         int ret;
1548
1549         mutex_lock(&dev->struct_mutex);
1550         bo = drm_lookup_buffer_object(priv, handle, 1);
1551         mutex_unlock(&dev->struct_mutex);
1552
1553         if (!bo) {
1554                 return -EINVAL;
1555         }
1556
1557         mutex_lock(&bo->mutex);
1558         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1559         if (ret)
1560                 goto out;
1561         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1562         if (ret)
1563                 goto out;
1564
1565         drm_bo_fill_rep_arg(bo, rep);
1566
1567       out:
1568         mutex_unlock(&bo->mutex);
1569         drm_bo_usage_deref_unlocked(bo);
1570         return ret;
1571 }
1572
1573 int drm_buffer_object_create(drm_device_t *dev,
1574                              unsigned long size,
1575                              drm_bo_type_t type,
1576                              uint32_t mask,
1577                              uint32_t hint,
1578                              uint32_t page_alignment,
1579                              unsigned long buffer_start,
1580                              drm_buffer_object_t ** buf_obj)
1581 {
1582         drm_buffer_manager_t *bm = &dev->bm;
1583         drm_buffer_object_t *bo;
1584         int ret = 0;
1585         unsigned long num_pages;
1586
1587         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1588                 DRM_ERROR("Invalid buffer object start.\n");
1589                 return -EINVAL;
1590         }
1591         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1592         if (num_pages == 0) {
1593                 DRM_ERROR("Illegal buffer object size.\n");
1594                 return -EINVAL;
1595         }
1596
1597         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1598
1599         if (!bo)
1600                 return -ENOMEM;
1601
1602         mutex_init(&bo->mutex);
1603         mutex_lock(&bo->mutex);
1604
1605         atomic_set(&bo->usage, 1);
1606         atomic_set(&bo->mapped, -1);
1607         DRM_INIT_WAITQUEUE(&bo->event_queue);
1608         INIT_LIST_HEAD(&bo->lru);
1609         INIT_LIST_HEAD(&bo->pinned_lru);
1610         INIT_LIST_HEAD(&bo->ddestroy);
1611 #ifdef DRM_ODD_MM_COMPAT
1612         INIT_LIST_HEAD(&bo->p_mm_list);
1613         INIT_LIST_HEAD(&bo->vma_list);
1614 #endif
1615         bo->dev = dev;
1616         bo->type = type;
1617         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1618         bo->mem.num_pages = num_pages;
1619         bo->mem.mm_node = NULL;
1620         bo->mem.page_alignment = page_alignment;
1621         if (bo->type == drm_bo_type_fake) {
1622                 bo->offset = buffer_start;
1623                 bo->buffer_start = 0;
1624         } else {
1625                 bo->buffer_start = buffer_start;
1626         }
1627         bo->priv_flags = 0;
1628         bo->mem.flags = 0;
1629         bo->mem.mask = 0;
1630         atomic_inc(&bm->count);
1631         ret = drm_bo_new_mask(bo, mask, hint);
1632
1633         if (ret)
1634                 goto out_err;
1635
1636         if (bo->type == drm_bo_type_dc) {
1637                 mutex_lock(&dev->struct_mutex);
1638                 ret = drm_bo_setup_vm_locked(bo);
1639                 mutex_unlock(&dev->struct_mutex);
1640                 if (ret)
1641                         goto out_err;
1642         }
1643         ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1644         if (ret)
1645                 goto out_err;
1646
1647         mutex_unlock(&bo->mutex);
1648         *buf_obj = bo;
1649         return 0;
1650
1651       out_err:
1652         mutex_unlock(&bo->mutex);
1653
1654         drm_bo_usage_deref_unlocked(bo);
1655         return ret;
1656 }
1657
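/*
 * Register the buffer object in the file's user object hash so that user
 * space can address it by handle; shareable objects may also be looked up
 * by other clients. The removal and unreference callbacks used when the
 * handle goes away are wired up here as well.
 */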
1658 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1659                                   int shareable)
1660 {
1661         drm_device_t *dev = priv->head->dev;
1662         int ret;
1663
1664         mutex_lock(&dev->struct_mutex);
1665         ret = drm_add_user_object(priv, &bo->base, shareable);
1666         if (ret)
1667                 goto out;
1668
1669         bo->base.remove = drm_bo_base_deref_locked;
1670         bo->base.type = drm_buffer_type;
1671         bo->base.ref_struct_locked = NULL;
1672         bo->base.unref = drm_buffer_user_object_unmap;
1673
1674       out:
1675         mutex_unlock(&dev->struct_mutex);
1676         return ret;
1677 }
1678
1679 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1680 {
1681         LOCK_TEST_WITH_RETURN(dev, filp);
1682         return 0;
1683 }
1684
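/*
 * The buffer object ioctl walks a user-space chain of drm_bo_arg_t
 * structures: each element is copied in, dispatched on req->op, and the
 * reply is copied back before following arg.next. Elements already
 * marked as handled are skipped, which keeps the ioctl restartable when
 * a request returns -EAGAIN.
 */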
1685 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1686 {
1687         DRM_DEVICE;
1688         drm_bo_arg_t arg;
1689         drm_bo_arg_request_t *req = &arg.d.req;
1690         drm_bo_arg_reply_t rep;
1691         unsigned long next;
1692         drm_user_object_t *uo;
1693         drm_buffer_object_t *entry;
1694
1695         if (!dev->bm.initialized) {
1696                 DRM_ERROR("Buffer object manager is not initialized.\n");
1697                 return -EINVAL;
1698         }
1699
1700         do {
1701                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1702
1703                 if (arg.handled) {
1704                         data = arg.next;
1705                         continue;
1706                 }
1707
1708                 rep.ret = 0;
1709                 switch (req->op) {
1710                 case drm_bo_create:
1711                         rep.ret = drm_bo_lock_test(dev, filp);
1712                         if (rep.ret)
1713                                 break;
1714                         rep.ret =
1715                             drm_buffer_object_create(priv->head->dev,
1716                                                      req->size,
1717                                                      req->type,
1718                                                      req->mask,
1719                                                      req->hint,
1720                                                      req->page_alignment,
1721                                                      req->buffer_start, &entry);
1722                         if (rep.ret)
1723                                 break;
1724
1725                         rep.ret =
1726                             drm_bo_add_user_object(priv, entry,
1727                                                    req->mask &
1728                                                    DRM_BO_FLAG_SHAREABLE);
1730                         if (rep.ret)
1731                                 drm_bo_usage_deref_unlocked(entry);
1732
1733                         if (rep.ret)
1734                                 break;
1735
1736                         mutex_lock(&entry->mutex);
1737                         drm_bo_fill_rep_arg(entry, &rep);
1738                         mutex_unlock(&entry->mutex);
1739                         break;
1740                 case drm_bo_unmap:
1741                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1742                         break;
1743                 case drm_bo_map:
1744                         rep.ret = drm_buffer_object_map(priv, req->handle,
1745                                                         req->mask,
1746                                                         req->hint, &rep);
1747                         break;
1748                 case drm_bo_destroy:
1749                         mutex_lock(&dev->struct_mutex);
1750                         uo = drm_lookup_user_object(priv, req->handle);
1751                         if (!uo || (uo->type != drm_buffer_type)
1752                             || uo->owner != priv) {
1753                                 mutex_unlock(&dev->struct_mutex);
1754                                 rep.ret = -EINVAL;
1755                                 break;
1756                         }
1757                         rep.ret = drm_remove_user_object(priv, uo);
1758                         mutex_unlock(&dev->struct_mutex);
1759                         break;
1760                 case drm_bo_reference:
1761                         rep.ret = drm_user_object_ref(priv, req->handle,
1762                                                       drm_buffer_type, &uo);
1763                         if (rep.ret)
1764                                 break;
1765
1766                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1767                         break;
1768                 case drm_bo_unreference:
1769                         rep.ret = drm_user_object_unref(priv, req->handle,
1770                                                         drm_buffer_type);
1771                         break;
1772                 case drm_bo_validate:
1773                         rep.ret = drm_bo_lock_test(dev, filp);
1774
1775                         if (rep.ret)
1776                                 break;
1777                         rep.ret =
1778                             drm_bo_handle_validate(priv, req->handle, req->mask,
1779                                                    req->arg_handle, req->hint,
1780                                                    &rep);
1781                         break;
1782                 case drm_bo_fence:
1783                         rep.ret = drm_bo_lock_test(dev, filp);
1784                         if (rep.ret)
1785                                 break;
1786                         /* Fence operation not implemented. */ break;
1787                 case drm_bo_info:
1788                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1789                         break;
1790                 case drm_bo_wait_idle:
1791                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1792                                                      req->hint, &rep);
1793                         break;
1794                 case drm_bo_ref_fence:
1795                         rep.ret = -EINVAL;
1796                         DRM_ERROR("Function is not implemented yet.\n");
                        break;
1797                 default:
1798                         rep.ret = -EINVAL;
1799                 }
1800                 next = arg.next;
1801
1802                 /*
1803                  * A signal interrupted us. Make sure the ioctl is restartable.
1804                  */
1805
1806                 if (rep.ret == -EAGAIN)
1807                         return -EAGAIN;
1808
1809                 arg.handled = 1;
1810                 arg.d.rep = rep;
1811                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1812                 data = next;
1813         } while (data);
1814         return 0;
1815 }
1816
1817 /**
1818  * Clean the unfenced list and put the buffers on the regular LRU.
1819  * This is part of the memory manager cleanup and should only be
1820  * called with the DRI lock held.
1821  * Call with dev->struct_mutex locked.
1822  */
1823
1824 static void drm_bo_clean_unfenced(drm_device_t *dev)
1825 {
1826         drm_buffer_manager_t *bm = &dev->bm;
1827         struct list_head *head, *list;
1828         drm_buffer_object_t *entry;
1829
1830         head = &bm->unfenced;
1831
1832         list = head->next;
1833         while (list != head) {
1834                 prefetch(list->next);
1835                 entry = list_entry(list, drm_buffer_object_t, lru);
1836
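                /*
                 * Grab a usage reference and temporarily drop
                 * dev->struct_mutex to take the buffer mutex, so the
                 * entry cannot be destroyed in between. The list may
                 * change while the locks are dropped, so each iteration
                 * restarts from the list head.
                 */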
1837                 atomic_inc(&entry->usage);
1838                 mutex_unlock(&dev->struct_mutex);
1839                 mutex_lock(&entry->mutex);
1840                 mutex_lock(&dev->struct_mutex);
1841
1842                 list_del(&entry->lru);
1843                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1844                 drm_bo_add_to_lru(entry);
1845                 mutex_unlock(&entry->mutex);
1846                 list = head->next;
1847         }
1848 }
1849
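/*
 * Prepare a single buffer object for memory manager takedown: expire its
 * fence, optionally release its pinned region, clear a stale NO_EVICT
 * flag and evict the buffer out of the memory type being cleaned.
 * Eviction failures are only propagated when allow_errors is set;
 * otherwise they are logged and ignored.
 */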
1850 static int drm_bo_leave_list(drm_buffer_object_t * bo,
1851                              uint32_t mem_type,
1852                              int free_pinned, int allow_errors)
1853 {
1854         drm_device_t *dev = bo->dev;
1855         int ret = 0;
1856
1857         mutex_lock(&bo->mutex);
1858
1859         ret = drm_bo_expire_fence(bo, allow_errors);
1860         if (ret)
1861                 goto out;
1862
1863         if (free_pinned) {
1864                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1865                 mutex_lock(&dev->struct_mutex);
1866                 list_del_init(&bo->pinned_lru);
1867                 if (bo->pinned_node == bo->mem.mm_node)
1868                         bo->pinned_node = NULL;
1869                 if (bo->pinned_node != NULL) {
1870                         drm_mm_put_block(bo->pinned_node);
1871                         bo->pinned_node = NULL;
1872                 }
1873                 mutex_unlock(&dev->struct_mutex);
1874         }
1875
1876         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1877                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
1878                           "cleanup. Removing flag and evicting.\n");
1879                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1880                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1881         }
1882
1883         if (bo->mem.mem_type == mem_type)
1884                 ret = drm_bo_evict(bo, mem_type, 0);
1885
1886         if (ret) {
1887                 if (allow_errors) {
1888                         goto out;
1889                 } else {
1890                         ret = 0;
1891                         DRM_ERROR("Cleanup eviction failed\n");
1892                 }
1893         }
1894
1895       out:
1896         mutex_unlock(&bo->mutex);
1897         return ret;
1898 }
1899
1900
1901 static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
1902                                          int pinned_list)
1903 {
1904         if (pinned_list)
1905                 return list_entry(list, drm_buffer_object_t, pinned_lru);
1906         else
1907                 return list_entry(list, drm_buffer_object_t, lru);
1908 }
1909
1910 /*
1911  * dev->struct_mutex locked.
1912  */
1913
1914 static int drm_bo_force_list_clean(drm_device_t * dev,
1915                                    struct list_head *head,
1916                                    unsigned mem_type,
1917                                    int free_pinned,
1918                                    int allow_errors,
1919                                    int pinned_list)
1920 {
1921         struct list_head *list, *next, *prev;
1922         drm_buffer_object_t *entry, *nentry;
1923         int ret;
1924         int do_restart;
1925
1926         /*
1927          * The list traversal is a bit odd here, because an item may
1928          * disappear from the list when we release the struct_mutex or
1929          * when we decrease the usage count. Also we're not guaranteed
1930          * to drain pinned lists, so we can't always restart.
1931          */
1932
1933 restart:
1934         nentry = NULL;
1935         list_for_each_safe(list, next, head) {
1936                 prev = list->prev;
1937
1938                 entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
1939                 atomic_inc(&entry->usage);
1940                 if (nentry) {
1941                         atomic_dec(&nentry->usage);
1942                         nentry = NULL;
1943                 }
1944
1945                 /*
1946                  * Protect the next item from destruction, so we can check
1947                  * its list pointers later on.
1948                  */
1949
1950                 if (next != head) {
1951                         nentry = drm_bo_entry(next, pinned_list);
1952                         atomic_inc(&nentry->usage);
1953                 }
1954                 mutex_unlock(&dev->struct_mutex);
1955
1956                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
1957                                         allow_errors);
1958                 mutex_lock(&dev->struct_mutex);
1959
1960                 drm_bo_usage_deref_locked(entry);
1961                 if (ret)
1962                         return ret;
1963
1964                 /*
1965                  * Has the next item disappeared from the list?
1966                  */
1967
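                /*
                 * next->prev still points at the current element if
                 * nothing changed, and at the current element's old
                 * predecessor if only the current element was unlinked;
                 * in both cases we may continue with the protected next
                 * entry. Anything else means the list changed behind our
                 * back, so restart from the head.
                 */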
1968                 do_restart = ((next->prev != list) && (next->prev != prev));
1969
1970                 if (nentry != NULL && do_restart) {
1971                         drm_bo_usage_deref_locked(nentry);
1972                         nentry = NULL;
1973                 }
1974
1975                 if (do_restart)
1976                         goto restart;
1977         }
1978         return 0;
1979 }
1980
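/*
 * Take down a single memory manager type: force-clean its LRU and pinned
 * lists and then tear down the underlying range manager. Returns -EINVAL
 * for an illegal or uninitialized type and -EBUSY if memory is still
 * allocated from the range manager after the forced clean. The system
 * memory type (0) has no range manager; for it only the use/has flags
 * are cleared.
 */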
1981 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1982 {
1983         drm_buffer_manager_t *bm = &dev->bm;
1984         drm_mem_type_manager_t *man = &bm->man[mem_type];
1985         int ret = -EINVAL;
1986
1987         if (mem_type >= DRM_BO_MEM_TYPES) {
1988                 DRM_ERROR("Illegal memory type %d\n", mem_type);
1989                 return ret;
1990         }
1991
1992         if (!man->has_type) {
1993                 DRM_ERROR("Trying to take down uninitialized "
1994                           "memory manager type\n");
1995                 return ret;
1996         }
1997         man->use_type = 0;
1998         man->has_type = 0;
1999
2000         ret = 0;
2001         if (mem_type > 0) {
2002
2003                 drm_bo_clean_unfenced(dev);
2004                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2005                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2006
2007                 if (drm_mm_clean(&man->manager)) {
2008                         drm_mm_takedown(&man->manager);
2009                 } else {
2010                         ret = -EBUSY;
2011                 }
2012         }
2013
2014         return ret;
2015 }
2016
2017 /**
2018  * Evict all buffers of a particular mem_type, but leave memory manager
2019  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2020  * point since we have the hardware lock.
2021  */
2022
2023 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
2024 {
2025         int ret;
2026         drm_buffer_manager_t *bm = &dev->bm;
2027         drm_mem_type_manager_t *man = &bm->man[mem_type];
2028
2029         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2030                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2031                 return -EINVAL;
2032         }
2033
2034         if (!man->has_type) {
2035                 DRM_ERROR("Memory type %u has not been initialized.\n",
2036                           mem_type);
2037                 return 0;
2038         }
2039
2040         drm_bo_clean_unfenced(dev);
2041         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2042         if (ret)
2043                 return ret;
2044         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2045
2046         return ret;
2047 }
2048
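/*
 * Initialize a memory manager type. DRM_BO_MEM_LOCAL needs no backing
 * range manager; every other type gets a drm_mm range allocator covering
 * p_size pages starting at page offset p_offset. A sketch of how a
 * driver might bring up an additional memory type (the type and size
 * used here are illustrative only):
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_size >> PAGE_SHIFT);
 */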
2049 int drm_bo_init_mm(drm_device_t * dev,
2050                    unsigned type,
2051                    unsigned long p_offset, unsigned long p_size)
2052 {
2053         drm_buffer_manager_t *bm = &dev->bm;
2054         int ret = -EINVAL;
2055         drm_mem_type_manager_t *man;
2056
2057         if (type >= DRM_BO_MEM_TYPES) {
2058                 DRM_ERROR("Illegal memory type %d\n", type);
2059                 return ret;
2060         }
2061
2062         man = &bm->man[type];
2063         if (man->has_type) {
2064                 DRM_ERROR("Memory manager already initialized for type %d\n",
2065                           type);
2066                 return ret;
2067         }
2068
2069         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2070         if (ret)
2071                 return ret;
2072
2073         ret = 0;
2074         if (type != DRM_BO_MEM_LOCAL) {
2075                 if (!p_size) {
2076                         DRM_ERROR("Zero size memory manager type %d\n", type);
2077                         return -EINVAL;
2078                 }
2079                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2080                 if (ret)
2081                         return ret;
2082         }
2083         man->has_type = 1;
2084         man->use_type = 1;
2085
2086         INIT_LIST_HEAD(&man->lru);
2087         INIT_LIST_HEAD(&man->pinned);
2088
2089         return 0;
2090 }
2091 EXPORT_SYMBOL(drm_bo_init_mm);
2092
2093 /*
2094  * This is called from lastclose, so we don't need to bother about
2095  * any clients still running when we set the initialized flag to zero.
2096  */
2097
2098 int drm_bo_driver_finish(drm_device_t * dev)
2099 {
2100         drm_buffer_manager_t *bm = &dev->bm;
2101         int ret = 0;
2102         unsigned i = DRM_BO_MEM_TYPES;
2103         drm_mem_type_manager_t *man;
2104
2105         mutex_lock(&dev->bm.init_mutex);
2106         mutex_lock(&dev->struct_mutex);
2107
2108         if (!bm->initialized)
2109                 goto out;
2110         bm->initialized = 0;
2111
2112         while (i--) {
2113                 man = &bm->man[i];
2114                 if (man->has_type) {
2115                         man->use_type = 0;
2116                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2117                                 ret = -EBUSY;
2118                                 DRM_ERROR("DRM memory manager type %d "
2119                                           "is not clean.\n", i);
2120                         }
2121                         man->has_type = 0;
2122                 }
2123         }
2124         mutex_unlock(&dev->struct_mutex);
2125
2126         if (!cancel_delayed_work(&bm->wq)) {
2127                 flush_scheduled_work();
2128         }
2129         mutex_lock(&dev->struct_mutex);
2130         drm_bo_delayed_delete(dev, 1);
2131         if (list_empty(&bm->ddestroy)) {
2132                 DRM_DEBUG("Delayed destroy list was clean\n");
2133         }
2134         if (list_empty(&bm->man[0].lru)) {
2135                 DRM_DEBUG("Swap list was clean\n");
2136         }
2137         if (list_empty(&bm->man[0].pinned)) {
2138                 DRM_DEBUG("NO_MOVE list was clean\n");
2139         }
2140         if (list_empty(&bm->unfenced)) {
2141                 DRM_DEBUG("Unfenced list was clean\n");
2142         }
2143       out:
2144         mutex_unlock(&dev->struct_mutex);
2145         mutex_unlock(&dev->bm.init_mutex);
2146         return ret;
2147 }
2148
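/*
 * Core buffer manager initialization, called once per device by the
 * driver. Brings up the system (local) memory type and the delayed
 * delete work queue; additional memory types are initialized later
 * through drm_bo_init_mm(), either directly by the driver or via the
 * mm_init ioctl.
 */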
2149 int drm_bo_driver_init(drm_device_t * dev)
2150 {
2151         drm_bo_driver_t *driver = dev->driver->bo_driver;
2152         drm_buffer_manager_t *bm = &dev->bm;
2153         int ret = -EINVAL;
2154
2155         mutex_lock(&dev->bm.init_mutex);
2156         mutex_lock(&dev->struct_mutex);
2157         if (!driver)
2158                 goto out_unlock;
2159
2160         /*
2161          * Initialize the system memory buffer type.
2162          * Other types need to be driver / IOCTL initialized.
2163          */
2164
2165         ret = drm_bo_init_mm(dev, 0, 0, 0);
2166         if (ret)
2167                 goto out_unlock;
2168
2169 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2170         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2171 #else
2172         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2173 #endif
2174         bm->initialized = 1;
2175         bm->nice_mode = 1;
2176         atomic_set(&bm->count, 0);
2177         bm->cur_pages = 0;
2178         INIT_LIST_HEAD(&bm->unfenced);
2179         INIT_LIST_HEAD(&bm->ddestroy);
2180       out_unlock:
2181         mutex_unlock(&dev->struct_mutex);
2182         mutex_unlock(&dev->bm.init_mutex);
2183         return ret;
2184 }
2185
2186 EXPORT_SYMBOL(drm_bo_driver_init);
2187
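/*
 * Memory manager ioctl: initialize, take down, lock or unlock a memory
 * type on behalf of user space. All operations run under both
 * bm.init_mutex and dev->struct_mutex; takedown, lock and unlock also
 * require the caller to hold the hardware lock.
 */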
2188 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
2189 {
2190         DRM_DEVICE;
2191
2192         int ret = 0;
2193         drm_mm_init_arg_t arg;
2194         drm_buffer_manager_t *bm = &dev->bm;
2195         drm_bo_driver_t *driver = dev->driver->bo_driver;
2196
2197         if (!driver) {
2198                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2199                 return -EINVAL;
2200         }
2201
2202         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2203
2204         switch (arg.req.op) {
2205         case mm_init:
2206                 ret = -EINVAL;
2207                 mutex_lock(&dev->bm.init_mutex);
2208                 mutex_lock(&dev->struct_mutex);
2209                 if (!bm->initialized) {
2210                         DRM_ERROR("DRM memory manager was not initialized.\n");
2211                         break;
2212                 }
2213                 if (arg.req.mem_type == 0) {
2214                         DRM_ERROR("System memory buffers already initialized.\n");
2216                         break;
2217                 }
2218                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2219                                      arg.req.p_offset, arg.req.p_size);
2220                 break;
2221         case mm_takedown:
2222                 LOCK_TEST_WITH_RETURN(dev, filp);
2223                 mutex_lock(&dev->bm.init_mutex);
2224                 mutex_lock(&dev->struct_mutex);
2225                 ret = -EINVAL;
2226                 if (!bm->initialized) {
2227                         DRM_ERROR("DRM memory manager was not initialized\n");
2228                         break;
2229                 }
2230                 if (arg.req.mem_type == 0) {
2231                         DRM_ERROR("No takedown for System memory buffers.\n");
2232                         break;
2233                 }
2234                 ret = 0;
2235                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2236                         DRM_ERROR("Memory manager type %d not clean. "
2237                                   "Delaying takedown\n", arg.req.mem_type);
2238                 }
2239                 break;
2240         case mm_lock:
2241                 LOCK_TEST_WITH_RETURN(dev, filp);
2242                 mutex_lock(&dev->bm.init_mutex);
2243                 mutex_lock(&dev->struct_mutex);
2244                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2245                 break;
2246         case mm_unlock:
2247                 LOCK_TEST_WITH_RETURN(dev, filp);
2248                 mutex_lock(&dev->bm.init_mutex);
2249                 mutex_lock(&dev->struct_mutex);
2250                 ret = 0;
2251                 break;
2252         default:
2253                 DRM_ERROR("Function not implemented yet\n");
2254                 return -EINVAL;
2255         }
2256
2257         mutex_unlock(&dev->struct_mutex);
2258         mutex_unlock(&dev->bm.init_mutex);
2259         if (ret)
2260                 return ret;
2261
2262         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2263         return 0;
2264 }
2265
2266 /*
2267  * buffer object vm functions.
2268  */
2269
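/*
 * Non-fixed memory that is local, CMA-backed or mapped cached is accessed
 * directly rather than through a PCI aperture; everything else, including
 * all fixed memory types, is treated as PCI memory.
 */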
2270 int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
2271 {
2272         drm_buffer_manager_t *bm = &dev->bm;
2273         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2274
2275         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2276                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2277                         return 0;
2278
2279                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2280                         return 0;
2281
2282                 if (mem->flags & DRM_BO_FLAG_CACHED)
2283                         return 0;
2284         }
2285         return 1;
2286 }
2287
2288 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2289
2290 /**
2291  * Get the PCI offset for the buffer object memory.
2292  *
2293  * \param mem The buffer object memory region.
2294  * \param bus_base On return the base of the PCI region
2295  * \param bus_offset On return the byte offset into the PCI region
2296  * \param bus_size On return the byte size of the buffer object or zero if
2297  *     the buffer object memory is not accessible through a PCI region.
2298  * \return Failure indication.
2299  *
2300  * Returns -EINVAL if the buffer object is currently not mappable.
2301  * Otherwise returns zero.
2302  */
2303
2304 int drm_bo_pci_offset(drm_device_t * dev,
2305                       drm_bo_mem_reg_t * mem,
2306                       unsigned long *bus_base,
2307                       unsigned long *bus_offset, unsigned long *bus_size)
2308 {
2309         drm_buffer_manager_t *bm = &dev->bm;
2310         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2311
2312         *bus_size = 0;
2313         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2314                 return -EINVAL;
2315
2316         if (drm_mem_reg_is_pci(dev, mem)) {
2317                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2318                 *bus_size = mem->num_pages << PAGE_SHIFT;
2319                 *bus_base = man->io_offset;
2320         }
2321
2322         return 0;
2323 }
2324
2325 /**
2326  * Kill all user-space virtual mappings of this buffer object.
2327  *
2328  * \param bo The buffer object.
2329  *
2330  * Call bo->mutex locked.
2331  */
2332
2333 void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
2334 {
2335         drm_device_t *dev = bo->dev;
2336         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2337         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2338
2339         if (!dev->dev_mapping)
2340                 return;
2341
2342         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2343 }
2344
2345 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
2346 {
2347         drm_map_list_t *list = &bo->map_list;
2348         drm_local_map_t *map;
2349         drm_device_t *dev = bo->dev;
2350
2351         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2352         if (list->user_token) {
2353                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2354                 list->user_token = 0;
2355         }
2356         if (list->file_offset_node) {
2357                 drm_mm_put_block(list->file_offset_node);
2358                 list->file_offset_node = NULL;
2359         }
2360
2361         map = list->map;
2362         if (!map)
2363                 return;
2364
2365         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2366         list->map = NULL;
2367         list->user_token = 0ULL;
2368         drm_bo_usage_deref_locked(bo);
2369 }
2370
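/*
 * Set up the fake-offset mapping for a buffer object: allocate a map
 * entry, reserve a page range in dev->offset_manager and hash its start
 * so that the user token returned to user space can be resolved back to
 * the object at mmap time. Takes an extra usage reference that
 * drm_bo_takedown_vm_locked() drops again.
 */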
2371 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
2372 {
2373         drm_map_list_t *list = &bo->map_list;
2374         drm_local_map_t *map;
2375         drm_device_t *dev = bo->dev;
2376
2377         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2378         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2379         if (!list->map)
2380                 return -ENOMEM;
2381
2382         map = list->map;
2383         map->offset = 0;
2384         map->type = _DRM_TTM;
2385         map->flags = _DRM_REMOVABLE;
2386         map->size = bo->mem.num_pages * PAGE_SIZE;
2387         atomic_inc(&bo->usage);
2388         map->handle = (void *)bo;
2389
2390         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2391                                                     bo->mem.num_pages, 0, 0);
2392
2393         if (!list->file_offset_node) {
2394                 drm_bo_takedown_vm_locked(bo);
2395                 return -ENOMEM;
2396         }
2397
2398         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2399                                                   bo->mem.num_pages, 0);
2400
2401         list->hash.key = list->file_offset_node->start;
2402         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2403                 drm_bo_takedown_vm_locked(bo);
2404                 return -ENOMEM;
2405         }
2406
2407         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2408
2409         return 0;
2410 }