1 /**************************************************************************
2  * 
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  * 
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  * 
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself, excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44  * both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
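
/*
 * Illustrative sketch only (assumptions, not driver code): because the
 * locking order is bo->mutex before dev->struct_mutex, a list walk that
 * needs bo->mutex has to drop dev->struct_mutex, pin the entry via its
 * usage counter, and in general restart the traversal afterwards,
 * roughly like this:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	while (!list_empty(&man->lru)) {
 *		entry = list_entry(man->lru.next, drm_buffer_object_t, lru);
 *		atomic_inc(&entry->usage);              (pin the entry)
 *		mutex_unlock(&dev->struct_mutex);       (honor the lock order)
 *		mutex_lock(&entry->mutex);
 *		mutex_lock(&dev->struct_mutex);
 *		... operate on entry ...
 *		mutex_unlock(&entry->mutex);
 *		drm_bo_usage_deref_locked(entry);       (may free the entry)
 *	}
 *	mutex_unlock(&dev->struct_mutex);
 */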
51
52
53
54 static void drm_bo_destroy_locked(drm_buffer_object_t *bo);
55 static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo);
56 static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo);
57 static void drm_bo_unmap_virtual(drm_buffer_object_t *bo);
58
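/*
 * For reference (this assumes the DRM_BO_FLAG_MEM_* layout in drm.h, where
 * the memory-type flags start at bit 24): memory type 0 (DRM_BO_MEM_LOCAL)
 * maps to DRM_BO_FLAG_MEM_LOCAL (1 << 24), type 1 (DRM_BO_MEM_TT) to
 * DRM_BO_FLAG_MEM_TT (1 << 25), and so on.
 */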
59 static inline uint32_t drm_bo_type_flags(unsigned type)
60 {
61         return (1 << (24 + type));
62 }
63
64 /*
65  * bo locked. dev->struct_mutex locked.
66  */
67
68 void drm_bo_add_to_lru(drm_buffer_object_t * bo,
69                        drm_buffer_manager_t * bm)
70 {
71         struct list_head *list;
72         drm_mem_type_manager_t *man;
73
74         man = &bm->man[bo->mem.mem_type];
75         list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
76                 &man->pinned : &man->lru;
77         list_add_tail(&bo->lru, list);
78         return;
79 }
80
81 static int drm_bo_vm_pre_move(drm_buffer_object_t *bo,
82                               int old_is_pci)
83 {
84 #ifdef DRM_ODD_MM_COMPAT
85         int ret;
86
87         ret = drm_bo_lock_kmm(bo);
88         if (ret) {
89                 if (ret == -EAGAIN)
90                         schedule();
91                 return ret;
92         }
93         drm_bo_unmap_virtual(bo);
94         if (old_is_pci)
95                 drm_bo_finish_unmap(bo);
96 #else
97         drm_bo_unmap_virtual(bo);
98 #endif
99         return 0;
100 }
101
102 static void drm_bo_vm_post_move(drm_buffer_object_t *bo)
103 {
104 #ifdef DRM_ODD_MM_COMPAT
105         int ret;
106         
107         ret = drm_bo_remap_bound(bo);
108         if (ret) {
109                 DRM_ERROR("Failed to remap a bound buffer object.\n"
110                           "\tThis might cause a sigbus later.\n");
111         }
112         drm_bo_unlock_kmm(bo);
113 #endif
114 }
115
116 /*
117  * Call bo->mutex locked.
118  */
119
120 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
121 {
122         drm_device_t *dev = bo->dev;
123         int ret = 0;
124         bo->ttm = NULL;
125
126         switch (bo->type) {
127         case drm_bo_type_dc:
128                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
129                 if (!bo->ttm)
130                         ret = -ENOMEM;
131                 break;
132         case drm_bo_type_user:
133         case drm_bo_type_fake:
134                 break;
135         default:
136                 DRM_ERROR("Illegal buffer object type\n");
137                 ret = -EINVAL;
138                 break;
139         }
140
141         return ret;
142 }
143
144
145 static int drm_bo_handle_move_mem(drm_buffer_object_t *bo, 
146                                   drm_bo_mem_reg_t *mem,
147                                   int evict,
148                                   int no_wait)
149 {
150         drm_device_t *dev = bo->dev;
151         drm_buffer_manager_t *bm = &dev->bm;
152         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
153         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
154         drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
155         drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
156         int ret = 0;
157
158
159         if (old_is_pci || new_is_pci)
160                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
161         if (ret)
162                 return ret;
163
164         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && 
165             (bo->ttm == NULL)) 
166                 ret = drm_bo_add_ttm(bo);
167         if (ret)
168                 return ret;
169
170         if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
171             !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
172                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
173         }  else if (dev->driver->bo_driver->move) {
174                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
175         } else {
176                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
177         }
178
179         if (old_is_pci || new_is_pci)
180                 drm_bo_vm_post_move(bo);
181        
182         if (ret)
183                 return ret;
184
185         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
186                 ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags);
187                 if (ret)
188                         DRM_ERROR("Cannot flush read caches\n");
189         }
190         
191         DRM_FLAG_MASKED(bo->priv_flags,
192                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
193                         _DRM_BO_FLAG_EVICTED);
194
195         if (bo->mem.mm_node)
196                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
197
198         return 0;
199 }
200
201 /*
202  * Call bo->mutex locked.
203  * Wait until the buffer is idle.
204  */
205
206 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
207                 int no_wait)
208 {
209
210         drm_fence_object_t *fence = bo->fence;
211         int ret;
212
213         if (fence) {
214                 drm_device_t *dev = bo->dev;
215                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
216                         drm_fence_usage_deref_unlocked(dev, fence);
217                         bo->fence = NULL;
218                         return 0;
219                 }
220                 if (no_wait) {
221                         return -EBUSY;
222                 }
223                 ret =
224                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
225                                           bo->fence_type);
226                 if (ret)
227                         return ret;
228
229                 drm_fence_usage_deref_unlocked(dev, fence);
230                 bo->fence = NULL;
231
232         }
233         return 0;
234 }
235
236 /*
237  * Call dev->struct_mutex locked.
238  * Attempts to remove all private references to a buffer by expiring its
239  * fence object and removing it from lru lists and memory managers.
240  */
241
242
243 static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
244 {
245         drm_device_t *dev = bo->dev;
246         drm_buffer_manager_t *bm = &dev->bm;
247
248         atomic_inc(&bo->usage);
249         mutex_unlock(&dev->struct_mutex);
250         mutex_lock(&bo->mutex);
251
252         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
253
254         if (bo->fence && drm_fence_object_signaled(bo->fence,
255                                                    bo->fence_type)) {
256                 drm_fence_usage_deref_locked(dev, bo->fence);
257                 bo->fence = NULL;
258         }
259
260         if (bo->fence && remove_all) {
261                 if (bm->nice_mode) {
262                         unsigned long _end = jiffies + 3 * DRM_HZ;
263                         int ret;
264                         do {
265                                 ret = drm_bo_wait(bo, 0, 1, 0);
266                         } while (ret && !time_after_eq(jiffies, _end));
267
268                         if (bo->fence) {
269                                 bm->nice_mode = 0;
270                                 DRM_ERROR("Detected GPU lockup or "
271                                           "fence driver was taken down. "
272                                           "Evicting waiting buffers.\n");
273                         }
274                         if (bo->fence) {
275                                 drm_fence_usage_deref_unlocked(dev, bo->fence);
276                                 bo->fence = NULL;
277                         }
278                 }
279         }
280         mutex_lock(&dev->struct_mutex);
281
282         if (!atomic_dec_and_test(&bo->usage)) {
283                 goto out;
284         }
285
286         if (!bo->fence) {
287                 list_del_init(&bo->lru);
288                 if (bo->mem.mm_node) {
289                         drm_mm_put_block(bo->mem.mm_node);
290                         bo->mem.mm_node = NULL;
291                 }
292                 list_del_init(&bo->ddestroy);
293                 mutex_unlock(&bo->mutex);
294                 drm_bo_destroy_locked(bo);
295                 return;
296         }
297
298         if (list_empty(&bo->ddestroy)) {
299                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
300                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
301                 schedule_delayed_work(&bm->wq,
302                                       ((DRM_HZ / 100) <
303                                        1) ? 1 : DRM_HZ / 100);
304         }
305
306 out:
307         mutex_unlock(&bo->mutex);
308         return;
309 }
310
311
312 /*
313  * Verify that refcount is 0 and that there are no internal references
314  * to the buffer object. Then destroy it.
315  */
316
317 static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
318 {
319         drm_device_t *dev = bo->dev;
320         drm_buffer_manager_t *bm = &dev->bm;
321
322         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && atomic_read(&bo->usage) == 0) {
323                 BUG_ON(bo->fence != NULL);
324
325 #ifdef DRM_ODD_MM_COMPAT
326                 BUG_ON(!list_empty(&bo->vma_list));
327                 BUG_ON(!list_empty(&bo->p_mm_list));
328 #endif
329
330                 if (bo->ttm) {
331                         drm_ttm_unbind(bo->ttm);
332                         drm_destroy_ttm(bo->ttm);
333                         bo->ttm = NULL;
334                 }
335
336                 atomic_dec(&bm->count);
337
338                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
339
340                 return;
341         }
342
343         /*
344          * Some stuff is still trying to reference the buffer object.
345          * Get rid of those references.
346          */
347
348         drm_bo_cleanup_refs(bo, 0);
349
350         return;
351 }
352
353
354 /*
355  * Call dev->struct_mutex locked.
356  */
357
358 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
359 {
360         drm_buffer_manager_t *bm = &dev->bm;
361
362         drm_buffer_object_t *entry, *nentry;
363         struct list_head *list, *next;
364
365         list_for_each_safe(list, next, &bm->ddestroy) {
366                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
367
368                 nentry = NULL;
369                 if (next != &bm->ddestroy) {
370                         nentry = list_entry(next, drm_buffer_object_t,
371                                             ddestroy);
372                         atomic_inc(&nentry->usage);
373                 }
374
375                 drm_bo_cleanup_refs(entry, remove_all);
376
377                 if (nentry) {
378                         atomic_dec(&nentry->usage);
379                 }
380         }
381
382 }
383
384 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
385 static void drm_bo_delayed_workqueue(void *data)
386 #else
387 static void drm_bo_delayed_workqueue(struct work_struct *work)
388 #endif
389 {
390 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
391         drm_device_t *dev = (drm_device_t *) data;
392         drm_buffer_manager_t *bm = &dev->bm;
393 #else
394         drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
395         drm_device_t *dev = container_of(bm, drm_device_t, bm);
396 #endif
397
398
399         DRM_DEBUG("Delayed delete Worker\n");
400
401         mutex_lock(&dev->struct_mutex);
402         if (!bm->initialized) {
403                 mutex_unlock(&dev->struct_mutex);
404                 return;
405         }
406         drm_bo_delayed_delete(dev, 0);
407         if (bm->initialized && !list_empty(&bm->ddestroy)) {
408                 schedule_delayed_work(&bm->wq,
409                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
410         }
411         mutex_unlock(&dev->struct_mutex);
412 }
413
414 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
415 {
416         if (atomic_dec_and_test(&bo->usage)) {
417                 drm_bo_destroy_locked(bo);
418         }
419 }
420
421 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
422 {
423         drm_buffer_object_t *bo =
424                 drm_user_object_entry(uo, drm_buffer_object_t, base);
425
426         drm_bo_takedown_vm_locked(bo);
427         drm_bo_usage_deref_locked(bo);                                         
428 }
429
430 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
431 {
432         drm_device_t *dev = bo->dev;
433
434         if (atomic_dec_and_test(&bo->usage)) {
435                 mutex_lock(&dev->struct_mutex);
436                 if (atomic_read(&bo->usage) == 0)
437                         drm_bo_destroy_locked(bo);
438                 mutex_unlock(&dev->struct_mutex);
439         }
440 }
441
442 /*
443  * Note: The caller has to register (if applicable)
444  * and deregister fence object usage; see the illustrative sketch below.
445  */
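
/*
 * Hypothetical caller sketch (not part of this file; error handling and the
 * surrounding driver code are assumed): after emitting its command stream a
 * driver would typically fence the unfenced list and then drop its own
 * reference on the fence actually used:
 *
 *	drm_fence_object_t *fence = NULL;
 *
 *	ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *	if (ret)
 *		return ret;
 *	... pass the fence to user space or wait on it ...
 *	drm_fence_usage_deref_unlocked(dev, fence);
 */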
446
447 int drm_fence_buffer_objects(drm_file_t * priv,
448                              struct list_head *list,
449                              uint32_t fence_flags,
450                              drm_fence_object_t * fence,
451                              drm_fence_object_t ** used_fence)
452 {
453         drm_device_t *dev = priv->head->dev;
454         drm_buffer_manager_t *bm = &dev->bm;
455
456         drm_buffer_object_t *entry;
457         uint32_t fence_type = 0;
458         int count = 0;
459         int ret = 0;
460         struct list_head *l;
461         LIST_HEAD(f_list);
462
463         mutex_lock(&dev->struct_mutex);
464
465         if (!list)
466                 list = &bm->unfenced;
467
468         list_for_each_entry(entry, list, lru) {
469                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
470                 fence_type |= entry->fence_type;
471                 if (entry->fence_class != 0) {
472                         DRM_ERROR("Fence class %d is not implemented yet.\n",
473                                   entry->fence_class);
474                         ret = -EINVAL;
475                         goto out;
476                 }
477                 count++;
478         }
479
480         if (!count) {
481                 ret = -EINVAL;
482                 goto out;
483         }
484
485         /*
486          * Transfer to a local list before we release the dev->struct_mutex;
487          * This is so we don't get any new unfenced objects while fencing 
488          * the ones we already have.
489          */
490
491         list_splice_init(list, &f_list);
492
493         if (fence) {
494                 if ((fence_type & fence->type) != fence_type) {
495                         DRM_ERROR("Given fence doesn't match buffers "
496                                   "on unfenced list.\n");
497                         ret = -EINVAL;
498                         goto out;
499                 }
500         } else {
501                 mutex_unlock(&dev->struct_mutex);
502                 ret = drm_fence_object_create(dev, fence_type,
503                                               fence_flags | DRM_FENCE_FLAG_EMIT,
504                                               &fence);
505                 mutex_lock(&dev->struct_mutex);
506                 if (ret)
507                         goto out;
508         }
509
510         count = 0;
511         l = f_list.next;
512         while (l != &f_list) {
513                 entry = list_entry(l, drm_buffer_object_t, lru);
514                 atomic_inc(&entry->usage);
515                 mutex_unlock(&dev->struct_mutex);
516                 mutex_lock(&entry->mutex);
517                 mutex_lock(&dev->struct_mutex);
518                 list_del_init(l);
519                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
520                         count++;
521                         if (entry->fence)
522                                 drm_fence_usage_deref_locked(dev, entry->fence);
523                         entry->fence = fence;
524                         DRM_FLAG_MASKED(entry->priv_flags, 0,
525                                         _DRM_BO_FLAG_UNFENCED);
526                         DRM_WAKEUP(&entry->event_queue);
527                         drm_bo_add_to_lru(entry, bm);
528                 }
529                 mutex_unlock(&entry->mutex);
530                 drm_bo_usage_deref_locked(entry);
531                 l = f_list.next;
532         }
533         atomic_add(count, &fence->usage);
534         DRM_DEBUG("Fenced %d buffers\n", count);
535       out:
536         mutex_unlock(&dev->struct_mutex);
537         *used_fence = fence;
538         return ret;
539 }
540
541 EXPORT_SYMBOL(drm_fence_buffer_objects);
542
543 /*
544  * bo->mutex locked 
545  */
546
547 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
548                         int no_wait, int force_no_move)
549 {
550         int ret = 0;
551         drm_device_t *dev = bo->dev;
552         drm_buffer_manager_t *bm = &dev->bm;
553         drm_bo_mem_reg_t evict_mem;
554
555         /*
556          * Someone might have modified the buffer before we took the buffer mutex.
557          */
558
559         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
560                 goto out;
561         if (bo->mem.mem_type != mem_type)
562                 goto out;
563
564         ret = drm_bo_wait(bo, 0, 0, no_wait);
565
566         if (ret && ret != -EAGAIN) {
567                 DRM_ERROR("Failed to expire fence before "
568                           "buffer eviction.\n");
569                 goto out;
570         }
571
572         if (bo->type != drm_bo_type_dc)
573                 goto out1;
574
575         evict_mem = bo->mem;
576         evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type);
577         ret = drm_bo_mem_space(dev, &evict_mem, no_wait);
578
579         if (ret) {
580                 if (ret != -EAGAIN)
581                         DRM_ERROR("Failed to find memory space for "
582                                   "buffer eviction.\n");
583                 goto out;
584         }
585         
586         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
587         
588         if (ret) {
589                 if (ret != -EAGAIN)
590                         DRM_ERROR("Buffer eviction failed\n");
591                 goto out;
592         }
593         
594 out1:
595         mutex_lock(&dev->struct_mutex);
596         if (evict_mem.mm_node) {
597                 drm_mm_put_block(evict_mem.mm_node);
598                 evict_mem.mm_node = NULL;
599         }
600         list_del(&bo->lru);
601         drm_bo_add_to_lru(bo, bm);
602         mutex_unlock(&dev->struct_mutex);
603
604         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
605                         _DRM_BO_FLAG_EVICTED);
606                                      
607 out:
608         return ret;
609 }
610
611
612
613 static int drm_bo_mem_force_space(drm_device_t *dev,
614                                   drm_bo_mem_reg_t *mem,
615                                   uint32_t mem_type,
616                                   int no_wait)
617 {
618         drm_mm_node_t *node;
619         drm_buffer_manager_t *bm = &dev->bm;
620         drm_buffer_object_t *entry;
621         drm_mem_type_manager_t *man = &bm->man[mem_type];
622         struct list_head *lru;
623         unsigned long num_pages = mem->num_pages;
624         int ret;
625
626         mutex_lock(&dev->struct_mutex);
627         do {
628                 node = drm_mm_search_free(&man->manager, num_pages, 
629                                           mem->page_alignment, 1);
630                 if (node)
631                         break;
632
633                 lru = &man->lru;
634                 if (lru->next == lru)
635                         break;
636
637                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
638                 atomic_inc(&entry->usage);
639                 mutex_unlock(&dev->struct_mutex);
640                 mutex_lock(&entry->mutex);
641                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
642
643                 ret = drm_bo_evict(entry, mem_type, no_wait, 0);
644                 mutex_unlock(&entry->mutex);
645                 drm_bo_usage_deref_unlocked(entry);
646                 if (ret)
647                         return ret;
648                 mutex_lock(&dev->struct_mutex);
649         } while (1);
650
651         if (!node) {
652                 mutex_unlock(&dev->struct_mutex);
653                 return -ENOMEM;
654         }
655
656         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
657         mutex_unlock(&dev->struct_mutex);
658         mem->mm_node = node;
659         mem->mem_type = mem_type;
660         return 0;
661 }
662
663
664 static int drm_bo_mt_compatible(drm_mem_type_manager_t *man,
665                                 uint32_t mem_type,
666                                 uint32_t mask,
667                                 uint32_t *res_mask)
668 {
669         uint32_t cur_flags = drm_bo_type_flags(mem_type);
670         uint32_t flag_diff;
671
672         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
673                 cur_flags |= DRM_BO_FLAG_CACHED;
674         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
675                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
676         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
677                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
678
679         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) {
680                 return 0;
681         }
682         flag_diff = (mask ^ cur_flags);
683         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
684             (mask & DRM_BO_FLAG_FORCE_CACHING)) {
685                 return 0;
686         }
687         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
688             (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) {
689                 return 0;
690         }
691
692         *res_mask = cur_flags;
693         return 1;
694 }
695         
696
697 int drm_bo_mem_space(drm_device_t *dev,
698                      drm_bo_mem_reg_t *mem,
699                      int no_wait)
700 {
701         drm_buffer_manager_t *bm= &dev->bm;
702         drm_mem_type_manager_t *man; 
703
704         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
705         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
706         uint32_t i;
707         uint32_t mem_type = DRM_BO_MEM_LOCAL;
708         uint32_t cur_flags;
709         int type_found = 0;
710         int type_ok = 0;
711         int has_eagain = 0;
712         drm_mm_node_t *node = NULL;
713         int ret;
714
715         for (i=0; i<num_prios; ++i) {
716                 mem_type = prios[i];
717                 man = &bm->man[mem_type];
718
719                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, 
720                                                &cur_flags); 
721
722                 if (!type_ok)
723                         continue;
724
725                 if (mem_type == DRM_BO_MEM_LOCAL)
726                         break;
727
728                 mutex_lock(&dev->struct_mutex);
729                 if (man->has_type && man->use_type) {
730                         type_found = 1;
731                         node = drm_mm_search_free(&man->manager, mem->num_pages, 
732                                                   mem->page_alignment, 1);
733                         if (node) 
734                                 node = drm_mm_get_block(node, mem->num_pages, 
735                                                         mem->page_alignment);
736                 }
737                 mutex_unlock(&dev->struct_mutex);
738                 if (node)
739                         break;
740         }
741         
742         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
743                 mem->mm_node = node;
744                 mem->mem_type = mem_type;
745                 mem->flags = cur_flags;
746                 return 0;
747         }
748
749         if (!type_found) 
750                 return -EINVAL;
751         
752         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
753         prios = dev->driver->bo_driver->mem_busy_prio;
754
755         for (i=0; i<num_prios; ++i) {
756                 mem_type = prios[i];
757                 man = &bm->man[mem_type];
758
759                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, 
760                                           &cur_flags))
761                         continue;
762                 
763                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
764                 
765                 if (ret == 0) {
766                         mem->flags = cur_flags;
767                         return 0;
768                 }
769                 
770                 if (ret == -EAGAIN)
771                         has_eagain = 1;
772         }
773
774         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
775         return ret;
776 }
777 EXPORT_SYMBOL(drm_bo_mem_space);
778
779
780 static int drm_bo_new_mask(drm_buffer_object_t *bo,
781                            uint32_t new_mask, uint32_t hint)
782 {
783         uint32_t new_props;
784
785         if (bo->type == drm_bo_type_user) {
786                 DRM_ERROR("User buffers are not supported yet\n");
787                 return -EINVAL;
788         }
789         if (bo->type == drm_bo_type_fake &&
790             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
791                 DRM_ERROR("Fake buffers must be pinned.\n");
792                 return -EINVAL;
793         }
794
795         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
796                 DRM_ERROR
797                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
798                      "processes\n");
799                 return -EPERM;
800         }
801
802         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
803                                 DRM_BO_FLAG_READ);
804
805         if (!new_props) {
806                 DRM_ERROR("Invalid buffer object rwx properties\n");
807                 return -EINVAL;
808         }
809
810         /*
811          * FIXME: Check what can be done about pinned buffers here.
812          */
813
814         bo->mem.mask = new_mask;
815         return 0;
816 }
817
818 /*
819  * Call dev->struct_mutex locked.
820  */
821
822 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
823                                               uint32_t handle, int check_owner)
824 {
825         drm_user_object_t *uo;
826         drm_buffer_object_t *bo;
827
828         uo = drm_lookup_user_object(priv, handle);
829
830         if (!uo || (uo->type != drm_buffer_type)) {
831                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
832                 return NULL;
833         }
834
835         if (check_owner && priv != uo->owner) {
836                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
837                         return NULL;
838         }
839
840         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
841         atomic_inc(&bo->usage);
842         return bo;
843 }
844
845 /*
846  * Call bo->mutex locked.
847  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
848  * Unlike drm_bo_busy(), this doesn't do any fence flushing.
849  */
850
851 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
852 {
853         drm_fence_object_t *fence = bo->fence;
854
855         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
856         if (fence) {
857                 drm_device_t *dev = bo->dev;
858                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
859                         drm_fence_usage_deref_unlocked(dev, fence);
860                         bo->fence = NULL;
861                         return 0;
862                 }
863                 return 1;
864         }
865         return 0;
866 }
867
868 /*
869  * Call bo->mutex locked.
870  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
871  */
872
873 static int drm_bo_busy(drm_buffer_object_t * bo)
874 {
875         drm_fence_object_t *fence = bo->fence;
876
877         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
878         if (fence) {
879                 drm_device_t *dev = bo->dev;
880                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
881                         drm_fence_usage_deref_unlocked(dev, fence);
882                         bo->fence = NULL;
883                         return 0;
884                 }
885                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
886                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
887                         drm_fence_usage_deref_unlocked(dev, fence);
888                         bo->fence = NULL;
889                         return 0;
890                 }
891                 return 1;
892         }
893         return 0;
894 }
895
896 static int drm_bo_read_cached(drm_buffer_object_t * bo)
897 {
898         int ret = 0;
899
900         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
901         if (bo->mem.mm_node)
902                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
903         return ret;
904 }
905
906 /*
907  * Wait until a buffer is unmapped.
908  */
909
910 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
911 {
912         int ret = 0;
913
914         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
915                 return -EBUSY;
916
917         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
918                     atomic_read(&bo->mapped) == -1);
919
920         if (ret == -EINTR)
921                 ret = -EAGAIN;
922
923         return ret;
924 }
925
926 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
927 {
928         int ret;
929
930         mutex_lock(&bo->mutex);
931         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
932         mutex_unlock(&bo->mutex);
933         return ret;
934 }
935
936 /*
937  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
938  * Until then, we cannot really do anything with it except delete it.
939  * The unfenced list is a PITA, and the operations
940  * 1) validating
941  * 2) submitting commands
942  * 3) fencing
943  * should really be a single atomic operation.
944  * We now "solve" this problem by keeping
945  * the buffer "unfenced" after validating, but before fencing.
946  */
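
/*
 * Illustrative flow only (the real entry points are the ioctl handlers
 * further down in this file):
 *
 *	drm_buffer_object_validate(bo, 1, 0);
 *		(bo may end up on bm->unfenced, marked _DRM_BO_FLAG_UNFENCED)
 *	... submit commands referencing bo ...
 *	drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *		(fences everything on the unfenced list and clears the flag)
 *
 * Anything else that needs to touch bo in between goes through
 * drm_bo_wait_unfenced() below.
 */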
947
948 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
949                                 int eagain_if_wait)
950 {
951         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
952         unsigned long _end = jiffies + 3 * DRM_HZ;
953
954         if (ret && no_wait)
955                 return -EBUSY;
956         else if (!ret)
957                 return 0;
958
959         do {
960                 mutex_unlock(&bo->mutex);
961                 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
962                             !drm_bo_check_unfenced(bo));
963                 mutex_lock(&bo->mutex);
964                 if (ret == -EINTR)
965                         return -EAGAIN;
966                 if (ret) {
967                         DRM_ERROR
968                             ("Error waiting for buffer to become fenced\n");
969                         return ret;
970                 }
971                 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
972         } while (ret && !time_after_eq(jiffies, _end));
973         if (ret) {
974                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
975                 return ret;
976         }
977         if (eagain_if_wait)
978                 return -EAGAIN;
979
980         return 0;
981 }
982
983 /*
984  * Fill in the ioctl reply argument with buffer info.
985  * Bo locked. 
986  */
987
988 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
989                                 drm_bo_arg_reply_t * rep)
990 {
991         rep->handle = bo->base.hash.key;
992         rep->flags = bo->mem.flags;
993         rep->size = bo->mem.num_pages * PAGE_SIZE;
994         rep->offset = bo->offset;
995         rep->arg_handle = bo->map_list.user_token;
996         rep->mask = bo->mem.mask;
997         rep->buffer_start = bo->buffer_start;
998         rep->fence_flags = bo->fence_type;
999         rep->rep_flags = 0;
1000         rep->page_alignment = bo->mem.page_alignment;
1001
1002         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1003                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1004                                 DRM_BO_REP_BUSY);
1005         }
1006 }
1007
1008 /*
1009  * Wait for buffer idle and register that we've mapped the buffer.
1010  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, 
1011  * so that if the client dies, the mapping is automatically 
1012  * unregistered.
1013  */
1014
1015 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1016                                  uint32_t map_flags, unsigned hint,
1017                                  drm_bo_arg_reply_t * rep)
1018 {
1019         drm_buffer_object_t *bo;
1020         drm_device_t *dev = priv->head->dev;
1021         int ret = 0;
1022         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1023
1024         mutex_lock(&dev->struct_mutex);
1025         bo = drm_lookup_buffer_object(priv, handle, 1);
1026         mutex_unlock(&dev->struct_mutex);
1027
1028         if (!bo)
1029                 return -EINVAL;
1030
1031         mutex_lock(&bo->mutex);
1032         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1033                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1034                 if (ret)
1035                         goto out;
1036         }
1037
1038         /*
1039          * If this returns true, we are currently unmapped.
1040          * We need to do this test, because unmapping can
1041          * be done without the bo->mutex held.
1042          */
1043
1044         while (1) {
1045                 if (atomic_inc_and_test(&bo->mapped)) {
1046                         if (no_wait && drm_bo_busy(bo)) {
1047                                 atomic_dec(&bo->mapped);
1048                                 ret = -EBUSY;
1049                                 goto out;
1050                         }
1051                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1052                         if (ret) {
1053                                 atomic_dec(&bo->mapped);
1054                                 goto out;
1055                         }
1056
1057                         if ((map_flags & DRM_BO_FLAG_READ) &&
1058                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1059                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1060                                 drm_bo_read_cached(bo);
1061                         }
1062                         break;
1063                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1064                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1065                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1066
1067                         /*
1068                          * We are already mapped with different flags;
1069                          * we need to wait for unmap.
1070                          */
1071
1072                         ret = drm_bo_wait_unmapped(bo, no_wait);
1073                         if (ret)
1074                                 goto out;
1075
1076                         continue;
1077                 }
1078                 break;
1079         }
1080
1081         mutex_lock(&dev->struct_mutex);
1082         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1083         mutex_unlock(&dev->struct_mutex);
1084         if (ret) {
1085                 if (atomic_add_negative(-1, &bo->mapped))
1086                         DRM_WAKEUP(&bo->event_queue);
1087
1088         } else
1089                 drm_bo_fill_rep_arg(bo, rep);
1090       out:
1091         mutex_unlock(&bo->mutex);
1092         drm_bo_usage_deref_unlocked(bo);
1093         return ret;
1094 }
1095
1096 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1097 {
1098         drm_device_t *dev = priv->head->dev;
1099         drm_buffer_object_t *bo;
1100         drm_ref_object_t *ro;
1101         int ret = 0;
1102
1103         mutex_lock(&dev->struct_mutex);
1104
1105         bo = drm_lookup_buffer_object(priv, handle, 1);
1106         if (!bo) {
1107                 ret = -EINVAL;
1108                 goto out;
1109         }
1110
1111         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1112         if (!ro) {
1113                 ret = -EINVAL;
1114                 goto out;
1115         }
1116
1117         drm_remove_ref_object(priv, ro);
1118         drm_bo_usage_deref_locked(bo);
1119       out:
1120         mutex_unlock(&dev->struct_mutex);
1121         return ret;
1122 }
1123
1124 /*
1125  * Call dev->struct_mutex locked.
1126  */
1127
1128 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1129                                          drm_user_object_t * uo,
1130                                          drm_ref_t action)
1131 {
1132         drm_buffer_object_t *bo =
1133             drm_user_object_entry(uo, drm_buffer_object_t, base);
1134
1135         /*
1136          * We DON'T want to take the bo->lock here, because we want to
1137          * hold it when we wait for unmapped buffer.
1138          */
1139
1140         BUG_ON(action != _DRM_REF_TYPE1);
1141
1142         if (atomic_add_negative(-1, &bo->mapped))
1143                 DRM_WAKEUP(&bo->event_queue);
1144 }
1145
1146 /*
1147  * bo->mutex locked. 
1148  */
1149
1150 static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1151                               int no_wait, int force_no_move, int move_unfenced)
1152 {
1153         drm_device_t *dev = bo->dev;
1154         drm_buffer_manager_t *bm = &dev->bm;
1155         int ret = 0;
1156         drm_bo_mem_reg_t mem;
1157         /*
1158          * Flush outstanding fences.
1159          */
1160
1161         drm_bo_busy(bo);
1162
1163         /*
1164          * Wait for outstanding fences.
1165          */
1166
1167         ret = drm_bo_wait(bo, 0, 0, no_wait);
1168         if (ret)
1169                 return ret;
1170
1171
1172         mem.num_pages = bo->mem.num_pages;
1173         mem.size = mem.num_pages << PAGE_SHIFT;
1174         mem.mask = new_mem_flags;
1175         mem.page_alignment = bo->mem.page_alignment;
1176
1177         mutex_lock(&bm->evict_mutex);
1178         mutex_lock(&dev->struct_mutex);
1179         list_del(&bo->lru);
1180         list_add_tail(&bo->lru, &bm->unfenced);
1181         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, 
1182                         _DRM_BO_FLAG_UNFENCED);
1183         mutex_unlock(&dev->struct_mutex);
1184
1185         /*
1186          * Determine where to move the buffer.
1187          */
1188         ret = drm_bo_mem_space(dev, &mem, no_wait);
1189         
1190         if (ret)
1191                 goto out_unlock;
1192
1193         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1194
1195  out_unlock:
1196         if (ret || !move_unfenced) {
1197                 mutex_lock(&dev->struct_mutex);
1198                 if (mem.mm_node) {
1199                         drm_mm_put_block(mem.mm_node);
1200                         mem.mm_node = NULL;
1201                 }
1202                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1203                 list_del(&bo->lru);
1204                 drm_bo_add_to_lru(bo, bm);
1205                 mutex_unlock(&dev->struct_mutex);
1206         }
1207
1208         mutex_unlock(&bm->evict_mutex);
1209         return ret;
1210 }
1211
1212
1213 static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem)
1214 {
1215         uint32_t
1216                 flag_diff = (mem->mask ^ mem->flags);
1217
1218         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1219                 return 0;
1220         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1221             (mem->mask & DRM_BO_FLAG_FORCE_CACHING))
1222                 return 0;
1223         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1224             (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))
1225                 return 0;
1226         return 1;
1227 }
1228         
1229 static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem)
1230 {
1231         drm_buffer_manager_t *bm = &dev->bm;
1232         drm_mem_type_manager_t *man; 
1233         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1234         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1235         uint32_t i;
1236         int type_ok = 0;
1237         uint32_t mem_type = 0;
1238         uint32_t cur_flags;
1239
1240         if (drm_bo_mem_compat(mem))
1241                 return 0;
1242
1243         BUG_ON(mem->mm_node);
1244
1245         for (i=0; i<num_prios; ++i) {
1246                 mem_type = prios[i];
1247                 man = &bm->man[mem_type];
1248                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, 
1249                                                &cur_flags); 
1250                 if (type_ok)
1251                         break;
1252         }
1253
1254         if (type_ok) {
1255                 mem->mm_node = NULL;
1256                 mem->mem_type = mem_type;
1257                 mem->flags = cur_flags;
1258                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1259                 return 0;
1260         }
1261
1262         DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
1263         return -EINVAL;
1264 }
1265                 
1266 /*
1267  * bo locked.
1268  */
1269
1270 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1271                                       int move_unfenced, int no_wait)
1272 {
1273         drm_device_t *dev = bo->dev;
1274         drm_buffer_manager_t *bm = &dev->bm;
1275         uint32_t flag_diff = (bo->mem.mask ^ bo->mem.flags);
1276         drm_bo_driver_t *driver = dev->driver->bo_driver;
1277
1278         int ret;
1279
1280         DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, 
1281                   bo->mem.flags);
1282         ret = driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type);
1283         if (ret) {
1284                 DRM_ERROR("Driver did not support given buffer permissions\n");
1285                 return ret;
1286         }
1287
1288         ret = drm_bo_wait_unmapped(bo, no_wait);
1289         if (ret)
1290                 return ret;
1291
1292         if (bo->type == drm_bo_type_fake) {
1293                 ret = drm_bo_check_fake(dev, &bo->mem);
1294                 if (ret)
1295                         return ret;
1296         }
1297
1298         /*
1299          * Check whether we dropped the no_move policy and, in that case,
1300          * release reserved manager regions if we're evicted.
1301          */
1302
1303         if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
1304             !(bo->mem.mask & DRM_BO_FLAG_NO_MOVE)) {
1305                 /* FIXME */
1306         }
1307
1308         /*
1309          * Check whether we need to move buffer.
1310          */
1311
1312         if (!drm_bo_mem_compat(&bo->mem)) {
1313                 ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE, 
1314                                          no_wait, 1, move_unfenced);
1315                 if (ret) {
1316                         if (ret != -EAGAIN)
1317                                 DRM_ERROR("Failed moving buffer.\n");
1318                         return ret;
1319                 }
1320         }
1321
1322         /*
1323          * We might need to add a TTM.
1324          */
1325
1326         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1327                 ret = drm_bo_add_ttm(bo);
1328                 if (ret) 
1329                         return ret;
1330         }
1331
1332         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1333
1334         return 0;
1335 }
1336
1337 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1338                                   uint32_t flags, uint32_t mask, uint32_t hint,
1339                                   drm_bo_arg_reply_t * rep)
1340 {
1341         drm_buffer_object_t *bo;
1342         int ret;
1343         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1344
1345         bo = drm_lookup_buffer_object(priv, handle, 1);
1346         if (!bo) {
1347                 return -EINVAL;
1348         }
1349
1350         mutex_lock(&bo->mutex);
1351         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1352
1353         if (ret)
1354                 goto out;
1355
1356         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1357         ret = drm_bo_new_mask(bo, flags, hint);
1358         if (ret)
1359                 goto out;
1360
1361         ret =
1362             drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
1363                                        no_wait);
1364         drm_bo_fill_rep_arg(bo, rep);
1365
1366       out:
1367
1368         mutex_unlock(&bo->mutex);
1369         drm_bo_usage_deref_unlocked(bo);
1370         return ret;
1371 }
1372
1373 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1374                               drm_bo_arg_reply_t * rep)
1375 {
1376         drm_buffer_object_t *bo;
1377
1378         bo = drm_lookup_buffer_object(priv, handle, 1);
1379         if (!bo) {
1380                 return -EINVAL;
1381         }
1382         mutex_lock(&bo->mutex);
1383         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1384                 (void)drm_bo_busy(bo);
1385         drm_bo_fill_rep_arg(bo, rep);
1386         mutex_unlock(&bo->mutex);
1387         drm_bo_usage_deref_unlocked(bo);
1388         return 0;
1389 }
1390
1391 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1392                               uint32_t hint, drm_bo_arg_reply_t * rep)
1393 {
1394         drm_buffer_object_t *bo;
1395         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1396         int ret;
1397
1398         bo = drm_lookup_buffer_object(priv, handle, 1);
1399         if (!bo) {
1400                 return -EINVAL;
1401         }
1402
1403         mutex_lock(&bo->mutex);
1404         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1405         if (ret)
1406                 goto out;
1407         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1408         if (ret)
1409                 goto out;
1410
1411         drm_bo_fill_rep_arg(bo, rep);
1412
1413       out:
1414         mutex_unlock(&bo->mutex);
1415         drm_bo_usage_deref_unlocked(bo);
1416         return ret;
1417 }
1418
1419 int drm_buffer_object_create(drm_file_t * priv,
1420                              unsigned long size,
1421                              drm_bo_type_t type,
1422                              uint32_t mask,
1423                              uint32_t hint,
1424                              uint32_t page_alignment,
1425                              unsigned long buffer_start,
1426                              drm_buffer_object_t ** buf_obj)
1427 {
1428         drm_device_t *dev = priv->head->dev;
1429         drm_buffer_manager_t *bm = &dev->bm;
1430         drm_buffer_object_t *bo;
1431         int ret = 0;
1432         unsigned long num_pages;
1433
1434         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1435                 DRM_ERROR("Invalid buffer object start.\n");
1436                 return -EINVAL;
1437         }
1438         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1439         if (num_pages == 0) {
1440                 DRM_ERROR("Illegal buffer object size.\n");
1441                 return -EINVAL;
1442         }
1443
1444         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1445
1446         if (!bo)
1447                 return -ENOMEM;
1448
1449         mutex_init(&bo->mutex);
1450         mutex_lock(&bo->mutex);
1451
1452         atomic_set(&bo->usage, 1);
1453         atomic_set(&bo->mapped, -1);
1454         DRM_INIT_WAITQUEUE(&bo->event_queue);
1455         INIT_LIST_HEAD(&bo->lru);
1456         INIT_LIST_HEAD(&bo->ddestroy);
1457 #ifdef DRM_ODD_MM_COMPAT
1458         INIT_LIST_HEAD(&bo->p_mm_list);
1459         INIT_LIST_HEAD(&bo->vma_list);
1460 #endif
1461         bo->dev = dev;
1462         bo->type = type;
1463         bo->mem.num_pages = num_pages;
1464         bo->mem.mm_node = NULL;
1465         bo->mem.page_alignment = page_alignment;
1466         if (bo->type == drm_bo_type_fake) {
1467                 bo->offset = buffer_start;
1468                 bo->buffer_start = 0;
1469         } else {
1470                 bo->buffer_start = buffer_start;
1471         }
1472         bo->priv_flags = 0;
1473         bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
1474         bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
1475         atomic_inc(&bm->count);
1476         ret = drm_bo_new_mask(bo, mask, hint);
1477
1478         if (ret)
1479                 goto out_err;
1480         
1481         if (bo->type == drm_bo_type_dc) {
1482                 mutex_lock(&dev->struct_mutex);
1483                 ret = drm_bo_setup_vm_locked(bo);
1484                 mutex_unlock(&dev->struct_mutex);
1485                 if (ret)
1486                         goto out_err;
1487         }
1488         ret = drm_buffer_object_validate(bo, 0,
1489                                          hint & DRM_BO_HINT_DONT_BLOCK);
1490         if (ret)
1491                 goto out_err;
1492
1493         mutex_unlock(&bo->mutex);
1494         *buf_obj = bo;
1495         return 0;
1496
1497       out_err:
1498         mutex_unlock(&bo->mutex);
1499         drm_bo_usage_deref_unlocked(bo);
1500         return ret;
1501 }
1502
1503 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1504                                   int shareable)
1505 {
1506         drm_device_t *dev = priv->head->dev;
1507         int ret;
1508
1509         mutex_lock(&dev->struct_mutex);
1510         ret = drm_add_user_object(priv, &bo->base, shareable);
1511         if (ret)
1512                 goto out;
1513
1514         bo->base.remove = drm_bo_base_deref_locked;
1515         bo->base.type = drm_buffer_type;
1516         bo->base.ref_struct_locked = NULL;
1517         bo->base.unref = drm_buffer_user_object_unmap;
1518
1519       out:
1520         mutex_unlock(&dev->struct_mutex);
1521         return ret;
1522 }
1523
1524 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1525 {
1526         LOCK_TEST_WITH_RETURN(dev, filp);
1527         return 0;
1528 }
1529
1530 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1531 {
1532         DRM_DEVICE;
1533         drm_bo_arg_t arg;
1534         drm_bo_arg_request_t *req = &arg.d.req;
1535         drm_bo_arg_reply_t rep;
1536         unsigned long next;
1537         drm_user_object_t *uo;
1538         drm_buffer_object_t *entry;
1539
1540         if (!dev->bm.initialized) {
1541                 DRM_ERROR("Buffer object manager is not initialized.\n");
1542                 return -EINVAL;
1543         }
1544
1545         do {
1546                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1547
1548                 if (arg.handled) {
1549                         data = arg.next;
1550                         continue;
1551                 }
1552
1553                 rep.ret = 0;
1554                 switch (req->op) {
1555                 case drm_bo_create:
1556                         rep.ret =
1557                             drm_buffer_object_create(priv, req->size,
1558                                                      req->type,
1559                                                      req->mask,
1560                                                      req->hint,
1561                                                      req->page_alignment,
1562                                                      req->buffer_start, &entry);
1563                         if (rep.ret)
1564                                 break;
1565
1566                         rep.ret =
1567                             drm_bo_add_user_object(priv, entry,
1568                                                    req->
1569                                                    mask &
1570                                                    DRM_BO_FLAG_SHAREABLE);
1571                         if (rep.ret)
1572                                 drm_bo_usage_deref_unlocked(entry);
1573
1574                         if (rep.ret)
1575                                 break;
1576
1577                         mutex_lock(&entry->mutex);
1578                         drm_bo_fill_rep_arg(entry, &rep);
1579                         mutex_unlock(&entry->mutex);
1580                         break;
1581                 case drm_bo_unmap:
1582                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1583                         break;
1584                 case drm_bo_map:
1585                         rep.ret = drm_buffer_object_map(priv, req->handle,
1586                                                         req->mask,
1587                                                         req->hint, &rep);
1588                         break;
1589                 case drm_bo_destroy:
1590                         mutex_lock(&dev->struct_mutex);
1591                         uo = drm_lookup_user_object(priv, req->handle);
1592                         if (!uo || (uo->type != drm_buffer_type)
1593                             || uo->owner != priv) {
1594                                 mutex_unlock(&dev->struct_mutex);
1595                                 rep.ret = -EINVAL;
1596                                 break;
1597                         }
1598                         rep.ret = drm_remove_user_object(priv, uo);
1599                         mutex_unlock(&dev->struct_mutex);
1600                         break;
1601                 case drm_bo_reference:
1602                         rep.ret = drm_user_object_ref(priv, req->handle,
1603                                                       drm_buffer_type, &uo);
1604                         if (rep.ret)
1605                                 break;
1606                         mutex_lock(&dev->struct_mutex);
1607                         uo = drm_lookup_user_object(priv, req->handle);
1608                         entry =
1609                             drm_user_object_entry(uo, drm_buffer_object_t,
1610                                                   base);
1611                         atomic_dec(&entry->usage);
1612                         mutex_unlock(&dev->struct_mutex);
1613                         mutex_lock(&entry->mutex);
1614                         drm_bo_fill_rep_arg(entry, &rep);
1615                         mutex_unlock(&entry->mutex);
1616                         break;
1617                 case drm_bo_unreference:
1618                         rep.ret = drm_user_object_unref(priv, req->handle,
1619                                                         drm_buffer_type);
1620                         break;
1621                 case drm_bo_validate:
1622                         rep.ret = drm_bo_lock_test(dev, filp);
1623
1624                         if (rep.ret)
1625                                 break;
1626                         rep.ret =
1627                             drm_bo_handle_validate(priv, req->handle, req->mask,
1628                                                    req->arg_handle, req->hint,
1629                                                    &rep);
1630                         break;
1631                 case drm_bo_fence:
1632                         rep.ret = drm_bo_lock_test(dev, filp);
1633                         if (rep.ret)
1634                                 break;
1635                         break;
1636                 case drm_bo_info:
1637                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1638                         break;
1639                 case drm_bo_wait_idle:
1640                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1641                                                      req->hint, &rep);
1642                         break;
1643                 case drm_bo_ref_fence:
1644                         rep.ret = -EINVAL;
1645                         DRM_ERROR("Function is not implemented yet.\n");
                             break;
1646                 default:
1647                         rep.ret = -EINVAL;
1648                 }
1649                 next = arg.next;
1650
1651                 /*
1652                  * A signal interrupted us. Make sure the ioctl is restartable.
1653                  */
1654
1655                 if (rep.ret == -EAGAIN)
1656                         return -EAGAIN;
1657
1658                 arg.handled = 1;
1659                 arg.d.rep = rep;
1660                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1661                 data = next;
1662         } while (data);
1663         return 0;
1664 }
1665
1666 /*
1667  * dev->struct_mutex locked.
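      *
      * Evicts every buffer on the given list, waiting for (and if needed
      * expiring) its fence first. Both mutexes are dropped while waiting,
      * so the traversal restarts whenever the list changes under us.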
1668  */
1669
1670 static int drm_bo_force_list_clean(drm_device_t * dev,
1671                                    struct list_head *head,
1672                                    unsigned mem_type,
1673                                    int force_no_move, int allow_errors)
1674 {
1675         drm_buffer_manager_t *bm = &dev->bm;
1676         struct list_head *list, *next, *prev;
1677         drm_buffer_object_t *entry;
1678         int ret;
1679         int clean;
1680
1681       retry:
1682         clean = 1;
1683         list_for_each_safe(list, next, head) {
1684                 prev = list->prev;
1685                 entry = list_entry(list, drm_buffer_object_t, lru);
1686                 atomic_inc(&entry->usage);
1687                 mutex_unlock(&dev->struct_mutex);
1688                 mutex_lock(&entry->mutex);
1689                 mutex_lock(&dev->struct_mutex);
1690
1691                 if (prev != list->prev || next != list->next) {
1692                         mutex_unlock(&entry->mutex);
1693                         drm_bo_usage_deref_locked(entry);
1694                         goto retry;
1695                 }
1696                 if (entry->mem.mm_node) {
1697                         clean = 0;
1698
1699                         /*
1700                          * Expire the fence.
1701                          */
1702
1703                         mutex_unlock(&dev->struct_mutex);
1704                         if (entry->fence && bm->nice_mode) {
1705                                 unsigned long _end = jiffies + 3 * DRM_HZ;
1706                                 do {
1707                                         ret = drm_bo_wait(entry, 0, 1, 0);
1708                                         if (ret && allow_errors)
1709                                                 goto out_err;
1710        
1711                                 } while (ret && !time_after_eq(jiffies, _end));
1712
1713                                 if (entry->fence) {
1714                                         bm->nice_mode = 0;
1715                                         DRM_ERROR("Detected GPU hang or "
1716                                                   "fence manager was taken down. "
1717                                                   "Evicting waiting buffers\n");
1718                                 }
1719                         }
1720                         if (entry->fence) {
1721                                 drm_fence_usage_deref_unlocked(dev,
1722                                                                entry->fence);
1723                                 entry->fence = NULL;
1724                         }
1725
1726                         DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1727
1728                         if (force_no_move) {
1729                                 DRM_FLAG_MASKED(entry->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1730                         }
1731                         if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1732                                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer was present "
1733                                           "at cleanup. Removing flag and evicting.\n");
1734                                 entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1735                                 entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1736                         }
1737
1738                         ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
1739                         if (ret) {
1740                                 if (allow_errors) {
1741                                         goto out_err;
1742                                 } else {
1743                                         DRM_ERROR("Eviction failed.\n");
1744                                 }
1745                         }
1746                         mutex_lock(&dev->struct_mutex);
1747                 }
1748                 mutex_unlock(&entry->mutex);
1749                 drm_bo_usage_deref_locked(entry);
1750                 if (prev != list->prev || next != list->next) {
1751                         goto retry;
1752                 }
1753         }
1754         if (!clean)
1755                 goto retry;
1756         return 0;
1757       out_err:
1758         mutex_unlock(&entry->mutex);
1759         drm_bo_usage_deref_unlocked(entry);
1760         mutex_lock(&dev->struct_mutex);
1761         return ret;
1762 }
1763
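     /*
      * Take down the memory manager for one memory type: mark it unusable,
      * force-clean the lists that may still reference it and, for non-local
      * types, tear down the underlying range manager. Returns -EBUSY if
      * buffers are still resident in it.
      */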
1764 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1765 {
1766         drm_buffer_manager_t *bm = &dev->bm;
1767         drm_mem_type_manager_t *man = &bm->man[mem_type];
1768         drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL];
1769         int ret = -EINVAL;
1770
1771         if (mem_type >= DRM_BO_MEM_TYPES) {
1772                 DRM_ERROR("Illegal memory type %u.\n", mem_type);
1773                 return ret;
1774         }
1775
1776         if (!man->has_type) {
1777                 DRM_ERROR("Trying to take down uninitialized "
1778                           "memory manager type\n");
1779                 return ret;
1780         }
1781         man->use_type = 0;
1782         man->has_type = 0;
1783
1784         ret = 0;
1785         if (mem_type > 0) {
1786
1787                 /*
1788                  * Throw out unfenced buffers.
1789                  */
1790
1791                 drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
1792
1793                 /*
1794                  * Throw out evicted no-move buffers.
1795                  */
1796
1797                 drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0);
1798                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0);
1799                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0);
1800
1801                 if (drm_mm_clean(&man->manager)) {
1802                         drm_mm_takedown(&man->manager);
1803                 } else {
1804                         ret = -EBUSY;
1805                 }
1806         }
1807
1808         return ret;
1809 }
1810
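     /*
      * Evict all buffers from a memory type without taking the manager
      * down. Errors (including signals) are propagated to the caller.
      * Backs the mm_lock ioctl operation.
      */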
1811 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1812 {
1813         int ret;
1814         drm_buffer_manager_t *bm = &dev->bm;
1815         drm_mem_type_manager_t *man = &bm->man[mem_type];
1816
1817         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1818                 DRM_ERROR("Illegal memory manager type %u.\n", mem_type);
1819                 return -EINVAL;
1820         }
1821
1822         ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
1823         if (ret)
1824                 return ret;
1825         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1);
1826         if (ret)
1827                 return ret;
1828         ret =
1829             drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1);
1830         return ret;
1831 }
1832
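     /*
      * Initialize the memory manager for one memory type: let the driver
      * describe the type, set up a drm_mm range allocator over
      * [p_offset, p_offset + p_size) for non-local types, and initialize
      * the type's lru and pinned lists.
      */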
1833 static int drm_bo_init_mm(drm_device_t * dev,
1834                           unsigned type,
1835                           unsigned long p_offset, unsigned long p_size)
1836 {
1837         drm_buffer_manager_t *bm = &dev->bm;
1838         int ret = -EINVAL;
1839         drm_mem_type_manager_t *man;
1840
1841         if (type >= DRM_BO_MEM_TYPES) {
1842                 DRM_ERROR("Illegal memory type %u.\n", type);
1843                 return ret;
1844         }
1845         
1846         man = &bm->man[type];
1847         if (man->has_type) {
1848                 DRM_ERROR("Memory manager already initialized for type %d\n",
1849                           type);
1850                 return ret;
1851         }
1852
1853         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
1854         if (ret) 
1855                 return ret;
1856
1857         ret = 0;
1858         if (type != DRM_BO_MEM_LOCAL) {
1859                 if (!p_size) {
1860                         DRM_ERROR("Zero size memory manager type %d\n", type);
1861                         return -EINVAL;
1862                 }
1863                 ret = drm_mm_init(&man->manager, p_offset, p_size);
1864                 if (ret)
1865                         return ret;
1866         }
1867         man->has_type = 1;
1868         man->use_type = 1;
1869
1870         INIT_LIST_HEAD(&man->lru);
1871         INIT_LIST_HEAD(&man->pinned);
1872
1873         return 0;
1874 }
1875
1876 /*
1877  * This is called from lastclose, so we don't need to bother about
1878  * any clients still running when we set the initialized flag to zero.
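      *
      * All memory types are taken down, the delayed-delete work is flushed,
      * and any buffers still on the delayed-destroy list are reaped.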
1879  */
1880
1881 int drm_bo_driver_finish(drm_device_t * dev)
1882 {
1883         drm_buffer_manager_t *bm = &dev->bm;
1884         int ret = 0;
1885         unsigned i = DRM_BO_MEM_TYPES;
1886         drm_mem_type_manager_t *man;
1887
1888         mutex_lock(&dev->bm.init_mutex);
1889         mutex_lock(&dev->struct_mutex);
1890
1891         if (!bm->initialized)
1892                 goto out;
1893         bm->initialized = 0;
1894
1895         while (i--) {
1896                 man = &bm->man[i];
1897                 if (man->has_type) {
1898                         man->use_type = 0;
1899                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
1900                                 ret = -EBUSY;
1901                                 DRM_ERROR("DRM memory manager type %d "
1902                                           "is not clean.\n", i);
1903                         }
1904                         man->has_type = 0;
1905                 }
1906         }
1907         mutex_unlock(&dev->struct_mutex);
1908         if (!cancel_delayed_work(&bm->wq)) {
1909                 flush_scheduled_work();
1910         }
1911         mutex_lock(&dev->struct_mutex);
1912         drm_bo_delayed_delete(dev, 1);
1913         if (list_empty(&bm->ddestroy)) {
1914                 DRM_DEBUG("Delayed destroy list was clean\n");
1915         }
1916         if (list_empty(&bm->man[0].lru)) {
1917                 DRM_DEBUG("Swap list was clean\n");
1918         }
1919         if (list_empty(&bm->man[0].pinned)) {
1920                 DRM_DEBUG("NO_MOVE list was clean\n");
1921         }
1922         if (list_empty(&bm->unfenced)) {
1923                 DRM_DEBUG("Unfenced list was clean\n");
1924         }
1925       out:
1926         mutex_unlock(&dev->struct_mutex);
1927         mutex_unlock(&dev->bm.init_mutex);
1928         return ret;
1929 }
1930
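     /*
      * Set up the buffer object manager: initialize the local (system)
      * memory type and the delayed-delete work. Additional memory types are
      * brought up later by the driver or through the mm_init ioctl below.
      */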
1931 int drm_bo_driver_init(drm_device_t * dev)
1932 {
1933         drm_bo_driver_t *driver = dev->driver->bo_driver;
1934         drm_buffer_manager_t *bm = &dev->bm;
1935         int ret = -EINVAL;
1936
1937         mutex_lock(&dev->bm.init_mutex);
1938         mutex_lock(&dev->struct_mutex);
1939         if (!driver)
1940                 goto out_unlock;
1941
1942         /*
1943          * Initialize the system memory buffer type.
1944          * Other types need to be driver / IOCTL initialized.
1945          */
1946
1947         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
1948         if (ret)
1949                 goto out_unlock;
1950
1951 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1952         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
1953 #else
1954         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
1955 #endif
1956         bm->initialized = 1;
1957         bm->nice_mode = 1;
1958         atomic_set(&bm->count, 0);
1959         bm->cur_pages = 0;
1960         INIT_LIST_HEAD(&bm->unfenced);
1961         INIT_LIST_HEAD(&bm->ddestroy);
1962       out_unlock:
1963         mutex_unlock(&dev->struct_mutex);
1964         mutex_unlock(&dev->bm.init_mutex);
1965         return ret;
1966 }
1967
1968 EXPORT_SYMBOL(drm_bo_driver_init);
1969
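     /*
      * Memory manager control ioctl: dispatches mm_init, mm_takedown,
      * mm_lock and mm_unlock requests from user space.
      *
      * A minimal user-space sketch for bringing up a TT region (illustrative
      * only; the ioctl wrapper name and the offset/size values are
      * assumptions, not defined in this file):
      *
      *	drm_mm_init_arg_t arg;
      *
      *	arg.req.op = mm_init;
      *	arg.req.mem_type = DRM_BO_MEM_TT;
      *	arg.req.p_offset = first_page;
      *	arg.req.p_size = num_pages;
      *	ioctl(drm_fd, DRM_IOCTL_MM_INIT, &arg);
      */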
1970 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
1971 {
1972         DRM_DEVICE;
1973
1974         int ret = 0;
1975         drm_mm_init_arg_t arg;
1976         drm_buffer_manager_t *bm = &dev->bm;
1977         drm_bo_driver_t *driver = dev->driver->bo_driver;
1978
1979         if (!driver) {
1980                 DRM_ERROR("Buffer objects are not supported by this driver\n");
1981                 return -EINVAL;
1982         }
1983
1984         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1985
1986         switch (arg.req.op) {
1987         case mm_init:
1988                 ret = -EINVAL;
1989                 mutex_lock(&dev->bm.init_mutex);
1990                 mutex_lock(&dev->struct_mutex);
1991                 if (!bm->initialized) {
1992                         DRM_ERROR("DRM memory manager was not initialized.\n");
1993                         break;
1994                 }
1995                 if (arg.req.mem_type == 0) {
1996                         DRM_ERROR
1997                             ("System memory buffers already initialized.\n");
1998                         break;
1999                 }
2000                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2001                                      arg.req.p_offset, arg.req.p_size);
2002                 break;
2003         case mm_takedown:
2004                 LOCK_TEST_WITH_RETURN(dev, filp);
2005                 mutex_lock(&dev->bm.init_mutex);
2006                 mutex_lock(&dev->struct_mutex);
2007                 ret = -EINVAL;
2008                 if (!bm->initialized) {
2009                         DRM_ERROR("DRM memory manager was not initialized\n");
2010                         break;
2011                 }
2012                 if (arg.req.mem_type == 0) {
2013                         DRM_ERROR("No takedown for System memory buffers.\n");
2014                         break;
2015                 }
2016                 ret = 0;
2017                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2018                         DRM_ERROR("Memory manager type %d not clean. "
2019                                   "Delaying takedown\n", arg.req.mem_type);
2020                 }
2021                 break;
2022         case mm_lock:
2023                 LOCK_TEST_WITH_RETURN(dev, filp);
2024                 mutex_lock(&dev->bm.init_mutex);
2025                 mutex_lock(&dev->struct_mutex);
2026                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2027                 break;
2028         case mm_unlock:
2029                 LOCK_TEST_WITH_RETURN(dev, filp);
2030                 mutex_lock(&dev->bm.init_mutex);
2031                 mutex_lock(&dev->struct_mutex);
2032                 ret = 0;
2033                 break;
2034         default:
2035                 DRM_ERROR("Unknown memory manager operation.\n");
2036                 return -EINVAL;
2037         }
2038
2039         mutex_unlock(&dev->struct_mutex);
2040         mutex_unlock(&dev->bm.init_mutex);
2041         if (ret)
2042                 return ret;
2043
2044         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2045         return 0;
2046 }
2047
2048 /*
2049  * buffer object vm functions.
2050  */
2051
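     /*
      * Report whether a memory region is accessed through a PCI aperture.
      * Non-fixed memory is only PCI-addressable when it is neither local
      * system memory, CMA, nor mapped cached.
      */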
2052 int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem)
2053 {
2054         drm_buffer_manager_t *bm = &dev->bm;
2055         drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; 
2056
2057         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2058                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2059                         return 0;
2060                 
2061                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2062                         return 0;
2063
2064                 if (mem->flags & DRM_BO_FLAG_CACHED)
2065                         return 0;
2066         }
2067         return 1;
2068 }
2069 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2070
2071 /**
2072  * Get the PCI offset for the buffer object memory.
2073  *
2074  * \param dev The DRM device.
      * \param mem The buffer object memory region to query.
2075  * \param bus_base On return the base of the PCI region.
2076  * \param bus_offset On return the byte offset into the PCI region.
2077  * \param bus_size On return the byte size of the buffer object or zero if
2078  *     the buffer object memory is not accessible through a PCI region.
2079  * \return Failure indication.
2080  * 
2081  * Returns -EINVAL if the buffer object is currently not mappable.
2082  * Otherwise returns zero.
2083  */
2084
2085 int drm_bo_pci_offset(drm_device_t *dev,
2086                       drm_bo_mem_reg_t *mem,
2087                       unsigned long *bus_base,
2088                       unsigned long *bus_offset,
2089                       unsigned long *bus_size)
2090 {
2091         drm_buffer_manager_t *bm = &dev->bm;
2092         drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; 
2093
2094         *bus_size = 0;
2095         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) 
2096                 return -EINVAL;
2097
2098         if (drm_mem_reg_is_pci(dev, mem)) {
2099                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2100                 *bus_size = mem->num_pages << PAGE_SHIFT;
2101                 *bus_base = man->io_offset;
2102         }
2103
2104         return 0;
2105 }
2106
2107
2108 /**
2109  * Kill all user-space virtual mappings of this buffer object.
2110  *
2111  * \param bo The buffer object.
2112  *
2113  * Called with bo->mutex held.
2114  */
2115
2116 void drm_bo_unmap_virtual(drm_buffer_object_t *bo)
2117 {
2118         drm_device_t *dev = bo->dev;
2119         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2120         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2121
2122         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2123 }
2124
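     /*
      * Tear down the fake mmap offset of a buffer object: remove its hash
      * entry and file-offset node, free the map and drop the reference
      * taken by drm_bo_setup_vm_locked. Called with dev->struct_mutex held.
      */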
2125 static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo)
2126 {
2127         drm_map_list_t *list = &bo->map_list;
2128         drm_local_map_t *map;
2129         drm_device_t *dev = bo->dev;
2130         
2131         if (list->user_token) {
2132                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2133                 list->user_token = 0;
2134         }
2135         if (list->file_offset_node) {
2136                 drm_mm_put_block(list->file_offset_node);
2137                 list->file_offset_node = NULL;
2138         }
2139
2140         map = list->map;
2141         if (!map)
2142                 return;
2143
2144         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2145         list->map = NULL;
2146         list->user_token = 0ULL;
2147         drm_bo_usage_deref_locked(bo);
2148 }
2149
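     /*
      * Set up the fake mmap offset for a buffer object: allocate a
      * _DRM_TTM map, reserve a file-offset range sized to the buffer and
      * hash it so drm_mmap can find the object. The resulting
      * list->user_token is the offset user space passes to mmap() on the
      * DRM fd, e.g. (illustrative sketch, names assumed):
      *
      *	ptr = mmap(NULL, bo->mem.num_pages << PAGE_SHIFT,
      *		   PROT_READ | PROT_WRITE, MAP_SHARED, drm_fd, user_token);
      *
      * Called with dev->struct_mutex held.
      */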
2150 static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo)
2151 {
2152         drm_map_list_t *list = &bo->map_list;
2153         drm_local_map_t *map;
2154         drm_device_t *dev = bo->dev;
2155         
2156         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2157         if (!list->map)
2158                 return -ENOMEM;
2159
2160         map = list->map;
2161         map->offset = 0;
2162         map->type = _DRM_TTM;
2163         map->flags = _DRM_REMOVABLE;
2164         map->size = bo->mem.num_pages * PAGE_SIZE;
2165         atomic_inc(&bo->usage);
2166         map->handle = (void *) bo;
2167         
2168         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2169                                                     bo->mem.num_pages, 0, 0);
2170
2171         if (!list->file_offset_node) {
2172                 drm_bo_takedown_vm_locked(bo);
2173                 return -ENOMEM;
2174         }
2175
2176         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2177                                                   bo->mem.num_pages, 0);
2178
2179         list->hash.key = list->file_offset_node->start;
2180         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2181                 drm_bo_takedown_vm_locked(bo);
2182                 return -ENOMEM;
2183         }
2184                 
2185         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2186
2187         return 0;
2188 }