/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
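/*
 * ttm_bo_free_old_node - release the memory node backing @bo's current
 * (soon to be old) placement.
 */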
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

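/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and the old placement to TTM_PL_SYSTEM.
 */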
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

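/*
 * ttm_mem_io_lock / ttm_mem_io_unlock - serialize io reservations for a
 * memory type. Both take a no-op fast path when the memory type does not
 * use the io reserve LRU (io_reserve_fastpath set).
 */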
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

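/*
 * Unmap the least recently used buffer object on the io reserve LRU to
 * make room for a new io reservation. Returns -EAGAIN if the LRU is
 * unused or empty. Called with the io reserve mutex held.
 */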
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

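/*
 * Reserve the io resources (e.g. an aperture range) backing @mem through
 * the driver's io_mem_reserve hook. If the driver reports -EAGAIN, evict
 * an entry from the io reserve LRU and retry.
 */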
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

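/*
 * Drop an io reservation taken with ttm_mem_io_reserve(), calling the
 * driver's io_mem_free hook once the reservation count reaches zero.
 */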
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

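/*
 * Per-BO io reservation used by the VM fault path: reserve io space for
 * @bo's current placement and put the object on the manager's io reserve
 * LRU. Expected to be called with the io reserve mutex held (see
 * ttm_mem_io_lock()).
 */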
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

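/*
 * Map the io memory backing @mem into kernel virtual space, or reuse the
 * driver-premapped address if one is provided. On success, *virtual holds
 * the mapping (NULL for non-iomem placements). ttm_mem_reg_iounmap()
 * below is the inverse.
 */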
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

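/*
 * Low-level single-page copy helpers for ttm_bo_move_memcpy(): io->io,
 * io->ttm and ttm->io respectively. On x86, TTM pages are mapped with
 * kmap_atomic_prot(); elsewhere vmap() or kmap() is used depending on
 * whether a non-default page protection is required.
 */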
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

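/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful, free any old aperture space, and set
 * (@new_mem)->mm_node to NULL, and the old placement to TTM_PL_SYSTEM.
 */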
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

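	/*
	 * TTM might be NULL for moves within the same region.
	 */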
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/*
			 * If we fail here, don't nuke the mm node
			 * as the bo still owns it.
			 */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

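	/*
	 * When copying within the same memory type and the new range
	 * overlaps the old one, walk the pages backwards (memmove-style)
	 * so source pages are read before they are overwritten.
	 */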
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* failing here means keep the old copy as-is */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

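/*
 * Destroy callback for the "ghost" buffer objects created by
 * ttm_buffer_object_transfer(). They are plain kmalloc allocations.
 */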
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

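/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map.
 * @tmp: Page protection flags for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */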
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

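/*
 * Helper for ttm_bo_kmap(): map an io-memory range of the buffer, either
 * reusing a driver-premapped address or setting up an ioremap with the
 * caching mode implied by the placement.
 */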
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset +
						       offset, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

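/*
 * Helper for ttm_bo_kmap(): map TTM pages via kmap (a single cached page)
 * or vmap (multiple pages, or a non-default page protection).
 */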
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

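/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */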
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
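
/*
 * Typical usage (illustrative sketch only; assumes the caller already
 * holds a reservation on the buffer object and that errors are handled
 * as appropriate for the driver):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the buffer contents through virt ...
 *		ttm_bo_kunmap(&map);
 *	}
 */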
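
/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */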
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

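/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer
 * object representing the old placement, and put the sync object on both
 * buffer objects. After that the newly created buffer object is unref'd
 * to be destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */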
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);