drm: fix deadlock in drm_buffer_object_transfer
[profile/ivi/libdrm.git] / linux-core / drm_bo_move.c
/**************************************************************************
 *
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/**
 * Free the old memory node unless it's a pinned region and we
 * have not been requested to free pinned regions as well.
 */

static void drm_bo_free_old_node(struct drm_buffer_object *bo)
{
        struct drm_bo_mem_reg *old_mem = &bo->mem;

        if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
                mutex_lock(&bo->dev->struct_mutex);
                drm_mm_put_block(old_mem->mm_node);
                old_mem->mm_node = NULL;
                mutex_unlock(&bo->dev->struct_mutex);
        }
        old_mem->mm_node = NULL;
}

int drm_bo_move_ttm(struct drm_buffer_object *bo,
                    int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
        struct drm_ttm *ttm = bo->ttm;
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        uint64_t save_flags = old_mem->flags;
        uint64_t save_mask = old_mem->mask;
        int ret;

        if (old_mem->mem_type == DRM_BO_MEM_TT) {
                if (evict)
                        drm_ttm_evict(ttm);
                else
                        drm_ttm_unbind(ttm);

                drm_bo_free_old_node(bo);
                DRM_FLAG_MASKED(old_mem->flags,
                                DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
                                DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
                old_mem->mem_type = DRM_BO_MEM_LOCAL;
                save_flags = old_mem->flags;
        }
        if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
                ret = drm_bind_ttm(ttm, new_mem);
                if (ret)
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(drm_bo_move_ttm);

/**
 * \c Return a kernel virtual address to the buffer object PCI memory.
 *
 * \param dev The drm device.
 * \param mem The memory region to map.
 * \param virtual Returns the kernel virtual address.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
 *
 * After a successful call, *virtual contains the kernel virtual address, or
 * NULL if the buffer object content is not accessible through PCI space.
 * Call with bo->mutex held.
 */

int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
                        void **virtual)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;
        void *addr;

        *virtual = NULL;
        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
        else {
                addr = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (!addr)
                        return -ENOMEM;
        }
        *virtual = addr;
        return 0;
}
EXPORT_SYMBOL(drm_mem_reg_ioremap);

/**
 * \c Unmap a mapping obtained using drm_mem_reg_ioremap
 *
 * \param dev The drm device.
 * \param mem The memory region that was mapped.
 * \param virtual The kernel virtual address returned by drm_mem_reg_ioremap.
 *
 * Call with bo->mutex held.
 */

void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
                         void *virtual)
{
        struct drm_buffer_manager *bm;
        struct drm_mem_type_manager *man;

        bm = &dev->bm;
        man = &bm->man[mem->mem_type];

        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                iounmap(virtual);
}
EXPORT_SYMBOL(drm_mem_reg_iounmap);

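/*
 * Usage sketch (illustrative only, not built as part of the driver): how a
 * caller might map a memory region, touch it through the returned kernel
 * virtual address and unmap it again. The helper name
 * example_clear_first_word() is a hypothetical placeholder.
 */
#if 0
static int example_clear_first_word(struct drm_device *dev,
                                    struct drm_bo_mem_reg *mem)
{
        void *virtual;
        int ret;

        ret = drm_mem_reg_ioremap(dev, mem, &virtual);
        if (ret)
                return ret;
        if (virtual) {
                /* Region is visible through PCI space; write to it. */
                iowrite32(0, (void __iomem *)virtual);
        }
        drm_mem_reg_iounmap(dev, mem, virtual);
        return 0;
}
#endif
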
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
                                unsigned long page)
{
        struct page *d = drm_ttm_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap(d);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap(d);
        return 0;
}

static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
{
        struct page *s = drm_ttm_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap(s);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);
        kunmap(s);
        return 0;
}

int drm_bo_move_memcpy(struct drm_buffer_object *bo,
                       int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
        struct drm_device *dev = bo->dev;
        struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
        struct drm_ttm *ttm = bo->ttm;
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        struct drm_bo_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        uint64_t save_flags = old_mem->flags;
        uint64_t save_mask = old_mem->mask;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

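        /*
         * If both regions live in the same memory type and the new region
         * overlaps the old one, copy backwards so that source pages are
         * not overwritten before they have been copied.
         */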
        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL)
                        ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
                else if (new_iomap == NULL)
                        ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
                else
                        ret = drm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        drm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);

        if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
                drm_ttm_unbind(ttm);
                drm_destroy_ttm(ttm);
                bo->ttm = NULL;
        }

out1:
        drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
        drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
        return ret;
}
EXPORT_SYMBOL(drm_bo_move_memcpy);

/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call with bo->mutex held.
 */

int drm_buffer_object_transfer(struct drm_buffer_object *bo,
                               struct drm_buffer_object **new_obj)
{
        struct drm_buffer_object *fbo;
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;

        fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;
        mutex_init(&fbo->mutex);
        mutex_lock(&fbo->mutex);
        mutex_lock(&dev->struct_mutex);

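        /*
         * *fbo = *bo above copied bo's wait queue head, including its list
         * of waiters, so the copy in the new object must be re-initialized.
         * Re-initializing bo->event_queue here instead would discard tasks
         * already sleeping on it and leave them blocked forever.
         */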
        DRM_INIT_WAITQUEUE(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&fbo->vma_list);
        INIT_LIST_HEAD(&fbo->p_mm_list);
#endif

        fbo->fence = drm_fence_reference_locked(bo->fence);
        fbo->pinned_node = NULL;
        fbo->mem.mm_node->private = (void *)fbo;
        atomic_set(&fbo->usage, 1);
        atomic_inc(&bm->count);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&fbo->mutex);

        *new_obj = fbo;
        return 0;
}

/*
 * Since a move is underway, we need to block signals in this function.
 * We cannot restart until it has finished.
 */

int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
                              int evict, int no_wait, uint32_t fence_class,
                              uint32_t fence_type, uint32_t fence_flags,
                              struct drm_bo_mem_reg *new_mem)
{
        struct drm_device *dev = bo->dev;
        struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        int ret;
        uint64_t save_flags = old_mem->flags;
        uint64_t save_mask = old_mem->mask;
        struct drm_buffer_object *old_obj;

        if (bo->fence)
                drm_fence_usage_deref_unlocked(&bo->fence);
        ret = drm_fence_object_create(dev, fence_class, fence_type,
                                      fence_flags | DRM_FENCE_FLAG_EMIT,
                                      &bo->fence);
        bo->fence_type = fence_type;
        if (ret)
                return ret;

#ifdef DRM_ODD_MM_COMPAT
        /*
         * In this mode, we don't allow pipelining a copy blit,
         * since the buffer will be accessible from user space
         * the moment we return and rebuild the page tables.
         *
         * With normal vm operation, page tables are rebuilt
         * on demand using fault(), which waits for buffer idle.
         */
        if (1)
#else
        if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
                      bo->mem.mm_node != NULL))
#endif
        {
                ret = drm_bo_wait(bo, 0, 1, 0);
                if (ret)
                        return ret;

                drm_bo_free_old_node(bo);

                if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
                        drm_ttm_unbind(bo->ttm);
                        drm_destroy_ttm(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {

                /* This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                ret = drm_buffer_object_transfer(bo, &old_obj);

                if (ret)
                        return ret;

                if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
                        old_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                mutex_lock(&dev->struct_mutex);
                list_del_init(&old_obj->lru);
                DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                drm_bo_add_to_lru(old_obj);

                drm_bo_usage_deref_locked(&old_obj);
                mutex_unlock(&dev->struct_mutex);

        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);

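/*
 * Usage sketch (illustrative only, not built): roughly how a driver's
 * bo_move() hook might tie the move helpers above together. The names
 * example_driver_bo_move(), example_driver_emit_blit(), EXAMPLE_FENCE_CLASS
 * and EXAMPLE_FENCE_TYPE are hypothetical placeholders for driver-specific
 * pieces.
 */
#if 0
static int example_driver_bo_move(struct drm_buffer_object *bo,
                                  int evict, int no_wait,
                                  struct drm_bo_mem_reg *new_mem)
{
        struct drm_bo_mem_reg *old_mem = &bo->mem;

        if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
                /* Moving back to system memory: just unbind the ttm. */
                return drm_bo_move_ttm(bo, evict, no_wait, new_mem);
        }

        /* Queue a hardware blit, then fence the move so the old memory is
         * released only once the copy has completed. */
        if (example_driver_emit_blit(bo, old_mem, new_mem) == 0)
                return drm_bo_move_accel_cleanup(bo, evict, no_wait,
                                                 EXAMPLE_FENCE_CLASS,
                                                 EXAMPLE_FENCE_TYPE, 0,
                                                 new_mem);

        /* Fall back to a CPU copy if the blit could not be queued. */
        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
#endif
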
int drm_bo_same_page(unsigned long offset,
                     unsigned long offset2)
{
        return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
}
EXPORT_SYMBOL(drm_bo_same_page);

unsigned long drm_bo_offset_end(unsigned long offset,
                                unsigned long end)
{
        offset = (offset + PAGE_SIZE) & PAGE_MASK;
        return (end < offset) ? end : offset;
}
EXPORT_SYMBOL(drm_bo_offset_end);

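/*
 * Compute the page protection to use for kernel mappings of uncached or
 * write-combined buffer object memory, depending on the map type and the
 * architecture.
 */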
static pgprot_t drm_kernel_io_prot(uint32_t map_type)
{
        pgprot_t tmp = PAGE_KERNEL;

#if defined(__i386__) || defined(__x86_64__)
#ifdef USE_PAT_WC
#warning using pat
        if (drm_use_pat() && map_type == _DRM_TTM) {
                pgprot_val(tmp) |= _PAGE_PAT;
                return tmp;
        }
#endif
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
        if (map_type == _DRM_TTM)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
                          unsigned long bus_offset, unsigned long bus_size,
                          struct drm_bo_kmap_obj *map)
{
        struct drm_device *dev = bo->dev;
        struct drm_bo_mem_reg *mem = &bo->mem;
        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
                map->bo_kmap_type = bo_map_premapped;
                map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
        } else {
                map->bo_kmap_type = bo_map_iomap;
                map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
                           unsigned long start_page, unsigned long num_pages,
                           struct drm_bo_kmap_obj *map)
{
        struct drm_device *dev = bo->dev;
        struct drm_bo_mem_reg *mem = &bo->mem;
        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
        pgprot_t prot;
        struct drm_ttm *ttm = bo->ttm;
        struct page *d;
        int i;

        BUG_ON(!ttm);

        if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {

                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = bo_map_kmap;
                map->page = drm_ttm_get_page(ttm, start_page);
                map->virtual = kmap(map->page);
        } else {
                /*
                 * Populate the part we're mapping.
                 */

                for (i = start_page; i < start_page + num_pages; ++i) {
                        d = drm_ttm_get_page(ttm, i);
                        if (!d)
                                return -ENOMEM;
                }

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */

                prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        drm_kernel_io_prot(man->drm_bus_maptype);
                map->bo_kmap_type = bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page,
                                    num_pages, 0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

/*
 * This function is to be used for kernel mapping of buffer objects.
 * It chooses the appropriate mapping method depending on the memory type
 * and caching policy the buffer currently has.
 * Mapping multiple pages or buffers that live in io memory is a bit slow and
 * consumes vmalloc space. Be restrictive with such mappings.
 * Mapping single pages usually returns the logical kernel address
 * (which is fast), but may use slower temporary mappings for high memory
 * pages or uncached / write-combined pages.
 *
 * The function fills in a drm_bo_kmap_obj which can be used to return the
 * kernel virtual address of the buffer.
 *
 * Code servicing a non-privileged user request is only allowed to map one
 * page at a time. We might need to implement a better scheme to stop such
 * processes from consuming all vmalloc space.
 */

int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
                unsigned long num_pages, struct drm_bo_kmap_obj *map)
{
        int ret;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        map->virtual = NULL;

        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);

        if (ret)
                return ret;

        if (bus_size == 0) {
                return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                bus_offset += start_page << PAGE_SHIFT;
                bus_size = num_pages << PAGE_SHIFT;
                return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
        }
}
EXPORT_SYMBOL(drm_bo_kmap);

void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;

        switch (map->bo_kmap_type) {
        case bo_map_iomap:
                iounmap(map->virtual);
                break;
        case bo_map_vmap:
                vunmap(map->virtual);
                break;
        case bo_map_kmap:
                kunmap(map->page);
                break;
        case bo_map_premapped:
                break;
        default:
                BUG();
        }
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(drm_bo_kunmap);
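
/*
 * Usage sketch (illustrative only, not built): mapping a single page of a
 * buffer object into the kernel, as described in the comment above
 * drm_bo_kmap(). Assumes the caller already holds bo->mutex; the helper
 * name example_read_first_word() is a hypothetical placeholder.
 */
#if 0
static int example_read_first_word(struct drm_buffer_object *bo,
                                   uint32_t *value)
{
        struct drm_bo_kmap_obj map;
        int ret;

        ret = drm_bo_kmap(bo, 0, 1, &map);
        if (ret)
                return ret;

        /* Read the first word through the kernel virtual address. */
        *value = *(uint32_t *)map.virtual;

        drm_bo_kunmap(&map);
        return 0;
}
#endif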