More bugfixes.
[platform/upstream/libdrm.git] / linux-core / drm_bo_move.c
/**************************************************************************
 *
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/**
 * Free the old memory node, unless it is the buffer object's pinned node,
 * in which case only the reference to it is dropped.
 */

static void drm_bo_free_old_node(drm_buffer_object_t * bo)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;

        if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
                mutex_lock(&bo->dev->struct_mutex);
                drm_mm_put_block(old_mem->mm_node);
                mutex_unlock(&bo->dev->struct_mutex);
        }
        old_mem->mm_node = NULL;
}

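/**
 * Move a buffer between local memory and a TT (aperture) region by
 * unbinding or rebinding its ttm. No data copy is needed, since the
 * backing pages themselves do not move. On success, *old_mem is
 * updated from *new_mem. Call bo->mutex locked.
 */
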
int drm_bo_move_ttm(drm_buffer_object_t * bo,
                    int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_ttm_t *ttm = bo->ttm;
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        int ret;

        if (old_mem->mem_type == DRM_BO_MEM_TT) {
                if (evict)
                        drm_ttm_evict(ttm);
                else
                        drm_ttm_unbind(ttm);

                drm_bo_free_old_node(bo);
                DRM_FLAG_MASKED(old_mem->flags,
                                DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
                                DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
                old_mem->mem_type = DRM_BO_MEM_LOCAL;
                save_flags = old_mem->flags;
        }
        if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
                ret = drm_bind_ttm(ttm,
                                   new_mem->flags & DRM_BO_FLAG_CACHED,
                                   new_mem->mm_node->start);
                if (ret)
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}

EXPORT_SYMBOL(drm_bo_move_ttm);

/**
 * Return a kernel virtual address to the memory region's PCI space.
 *
 * \param dev The drm device.
 * \param mem The memory region.
 * \param virtual Receives the kernel virtual address.
 * \return Failure indication.
 *
 * Returns -EINVAL if the memory region is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
 *
 * After a successful call, *virtual contains the virtual address, or NULL
 * if the memory region content is not accessible through PCI space.
 * Call bo->mutex locked.
 */

int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
                        void **virtual)
{
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;
        void *addr;

        *virtual = NULL;
        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
        else {
                addr = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (!addr)
                        return -ENOMEM;
        }
        *virtual = addr;
        return 0;
}

/**
 * Unmap a mapping obtained using drm_mem_reg_ioremap.
 *
 * \param dev The drm device.
 * \param mem The memory region.
 * \param virtual The virtual address to unmap.
 *
 * Call bo->mutex locked.
 */

void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
                         void *virtual)
{
        drm_buffer_manager_t *bm;
        drm_mem_type_manager_t *man;

        bm = &dev->bm;
        man = &bm->man[mem->mem_type];

        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
                iounmap(virtual);
        }
}

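/*
 * Copy one page between two ioremapped regions, 32 bits at a time.
 */
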
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

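/*
 * Copy one page from an ioremapped region into a ttm page, fetching
 * (and if needed allocating) the ttm page first.
 */
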
static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
{
        struct page *d = drm_ttm_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap(d);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap(d);
        return 0;
}

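/*
 * Copy one page from a ttm page into an ioremapped region.
 */
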
static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
{
        struct page *s = drm_ttm_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap(s);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);
        kunmap(s);
        return 0;
}

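/**
 * Move a buffer by copying its contents page by page through the CPU,
 * using ioremapped apertures and / or ttm pages as source and
 * destination. Overlapping moves within the same memory type are
 * handled by choosing the copy direction. Call bo->mutex locked.
 */
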
int drm_bo_move_memcpy(drm_buffer_object_t * bo,
                       int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_device_t *dev = bo->dev;
        drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
        drm_ttm_t *ttm = bo->ttm;
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        drm_bo_mem_reg_t old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        add = 0;
        dir = 1;

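        /*
         * If the move stays within one memory type and the destination
         * starts below the end of the source, the ranges may overlap:
         * copy backwards, starting from the last page.
         */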
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL)
                        ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
                else if (new_iomap == NULL)
                        ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
                else
                        ret = drm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
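        /*
         * Make sure the copies have reached memory before the old node
         * is released and the move is committed.
         */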
        mb();
out2:
        drm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);

        if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
                drm_ttm_unbind(ttm);
                drm_destroy_ttm(ttm);
                bo->ttm = NULL;
        }

out1:
        drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
        drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
        return ret;
}

EXPORT_SYMBOL(drm_bo_move_memcpy);

/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call bo->mutex locked.
 */

int drm_buffer_object_transfer(drm_buffer_object_t * bo,
                               drm_buffer_object_t ** new_obj)
{
        drm_buffer_object_t *fbo;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;
        mutex_init(&fbo->mutex);
        mutex_lock(&fbo->mutex);
        mutex_lock(&dev->struct_mutex);

        DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&fbo->vma_list);
        INIT_LIST_HEAD(&fbo->p_mm_list);
#endif

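        /*
         * The new object takes over the memory node and holds its own
         * reference to the outstanding fence, so take an extra fence
         * reference and point the node back at the new object.
         */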
        atomic_inc(&bo->fence->usage);
        fbo->pinned_node = NULL;
        fbo->mem.mm_node->private = (void *)fbo;
        atomic_set(&fbo->usage, 1);
        atomic_inc(&bm->count);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&fbo->mutex);

        *new_obj = fbo;
        return 0;
}

/*
 * Since the move is already underway, signals are ignored in this
 * function: the operation cannot be restarted until it has finished.
 */

int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
                              int evict,
                              int no_wait,
                              uint32_t fence_type,
                              uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
{
        drm_device_t *dev = bo->dev;
        drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        int ret;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        drm_buffer_object_t *old_obj;

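        /*
         * Replace any previous fence with a new one covering this move;
         * DRM_FENCE_FLAG_EMIT emits it to the command stream at creation.
         */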
        if (bo->fence)
                drm_fence_usage_deref_unlocked(dev, bo->fence);
        ret = drm_fence_object_create(dev, fence_type,
                                      fence_flags | DRM_FENCE_FLAG_EMIT,
                                      &bo->fence);
        if (ret)
                return ret;

#ifdef DRM_ODD_MM_COMPAT
        /*
         * In this mode, we don't allow pipelining a copy blit,
         * since the buffer will be accessible from user space
         * the moment we return and rebuild the page tables.
         *
         * With normal vm operation, page tables are rebuilt
         * on demand using fault(), which waits for buffer idle.
         */
        if (1)
#else
        if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
                      bo->mem.mm_node != NULL))
#endif
        {
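                /*
                 * Synchronous path: wait for the hardware to finish with
                 * the buffer before releasing the old memory node.
                 */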
                ret = drm_bo_wait(bo, 0, 1, 0);
                if (ret)
                        return ret;

                drm_bo_free_old_node(bo);

                if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
                        drm_ttm_unbind(bo->ttm);
                        drm_destroy_ttm(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {

                /* This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                ret = drm_buffer_object_transfer(bo, &old_obj);

                if (ret)
                        return ret;

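                /*
                 * If the new memory type is backed by a ttm, the original
                 * object keeps the ttm and the transfer object drops its
                 * copy of the pointer; for fixed memory the transfer
                 * object keeps the ttm and releases it with the old memory.
                 */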
                if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
                        old_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                mutex_lock(&dev->struct_mutex);
                list_del_init(&old_obj->lru);
                DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                drm_bo_add_to_lru(old_obj);

                drm_bo_usage_deref_locked(old_obj);
                mutex_unlock(&dev->struct_mutex);

        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}

EXPORT_SYMBOL(drm_bo_move_accel_cleanup);