linux-core/drm_bo_move.c
/**************************************************************************
 *
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/**
 * Free the old memory node unless it is a pinned region and we
 * have not been requested to free pinned regions as well.
 */

static void drm_bo_free_old_node(drm_buffer_object_t * bo)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;

        if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
                mutex_lock(&bo->dev->struct_mutex);
                drm_mm_put_block(old_mem->mm_node);
                mutex_unlock(&bo->dev->struct_mutex);
        }
        /* Always detach the node; a pinned node remains referenced by
         * bo->pinned_node. */
        old_mem->mm_node = NULL;
}

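/*
 * Move a buffer between the LOCAL and TT memory types by unbinding or
 * binding its TTM. The old memory node is released, and on success the
 * buffer's memory region is updated to describe the new placement.
 */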
int drm_bo_move_ttm(drm_buffer_object_t * bo,
                    int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_ttm_t *ttm = bo->ttm;
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        int ret;

        if (old_mem->mem_type == DRM_BO_MEM_TT) {

                if (evict)
                        drm_ttm_evict(ttm);
                else
                        drm_ttm_unbind(ttm);

                drm_bo_free_old_node(bo);
                DRM_FLAG_MASKED(old_mem->flags,
                                DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
                                DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
                old_mem->mem_type = DRM_BO_MEM_LOCAL;
                save_flags = old_mem->flags;
        }
        if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
                ret = drm_bind_ttm(ttm,
                                   new_mem->flags & DRM_BO_FLAG_CACHED,
                                   new_mem->mm_node->start);
                if (ret)
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}

EXPORT_SYMBOL(drm_bo_move_ttm);

/**
 * Return a kernel virtual address for a memory region's PCI memory.
 *
 * \param dev The DRM device.
 * \param mem The memory region to map.
 * \param virtual Receives the kernel virtual address.
 * \return Failure indication.
 *
 * Returns -EINVAL if the memory region is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
 *
 * After a successful call, *virtual contains the virtual address, or NULL
 * if the memory region's content is not accessible through PCI space.
 * Call with bo->mutex held.
 */

int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
                        void **virtual)
{
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;
        void *addr;

        *virtual = NULL;
        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
        else {
                addr = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (!addr)
                        return -ENOMEM;
        }
        *virtual = addr;
        return 0;
}

/**
 * Unmap a mapping obtained using drm_mem_reg_ioremap.
 *
 * \param dev The DRM device.
 * \param mem The memory region that was mapped.
 * \param virtual The virtual address returned by drm_mem_reg_ioremap.
 *
 * Call with bo->mutex held.
 */

void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
                         void *virtual)
{
        drm_buffer_manager_t *bm;
        drm_mem_type_manager_t *man;

        bm = &dev->bm;
        man = &bm->man[mem->mem_type];

        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
                iounmap(virtual);
        }
}

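/*
 * Copy one page between two I/O mappings, one 32-bit word at a time.
 */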
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

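/*
 * Copy one page from an I/O mapping into a TTM page, allocating the
 * destination page if it is not already present.
 */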
static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
{
        struct page *d = drm_ttm_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap(d);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap(d);
        return 0;
}

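/*
 * Copy one page from a TTM page to an I/O mapping, allocating the
 * source page if it is not already present.
 */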
static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
{
        struct page *s = drm_ttm_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap(s);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);
        kunmap(s);
        return 0;
}

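/*
 * Fallback move path: copy the buffer contents page by page with the
 * CPU, using ioremapped apertures and/or kmapped TTM pages as needed,
 * then release the old memory node.
 */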
int drm_bo_move_memcpy(drm_buffer_object_t * bo,
                       int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_device_t *dev = bo->dev;
        drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
        drm_ttm_t *ttm = bo->ttm;
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        drm_bo_mem_reg_t old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        add = 0;
        dir = 1;

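        /*
         * If the two regions overlap within the same memory type, copy
         * pages in reverse order so that source pages are read before
         * they are overwritten.
         */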
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL)
                        ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
                else if (new_iomap == NULL)
                        ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
                else
                        ret = drm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
      out2:
        drm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);

        if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
                drm_ttm_unbind(ttm);
                drm_destroy_ttm(ttm);
                bo->ttm = NULL;
        }

      out1:
        drm_mem_reg_iounmap(dev, new_mem, new_iomap);
      out:
        drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
        return ret;
}

EXPORT_SYMBOL(drm_bo_move_memcpy);

/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call with bo->mutex held.
 */

int drm_buffer_object_transfer(drm_buffer_object_t * bo,
                               drm_buffer_object_t ** new_obj)
{
        drm_buffer_object_t *fbo;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;
        mutex_init(&fbo->mutex);
        mutex_lock(&fbo->mutex);
        mutex_lock(&dev->struct_mutex);

        /* Re-initialize the fields copied from the old object on the
         * new object, rather than on the old one. */
        DRM_INIT_WAITQUEUE(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&fbo->vma_list);
        INIT_LIST_HEAD(&fbo->p_mm_list);
#endif

        atomic_inc(&bo->fence->usage);
        fbo->pinned_node = NULL;
        fbo->mem.mm_node->private = (void *)fbo;
        atomic_set(&fbo->usage, 1);
        atomic_inc(&bm->count);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&fbo->mutex);

        *new_obj = fbo;
        return 0;
}

/*
 * A move is already underway, so we must block signals in this
 * function; the operation cannot be restarted until it has finished.
 */

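/*
 * Finish an accelerated (blit-based) move: emit a fence for the buffer,
 * then either wait for idle and free the old node synchronously, or
 * hand the old memory over to a temporary buffer object so that it is
 * released only once the GPU operation has completed.
 */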
int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
                              int evict,
                              int no_wait,
                              uint32_t fence_type,
                              uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
{
        drm_device_t *dev = bo->dev;
        drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        int ret;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        drm_buffer_object_t *old_obj;

        if (bo->fence)
                drm_fence_usage_deref_unlocked(dev, bo->fence);
        ret = drm_fence_object_create(dev, fence_type,
                                      fence_flags | DRM_FENCE_FLAG_EMIT,
                                      &bo->fence);
        if (ret)
                return ret;

#ifdef DRM_ODD_MM_COMPAT
        /*
         * In this mode, we don't allow pipelining a copy blit,
         * since the buffer will be accessible from user space
         * the moment we return and rebuild the page tables.
         *
         * With normal vm operation, page tables are rebuilt
         * on demand using fault(), which waits for buffer idle.
         */
        if (1)
#else
        if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
                      bo->mem.mm_node != NULL))
#endif
        {
                ret = drm_bo_wait(bo, 0, 1, 0);
                if (ret)
                        return ret;

                drm_bo_free_old_node(bo);

                if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
                        drm_ttm_unbind(bo->ttm);
                        drm_destroy_ttm(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {

                /* This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                ret = drm_buffer_object_transfer(bo, &old_obj);

                if (ret)
                        return ret;

                if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
                        old_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                mutex_lock(&dev->struct_mutex);
                list_del_init(&old_obj->lru);
                DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                drm_bo_add_to_lru(old_obj);

                drm_bo_usage_deref_locked(old_obj);
                mutex_unlock(&dev->struct_mutex);

        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}

EXPORT_SYMBOL(drm_bo_move_accel_cleanup);