/**************************************************************************
 *
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/**
 * Free the old memory node unless it's a pinned region and we
 * have not been asked to also free pinned regions.
 */
static void drm_bo_free_old_node(drm_buffer_object_t * bo)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;

        if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
                mutex_lock(&bo->dev->struct_mutex);
                drm_mm_put_block(old_mem->mm_node);
                old_mem->mm_node = NULL;
                mutex_unlock(&bo->dev->struct_mutex);
        }
        /* A pinned node is left alone; just detach it from the region. */
        old_mem->mm_node = NULL;
}

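/**
 * Generic move helper for ttm-backed buffers: unbind the ttm from the old
 * aperture range when leaving DRM_BO_MEM_TT, rebind it at the new range when
 * the destination is not local memory, and update the region bookkeeping.
 * No data copy is needed since the same system pages back the buffer before
 * and after the move.
 */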
int drm_bo_move_ttm(drm_buffer_object_t * bo,
                    int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_ttm_t *ttm = bo->ttm;
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        int ret;

        if (old_mem->mem_type == DRM_BO_MEM_TT) {
                if (evict)
                        drm_ttm_evict(ttm);
                else
                        drm_ttm_unbind(ttm);

                drm_bo_free_old_node(bo);
                DRM_FLAG_MASKED(old_mem->flags,
                                DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
                                DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
                old_mem->mem_type = DRM_BO_MEM_LOCAL;
                save_flags = old_mem->flags;
        }
        if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
                ret = drm_bind_ttm(ttm,
                                   new_mem->flags & DRM_BO_FLAG_CACHED,
                                   new_mem->mm_node->start);
                if (ret)
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(drm_bo_move_ttm);
/**
 * Return a kernel virtual address to the buffer object PCI memory.
 *
 * \param dev The drm device.
 * \param mem The memory region to map.
 * \param virtual On success, the kernel virtual address of the mapping.
 * \return Failure indication.
 *
 * Returns -EINVAL if the memory region is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
 *
 * After a successful call, *virtual contains the virtual address, or NULL
 * if the region's content is not accessible through PCI space.
 * Call bo->mutex locked.
 */
int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
                        void **virtual)
{
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;
        void *addr;

        *virtual = NULL;
        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
        else {
                addr = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (!addr)
                        return -ENOMEM;
        }
        *virtual = addr;
        return 0;
}
/**
 * Unmap a mapping obtained using drm_mem_reg_ioremap().
 *
 * \param dev The drm device.
 * \param mem The memory region that was mapped.
 * \param virtual The kernel virtual address returned by drm_mem_reg_ioremap().
 *
 * Call bo->mutex locked.
 */
void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
                         void *virtual)
{
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                iounmap(virtual);
}
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}
static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
{
        struct page *d = drm_ttm_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap(d);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap(d);
        return 0;
}
static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
{
        struct page *s = drm_ttm_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap(s);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);
        kunmap(s);
        return 0;
}
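/**
 * CPU-copy move helper: map the old and new regions (via an ioremap of the
 * aperture and/or the ttm's system pages) and copy the buffer page by page
 * using the helpers above. Used as a fallback when no hardware copy engine
 * is available for the move.
 */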
int drm_bo_move_memcpy(drm_buffer_object_t * bo,
                       int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_device_t *dev = bo->dev;
        drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
        drm_ttm_t *ttm = bo->ttm;
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        drm_bo_mem_reg_t old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        dir = 1;

        /*
         * If the regions overlap within the same memory type, copy backwards
         * so that pages are not overwritten before they have been read.
         */
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL)
                        ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
                else if (new_iomap == NULL)
                        ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
                else
                        ret = drm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
      out2:
        drm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);

        if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
                drm_ttm_unbind(ttm);
                drm_destroy_ttm(ttm);
                bo->ttm = NULL;
        }

      out1:
        drm_mem_reg_iounmap(dev, new_mem, new_iomap);
      out:
        drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
        return ret;
}
EXPORT_SYMBOL(drm_bo_move_memcpy);
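/*
 * Illustrative sketch, not part of the original file: a driver's move()
 * hook in drm_bo_driver_t typically dispatches between a hardware blit and
 * the generic helpers above. example_move_blit() is a hypothetical
 * placeholder (a sketch of it follows drm_bo_move_accel_cleanup() below);
 * the fallback calls are the real helpers from this file.
 */
#if 0
static int example_bo_move(drm_buffer_object_t * bo,
                           int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;

        if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
                /* Nothing bound to blit from yet; populate with the CPU. */
                return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }
        if (example_move_blit(bo, evict, no_wait, new_mem) != 0) {
                /* Copy engine unavailable or blit setup failed. */
                return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }
        return 0;
}
#endif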
/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call bo->mutex locked.
 */
int drm_buffer_object_transfer(drm_buffer_object_t * bo,
                               drm_buffer_object_t ** new_obj)
{
        drm_buffer_object_t *fbo;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;
        mutex_init(&fbo->mutex);
        mutex_lock(&fbo->mutex);
        mutex_lock(&dev->struct_mutex);

        DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&fbo->vma_list);
        INIT_LIST_HEAD(&fbo->p_mm_list);
#endif

        atomic_inc(&bo->fence->usage);
        fbo->pinned_node = NULL;
        fbo->mem.mm_node->private = (void *)fbo;
        atomic_set(&fbo->usage, 1);
        atomic_inc(&bm->count);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&fbo->mutex);

        *new_obj = fbo;
        return 0;
}
/*
 * Since a move is underway, we need to block signals in this function.
 * We cannot restart until it has finished.
 */
int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
                              int evict,
                              int no_wait,
                              uint32_t fence_type,
                              uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
{
        drm_device_t *dev = bo->dev;
        drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        int ret;
        uint32_t save_flags = old_mem->flags;
        uint32_t save_mask = old_mem->mask;
        drm_buffer_object_t *old_obj;

        if (bo->fence)
                drm_fence_usage_deref_unlocked(dev, bo->fence);
        ret = drm_fence_object_create(dev, fence_type,
                                      fence_flags | DRM_FENCE_FLAG_EMIT,
                                      &bo->fence);
        if (ret)
                return ret;

#ifdef DRM_ODD_MM_COMPAT
        /*
         * In this mode, we don't allow pipelining a copy blit,
         * since the buffer will be accessible from user space
         * the moment we return and rebuild the page tables.
         *
         * With normal vm operation, page tables are rebuilt
         * on demand using fault(), which waits for buffer idle.
         */
        if (1)
#else
        if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
                      bo->mem.mm_node != NULL))
#endif
        {
                /* Synchronous path: wait for idle, then release the old node. */
                ret = drm_bo_wait(bo, 0, 1, 0);
                if (ret)
                        return ret;

                drm_bo_free_old_node(bo);

                if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
                        drm_ttm_unbind(bo->ttm);
                        drm_destroy_ttm(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {

                /* This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                ret = drm_buffer_object_transfer(bo, &old_obj);
                if (ret)
                        return ret;

                if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
                        old_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                mutex_lock(&dev->struct_mutex);
                list_del_init(&old_obj->lru);
                DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                drm_bo_add_to_lru(old_obj);

                drm_bo_usage_deref_locked(old_obj);
                mutex_unlock(&dev->struct_mutex);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->mask = save_mask;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
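/*
 * Illustrative sketch, not part of the original file: the hypothetical
 * example_move_blit() referenced above would queue a copy blit on the
 * hardware and then hand the buffer to drm_bo_move_accel_cleanup(), which
 * fences the move and either waits for idle or transfers the old memory
 * node to a temporary buffer object released when the blit completes.
 * example_emit_copy_blit() is a placeholder for driver-specific command
 * submission; fence_type and fence_flags values are driver-specific too.
 */
#if 0
static int example_move_blit(drm_buffer_object_t * bo,
                             int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        int ret;

        /* Queue the copy blit from the old to the new aperture range. */
        ret = example_emit_copy_blit(bo->dev,
                                     old_mem->mm_node->start,
                                     new_mem->mm_node->start,
                                     new_mem->num_pages);
        if (ret)
                return ret;

        /* Fence the move; releasing the old node is deferred as needed. */
        return drm_bo_move_accel_cleanup(bo, evict, no_wait,
                                         DRM_FENCE_TYPE_EXE, 0, new_mem);
}
#endif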