1 /**************************************************************************
3 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Free the old memory node unless it's a pinned region and we
35 * have not been requested to free also pinned regions.
38 static void drm_bo_free_old_node(struct drm_buffer_object *bo)
40 struct drm_bo_mem_reg *old_mem = &bo->mem;
42 if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
43 mutex_lock(&bo->dev->struct_mutex);
44 drm_mm_put_block(old_mem->mm_node);
45 mutex_unlock(&bo->dev->struct_mutex);
47 old_mem->mm_node = NULL;
50 int drm_bo_move_ttm(struct drm_buffer_object *bo,
51 int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
53 struct drm_ttm *ttm = bo->ttm;
54 struct drm_bo_mem_reg *old_mem = &bo->mem;
55 uint64_t save_flags = old_mem->flags;
56 uint64_t save_proposed_flags = old_mem->proposed_flags;
59 if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
65 drm_bo_free_old_node(bo);
66 DRM_FLAG_MASKED(old_mem->flags,
67 DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
68 DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
69 old_mem->mem_type = DRM_BO_MEM_LOCAL;
70 save_flags = old_mem->flags;
72 if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
73 ret = drm_ttm_bind(ttm, new_mem);
79 new_mem->mm_node = NULL;
80 old_mem->proposed_flags = save_proposed_flags;
81 DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
84 EXPORT_SYMBOL(drm_bo_move_ttm);
87 * \c Return a kernel virtual address to the buffer object PCI memory.
89 * \param bo The buffer object.
90 * \return Failure indication.
92 * Returns -EINVAL if the buffer object is currently not mappable.
93 * Returns -ENOMEM if the ioremap operation failed.
94 * Otherwise returns zero.
 * After a successful call, bo->iomap contains the virtual address, or NULL
97 * if the buffer object content is not accessible through PCI space.
98 * Call bo->mutex locked.
101 int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
104 struct drm_buffer_manager *bm = &dev->bm;
105 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
106 unsigned long bus_offset;
107 unsigned long bus_size;
108 unsigned long bus_base;
113 ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
114 if (ret || bus_size == 0)
117 if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
118 addr = (void *)(((u8 *) man->io_addr) + bus_offset);
120 addr = ioremap_nocache(bus_base + bus_offset, bus_size);
127 EXPORT_SYMBOL(drm_mem_reg_ioremap);
130 * \c Unmap mapping obtained using drm_bo_ioremap
132 * \param bo The buffer object.
134 * Call bo->mutex locked.
137 void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
140 struct drm_buffer_manager *bm;
141 struct drm_mem_type_manager *man;
144 man = &bm->man[mem->mem_type];
146 if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
149 EXPORT_SYMBOL(drm_mem_reg_iounmap);
151 static int drm_copy_io_page(void *dst, void *src, unsigned long page)
154 (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
156 (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
159 for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
160 iowrite32(ioread32(srcP++), dstP++);
164 static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
167 struct page *d = drm_ttm_get_page(ttm, page);
173 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
178 memcpy_fromio(dst, src, PAGE_SIZE);
183 static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
185 struct page *s = drm_ttm_get_page(ttm, page);
191 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
196 memcpy_toio(dst, src, PAGE_SIZE);
201 int drm_bo_move_memcpy(struct drm_buffer_object *bo,
202 int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
204 struct drm_device *dev = bo->dev;
205 struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
206 struct drm_ttm *ttm = bo->ttm;
207 struct drm_bo_mem_reg *old_mem = &bo->mem;
208 struct drm_bo_mem_reg old_copy = *old_mem;
212 uint64_t save_flags = old_mem->flags;
213 uint64_t save_proposed_flags = old_mem->proposed_flags;
216 unsigned long add = 0;
219 ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
222 ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
226 if (old_iomap == NULL && new_iomap == NULL)
228 if (old_iomap == NULL && ttm == NULL)
234 if ((old_mem->mem_type == new_mem->mem_type) &&
235 (new_mem->mm_node->start <
236 old_mem->mm_node->start + old_mem->mm_node->size)) {
238 add = new_mem->num_pages - 1;
241 for (i = 0; i < new_mem->num_pages; ++i) {
242 page = i * dir + add;
243 if (old_iomap == NULL)
244 ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
245 else if (new_iomap == NULL)
246 ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
248 ret = drm_copy_io_page(new_iomap, old_iomap, page);
254 drm_bo_free_old_node(bo);
257 new_mem->mm_node = NULL;
258 old_mem->proposed_flags = save_proposed_flags;
259 DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
261 if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
263 drm_ttm_destroy(ttm);
268 drm_mem_reg_iounmap(dev, new_mem, new_iomap);
270 drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
273 EXPORT_SYMBOL(drm_bo_move_memcpy);
276 * Transfer a buffer object's memory and LRU status to a newly
277 * created object. User-space references remains with the old
278 * object. Call bo->mutex locked.
281 int drm_buffer_object_transfer(struct drm_buffer_object *bo,
282 struct drm_buffer_object **new_obj)
284 struct drm_buffer_object *fbo;
285 struct drm_device *dev = bo->dev;
286 struct drm_buffer_manager *bm = &dev->bm;
288 fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
293 mutex_init(&fbo->mutex);
294 mutex_lock(&fbo->mutex);
295 mutex_lock(&dev->struct_mutex);
297 DRM_INIT_WAITQUEUE(&bo->event_queue);
298 INIT_LIST_HEAD(&fbo->ddestroy);
299 INIT_LIST_HEAD(&fbo->lru);
300 INIT_LIST_HEAD(&fbo->pinned_lru);
301 #ifdef DRM_ODD_MM_COMPAT
302 INIT_LIST_HEAD(&fbo->vma_list);
303 INIT_LIST_HEAD(&fbo->p_mm_list);
306 fbo->fence = drm_fence_reference_locked(bo->fence);
307 fbo->pinned_node = NULL;
308 fbo->mem.mm_node->private = (void *)fbo;
309 atomic_set(&fbo->usage, 1);
310 atomic_inc(&bm->count);
311 mutex_unlock(&dev->struct_mutex);
312 mutex_unlock(&fbo->mutex);
319 * Since move is underway, we need to block signals in this function.
320 * We cannot restart until it has finished.
323 int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
324 int evict, int no_wait, uint32_t fence_class,
325 uint32_t fence_type, uint32_t fence_flags,
326 struct drm_bo_mem_reg *new_mem)
328 struct drm_device *dev = bo->dev;
329 struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
330 struct drm_bo_mem_reg *old_mem = &bo->mem;
332 uint64_t save_flags = old_mem->flags;
333 uint64_t save_proposed_flags = old_mem->proposed_flags;
334 struct drm_buffer_object *old_obj;
337 drm_fence_usage_deref_unlocked(&bo->fence);
338 ret = drm_fence_object_create(dev, fence_class, fence_type,
339 fence_flags | DRM_FENCE_FLAG_EMIT,
341 bo->fence_type = fence_type;
345 #ifdef DRM_ODD_MM_COMPAT
347 * In this mode, we don't allow pipelining a copy blit,
348 * since the buffer will be accessible from user space
349 * the moment we return and rebuild the page tables.
351 * With normal vm operation, page tables are rebuilt
352 * on demand using fault(), which waits for buffer idle.
356 if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
357 bo->mem.mm_node != NULL))
361 (void) drm_fence_object_wait(bo->fence, 0, 1,
363 drm_fence_usage_deref_unlocked(&bo->fence);
365 drm_bo_free_old_node(bo);
367 if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
368 drm_ttm_unbind(bo->ttm);
369 drm_ttm_destroy(bo->ttm);
374 /* This should help pipeline ordinary buffer moves.
376 * Hang old buffer memory on a new buffer object,
377 * and leave it to be released when the GPU
378 * operation has completed.
381 ret = drm_buffer_object_transfer(bo, &old_obj);
386 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
391 mutex_lock(&dev->struct_mutex);
392 list_del_init(&old_obj->lru);
393 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
394 drm_bo_add_to_lru(old_obj);
396 drm_bo_usage_deref_locked(&old_obj);
397 mutex_unlock(&dev->struct_mutex);
402 new_mem->mm_node = NULL;
403 old_mem->proposed_flags = save_proposed_flags;
404 DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
407 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
409 int drm_bo_same_page(unsigned long offset,
410 unsigned long offset2)
412 return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
414 EXPORT_SYMBOL(drm_bo_same_page);
416 unsigned long drm_bo_offset_end(unsigned long offset,
419 offset = (offset + PAGE_SIZE) & PAGE_MASK;
420 return (end < offset) ? end : offset;
422 EXPORT_SYMBOL(drm_bo_offset_end);
424 static pgprot_t drm_kernel_io_prot(uint32_t map_type)
426 pgprot_t tmp = PAGE_KERNEL;
428 #if defined(__i386__) || defined(__x86_64__)
431 if (drm_use_pat() && map_type == _DRM_TTM) {
432 pgprot_val(tmp) |= _PAGE_PAT;
436 if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
437 pgprot_val(tmp) |= _PAGE_PCD;
438 pgprot_val(tmp) &= ~_PAGE_PWT;
440 #elif defined(__powerpc__)
441 pgprot_val(tmp) |= _PAGE_NO_CACHE;
442 if (map_type == _DRM_REGISTERS)
443 pgprot_val(tmp) |= _PAGE_GUARDED;
445 #if defined(__ia64__)
446 if (map_type == _DRM_TTM)
447 tmp = pgprot_writecombine(tmp);
449 tmp = pgprot_noncached(tmp);
454 static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
455 unsigned long bus_offset, unsigned long bus_size,
456 struct drm_bo_kmap_obj *map)
458 struct drm_device *dev = bo->dev;
459 struct drm_bo_mem_reg *mem = &bo->mem;
460 struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
462 if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
463 map->bo_kmap_type = bo_map_premapped;
464 map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
466 map->bo_kmap_type = bo_map_iomap;
467 map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
469 return (!map->virtual) ? -ENOMEM : 0;
472 static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
473 unsigned long start_page, unsigned long num_pages,
474 struct drm_bo_kmap_obj *map)
476 struct drm_device *dev = bo->dev;
477 struct drm_bo_mem_reg *mem = &bo->mem;
478 struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
480 struct drm_ttm *ttm = bo->ttm;
486 if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
489 * We're mapping a single page, and the desired
490 * page protection is consistent with the bo.
493 map->bo_kmap_type = bo_map_kmap;
494 map->page = drm_ttm_get_page(ttm, start_page);
495 map->virtual = kmap(map->page);
498 * Populate the part we're mapping;
501 for (i = start_page; i < start_page + num_pages; ++i) {
502 d = drm_ttm_get_page(ttm, i);
508 * We need to use vmap to get the desired page protection
509 * or to make the buffer object look contigous.
512 prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
514 drm_kernel_io_prot(man->drm_bus_maptype);
515 map->bo_kmap_type = bo_map_vmap;
516 map->virtual = vmap(ttm->pages + start_page,
519 return (!map->virtual) ? -ENOMEM : 0;
523 * This function is to be used for kernel mapping of buffer objects.
524 * It chooses the appropriate mapping method depending on the memory type
525 * and caching policy the buffer currently has.
526 * Mapping multiple pages or buffers that live in io memory is a bit slow and
527 * consumes vmalloc space. Be restrictive with such mappings.
528 * Mapping single pages usually returns the logical kernel address,
 * but may use slower temporary mappings for high memory pages or
531 * uncached / write-combined pages.
533 * The function fills in a drm_bo_kmap_obj which can be used to return the
534 * kernel virtual address of the buffer.
 * Code servicing a non-privileged user request is only allowed to map one
537 * page at a time. We might need to implement a better scheme to stop such
538 * processes from consuming all vmalloc space.
541 int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
542 unsigned long num_pages, struct drm_bo_kmap_obj *map)
545 unsigned long bus_base;
546 unsigned long bus_offset;
547 unsigned long bus_size;
551 if (num_pages > bo->num_pages)
553 if (start_page > bo->num_pages)
556 if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
559 ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
560 &bus_offset, &bus_size);
566 return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
568 bus_offset += start_page << PAGE_SHIFT;
569 bus_size = num_pages << PAGE_SHIFT;
570 return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
573 EXPORT_SYMBOL(drm_bo_kmap);
575 void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
580 switch (map->bo_kmap_type) {
582 iounmap(map->virtual);
585 vunmap(map->virtual);
590 case bo_map_premapped:
598 EXPORT_SYMBOL(drm_bo_kunmap);
600 int drm_bo_pfn_prot(struct drm_buffer_object *bo,
601 unsigned long dst_offset,
605 struct drm_bo_mem_reg *mem = &bo->mem;
606 struct drm_device *dev = bo->dev;
607 unsigned long bus_offset;
608 unsigned long bus_size;
609 unsigned long bus_base;
610 struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
613 ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
619 *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
623 *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
625 *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
626 PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);
630 EXPORT_SYMBOL(drm_bo_pfn_prot);