/**************************************************************************
 *
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/**
 * Free the old memory node unless it's a pinned region and we
 * have not been requested to free also pinned regions.
 */

static void drm_bo_free_old_node(struct drm_buffer_object *bo)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
		mutex_lock(&bo->dev->struct_mutex);
		drm_mm_put_block(old_mem->mm_node);
		old_mem->mm_node = NULL;
		mutex_unlock(&bo->dev->struct_mutex);
	}
	old_mem->mm_node = NULL;
}
int drm_bo_move_ttm(struct drm_buffer_object *bo,
		    int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_ttm *ttm = bo->ttm;
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	uint64_t save_flags = old_mem->flags;
	uint64_t save_mask = old_mem->mask;
	int ret;

	if (old_mem->mem_type == DRM_BO_MEM_TT) {
		if (evict)
			drm_ttm_evict(ttm);
		else
			drm_ttm_unbind(ttm);

		drm_bo_free_old_node(bo);
		DRM_FLAG_MASKED(old_mem->flags,
				DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
				DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
		old_mem->mem_type = DRM_BO_MEM_LOCAL;
		save_flags = old_mem->flags;
	}
	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
		ret = drm_bind_ttm(ttm, new_mem);
		if (ret)
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	old_mem->mask = save_mask;
	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(drm_bo_move_ttm);
/**
 * Return a kernel virtual address to the memory region's PCI memory.
 *
 * \param dev The DRM device.
 * \param mem The memory region to map.
 * \param virtual Returned kernel virtual address.
 * \return Failure indication.
 *
 * Returns -EINVAL if the memory region is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
 *
 * After a successful call, *virtual contains the virtual address, or NULL
 * if the memory region content is not accessible through PCI space.
 * Call bo->mutex locked.
 */
int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
			void **virtual)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}
EXPORT_SYMBOL(drm_mem_reg_ioremap);
/**
 * Unmap a mapping obtained using drm_mem_reg_ioremap.
 *
 * \param dev The DRM device.
 * \param mem The memory region that was mapped.
 * \param virtual The virtual address returned by drm_mem_reg_ioremap.
 *
 * Call bo->mutex locked.
 */

void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
			 void *virtual)
{
	struct drm_buffer_manager *bm;
	struct drm_mem_type_manager *man;

	bm = &dev->bm;
	man = &bm->man[mem->mem_type];

	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}
EXPORT_SYMBOL(drm_mem_reg_iounmap);
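/*
 * Usage sketch (illustrative only, not referenced by the code below):
 * the typical pairing of drm_mem_reg_ioremap() and drm_mem_reg_iounmap()
 * around a CPU access to a memory region, mirroring what
 * drm_bo_move_memcpy() does further down. The example function name is
 * made up for illustration; like the functions it wraps, it assumes the
 * caller holds bo->mutex.
 */
static int __attribute__((unused))
drm_mem_reg_access_example(struct drm_device *dev, struct drm_bo_mem_reg *mem)
{
	void *virtual;
	int ret;

	ret = drm_mem_reg_ioremap(dev, mem, &virtual);
	if (ret)
		return ret;

	/*
	 * virtual is NULL when the region is not accessible through
	 * PCI space; a real caller would then fall back to the ttm pages.
	 */
	if (virtual)
		iowrite32(ioread32(virtual), virtual);

	drm_mem_reg_iounmap(dev, mem, virtual);
	return 0;
}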
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
				unsigned long page)
{
	struct page *d = drm_ttm_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = kmap(d);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);
	kunmap(d);
	return 0;
}
static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
{
	struct page *s = drm_ttm_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = kmap(s);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);
	kunmap(s);
	return 0;
}
int drm_bo_move_memcpy(struct drm_buffer_object *bo,
		       int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
	struct drm_ttm *ttm = bo->ttm;
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	struct drm_bo_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint64_t save_flags = old_mem->flags;
	uint64_t save_mask = old_mem->mask;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	/*
	 * If the regions overlap within the same memory type, copy
	 * backwards so the source is not overwritten before it is read.
	 */
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL)
			ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
		else if (new_iomap == NULL)
			ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
		else
			ret = drm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	drm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	old_mem->mask = save_mask;
	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);

	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
		drm_ttm_unbind(ttm);
		drm_destroy_ttm(ttm);
		bo->ttm = NULL;
	}

out1:
	drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
	drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(drm_bo_move_memcpy);
/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call bo->mutex locked.
 */

int drm_buffer_object_transfer(struct drm_buffer_object *bo,
			       struct drm_buffer_object **new_obj)
{
	struct drm_buffer_object *fbo;
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;
	mutex_init(&fbo->mutex);
	mutex_lock(&fbo->mutex);
	mutex_lock(&dev->struct_mutex);

	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&fbo->vma_list);
	INIT_LIST_HEAD(&fbo->p_mm_list);
#endif

	drm_fence_reference_unlocked(&fbo->fence, bo->fence);
	fbo->pinned_node = NULL;
	fbo->mem.mm_node->private = (void *)fbo;
	atomic_set(&fbo->usage, 1);
	atomic_inc(&bm->count);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&fbo->mutex);

	*new_obj = fbo;
	return 0;
}
/*
 * Since move is underway, we need to block signals in this function.
 * We cannot restart until it has finished.
 */

int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
			      int evict, int no_wait, uint32_t fence_class,
			      uint32_t fence_type, uint32_t fence_flags,
			      struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	int ret;
	uint64_t save_flags = old_mem->flags;
	uint64_t save_mask = old_mem->mask;
	struct drm_buffer_object *old_obj;

	if (bo->fence)
		drm_fence_usage_deref_unlocked(&bo->fence);
	ret = drm_fence_object_create(dev, fence_class, fence_type,
				      fence_flags | DRM_FENCE_FLAG_EMIT,
				      &bo->fence);
	bo->fence_type = fence_type;
	if (ret)
		return ret;

#ifdef DRM_ODD_MM_COMPAT
	/*
	 * In this mode, we don't allow pipelining a copy blit,
	 * since the buffer will be accessible from user space
	 * the moment we return and rebuild the page tables.
	 *
	 * With normal vm operation, page tables are rebuilt
	 * on demand using fault(), which waits for buffer idle.
	 */
	if (1)
#else
	if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
		      bo->mem.mm_node != NULL))
#endif
	{
		ret = drm_bo_wait(bo, 0, 1, 0);
		if (ret)
			return ret;

		drm_bo_free_old_node(bo);

		if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
			bo->ttm = NULL;
		}
	} else {

		/* This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		ret = drm_buffer_object_transfer(bo, &old_obj);
		if (ret)
			return ret;

		if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
			old_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		mutex_lock(&dev->struct_mutex);
		list_del_init(&old_obj->lru);
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		drm_bo_add_to_lru(old_obj);

		drm_bo_usage_deref_locked(&old_obj);
		mutex_unlock(&dev->struct_mutex);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	old_mem->mask = save_mask;
	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
int drm_bo_same_page(unsigned long offset,
		     unsigned long offset2)
{
	return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
}
EXPORT_SYMBOL(drm_bo_same_page);

unsigned long drm_bo_offset_end(unsigned long offset,
				unsigned long end)
{
	offset = (offset + PAGE_SIZE) & PAGE_MASK;
	return (end < offset) ? end : offset;
}
EXPORT_SYMBOL(drm_bo_offset_end);
static pgprot_t drm_kernel_io_prot(uint32_t map_type)
{
	pgprot_t tmp = PAGE_KERNEL;

#if defined(__i386__) || defined(__x86_64__)
	if (drm_use_pat() && map_type == _DRM_TTM) {
		pgprot_val(tmp) |= _PAGE_PAT;
		return tmp;
	}
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
	if (map_type == _DRM_TTM)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
			  unsigned long bus_offset, unsigned long bus_size,
			  struct drm_bo_kmap_obj *map)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg *mem = &bo->mem;
	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];

	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = bo_map_iomap;
		map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
			   unsigned long start_page, unsigned long num_pages,
			   struct drm_bo_kmap_obj *map)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg *mem = &bo->mem;
	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
	pgprot_t prot;
	struct drm_ttm *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);

	if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {

		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = bo_map_kmap;
		map->page = drm_ttm_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */

		for (i = start_page; i < start_page + num_pages; ++i) {
			d = drm_ttm_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */

		prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
			PAGE_KERNEL :
			drm_kernel_io_prot(man->drm_bus_maptype);
		map->bo_kmap_type = bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page,
				    num_pages, 0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
/*
 * This function is to be used for kernel mapping of buffer objects.
 * It chooses the appropriate mapping method depending on the memory type
 * and caching policy the buffer currently has.
 * Mapping multiple pages or buffers that live in io memory is a bit slow and
 * consumes vmalloc space. Be restrictive with such mappings.
 * Mapping single pages usually returns the logical kernel address
 * (which is fast), but may use slower temporary mappings for high memory
 * pages or uncached / write-combined pages.
 *
 * The function fills in a drm_bo_kmap_obj which can be used to return the
 * kernel virtual address of the buffer.
 *
 * Code servicing a non-privileged user request is only allowed to map one
 * page at a time. We might need to implement a better scheme to stop such
 * processes from consuming all vmalloc space.
 */
int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct drm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	map->virtual = NULL;

	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;

	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;

	if (bus_size == 0) {
		return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(drm_bo_kmap);
void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
{
	if (map->virtual == NULL)
		return;

	switch (map->bo_kmap_type) {
	case bo_map_iomap:
		iounmap(map->virtual);
		break;
	case bo_map_vmap:
		vunmap(map->virtual);
		break;
	case bo_map_kmap:
		kunmap(map->page);
		break;
	case bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(drm_bo_kunmap);
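/*
 * Usage sketch (illustrative only, not referenced elsewhere): mapping a
 * single page of a buffer object with drm_bo_kmap(), clearing it and
 * releasing the mapping with drm_bo_kunmap(). The example function name is
 * made up for illustration; it assumes the caller holds bo->mutex and that
 * overwriting the buffer contents is acceptable.
 */
static int __attribute__((unused))
drm_bo_clear_page_example(struct drm_buffer_object *bo, unsigned long page_index)
{
	struct drm_bo_kmap_obj map;
	int ret;

	ret = drm_bo_kmap(bo, page_index, 1, &map);
	if (ret)
		return ret;

	/*
	 * io memory must be cleared with memset_io(); system pages mapped
	 * with kmap() or vmap() can use a plain memset().
	 */
	if (map.bo_kmap_type == bo_map_iomap ||
	    map.bo_kmap_type == bo_map_premapped)
		memset_io(map.virtual, 0, PAGE_SIZE);
	else
		memset(map.virtual, 0, PAGE_SIZE);

	drm_bo_kunmap(&map);
	return 0;
}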