/*
 * (C) COPYRIGHT 2010-2011 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
/**
 * @file mali_kbase_mem_linux.c
 * Base kernel memory APIs, Linux implementation.
 */
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

#include <kbase/src/common/mali_kbase.h>
#include <kbase/src/linux/mali_kbase_mem_linux.h>
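
/**
 * kbase_pmem_alloc - allocate physical memory (PMEM) for a context.
 *
 * Allocates @size pages for @kctx, backs the region with physical
 * pages and parks it on the context's pending list. The region is not
 * CPU-mapped yet: a cookie identifying it is returned through
 * @pmem_cookie, and the mapping is completed when userspace passes
 * that cookie back as the mmap() offset (see kbase_mmap below).
 */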
struct kbase_va_region *kbase_pmem_alloc(struct kbase_context *kctx, u32 size,
					 u32 flags, u16 *pmem_cookie)
{
	struct kbase_va_region *reg;
	u16 cookie;

	OSK_ASSERT(kctx != NULL);
	OSK_ASSERT(pmem_cookie != NULL);

	if (!kbase_check_alloc_flags(flags))
		goto out;

	reg = kbase_alloc_free_region(kctx, 0, size, KBASE_REG_ZONE_PMEM);
	if (!reg)
		goto out;

	reg->flags &= ~KBASE_REG_FREE;
	kbase_update_region_flags(reg, flags, MALI_FALSE);

	if (kbase_alloc_phy_pages(reg, size, size))
		goto out_free_region;

	reg->nr_alloc_pages = size;

	kbase_gpu_vm_lock(kctx);
	if (!kctx->osctx.cookies)
		goto out_no_cookie;

	cookie = __ffs(kctx->osctx.cookies);
	kctx->osctx.cookies &= ~(1UL << cookie);
	reg->flags &= ~KBASE_REG_COOKIE_MASK;
	reg->flags |= KBASE_REG_COOKIE(cookie);

	OSK_DLIST_PUSH_FRONT(&kctx->osctx.reg_pending, reg,
			     struct kbase_va_region, link);

	*pmem_cookie = cookie;
	kbase_gpu_vm_unlock(kctx);

	return reg;

out_no_cookie:
	kbase_gpu_vm_unlock(kctx);
	kbase_free_phy_pages(reg);
out_free_region:
	kbase_free_alloced_region(reg);
out:
	return NULL;
}
KBASE_EXPORT_TEST_API(kbase_pmem_alloc)
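
/*
 * Illustration of the cookie bookkeeping above, with invented values:
 * if the reserved cookies occupy bits 0-2, a fresh context starts with
 * cookies == ~0x7, so __ffs() returns 3 and the first PMEM allocation
 * gets cookie 3. That bit stays cleared until the cookie is resolved
 * by mmap() or reclaimed when the context is destroyed.
 */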
/**
 * Callback for munmap(). PMEM gets special treatment here: the memory
 * is freed at the same time it is unmapped. This avoids the map/unmap
 * race where a new mapping could reuse a memory range that has been
 * unmapped from the CPU but is still mapped on the GPU.
 */
STATIC void kbase_cpu_vm_close(struct vm_area_struct *vma)
{
	struct kbase_va_region *reg = vma->vm_private_data;
	struct kbase_context *kctx = reg->kctx;
	mali_error err;

	kbase_gpu_vm_lock(kctx);

	err = kbase_cpu_free_mapping(reg, vma);
	if (MALI_ERROR_NONE == err &&
	    (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_PMEM)
	{
		kbase_mem_free_region(kctx, reg);
	}

	kbase_gpu_vm_unlock(kctx);
}
KBASE_EXPORT_TEST_API(kbase_cpu_vm_close)
static const struct vm_operations_struct kbase_vm_ops = {
	.close = kbase_cpu_vm_close,
};
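
/*
 * kbase_cpu_mmap - back a vma with the physical pages of a region.
 *
 * Pages from the region's physical page array are inserted one by one
 * with vm_insert_mixed(); buffers that come with a kernel virtual
 * address (kaddr != NULL, i.e. vmalloc'd memory) are remapped
 * wholesale with remap_vmalloc_range(). A kbase_cpu_mapping is
 * recorded on the region so kbase_cpu_vm_close() can tear the mapping
 * down again.
 */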
static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, u32 nr_pages)
{
	struct kbase_cpu_mapping *map;
	u64 start_off = vma->vm_pgoff - reg->start_pfn;
	osk_phy_addr *page_array;
	int err = 0;

	map = osk_calloc(sizeof(*map));
	if (!map)
		return -ENOMEM;

	/*
	 * VM_DONTCOPY - don't make this mapping available in fork'ed processes
	 * VM_DONTEXPAND - disable mremap on this region
	 * VM_RESERVED & VM_IO - disables paging
	 * VM_MIXEDMAP - Support mixing struct page*s and raw pfns.
	 *               This is needed to support using the dedicated and
	 *               the OS based memory backends together.
	 *
	 * This will need updating to propagate coherency flags.
	 */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO | VM_MIXEDMAP;
	vma->vm_ops = &kbase_vm_ops;
	vma->vm_private_data = reg;

	page_array = kbase_get_phy_pages(reg);

	if (!(reg->flags & KBASE_REG_CPU_CACHED))
	{
		/* We can't map vmalloc'd memory uncached. Other memory will
		 * have been returned from osk_phy_pages_alloc, which should
		 * have done the cache maintenance necessary to support an
		 * uncached mapping.
		 */
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

	if (!kaddr)
	{
		u32 i;

		for (i = 0; i < nr_pages; i++)
		{
			err = vm_insert_mixed(vma, vma->vm_start + (i << OSK_PAGE_SHIFT), page_array[i + start_off] >> OSK_PAGE_SHIFT);
			if (err)
				break;
		}
	}
	else
	{
		/* vmalloc remapping is easy... */
		err = remap_vmalloc_range(vma, kaddr, 0);
	}

	if (err)
	{
		osk_free(map);
		return err;
	}

	map->uaddr = (osk_virt_addr)vma->vm_start;
	map->nr_pages = nr_pages;
	map->page_off = start_off;

	OSK_DLIST_PUSH_FRONT(&reg->map_list, map,
			     struct kbase_cpu_mapping, link);

	return 0;
}
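
/*
 * Worked example of the offset arithmetic above (numbers invented for
 * illustration): with reg->start_pfn == 0x1000 and an mmap() at page
 * offset 0x1008 (vma->vm_pgoff == 0x1008), start_off == 8, so CPU page
 * i of the vma is backed by page_array[i + 8]. The physical address is
 * shifted right by OSK_PAGE_SHIFT because vm_insert_mixed() expects a
 * pfn rather than a byte address.
 */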
static int kbase_rb_mmap(struct kbase_context *kctx,
			 struct vm_area_struct *vma,
			 struct kbase_va_region **reg,
			 void **kaddr)
{
	struct kbase_va_region *new_reg;
	u32 nr_pages;
	size_t size;
	mali_error m_err = MALI_ERROR_NONE;

	pr_debug("in kbase_rb_mmap\n");
	size = (vma->vm_end - vma->vm_start);
	nr_pages = size >> OSK_PAGE_SHIFT;

	if (kctx->jctx.pool_size < size)
		return -EINVAL;

	*kaddr = kctx->jctx.pool;

	new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_PMEM);
	if (!new_reg)
		return -ENOMEM;

	new_reg->flags &= ~KBASE_REG_FREE;
	new_reg->flags |= KBASE_REG_IS_RB | KBASE_REG_CPU_CACHED;

	m_err = kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1);
	if (MALI_ERROR_NONE != m_err)
	{
		pr_debug("kbase_rb_mmap: kbase_add_va_region failed\n");
		/* Free allocated new_reg */
		kbase_free_alloced_region(new_reg);
		return -ENOMEM;
	}

	*reg = new_reg;

	pr_debug("kbase_rb_mmap done\n");
	return 0;
}
static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **reg, void **kaddr)
{
	struct kbase_va_region *new_reg;
	void *tb;
	u32 nr_pages;
	size_t size;

	pr_debug("in %s\n", __func__);
	size = (vma->vm_end - vma->vm_start);
	nr_pages = size >> OSK_PAGE_SHIFT;

	tb = osk_vmalloc(size);
	if (NULL == tb)
		return -ENOMEM;

	kbase_device_trace_buffer_install(kctx, tb, size);
	*kaddr = kctx->jctx.tb;

	new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_PMEM);
	if (!new_reg)
		goto out_uninstall;

	new_reg->flags &= ~KBASE_REG_FREE;
	new_reg->flags |= KBASE_REG_IS_TB | KBASE_REG_CPU_CACHED;

	if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1))
	{
		kbase_free_alloced_region(new_reg);
		goto out_uninstall;
	}

	*reg = new_reg;

	/* map read only, noexec */
	vma->vm_flags &= ~(VM_WRITE | VM_EXEC);
	/* the rest of the flags are added by the cpu_mmap handler */

	pr_debug("%s done\n", __func__);
	return 0;

out_uninstall:
	kbase_device_trace_buffer_uninstall(kctx);
	return -ENOMEM;
}
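
/*
 * kbase_mmu_dump_mmap - map a snapshot of the GPU MMU page tables.
 *
 * kbase_mmu_dump() produces a kernel-side dump sized to fit nr_pages;
 * the dump is handed back via *kmap_addr so that kbase_mmap() can both
 * CPU-map it and free the kernel copy once userspace holds its own
 * reference to the pages.
 */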
static int kbase_mmu_dump_mmap(struct kbase_context *kctx,
			       struct vm_area_struct *vma,
			       struct kbase_va_region **reg,
			       void **kmap_addr)
{
	struct kbase_va_region *new_reg;
	void *kaddr;
	u32 nr_pages;
	size_t size;

	pr_debug("in kbase_mmu_dump_mmap\n");
	size = (vma->vm_end - vma->vm_start);
	nr_pages = size >> OSK_PAGE_SHIFT;

	kaddr = kbase_mmu_dump(kctx, nr_pages);
	if (!kaddr)
		return -ENOMEM;

	new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_PMEM);
	if (!new_reg)
		return -ENOMEM;

	new_reg->flags &= ~KBASE_REG_FREE;
	new_reg->flags |= KBASE_REG_IS_MMU_DUMP | KBASE_REG_CPU_CACHED;

	if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1))
	{
		kbase_free_alloced_region(new_reg);
		return -ENOMEM;
	}

	*kmap_addr = kaddr;
	*reg = new_reg;

	pr_debug("kbase_mmu_dump_mmap done\n");
	return 0;
}
/* must be called with the gpu vm lock held */
struct kbase_va_region *kbase_lookup_cookie(struct kbase_context *kctx, mali_addr64 cookie)
{
	struct kbase_va_region *reg;
	mali_addr64 test_cookie;

	OSK_ASSERT(kctx != NULL);

	test_cookie = KBASE_REG_COOKIE(cookie);

	OSK_DLIST_FOREACH(&kctx->osctx.reg_pending, struct kbase_va_region, link, reg)
	{
		if ((reg->flags & KBASE_REG_COOKIE_MASK) == test_cookie)
			return reg;
	}

	return NULL; /* not found */
}
KBASE_EXPORT_TEST_API(kbase_lookup_cookie)
void kbase_unlink_cookie(struct kbase_context *kctx, mali_addr64 cookie, struct kbase_va_region *reg)
{
	OSK_ASSERT(kctx != NULL);
	OSK_ASSERT(reg != NULL);
	OSK_ASSERT(MALI_TRUE == OSK_DLIST_MEMBER_OF(&kctx->osctx.reg_pending, reg, link));
	OSK_ASSERT(KBASE_REG_COOKIE(cookie) == (reg->flags & KBASE_REG_COOKIE_MASK));
	OSK_ASSERT((kctx->osctx.cookies & (1UL << cookie)) == 0);

	OSK_DLIST_REMOVE(&kctx->osctx.reg_pending, reg, link);
	kctx->osctx.cookies |= (1UL << cookie); /* mark as resolved */
}
KBASE_EXPORT_TEST_API(kbase_unlink_cookie)
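
/*
 * Cookie lifecycle, for illustration: kbase_pmem_alloc() clears bit N
 * of kctx->osctx.cookies and queues the region on reg_pending;
 * kbase_lookup_cookie() finds the region again from the mmap() offset;
 * kbase_unlink_cookie() dequeues it and sets bit N again, freeing the
 * cookie for reuse. Cookies never resolved by mmap() are reclaimed in
 * kbase_destroy_os_context().
 */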
void kbase_os_mem_map_lock(struct kbase_context *kctx)
{
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
}

void kbase_os_mem_map_unlock(struct kbase_context *kctx)
{
	struct mm_struct *mm = current->mm;
	up_read(&mm->mmap_sem);
}
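
/*
 * kbase_mmap dispatches on the mmap() file offset (vma->vm_pgoff): the
 * reserved cookies select the shared ring-buffer, the trace buffer and
 * the MMU dump; other offsets within the first page resolve a pending
 * PMEM cookie (mapping the region on the GPU as a side effect); and
 * offsets at or above KBASE_REG_ZONE_TMEM_BASE are treated as GPU page
 * frame numbers and matched against existing TMEM regions.
 */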
int kbase_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kbase_context *kctx = file->private_data;
	struct kbase_va_region *reg;
	void *kaddr = NULL;
	u32 nr_pages;
	int err = 0;

	pr_debug("kbase_mmap\n");
	nr_pages = (vma->vm_end - vma->vm_start) >> OSK_PAGE_SHIFT;

	kbase_gpu_vm_lock(kctx);

	if (vma->vm_pgoff == KBASE_REG_COOKIE_RB)
	{
		/* Offset 0 is reserved for the shared ring-buffer */
		err = kbase_rb_mmap(kctx, vma, &reg, &kaddr);
		if (err)
			goto out_unlock;
		pr_debug("kbase_rb_mmap ok\n");
	}
	else if (vma->vm_pgoff == KBASE_REG_COOKIE_TB)
	{
		err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
		if (err)
			goto out_unlock;
		pr_debug("kbase_trace_buffer_mmap ok\n");
	}
	else if (vma->vm_pgoff == KBASE_REG_COOKIE_MMU_DUMP)
	{
		err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
		if (err)
			goto out_unlock;
	}
	else if (vma->vm_pgoff < OSK_PAGE_SIZE) /* first page is reserved for cookie resolution */
	{
		/* PMEM stuff, fetch the right region */
		reg = kbase_lookup_cookie(kctx, vma->vm_pgoff);
		if (NULL == reg)
		{
			err = -ENOMEM;
			goto out_unlock;
		}

		if (reg->nr_pages != nr_pages)
		{
			/* incorrect mmap size */
			/* leave the cookie for a potential later mapping, or to be reclaimed later when the context is freed */
			err = -ENOMEM;
			goto out_unlock;
		}

		kbase_unlink_cookie(kctx, vma->vm_pgoff, reg);

		/*
		 * If we cannot map it in GPU space, then something is *very*
		 * wrong. We might as well die now.
		 */
		if (MALI_ERROR_NONE != kbase_gpu_mmap(kctx, reg, vma->vm_start,
						      reg->nr_pages, 1))
		{
			err = -ENOMEM;
			goto out_unlock;
		}

		/*
		 * Overwrite the offset with the region start_pfn, so we
		 * effectively map from offset 0 in the region.
		 */
		vma->vm_pgoff = reg->start_pfn;
	}
	else if (vma->vm_pgoff < KBASE_REG_ZONE_TMEM_BASE)
	{
		/* invalid offset as it identifies an already mapped pmem */
		err = -EINVAL;
		goto out_unlock;
	}
	else
	{
		/* TMEM: find the region containing the requested GPU range */
		OSK_DLIST_FOREACH(&kctx->reg_list,
				  struct kbase_va_region, link, reg)
		{
			if (reg->start_pfn <= vma->vm_pgoff &&
			    (reg->start_pfn + reg->nr_alloc_pages) >= (vma->vm_pgoff + nr_pages) &&
			    (reg->flags & (KBASE_REG_ZONE_MASK | KBASE_REG_FREE)) == KBASE_REG_ZONE_TMEM)
			{
				break;
			}
		}
	}

	err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages);

	if (vma->vm_pgoff == KBASE_REG_COOKIE_MMU_DUMP)
	{
		/* MMU dump - userspace should now have a reference on
		 * the pages, so we can now free the kernel mapping */
		osk_vfree(kaddr);
	}

out_unlock:
	kbase_gpu_vm_unlock(kctx);
	if (err)
		pr_err("mmap failed %d\n", err);
	return err;
}
KBASE_EXPORT_TEST_API(kbase_mmap)
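
/*
 * Sketch of the userspace side of the PMEM protocol, under the
 * assumption that a pmem-alloc ioctl wraps kbase_pmem_alloc() and
 * returns the cookie (names invented for illustration):
 *
 *   u16 cookie = ...;   // obtained from the pmem alloc ioctl
 *   void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                    mali_fd, (off_t)cookie << OSK_PAGE_SHIFT);
 *
 * mmap() offsets are page-scaled by the kernel, so the cookie arrives
 * here in vma->vm_pgoff and is matched by kbase_lookup_cookie().
 */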
mali_error kbase_create_os_context(kbase_os_context *osctx)
{
	OSK_ASSERT(osctx != NULL);

	OSK_DLIST_INIT(&osctx->reg_pending);
	osctx->cookies = ~KBASE_REG_RESERVED_COOKIES;
	init_waitqueue_head(&osctx->event_queue);

	return MALI_ERROR_NONE;
}
KBASE_EXPORT_TEST_API(kbase_create_os_context)
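
/*
 * Starting from ~KBASE_REG_RESERVED_COOKIES leaves every cookie bit
 * set except the reserved ones, so the fixed mmap offsets (ring-buffer,
 * trace buffer, MMU dump) can never be handed out as PMEM cookies by
 * kbase_pmem_alloc().
 */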
static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
{
	kbase_free_phy_pages(reg);
	pr_info("Freeing pending unmapped region\n");
}

void kbase_destroy_os_context(kbase_os_context *osctx)
{
	OSK_ASSERT(osctx != NULL);

	OSK_DLIST_EMPTY_LIST(&osctx->reg_pending, struct kbase_va_region,
			     link, kbase_reg_pending_dtor);
}
KBASE_EXPORT_TEST_API(kbase_destroy_os_context)