/* drivers/char/s5p_vmem.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *	http://www.samsung.com
 *
 * S5P_VMEM driver for /dev/s5p-vmem
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/asm-offsets.h>
/* VM_EXEC is originally defined in linux/mm.h, but asm/asm-offsets.h also
 * defines it with the same value. */
#undef VM_EXEC
#include <linux/mm.h>

#include <linux/module.h>
#include <linux/err.h>		/* IS_ERR_VALUE */
#include <linux/uaccess.h>	/* copy_from_user, copy_to_user */
#include <linux/mman.h>
#include <linux/sched.h>	/* 'current' and 'init_mm' global variables */
#include <linux/slab.h>
#include <linux/vmalloc.h>	/* unmap_kernel_range, remap_vmalloc_range, */
				/* map_vm_area, vmalloc_user, struct vm_struct */
#include <linux/dma-mapping.h>	/* DMA_FROM_DEVICE, DMA_TO_DEVICE */

#include <asm/outercache.h>
#include <asm/cacheflush.h>

#include "s5p_vmem.h"

/* ISSHARETYPE relies on every *_SHARE type having an odd value */
enum ALLOCTYPE {
	MEM_ALLOC,
	MEM_ALLOC_SHARE,
	MEM_ALLOC_CACHEABLE,
	MEM_ALLOC_CACHEABLE_SHARE,
	MEM_FREE,
	MEM_FREE_SHARE,
	MEM_RESET,
	NR_ALLOCTYPE,
	MEM_INVTYPE = NR_ALLOCTYPE	/* sentinel: no allocation in progress */
};

char *s5p_alloctype_names[NR_ALLOCTYPE] = {
	"MEM_ALLOC",
	"MEM_ALLOC_SHARE",
	"MEM_ALLOC_CACHEABLE",
	"MEM_ALLOC_CACHEABLE_SHARE",
	"MEM_FREE",
	"MEM_FREE_SHARE",
	"MEM_RESET",
};

struct kvm_area {
	unsigned int cookie;	/* unique id; derived from the physical address */
	/* TODO: cookie can be 0 because it is pfn-based; change the type of
	 * cookie to signed so that failure can be distinguished */
	void *start_addr;	/* the first kernel virtual address */
	size_t size;		/* size of the area in bytes */
	int count;		/* reference count */
	struct page **pages;	/* page descriptor table, if required */
	struct kvm_area *next;
};

static int s5p_free(struct file *, struct s5p_vmem_alloc *);
static int s5p_alloc(struct file *, struct s5p_vmem_alloc *);
static int s5p_reset(struct file *, struct s5p_vmem_alloc *);

/* handler table indexed by enum ALLOCTYPE */
static int (*mmanfns[NR_ALLOCTYPE]) (struct file *, struct s5p_vmem_alloc *) = {
	s5p_alloc,	/* MEM_ALLOC */
	s5p_alloc,	/* MEM_ALLOC_SHARE */
	s5p_alloc,	/* MEM_ALLOC_CACHEABLE */
	s5p_alloc,	/* MEM_ALLOC_CACHEABLE_SHARE */
	s5p_free,	/* MEM_FREE */
	s5p_free,	/* MEM_FREE_SHARE */
	s5p_reset,	/* MEM_RESET */
};

/* translates an ioctl command index into enum ALLOCTYPE; the table contents
 * depend on the ioctl numbering in s5p_vmem.h */
static char funcmap[NR_ALLOCTYPE + 2] = {
	MEM_INVTYPE,	/* NEVER USE THIS */
	MEM_INVTYPE,	/* NEVER USE THIS */
	MEM_ALLOC_CACHEABLE_SHARE,

/* We actually need only one mutex because ioctl must be executed atomically
 * to avoid the following problems:
 * - removing allocated physical memory while the area is being shared.
 * - modification of global variables by concurrent ioctl calls from other
 *   processes.
 */
static DEFINE_MUTEX(s5p_vmem_lock);
static DEFINE_MUTEX(s5p_vmem_userlock);

/* initialized by s5p_vmem_ioctl, used by s5p_vmem_mmap */
static int alloctype = MEM_INVTYPE;	/* enum ALLOCTYPE */
/* The first entry of the list is a dummy.
 * Do not access this variable directly; use ROOTKVM and FIRSTKVM. */
struct kvm_area root_kvm;

/* points to the most recently accessed entry of kvm_area */
static struct kvm_area *recent_kvm_area;
static unsigned int cookie;

#define IOCTLNR2FMAPIDX(nr)	_IOC_NR(nr)
#define ISALLOCTYPE(nr)		(((nr) >= 0) && ((nr) < MEM_FREE))
#define ISFREETYPE(nr)		(((nr) >= MEM_FREE) && ((nr) < NR_ALLOCTYPE))
#define ISSHARETYPE(nr)		((nr) & 1)
#define ROOTKVM			(&root_kvm)
#define FIRSTKVM		(root_kvm.next)
#define PAGEDESCSIZE(size)	(((size) >> PAGE_SHIFT) * sizeof(struct page *))

/*
 * flush_outercache_pagetable
 * Cleans the page table entries of a given range in the outer (L2) cache.
 * mm:   address space that owns the page tables
 * addr: start virtual address in the address space created by mm's pgd
 * size: the size of the range to be translated by mm's pgd
 *
 * This function must be called whenever a new mapping is created.
 * It does not need to be called when a mapping is removed, because no one
 * will use the removed mapping and the data in the L2 cache will be flushed
 * to memory soon anyway.
 */
#if defined(CONFIG_OUTER_CACHE) && defined(CONFIG_ARM)
static void flush_outercache_pagetable(struct mm_struct *mm, unsigned long addr,
				size_t size)
{
	pgd_t *pgd, *pgd_end;
	pmd_t *pmd;
	pte_t *pte, *pte_end;
	unsigned long end, next;

	end = addr + PAGE_ALIGN(size);
	pgd = pgd_offset(mm, addr);
	pgd_end = pgd_offset(mm, (addr + size + PGDIR_SIZE - 1) & PGDIR_MASK);

	/* Clean L1 page table entries */
	outer_flush_range(virt_to_phys(pgd), virt_to_phys(pgd_end));

	/* clean L2 page table entries */
	/* this regards pgd == pmd and assumes no pud */
	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(mm, addr);
		pmd = pmd_offset(pgd, addr);
		pte = pte_offset_map(pmd, addr) - PTRS_PER_PTE;
		pte_end = pte_offset_map(pmd, next - 4) - PTRS_PER_PTE + 1;
		outer_flush_range(virt_to_phys(pte), virt_to_phys(pte_end));
		addr = next;
	} while (addr != end);
}
#else
#define flush_outercache_pagetable(mm, addr, size) do { } while (0)
#endif /* CONFIG_OUTER_CACHE && CONFIG_ARM */

static struct kvm_area *createkvm(void *kvm_addr, size_t size)
{
	struct kvm_area *newarea, *cur;

	newarea = kmalloc(sizeof(struct kvm_area), GFP_KERNEL);
	if (newarea == NULL)
		return NULL;

	mutex_lock(&s5p_vmem_lock);

	newarea->start_addr = kvm_addr;
	newarea->size = size;
	newarea->count = 1;
	newarea->next = NULL;
	newarea->pages = NULL;
	newarea->cookie = virt_to_phys(kvm_addr) ^ 0xA5CF; /* simple obfuscation */

	/* append to the tail of the list */
	cur = ROOTKVM;
	while (cur->next != NULL)
		cur = cur->next;
	cur->next = newarea;

	mutex_unlock(&s5p_vmem_lock);

	return newarea;
}

static inline struct kvm_area *findkvm(unsigned int cookie)
{
	struct kvm_area *kvmarea;

	kvmarea = FIRSTKVM;
	while ((kvmarea != NULL) && (kvmarea->cookie != cookie))
		kvmarea = kvmarea->next;

	return kvmarea;
}

static inline unsigned int findcookie(void *addr)
{
	struct kvm_area *kvmarea;

	kvmarea = FIRSTKVM;
	while ((kvmarea != NULL) && (kvmarea->start_addr != addr))
		kvmarea = kvmarea->next;

	return (kvmarea != NULL) ? kvmarea->cookie : 0;
}

static struct kvm_area *attachkvm(unsigned int cookie)
{
	struct kvm_area *kvmarea;

	kvmarea = findkvm(cookie);
	if (kvmarea != NULL)
		kvmarea->count++;

	return kvmarea;
}

static int freekvm(unsigned int cookie)
{
	struct kvm_area *kvmarea, *rmarea;
	int i;

	kvmarea = ROOTKVM;
	while ((kvmarea->next != NULL) && (kvmarea->next->cookie != cookie))
		kvmarea = kvmarea->next;

	if (kvmarea->next == NULL)
		return -EINVAL;

	mutex_lock(&s5p_vmem_lock);

	rmarea = kvmarea->next;
	kvmarea->next = rmarea->next;

	if (rmarea->pages != NULL) {
		/* defined in mm/vmalloc.c */
		unmap_kernel_range((unsigned long)rmarea->start_addr,
				rmarea->size);
		/* entries may be NULL if page allocation failed halfway */
		for (i = 0; i < (rmarea->size >> PAGE_SHIFT); i++)
			if (rmarea->pages[i] != NULL)
				__free_page(rmarea->pages[i]);
		if (PAGEDESCSIZE(rmarea->size) > PAGE_SIZE)
			vfree(rmarea->pages);
		else
			kfree(rmarea->pages);
	} else {
		vfree(rmarea->start_addr);
	}

	kfree(rmarea);

	mutex_unlock(&s5p_vmem_lock);

	return 0;
}

long s5p_vmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* DO NOT REFER TO GLOBAL VARIABLES IN THIS FUNCTION */
	long result = 0;
	struct s5p_vmem_alloc param;
	int alloccmd;

	alloccmd = IOCTLNR2FMAPIDX(cmd);
	if ((alloccmd < 0) || (alloccmd >= NR_ALLOCTYPE + 2)) {
		printk(KERN_ERR
			"S5P-VMEM: Wrong allocation command number %d\n",
			alloccmd);
		return -EINVAL;
	}

	alloccmd = funcmap[alloccmd];
	if (alloccmd == MEM_INVTYPE) {
		printk(KERN_ERR
			"S5P-VMEM: Wrong translated allocation command number %d\n",
			alloccmd);
		return -EINVAL;
	}

	if (alloccmd < MEM_RESET) {
		result = copy_from_user(&param, (struct s5p_vmem_alloc *)arg,
				sizeof(struct s5p_vmem_alloc));
		if (result != 0)
			return -EFAULT;
	}

	mutex_lock(&s5p_vmem_userlock);
	alloctype = alloccmd;
	result = mmanfns[alloctype](file, &param);
	alloctype = MEM_INVTYPE;
	mutex_unlock(&s5p_vmem_userlock);

	if ((result == 0) && (alloccmd < MEM_FREE)) {
		result = copy_to_user((struct s5p_vmem_alloc *)arg, &param,
				sizeof(struct s5p_vmem_alloc));
		if (result != 0) {
			/* Copying the result back failed: release what was
			 * just allocated.  Only this error path and the
			 * dispatch above are allowed to access the
			 * 'alloctype' global var. */
			mutex_lock(&s5p_vmem_userlock);
			alloctype = MEM_FREE | ISSHARETYPE(alloccmd);
			s5p_free(file, &param);
			alloctype = MEM_INVTYPE;
			mutex_unlock(&s5p_vmem_userlock);
			return -EFAULT;
		}
	}

	if (alloccmd < MEM_RESET)
		return result;

	return 0;
}
EXPORT_SYMBOL(s5p_vmem_ioctl);

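/*
 * Example (an illustrative sketch, not part of the original source): how a
 * user-space client would drive the ioctl above.  The command macros
 * (S5PVMEM_ALLOC, S5PVMEM_FREE) and the exact layout of struct
 * s5p_vmem_alloc come from s5p_vmem.h; the names used here are assumptions.
 * Note that the handler calls do_mmap itself, so no explicit mmap is needed.
 *
 *	struct s5p_vmem_alloc param = { .size = 4096 };
 *	int fd = open("/dev/s5p-vmem", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, S5PVMEM_ALLOC, &param) == 0) {
 *		-- param.vir_addr now maps the area in this process;
 *		-- param.cookie can be handed to another process, which then
 *		-- shares the same memory via the corresponding SHARE command.
 *		ioctl(fd, S5PVMEM_FREE, &param);
 *	}
 *	close(fd);
 */
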
static struct kvm_area *createallockvm(size_t size)
{
	void *virt_addr, *virt_end;

	virt_addr = vmalloc_user(size);
	if (virt_addr == NULL)
		return NULL;

	flush_outercache_pagetable(&init_mm, (unsigned long)virt_addr, size);

	recent_kvm_area = createkvm(virt_addr, size);
	if (recent_kvm_area == NULL) {
		vfree(virt_addr);
		return NULL;
	}

	/* We vmalloc'ed page aligned; thus the operations below are safe */
	virt_end = virt_addr + size;
	dmac_flush_range(virt_addr, virt_end);
	while (virt_addr < virt_end) {
		unsigned long phys_addr;
		phys_addr = vmalloc_to_pfn(virt_addr) << PAGE_SHIFT;
		outer_flush_range(phys_addr, phys_addr + PAGE_SIZE);
		virt_addr += PAGE_SIZE;
	}

	return recent_kvm_area;
}

unsigned int s5p_vmalloc(size_t size)
{
	struct kvm_area *kvmarea;

	kvmarea = createallockvm(size);

	return (kvmarea == NULL) ? 0 : kvmarea->cookie;
}
EXPORT_SYMBOL(s5p_vmalloc);

void s5p_vfree(unsigned int cookie)
{
	freekvm(cookie);
}
EXPORT_SYMBOL(s5p_vfree);

void *s5p_getaddress(unsigned int cookie)
{
	struct kvm_area *kvmarea;

	kvmarea = findkvm(cookie);

	return (kvmarea == NULL) ? NULL : kvmarea->start_addr;
}
EXPORT_SYMBOL(s5p_getaddress);

unsigned int s5p_getcookie(void *addr)
{
	return findcookie(addr);
}
EXPORT_SYMBOL(s5p_getcookie);

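/*
 * Example (an illustrative sketch, not from the original source): how another
 * kernel driver can use the exported cookie API above.  The size is
 * hypothetical.
 *
 *	unsigned int cookie;
 *	void *kva;
 *
 *	cookie = s5p_vmalloc(SZ_64K);	-- allocate and name the area
 *	if (cookie == 0)
 *		return -ENOMEM;
 *	kva = s5p_getaddress(cookie);	-- kernel virtual address of the area
 *	BUG_ON(s5p_getcookie(kva) != cookie);
 *	s5p_vfree(cookie);		-- release the area when done
 */
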
int s5p_vmem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret = 0;

	if ((alloctype == MEM_ALLOC) || (alloctype == MEM_ALLOC_CACHEABLE))
		recent_kvm_area = createallockvm(vma->vm_end - vma->vm_start);
	else	/* alloctype == MEM_ALLOC_SHARE or MEM_ALLOC_CACHEABLE_SHARE */
		recent_kvm_area = attachkvm(cookie);

	if (recent_kvm_area == NULL)
		return -EINVAL;

	if ((alloctype == MEM_ALLOC) || (alloctype == MEM_ALLOC_SHARE))
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_RESERVED;

	if (recent_kvm_area->pages) {
		/* We cannot use remap_vmalloc_range if the page frames were
		 * not allocated by vmalloc.  The following code is very
		 * similar to remap_vmalloc_range. */
		unsigned long rpfn = 0;
		unsigned long uaddr = vma->vm_start;
		unsigned long usize = vma->vm_end - vma->vm_start;

		/* The condition below would create an invalid mapping;
		 * remap_vmalloc_range checks it internally as well. */
		if (recent_kvm_area->size < usize)
			return -EINVAL;

		while (rpfn < (usize >> PAGE_SHIFT)) {
			if (vm_insert_page(vma, uaddr,
					recent_kvm_area->pages[rpfn]) != 0)
				return -EAGAIN;
			uaddr += PAGE_SIZE;
			rpfn++;
		}
	} else {
		ret = remap_vmalloc_range(vma, recent_kvm_area->start_addr, 0);
	}

	flush_outercache_pagetable(vma->vm_mm, vma->vm_start,
				vma->vm_end - vma->vm_start);

	return ret;
}
EXPORT_SYMBOL(s5p_vmem_mmap);

/* returns 0 if successful */
static int s5p_alloc(struct file *file, struct s5p_vmem_alloc *param)
{
	cookie = param->cookie;

	/* TODO: enhance the following code to get the size of a shared area */
	if (ISSHARETYPE(alloctype)) {
		struct kvm_area *kvmarea;
		kvmarea = findkvm(cookie);
		param->size = (kvmarea == NULL) ? 0 : kvmarea->size;
	} else if (param->size == 0) {
		return -EINVAL;
	}

	/* a share request for a cookie that no longer exists ends up here */
	if (param->size == 0)
		return -EINVAL;

	/* unit of allocation is a page */
	param->size = PAGE_ALIGN(param->size);
	param->vir_addr = do_mmap(file, 0, param->size,
			(PROT_READ | PROT_WRITE), MAP_SHARED, 0);
	/* do_mmap returns a negative errno on failure */
	if (IS_ERR_VALUE(param->vir_addr)) {
		recent_kvm_area = NULL;
		return -EINVAL;
	}

	if ((alloctype == MEM_ALLOC) || (alloctype == MEM_ALLOC_CACHEABLE))
		param->cookie = recent_kvm_area->cookie;

	param->vir_addr_k = (unsigned long)recent_kvm_area->start_addr;
	param->size = recent_kvm_area->size;

	return 0;
}

static int s5p_free(struct file *file, struct s5p_vmem_alloc *param)
{
	struct kvm_area *kvmarea = NULL;

	if (param->vir_addr == 0)
		return -EINVAL;

	kvmarea = findkvm(param->cookie);
	if (kvmarea == NULL)
		return -EINVAL;

	if (do_munmap(current->mm, param->vir_addr, param->size) < 0)
		return -EINVAL;

	if ((alloctype == MEM_FREE) && (freekvm(param->cookie) != 0))
		return -EINVAL;

	recent_kvm_area = NULL;

	return 0;
}

/* no parameter required; pass NULL for all */
static int s5p_reset(struct file *file, struct s5p_vmem_alloc *param)
{
	struct kvm_area *kvmarea = NULL;

	mutex_lock(&s5p_vmem_lock);
	kvmarea = FIRSTKVM;
	while (kvmarea != NULL) {
		struct kvm_area *temp;

		vfree(kvmarea->start_addr);
		temp = kvmarea;
		kvmarea = kvmarea->next;
		kfree(temp);
	}
	FIRSTKVM = NULL;
	mutex_unlock(&s5p_vmem_lock);

	return 0;
}

int s5p_vmem_open(struct inode *pinode, struct file *pfile)
{
	return 0;
}
EXPORT_SYMBOL(s5p_vmem_open);

int s5p_vmem_release(struct inode *pinode, struct file *pfile)
{
	/* TODO: remove all instances of memory allocation
	 * when an opened file is being closed */
	return 0;
}
EXPORT_SYMBOL(s5p_vmem_release);

/*
 * s5p_vmem_vmemmap
 * Maps non-linear physical page frames into a contiguous virtual memory area
 * in the kernel's address space.
 * @size: size in bytes to map into the virtual address space
 * @va_start: the beginning address of the virtual memory area
 * @va_end: one past the last address of the virtual memory area
 *
 * If @va_end - @va_start is smaller than @size, allocation and mapping fail.
 * This returns the 'cookie' of the allocated area so that users can share it.
 * A return value of '0' (zero) means the mapping failed: memory allocation
 * failure, mapping failure and so on.
 *
 * va_start and size must be aligned to PAGE_SIZE.  If they are not, they are
 * expanded to page boundaries.  For example, even though you want to map
 * physical memory into the virtual address space between 0x00000FFC and
 * 0x00001004 (size: 8 bytes), s5p_vmem_vmemmap maps between 0x00000000 and
 * 0x00002000 (size: 8KB) because the virtual address range you provide spans
 * 2 pages.  With that mapping in place, an attempt to map physical memory at
 * 0x00001008 will overwrite the existing mapping.
 */
unsigned int s5p_vmem_vmemmap(size_t size, unsigned long va_start,
			unsigned long va_end)
{
	struct page **pages;
	struct kvm_area *kvma;	/* new virtual address area */
	unsigned int nr_pages, array_size, i;
	struct vm_struct area;	/* argument for map_vm_area */

	/* DMA and normal memory area must not be remapped */
	if ((va_start > va_end) || (va_start < VMALLOC_START))
		return 0;

	/* The desired size must not be larger than the size of the supplied
	 * virtual area. */
	size = PAGE_ALIGN(size + (va_start & (~PAGE_MASK)));
	if (size > (va_end - va_start))
		return 0;

	/* the start address of the area must be page aligned */
	va_start &= PAGE_MASK;
	va_end = va_start + size;

	nr_pages = size >> PAGE_SHIFT;
	array_size = nr_pages * sizeof(struct page *);

	kvma = createkvm((void *)va_start, size);
	if (kvma == NULL)
		return 0;

	/* prepare a memory buffer for the page descriptors */
	/* the same condition must be used when freeing this memory */
	if (array_size > PAGE_SIZE)
		pages = vmalloc(array_size);
	else
		pages = kmalloc(array_size, GFP_KERNEL);
	/* The error handling below ends up invoking vfree(va_start).
	 * That is ok because vfree just ignores va_start if it is not found
	 * in the list of vmalloc areas. */
	if (unlikely(!pages))
		goto fail;

	memset(pages, 0, array_size);
	kvma->pages = pages;

	/* page frame allocation */
	/* Even if alloc_page fails in the middle of allocation and the pages
	 * array is not filled completely, deallocation always succeeds in
	 * freekvm() because the pages array is initialized with 0. */
	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		unsigned long phys_addr;

		page = alloc_page(GFP_KERNEL);
		if (page == NULL)
			goto fail;

		phys_addr = page_to_pfn(page) << PAGE_SHIFT;
		/* flushes the L2 cache */
		outer_flush_range(phys_addr, phys_addr + PAGE_SIZE);
		pages[i] = page;
	}

	/* map_vm_area just manipulates 'addr' and 'size' of vm_struct,
	 * but if map_vm_area is modified to touch other members of vm_struct,
	 * the initialization of 'area' must also be changed! */
	area.addr = (void *)va_start;
	/* map_vm_area regards every area as containing a guard page */
	area.size = size + PAGE_SIZE;

	/* page table generation */
	if (map_vm_area(&area, PAGE_KERNEL, &pages) == 0) {
		flush_outercache_pagetable(&init_mm, va_start, size);
		/* invalidates the L1 cache */
		dmac_map_area((void *)va_start, size, DMA_FROM_DEVICE);
		return kvma->cookie;
	}

fail:
	/* free all pages in the 'pages' array and kvma itself */
	freekvm(kvma->cookie);
	return 0;
}
EXPORT_SYMBOL(s5p_vmem_vmemmap);

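/*
 * Example (an illustrative sketch, not from the original source): backing a
 * reserved window of vmalloc space with newly allocated page frames.  Using
 * get_vm_area() to reserve the window is an assumption of this sketch; any
 * otherwise unused range above VMALLOC_START works.
 *
 *	struct vm_struct *vm = get_vm_area(SZ_1M, VM_ALLOC);
 *	unsigned int cookie;
 *
 *	if (vm == NULL)
 *		return -ENOMEM;
 *	cookie = s5p_vmem_vmemmap(SZ_1M, (unsigned long)vm->addr,
 *			(unsigned long)vm->addr + SZ_1M);
 *	if (cookie == 0)
 *		return -ENOMEM;
 *	-- vm->addr .. vm->addr + SZ_1M is now backed by real pages
 *	s5p_vfree(cookie);	-- unmaps the range and frees the pages
 */
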
/*
 * s5p_vmem_va2page
 * Returns the pointer to the page descriptor of any given virtual address IN
 * THE KERNEL'S ADDRESS SPACE.  A NULL return means that no mapping exists for
 * the given virtual address.
 * This function is exactly the same as vmalloc_to_page.
 */
struct page *s5p_vmem_va2page(const void *virt_addr)
{
	struct page *page = NULL;
	unsigned long addr = (unsigned long)virt_addr;
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(addr < VMALLOC_START);

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}

	return page;
}
EXPORT_SYMBOL(s5p_vmem_va2page);

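/*
 * Example (an illustrative sketch): translating a vmalloc-space pointer to a
 * physical address with the helper above.  'kva' is a hypothetical pointer
 * into a mapped area.
 *
 *	struct page *pg = s5p_vmem_va2page(kva);
 *	unsigned long pa = 0;
 *
 *	if (pg != NULL)
 *		pa = (page_to_pfn(pg) << PAGE_SHIFT) |
 *			((unsigned long)kva & ~PAGE_MASK);
 */
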
/* s5p_vmem_dmac_map_area
 * start_addr: the beginning virtual address to be flushed
 * size: the size of the virtual memory area
 * dir: direction of DMA; one of DMA_FROM_DEVICE (2) and DMA_TO_DEVICE (1)
 *
 * The memory area must not lie below VMALLOC_START because the steps used to
 * find the physical address for a given virtual address do not consider ARM
 * section mappings; they only walk page-granular (vmalloc-style) mappings.
 */
void s5p_vmem_dmac_map_area(const void *start_addr, unsigned long size, int dir)
{
	void *end_addr, *cur_addr;

	/* TODO: the first address need not be page-aligned but only
	 * L2-line-size-aligned.  Enhance this once we know how to find the L2
	 * cache line size. */
	dmac_map_area(start_addr, size, dir);

	cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
	size = PAGE_ALIGN(size);
	end_addr = cur_addr + size;

	while (cur_addr < end_addr) {
		unsigned long phys_addr;

		phys_addr = page_to_pfn(s5p_vmem_va2page(cur_addr));
		phys_addr <<= PAGE_SHIFT;
		if (dir == DMA_FROM_DEVICE)
			outer_inv_range(phys_addr, phys_addr + PAGE_SIZE);
		else
			outer_clean_range(phys_addr, phys_addr + PAGE_SIZE);
		cur_addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(s5p_vmem_dmac_map_area);
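
/*
 * Example (an illustrative sketch): maintaining both cache levels around a
 * device-to-memory DMA transfer.  'kva' and 'len' are hypothetical; the
 * buffer must live above VMALLOC_START, e.g. one made by s5p_vmem_vmemmap.
 *
 *	s5p_vmem_dmac_map_area(kva, len, DMA_FROM_DEVICE);
 *	-- start the DMA transfer into the buffer and wait for completion;
 *	-- subsequent CPU reads see the device data because the L1 lines were
 *	-- handled by dmac_map_area and the covering L2 lines invalidated.
 */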