/* linux/drivers/char/exynos_mem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>	/* error codes */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

#include <plat/cpu.h>		/* soc_is_exynos*() */

#include <linux/exynos_mem.h>
#define L2_FLUSH_ALL	SZ_1M
#define L1_FLUSH_ALL	SZ_64K
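/*
 * The cutoffs above are heuristics, not architectural limits: for
 * ranges past roughly 64 KiB (L1) or 1 MiB (L2) the code below assumes
 * it is cheaper to maintain the whole cache level than to walk the
 * range line by line.
 */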
struct exynos_mem {
	bool		cacheable;
	unsigned int	phybase;	/* page frame number set via ioctl */
};

int exynos_mem_open(struct inode *inode, struct file *filp)
{
	struct exynos_mem *prv_data;

	prv_data = kzalloc(sizeof(struct exynos_mem), GFP_KERNEL);
	if (!prv_data) {
		pr_err("%s: not enough memory\n", __func__);
		return -ENOMEM;
	}

	prv_data->cacheable = true;	/* Default: cacheable */

	filp->private_data = prv_data;

	printk(KERN_DEBUG "[%s:%d] private_data(0x%08x)\n",
		__func__, __LINE__, (u32)prv_data);

	return 0;
}
int exynos_mem_release(struct inode *inode, struct file *filp)
{
	printk(KERN_DEBUG "[%s:%d] private_data(0x%08x)\n",
		__func__, __LINE__, (u32)filp->private_data);

	kfree(filp->private_data);

	return 0;
}
enum cacheop { EM_CLEAN, EM_INV, EM_FLUSH };
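/*
 * Inner (CPU) cache maintenance. In ARM terms a clean writes dirty
 * lines back to memory, an invalidate discards lines without writing
 * them back, and a flush does both. dmac_map_area(..., DMA_TO_DEVICE)
 * performs a clean and dmac_unmap_area(..., DMA_FROM_DEVICE) an
 * invalidate, so the DMA mapping helpers cover the first two ops.
 */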
static void cache_maint_inner(void *vaddr, size_t size, enum cacheop op)
{
	switch (op) {
	case EM_CLEAN:
		dmac_map_area(vaddr, size, DMA_TO_DEVICE);
		break;
	case EM_INV:
		/*
		 * Must be DMA_FROM_DEVICE: on ARMv7, dmac_unmap_area()
		 * is a no-op for the DMA_TO_DEVICE direction.
		 */
		dmac_unmap_area(vaddr, size, DMA_FROM_DEVICE);
		break;
	case EM_FLUSH:
		dmac_flush_range(vaddr, vaddr + size);
		break;
	}
}
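/*
 * Walk a physical range and apply @op to both cache levels. The inner
 * ops take virtual addresses, so each page is translated (and
 * temporarily kmap()ed when it lives in highmem), while the outer (L2)
 * ops take physical addresses directly. Except on EXYNOS5 parts,
 * ranges larger than L1_FLUSH_ALL skip the per-page walk and simply
 * flush the entire L1 on every CPU.
 */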
static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
{
	size_t left = length;
	phys_addr_t begin = start;

	if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
		if (length > (size_t) L1_FLUSH_ALL) {
			flush_cache_all();
			smp_call_function(
				(smp_call_func_t)__cpuc_flush_kern_all,
				NULL, 1);

			goto outer_cache_ops;
		}
	}

#ifdef CONFIG_HIGHMEM
	do {
		size_t len;
		struct page *page;
		void *vaddr;
		off_t offset;

		page = phys_to_page(start);
		offset = offset_in_page(start);
		len = PAGE_SIZE - offset;

		if (left < len)
			len = left;

		if (PageHighMem(page)) {
			vaddr = kmap(page);
			cache_maint_inner(vaddr + offset, len, op);
			kunmap(page);
		} else {
			vaddr = page_address(page) + offset;
			cache_maint_inner(vaddr, len, op);
		}

		left -= len;
		start += len;
	} while (left);
#else
	cache_maint_inner(phys_to_virt(begin), left, op);
#endif

outer_cache_ops:
	switch (op) {
	case EM_CLEAN:
		outer_clean_range(begin, begin + length);
		break;
	case EM_INV:
		if (length <= L2_FLUSH_ALL) {
			outer_inv_range(begin, begin + length);
			break;
		}
		/* else FALL THROUGH */
	case EM_FLUSH:
		outer_flush_range(begin, begin + length);
		break;
	}
}
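/*
 * Clean (write back) a physical range from both cache levels, again
 * choosing whole-cache maintenance above the size thresholds and a
 * ranged flush below them. The ranged path relies on phys_to_virt(),
 * so it is only valid for lowmem addresses.
 */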
static void exynos_mem_paddr_cache_clean(dma_addr_t start, size_t length)
{
	if (length > (size_t) L2_FLUSH_ALL) {
		flush_cache_all();		/* L1 */
		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
		outer_clean_all();		/* L2 */
	} else if (length > (size_t) L1_FLUSH_ALL) {
		dma_addr_t end = start + length - 1;

		flush_cache_all();		/* L1 */
		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
		outer_clean_range(start, end);	/* L2 */
	} else {
		dma_addr_t end = start + length - 1;

		dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
		outer_clean_range(start, end);	/* L2 */
	}
}
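/*
 * ioctl interface: EXYNOS_MEM_SET_CACHEABLE toggles whether subsequent
 * mmap()s are cacheable, EXYNOS_MEM_SET_PHYADDR records the physical
 * base used when mmap() is called with a zero offset, and the two
 * PADDR_CACHE commands run flush/clean maintenance over a
 * caller-supplied physical range.
 */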
long exynos_mem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXYNOS_MEM_SET_CACHEABLE:
	{
		struct exynos_mem *mem = filp->private_data;
		int cacheable;

		if (get_user(cacheable, (u32 __user *)arg)) {
			pr_err("[%s:%d] err: EXYNOS_MEM_SET_CACHEABLE\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		mem->cacheable = cacheable;
		break;
	}
	case EXYNOS_MEM_PADDR_CACHE_FLUSH:
	{
		struct exynos_mem_flush_range range;

		if (copy_from_user(&range,
				(struct exynos_mem_flush_range __user *)arg,
				sizeof(range))) {
			pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_FLUSH\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		cache_maint_phys(range.start, range.length, EM_FLUSH);
		break;
	}
	case EXYNOS_MEM_PADDR_CACHE_CLEAN:
	{
		struct exynos_mem_flush_range range;

		if (copy_from_user(&range,
				(struct exynos_mem_flush_range __user *)arg,
				sizeof(range))) {
			pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_CLEAN\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		cache_maint_phys(range.start, range.length, EM_CLEAN);
		break;
	}
	case EXYNOS_MEM_SET_PHYADDR:
	{
		struct exynos_mem *mem = filp->private_data;
		u32 phyaddr;

		if (get_user(phyaddr, (u32 __user *)arg)) {
			pr_err("[%s:%d] err: EXYNOS_MEM_SET_PHYADDR\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		mem->phybase = phyaddr >> PAGE_SHIFT;
		break;
	}
	default:
		pr_err("[%s:%d] error command\n", __func__, __LINE__);
		return -EINVAL;
	}

	return 0;
}
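/*
 * Illustrative userspace sequence (a sketch, not part of this file;
 * the device node name and open flags are assumptions, since the char
 * device itself is registered elsewhere -- commonly as
 * /dev/exynos-mem):
 *
 *	int fd = open("/dev/exynos-mem", O_RDWR | O_SYNC);
 *	unsigned int paddr = ...;	(hypothetical physical base)
 *	ioctl(fd, EXYNOS_MEM_SET_PHYADDR, &paddr);
 *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *
 *	struct exynos_mem_flush_range range = {
 *		.start  = paddr,
 *		.length = len,
 *	};
 *	ioctl(fd, EXYNOS_MEM_PADDR_CACHE_FLUSH, &range);
 */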
static void exynos_mem_mmap_open(struct vm_area_struct *vma)
{
	printk(KERN_DEBUG "[%s] addr(0x%08x)\n", __func__, (u32)vma->vm_start);
}

static void exynos_mem_mmap_close(struct vm_area_struct *vma)
{
	printk(KERN_DEBUG "[%s] addr(0x%08x)\n", __func__, (u32)vma->vm_start);
}

static struct vm_operations_struct exynos_mem_ops = {
	.open	= exynos_mem_mmap_open,
	.close	= exynos_mem_mmap_close,
};
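/*
 * Map physical memory into the caller's address space. The physical
 * base comes from the mmap offset when one is given, otherwise from
 * the value stored by EXYNOS_MEM_SET_PHYADDR. Only lowmem ranges are
 * accepted (see the TODO below), and a writable mapping must be
 * MAP_SHARED.
 */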
int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_mem *mem = (struct exynos_mem *)filp->private_data;
	bool cacheable = mem->cacheable;
	dma_addr_t start = 0;
	u32 pfn = 0;
	u32 size = vma->vm_end - vma->vm_start;

	if (vma->vm_pgoff) {
		start = vma->vm_pgoff << PAGE_SHIFT;
		pfn = vma->vm_pgoff;
	} else {
		start = mem->phybase << PAGE_SHIFT;
		pfn = mem->phybase;
	}

	/* TODO: currently only lowmem is available */
	if ((phys_to_virt(start) < (void *)PAGE_OFFSET) ||
	    (phys_to_virt(start) >= high_memory)) {
		pr_err("[%s] invalid paddr(0x%08x)\n", __func__, (u32)start);
		return -EINVAL;
	}

	if (!cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &exynos_mem_ops;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		pr_err("writable mapping must be shared\n");
		return -EINVAL;
	}

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		pr_err("mmap fail\n");
		return -EINVAL;
	}

	/*
	 * remap_pfn_range() does not invoke vm_ops->open() for the
	 * initial mapping, so call it by hand.
	 */
	vma->vm_ops->open(vma);

	return 0;
}