/* linux/drivers/char/exynos_mem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>        /* error codes */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

#include <plat/cpu.h>

#include <linux/exynos_mem.h>

#define L2_FLUSH_ALL    SZ_1M
#define L1_FLUSH_ALL    SZ_64K

struct exynos_mem {
        bool cacheable;
        unsigned int  phybase;
};

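/*
 * Each open() gets its own struct exynos_mem, which records whether later
 * mmap()s should be cacheable and which physical base to use when the
 * caller does not pass an explicit pgoff.  The state is freed in release().
 */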
int exynos_mem_open(struct inode *inode, struct file *filp)
{
        struct exynos_mem *prv_data;

        prv_data = kzalloc(sizeof(struct exynos_mem), GFP_KERNEL);
        if (!prv_data) {
                pr_err("%s: not enough memory\n", __func__);
                return -ENOMEM;
        }

        prv_data->cacheable = true;     /* Default: cacheable */

        filp->private_data = prv_data;

        printk(KERN_DEBUG "[%s:%d] private_data(%p)\n",
                __func__, __LINE__, prv_data);

        return 0;
}

int exynos_mem_release(struct inode *inode, struct file *filp)
{
        printk(KERN_DEBUG "[%s:%d] private_data(%p)\n",
                __func__, __LINE__, filp->private_data);

        kfree(filp->private_data);

        return 0;
}

enum cacheop { EM_CLEAN, EM_INV, EM_FLUSH };

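/*
 * Inner (CPU) cache maintenance on a kernel virtual range: EM_CLEAN writes
 * dirty lines back, EM_INV invalidates, and EM_FLUSH does both via
 * dmac_flush_range().
 */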
static void cache_maint_inner(void *vaddr, size_t size, enum cacheop op)
{
        switch (op) {
        case EM_CLEAN:
                dmac_map_area(vaddr, size, DMA_TO_DEVICE);
                break;
        case EM_INV:
                /* DMA_FROM_DEVICE is the direction that invalidates on unmap */
                dmac_unmap_area(vaddr, size, DMA_FROM_DEVICE);
                break;
        case EM_FLUSH:
                dmac_flush_range(vaddr, vaddr + size);
                break;
        }
}

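/*
 * Inner + outer cache maintenance on a physical address range.  Except on
 * the SoCs covered by the soc_is_*() check below, ranges larger than
 * L1_FLUSH_ALL are handled by flushing the whole L1 on every CPU.  With
 * CONFIG_HIGHMEM the range is walked page by page so highmem pages can be
 * kmap()ed; otherwise the whole range goes through phys_to_virt().  The
 * outer (L2) cache is then maintained by physical address.
 */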
static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
{
        size_t left = length;
        phys_addr_t begin = start;

        if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
                if (length > (size_t) L1_FLUSH_ALL) {
                        flush_cache_all();
                        smp_call_function(
                                        (smp_call_func_t)__cpuc_flush_kern_all,
                                        NULL, 1);

                        goto outer_cache_ops;
                }
        }

#ifdef CONFIG_HIGHMEM
        do {
                size_t len;
                struct page *page;
                void *vaddr;
                off_t offset;

                page = phys_to_page(start);
                offset = offset_in_page(start);
                len = PAGE_SIZE - offset;

                if (left < len)
                        len = left;

                if (PageHighMem(page)) {
                        vaddr = kmap(page);
                        cache_maint_inner(vaddr + offset, len, op);
                        kunmap(page);
                } else {
                        vaddr = page_address(page) + offset;
                        cache_maint_inner(vaddr, len, op);
                }
                left -= len;
                start += len;
        } while (left);
#else
        cache_maint_inner(phys_to_virt(begin), left, op);
#endif

outer_cache_ops:
        switch (op) {
        case EM_CLEAN:
                outer_clean_range(begin, begin + length);
                break;
        case EM_INV:
                if (length <= L2_FLUSH_ALL) {
                        outer_inv_range(begin, begin + length);
                        break;
                }
                /* else FALL THROUGH */
        case EM_FLUSH:
                outer_flush_range(begin, begin + length);
                break;
        }
}

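/*
 * Clean (write back) a physical range in both cache levels.  Large ranges
 * escalate to a full L1 flush on every CPU and, above L2_FLUSH_ALL, to a
 * full outer-cache clean instead of a ranged operation.
 */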
static void exynos_mem_paddr_cache_clean(dma_addr_t start, size_t length)
{
        if (length > (size_t) L2_FLUSH_ALL) {
                flush_cache_all();              /* L1 */
                smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
                outer_clean_all();              /* L2 */
        } else if (length > (size_t) L1_FLUSH_ALL) {
                dma_addr_t end = start + length - 1;

                flush_cache_all();              /* L1 */
                smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
                outer_clean_range(start, end);  /* L2 */
        } else {
                dma_addr_t end = start + length - 1;

                dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
                outer_clean_range(start, end);  /* L2 */
        }
}

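/*
 * ioctl interface:
 *   EXYNOS_MEM_SET_CACHEABLE     - set whether later mmap()s are cacheable
 *   EXYNOS_MEM_PADDR_CACHE_FLUSH - flush (clean + invalidate) a physical range
 *   EXYNOS_MEM_PADDR_CACHE_CLEAN - clean (write back) a physical range
 *   EXYNOS_MEM_SET_PHYADDR       - set the default physical base for mmap()
 * The range commands take a struct exynos_mem_flush_range (start, length)
 * copied from user space.
 */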
long exynos_mem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case EXYNOS_MEM_SET_CACHEABLE:
        {
                struct exynos_mem *mem = filp->private_data;
                int cacheable;

                if (get_user(cacheable, (u32 __user *)arg)) {
                        pr_err("[%s:%d] err: EXYNOS_MEM_SET_CACHEABLE\n",
                                __func__, __LINE__);
                        return -EFAULT;
                }
                mem->cacheable = cacheable;
                break;
        }

        case EXYNOS_MEM_PADDR_CACHE_FLUSH:
        {
                struct exynos_mem_flush_range range;

                if (copy_from_user(&range,
                                   (struct exynos_mem_flush_range __user *)arg,
                                   sizeof(range))) {
                        pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_FLUSH\n",
                                __func__, __LINE__);
                        return -EFAULT;
                }

                cache_maint_phys(range.start, range.length, EM_FLUSH);
                break;
        }

        case EXYNOS_MEM_PADDR_CACHE_CLEAN:
        {
                struct exynos_mem_flush_range range;

                if (copy_from_user(&range,
                                   (struct exynos_mem_flush_range __user *)arg,
                                   sizeof(range))) {
                        pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_CLEAN\n",
                                __func__, __LINE__);
                        return -EFAULT;
                }

                cache_maint_phys(range.start, range.length, EM_CLEAN);
                break;
        }

        case EXYNOS_MEM_SET_PHYADDR:
        {
                struct exynos_mem *mem = filp->private_data;
                u32 phyaddr;

                if (get_user(phyaddr, (u32 __user *)arg)) {
                        pr_err("[%s:%d] err: EXYNOS_MEM_SET_PHYADDR\n",
                                __func__, __LINE__);
                        return -EFAULT;
                }
                mem->phybase = phyaddr >> PAGE_SHIFT;

                break;
        }

        default:
                pr_err("[%s:%d] unknown command\n", __func__, __LINE__);
                return -EINVAL;
        }

        return 0;
}

static void exynos_mem_mmap_open(struct vm_area_struct *vma)
{
        printk(KERN_DEBUG "[%s] addr(0x%08lx)\n", __func__, vma->vm_start);
}

static void exynos_mem_mmap_close(struct vm_area_struct *vma)
{
        printk(KERN_DEBUG "[%s] addr(0x%08lx)\n", __func__, vma->vm_start);
}

static const struct vm_operations_struct exynos_mem_ops = {
        .open   = exynos_mem_mmap_open,
        .close  = exynos_mem_mmap_close,
};

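/*
 * mmap() a physical range into the caller's address space.  The physical
 * base comes from vma->vm_pgoff when one is supplied, otherwise from the
 * base previously set with EXYNOS_MEM_SET_PHYADDR.  Only lowmem addresses
 * are accepted, mappings default to cacheable unless disabled via
 * EXYNOS_MEM_SET_CACHEABLE, and writable mappings must be MAP_SHARED.
 */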
int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_mem *mem = (struct exynos_mem *)filp->private_data;
        bool cacheable = mem->cacheable;
        dma_addr_t start = 0;
        u32 pfn = 0;
        u32 size = vma->vm_end - vma->vm_start;

        if (vma->vm_pgoff) {
                start = vma->vm_pgoff << PAGE_SHIFT;
                pfn = vma->vm_pgoff;
        } else {
                start = mem->phybase << PAGE_SHIFT;
                pfn = mem->phybase;
        }

        /* TODO: currently only lowmem is available */
        if ((phys_to_virt(start) < (void *)PAGE_OFFSET) ||
            (phys_to_virt(start) >= high_memory)) {
                pr_err("[%s] invalid paddr(0x%08x)\n", __func__, (u32)start);
                return -EINVAL;
        }

        if (!cacheable)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_flags |= VM_RESERVED;
        vma->vm_ops = &exynos_mem_ops;

        if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                pr_err("writable mapping must be shared\n");
                return -EINVAL;
        }

        if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
                pr_err("mmap fail\n");
                return -EINVAL;
        }

        vma->vm_ops->open(vma);

        return 0;
}
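
/*
 * Illustrative user-space usage sketch (not part of this driver).  It assumes
 * the character device node is exposed as /dev/exynos-mem and that the ioctl
 * numbers and struct exynos_mem_flush_range come from <linux/exynos_mem.h>;
 * 'paddr' and 'len' stand in for a valid lowmem physical range.  Adjust the
 * node name to whatever the platform actually registers.
 *
 *      int fd = open("/dev/exynos-mem", O_RDWR);
 *      int cacheable = 0;
 *      ioctl(fd, EXYNOS_MEM_SET_CACHEABLE, &cacheable);
 *
 *      // Map 'len' bytes starting at physical address 'paddr'
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     fd, paddr);
 *
 *      // Write back and invalidate the caches for that range afterwards
 *      struct exynos_mem_flush_range range = {
 *              .start  = paddr,
 *              .length = len,
 *      };
 *      ioctl(fd, EXYNOS_MEM_PADDR_CACHE_FLUSH, &range);
 */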