/* drivers/char/s3c_mem.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * S3C MEM driver for /dev/mem
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/errno.h>	/* error codes */
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/dma-mapping.h>
#include <linux/unistd.h>
#include <linux/version.h>

#include <asm/div64.h>
#include <asm/cacheflush.h>

#include <mach/hardware.h>

#ifdef CONFIG_S3C_DMA_MEM
#include "s3c_dma_mem.h"
#endif

#ifdef CONFIG_S3C_MEM_CMA_ALLOC
#include <linux/cma.h>
#include <linux/platform_device.h>
#endif
/* Allocation flavour selected by the last alloc ioctl; read by
 * s3c_mem_mmap() when the nested do_mmap() call arrives.
 * NOTE(review): the declaration was lost in this extract, but
 * s3c_mem_ioctl()/s3c_mem_mmap() both use a file-scope `flag`, so it
 * must exist at this scope — confirm against the original driver. */
static int flag;

/* Physical address of the most recent allocation; written by
 * s3c_mem_mmap() and returned to userspace by the alloc ioctls.
 * Shared mutable state, serialised only by the per-ioctl mutexes. */
static unsigned int physical_address;

/* Kernel virtual address of the most recent allocation; handed back as
 * kvir_addr so S3C_MEM_FREE can pass it to dma_free_writecombine(). */
static unsigned int virtual_address;

#ifdef CONFIG_S3C_MEM_CMA_ALLOC
/* CMA slot table shared by every opener of the device node. */
struct s3c_slot_info *s3c_slot_info;
int s3c_cma_max_block_num;	/* number of entries in s3c_slot_info */
int s3c_cma_block_size;		/* byte size of one slot */
#endif
66 static void s3c_mem_log(struct s3c_dev_info *prv_data, bool mem_info)
69 for (i = 0; i < prv_data->dev_max_slot_num; i++)
71 "s_slot_info[%d].s_start_addr=0x%x s_mapped=%d \n", i,
72 prv_data->s_slot_info[i].s_start_addr,
73 prv_data->s_slot_info[i].s_mapped);
76 "s_cur_mem_info->paddr=0x%x s_mem_info->vaddr=0x%x s_mem_info->size=%d \n",
77 prv_data->s_cur_mem_info.paddr,
78 prv_data->s_cur_mem_info.vaddr,
79 prv_data->s_cur_mem_info.mapped_size);
82 static unsigned long s3c_mapping_slot(struct s3c_dev_info *prv_data)
84 int i, j, k, v_start_slot = 0;
85 unsigned long lv_ret = 0;
87 for (i = 0; i < prv_data->dev_max_slot_num; i++) {
88 if (prv_data->s_slot_info[i].s_mapped == false) {
89 if (i + prv_data->s_cur_mem_info.req_memblock >
90 prv_data->dev_max_slot_num) {
91 printk(KERN_ERR "ERROR : not enough memory \n");
96 j < i + prv_data->s_cur_mem_info.req_memblock;
98 if (prv_data->s_slot_info[j].s_mapped == true)
101 if (j == i + prv_data->s_cur_mem_info.req_memblock) {
103 __phys_to_pfn(prv_data->s_slot_info
104 [v_start_slot].s_start_addr);
105 physical_address = (unsigned int)
106 prv_data->s_slot_info[v_start_slot].
108 for (k = v_start_slot; k < j; k++) {
109 prv_data->s_slot_info[k].s_mapped =
112 "prv_data->s_slot_info[%d].s_mapped=1 \n",
120 if (i == prv_data->dev_max_slot_num)
121 printk(KERN_ERR "ERROR :can not find the suitable slot \n");
126 static int s3c_unmapping_slot(struct s3c_dev_info *prv_data)
128 int i, j, lv_ret = 0;
129 for (i = 0; i < prv_data->dev_max_slot_num; i++) {
130 if (prv_data->s_slot_info[i].s_start_addr ==
131 prv_data->s_cur_mem_info.paddr) {
133 j < i + prv_data->s_cur_mem_info.req_memblock;
135 prv_data->s_slot_info[j].s_mapped = false;
137 "s_slot_info[%d].s_mapped = 0 \n", j);
/*
 * open() handler: allocate a per-open s3c_dev_info, point it at the
 * shared CMA slot table and zero its current-allocation record.
 * Returns 0 on success, -ENOMEM if the bookkeeping allocation fails.
 *
 * NOTE(review): reconstructed — the NULL check, error return and
 * CONFIG_S3C_MEM_CMA_ALLOC guard were lost in this extract; the body
 * only touches CMA state, so the guard placement should be confirmed.
 */
int s3c_mem_open(struct inode *inode, struct file *filp)
{
#ifdef CONFIG_S3C_MEM_CMA_ALLOC
	struct s3c_dev_info *prv_data;

	mutex_lock(&mem_open_lock);

	prv_data = kzalloc(sizeof(struct s3c_dev_info), GFP_KERNEL);
	if (!prv_data) {
		pr_err("%s: not enough memory\n", __func__);
		mutex_unlock(&mem_open_lock);
		return -ENOMEM;
	}
	prv_data->s_slot_info = s3c_slot_info;
	prv_data->dev_slot_size = s3c_cma_block_size;
	prv_data->dev_max_slot_num = s3c_cma_max_block_num;
	prv_data->s_cur_mem_info.paddr = 0;
	prv_data->s_cur_mem_info.vaddr = 0;
	prv_data->s_cur_mem_info.mapped_size = 0;
	prv_data->s_cur_mem_info.req_memblock = 0;
	filp->private_data = prv_data;

	mutex_unlock(&mem_open_lock);
#endif
	return 0;
}
/*
 * release() handler: if the open still owns a CMA mapping, free its
 * slots and unmap the userspace range, then drop the per-open
 * bookkeeping.  Returns 0 on success, -EINVAL if do_munmap() fails.
 *
 * NOTE(review): reconstructed — braces, error return and the
 * CONFIG_S3C_MEM_CMA_ALLOC guard were lost in this extract.
 */
int s3c_mem_release(struct inode *inode, struct file *filp)
{
#ifdef CONFIG_S3C_MEM_CMA_ALLOC
	struct mm_struct *mm = current->mm;
	struct s3c_dev_info *prv_data =
	    (struct s3c_dev_info *)filp->private_data;

	mutex_lock(&mem_release_lock);

	DEBUG("prv_data->s_cur_mem_info->paddr=0x%x vaddr=0x%x size=%d \n",
	      prv_data->s_cur_mem_info.paddr, prv_data->s_cur_mem_info.vaddr,
	      prv_data->s_cur_mem_info.mapped_size);

	if (prv_data->s_cur_mem_info.vaddr) {
		s3c_unmapping_slot(prv_data);
		if (do_munmap
		    (mm, prv_data->s_cur_mem_info.vaddr,
		     prv_data->s_cur_mem_info.mapped_size) < 0) {
			printk(KERN_ERR "do_munmap() failed !!\n");
			mutex_unlock(&mem_release_lock);
			return -EINVAL;
		}
	}
	kfree(filp->private_data);
	filp->private_data = NULL;
	mutex_unlock(&mem_release_lock);
#endif
	return 0;
}
199 long s3c_mem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
202 unsigned long virt_addr;
204 unsigned long *virt_addr;
207 struct mm_struct *mm = current->mm;
208 struct s3c_mem_alloc param;
209 struct vm_area_struct *vma;
210 unsigned long start, this_pfn;
211 #ifdef CONFIG_S3C_DMA_MEM
212 struct s3c_mem_dma_param dma_param;
217 mutex_lock(&mem_alloc_lock);
218 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
219 sizeof(struct s3c_mem_alloc))) {
220 mutex_unlock(&mem_alloc_lock);
224 param.vir_addr = do_mmap(file, 0, param.size,
225 (PROT_READ|PROT_WRITE), MAP_SHARED, 0);
226 DEBUG("param.vir_addr = %08x, %d\n",
227 param.vir_addr, __LINE__);
228 if (param.vir_addr == -EINVAL) {
229 printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
231 mutex_unlock(&mem_alloc_lock);
234 param.phy_addr = physical_address;
236 param.kvir_addr = virtual_address;
239 DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t "
240 "size = %d \t param.vir_addr = 0x%X, %d\n",
241 param.phy_addr, param.size, param.vir_addr,
244 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
245 sizeof(struct s3c_mem_alloc))) {
247 mutex_unlock(&mem_alloc_lock);
251 mutex_unlock(&mem_alloc_lock);
254 #ifdef CONFIG_S3C_MEM_CMA_ALLOC
255 case S3C_MEM_CMA_ALLOC:
257 struct s3c_dev_info *prv_data =
258 (struct s3c_dev_info *)file->private_data;
260 mutex_lock(&mem_alloc_lock);
261 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
262 sizeof(struct s3c_mem_alloc))) {
263 mutex_unlock(&mem_alloc_lock);
266 flag = MEM_CMA_ALLOC;
269 do_mmap(file, 0, param.size,
270 (PROT_READ | PROT_WRITE), MAP_SHARED, 0);
271 DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr,
274 if (param.vir_addr == -EINVAL) {
275 printk(KERN_ERR "S3C_MEM_ALLOC FAILED\n");
277 mutex_unlock(&mem_alloc_lock);
281 param.phy_addr = physical_address;
282 printk(KERN_INFO "physical_address=0x%x \n",
285 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
286 sizeof(struct s3c_mem_alloc))) {
288 mutex_unlock(&mem_alloc_lock);
293 prv_data->s_cur_mem_info.paddr = param.phy_addr;
294 prv_data->s_cur_mem_info.vaddr = param.vir_addr;
295 prv_data->s_cur_mem_info.mapped_size =
296 PAGE_ALIGN(param.size);
298 mutex_unlock(&mem_alloc_lock);
303 case S3C_MEM_CACHEABLE_ALLOC:
304 mutex_lock(&mem_cacheable_alloc_lock);
305 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
306 sizeof(struct s3c_mem_alloc))) {
307 mutex_unlock(&mem_cacheable_alloc_lock);
310 flag = MEM_ALLOC_CACHEABLE;
311 param.vir_addr = do_mmap(file, 0, param.size,
312 (PROT_READ|PROT_WRITE), MAP_SHARED, 0);
313 DEBUG("param.vir_addr = %08x, %d\n",
314 param.vir_addr, __LINE__);
315 if (param.vir_addr == -EINVAL) {
316 printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
318 mutex_unlock(&mem_cacheable_alloc_lock);
321 param.phy_addr = physical_address;
322 DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X"
323 " \t size = %d \t param.vir_addr = 0x%X, %d\n",
324 param.phy_addr, param.size, param.vir_addr,
327 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
328 sizeof(struct s3c_mem_alloc))) {
330 mutex_unlock(&mem_cacheable_alloc_lock);
334 mutex_unlock(&mem_cacheable_alloc_lock);
338 case S3C_MEM_SHARE_ALLOC:
339 mutex_lock(&mem_share_alloc_lock);
340 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
341 sizeof(struct s3c_mem_alloc))) {
342 mutex_unlock(&mem_share_alloc_lock);
345 flag = MEM_ALLOC_SHARE;
346 physical_address = param.phy_addr;
347 DEBUG("param.phy_addr = %08x, %d\n",
348 physical_address, __LINE__);
349 param.vir_addr = do_mmap(file, 0, param.size,
350 (PROT_READ|PROT_WRITE), MAP_SHARED, 0);
351 DEBUG("param.vir_addr = %08x, %d\n",
352 param.vir_addr, __LINE__);
353 if (param.vir_addr == -EINVAL) {
354 printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
356 mutex_unlock(&mem_share_alloc_lock);
359 DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
360 "size = %d \t param.vir_addr = 0x%X, %d\n",
361 param.phy_addr, param.size, param.vir_addr,
364 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
365 sizeof(struct s3c_mem_alloc))) {
367 mutex_unlock(&mem_share_alloc_lock);
371 mutex_unlock(&mem_share_alloc_lock);
375 case S3C_MEM_CACHEABLE_SHARE_ALLOC:
376 mutex_lock(&mem_cacheable_share_alloc_lock);
377 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
378 sizeof(struct s3c_mem_alloc))) {
379 mutex_unlock(&mem_cacheable_share_alloc_lock);
382 flag = MEM_ALLOC_CACHEABLE_SHARE;
383 physical_address = param.phy_addr;
384 DEBUG("param.phy_addr = %08x, %d\n",
385 physical_address, __LINE__);
386 param.vir_addr = do_mmap(file, 0, param.size,
387 (PROT_READ|PROT_WRITE), MAP_SHARED, 0);
388 DEBUG("param.vir_addr = %08x, %d\n",
389 param.vir_addr, __LINE__);
390 if (param.vir_addr == -EINVAL) {
391 printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
393 mutex_unlock(&mem_cacheable_share_alloc_lock);
396 DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
397 "size = %d \t param.vir_addr = 0x%X, %d\n",
398 param.phy_addr, param.size, param.vir_addr,
401 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
402 sizeof(struct s3c_mem_alloc))) {
404 mutex_unlock(&mem_cacheable_share_alloc_lock);
408 mutex_unlock(&mem_cacheable_share_alloc_lock);
413 mutex_lock(&mem_free_lock);
414 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
415 sizeof(struct s3c_mem_alloc))) {
416 mutex_unlock(&mem_free_lock);
420 DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t "
421 "size = %d \t param.vir_addr = 0x%X, %d\n",
422 param.phy_addr, param.size, param.vir_addr,
425 if (do_munmap(mm, param.vir_addr, param.size) < 0) {
426 printk(KERN_INFO "do_munmap() failed !!\n");
427 mutex_unlock(&mem_free_lock);
432 virt_addr = param.kvir_addr;
433 dma_free_writecombine(NULL, param.size,
434 (unsigned int *) virt_addr, param.phy_addr);
436 virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);
440 DEBUG("do_munmap() succeed !!\n");
442 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
443 sizeof(struct s3c_mem_alloc))) {
444 mutex_unlock(&mem_free_lock);
448 mutex_unlock(&mem_free_lock);
451 #ifdef CONFIG_S3C_MEM_CMA_ALLOC
452 case S3C_MEM_CMA_FREE:
454 struct s3c_dev_info *prv_data =
455 (struct s3c_dev_info *)file->private_data;
457 mutex_lock(&mem_free_lock);
458 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
459 sizeof(struct s3c_mem_alloc))) {
460 mutex_unlock(&mem_free_lock);
464 DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t "
465 "size = %d \t param.vir_addr = 0x%X, %d\n",
466 param.phy_addr, param.size, param.vir_addr,
470 ("FREE : pa = 0x%x size = %d va = 0x%x\n",
471 param.phy_addr, param.size, param.vir_addr);
472 if (param.vir_addr) {
473 s3c_unmapping_slot(prv_data);
475 if (do_munmap(mm, param.vir_addr, param.size) <
478 "do_munmap() failed !!\n");
479 mutex_unlock(&mem_free_lock);
483 prv_data->s_cur_mem_info.paddr = 0;
484 prv_data->s_cur_mem_info.vaddr = 0;
485 prv_data->s_cur_mem_info.mapped_size = 0;
486 prv_data->s_cur_mem_info.req_memblock = 0;
487 DEBUG("do_munmap() succeed !!\n");
490 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
491 sizeof(struct s3c_mem_alloc))) {
492 mutex_unlock(&mem_free_lock);
496 mutex_unlock(&mem_free_lock);
500 case S3C_MEM_SHARE_FREE:
501 mutex_lock(&mem_share_free_lock);
502 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
503 sizeof(struct s3c_mem_alloc))) {
504 mutex_unlock(&mem_share_free_lock);
507 DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t "
508 "size = %d \t param.vir_addr = 0x%X, %d\n",
509 param.phy_addr, param.size, param.vir_addr,
512 if (do_munmap(mm, param.vir_addr, param.size) < 0) {
513 printk(KERN_INFO "do_munmap() failed - MEM_SHARE_FREE!!\n");
514 mutex_unlock(&mem_share_free_lock);
519 DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");
521 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
522 sizeof(struct s3c_mem_alloc))) {
523 mutex_unlock(&mem_share_free_lock);
527 mutex_unlock(&mem_share_free_lock);
531 #ifdef CONFIG_S3C_DMA_MEM
532 case S3C_MEM_DMA_COPY:
533 if (copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg,
534 sizeof(struct s3c_mem_dma_param))) {
537 if (s3c_dma_mem_start(current->mm, &dma_param,
541 if (copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param,
542 sizeof(struct s3c_mem_dma_param))) {
548 case S3C_MEM_GET_PADDR:
549 if (copy_from_user(¶m, (struct s3c_mem_alloc *)arg,
550 sizeof(struct s3c_mem_alloc))) {
553 start = param.vir_addr;
554 down_read(&mm->mmap_sem);
555 vma = find_vma(mm, start);
558 up_read(&mm->mmap_sem);
562 if (follow_pfn(vma, start, &this_pfn)) {
563 up_read(&mm->mmap_sem);
567 param.phy_addr = this_pfn << PAGE_SHIFT;
568 up_read(&mm->mmap_sem);
570 if (copy_to_user((struct s3c_mem_alloc *)arg, ¶m,
571 sizeof(struct s3c_mem_alloc))) {
577 DEBUG("s3c_mem_ioctl() : default !!\n");
583 EXPORT_SYMBOL(s3c_mem_ioctl);
585 int s3c_mem_mmap(struct file *filp, struct vm_area_struct *vma)
587 unsigned long pageFrameNo = 0, size, phys_addr;
590 unsigned long virt_addr;
592 unsigned long *virt_addr;
595 size = vma->vm_end - vma->vm_start;
599 case MEM_ALLOC_CACHEABLE:
602 virt_addr = (unsigned long)dma_alloc_writecombine(NULL, size,
603 (unsigned int *) &phys_addr,
606 virt_addr = kmalloc(size, GFP_DMA | GFP_ATOMIC);
609 printk(KERN_INFO "kmalloc() failed !\n");
612 DEBUG("MMAP_KMALLOC : virt addr = 0x%08x, size = %d, %d\n",
613 virt_addr, size, __LINE__);
615 #ifndef USE_DMA_ALLOC
616 dmac_map_area(virt_addr, size / sizeof(unsigned long), 2);
617 phys_addr = virt_to_phys((unsigned long *)virt_addr);
619 physical_address = (unsigned int)phys_addr;
622 virtual_address = virt_addr;
624 pageFrameNo = __phys_to_pfn(phys_addr);
626 #ifdef CONFIG_S3C_MEM_CMA_ALLOC
629 struct s3c_dev_info *prv_data =
630 (struct s3c_dev_info *)filp->private_data;
631 prv_data->s_cur_mem_info.req_memblock =
632 PAGE_ALIGN(size) / prv_data->dev_slot_size;
634 if (PAGE_ALIGN(size) % prv_data->dev_slot_size)
635 prv_data->s_cur_mem_info.req_memblock++;
637 printk(KERN_INFO "required slot=%d size=%lu \n",
638 prv_data->s_cur_mem_info.req_memblock, size);
641 pgprot_writecombine(vma->vm_page_prot);
643 pageFrameNo = s3c_mapping_slot(prv_data);
645 printk(KERN_ERR "mapping failed !\n");
652 case MEM_ALLOC_SHARE:
653 case MEM_ALLOC_CACHEABLE_SHARE:
654 DEBUG("MMAP_KMALLOC_SHARE : phys addr = 0x%08x, %d\n",
655 physical_address, __LINE__);
657 /* page frame number of the address for the physical_address to be shared. */
658 pageFrameNo = __phys_to_pfn(physical_address);
659 DEBUG("MMAP_KMALLOC_SHARE : vma->end = 0x%08x, "
660 "vma->start = 0x%08x, size = %d, %d\n",
661 vma->vm_end, vma->vm_start, size, __LINE__);
668 if ((flag == MEM_ALLOC) || (flag == MEM_ALLOC_SHARE))
669 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
671 vma->vm_flags |= VM_RESERVED;
673 if (remap_pfn_range(vma, vma->vm_start, pageFrameNo,
674 size, vma->vm_page_prot)) {
675 printk(KERN_INFO "s3c_mem_mmap() : remap_pfn_range() failed !\n");
681 EXPORT_SYMBOL(s3c_mem_mmap);