/*
 * linux/drivers/media/video/s5p-mfc/s5p_mfc_mem.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
13 #include <linux/dma-mapping.h>
14 #include <asm/cacheflush.h>
16 #include "s5p_mfc_common.h"
17 #include "s5p_mfc_mem.h"
18 #include "s5p_mfc_pm.h"
19 #include "s5p_mfc_debug.h"
21 #if defined(CONFIG_S5P_MFC_VB2_CMA)
/* Memory-type tag for each CMA allocation context
 * (initializer list not visible in this view — see full file). */
static const char *s5p_mem_types[] = {
/* Buffer alignment for each CMA allocation context
 * (initializer list not visible in this view — see full file). */
static unsigned long s5p_mem_alignments[] = {
34 struct vb2_mem_ops *s5p_mfc_mem_ops(void)
36 return (struct vb2_mem_ops *)&vb2_cma_memops;
39 void **s5p_mfc_mem_init_multi(struct device *dev)
41 return (void **)vb2_cma_init_multi(dev, MFC_CMA_ALLOC_CTX_NUM,
42 s5p_mem_types, s5p_mem_alignments);
/* Tear down the allocator contexts created by s5p_mfc_mem_init_multi(). */
void s5p_mfc_mem_cleanup_multi(void **alloc_ctxes)
{
	vb2_cma_cleanup_multi((struct vb2_alloc_ctx **)alloc_ctxes);
}
49 #elif defined(CONFIG_S5P_MFC_VB2_DMA_POOL)
/* Base-address alignment for each DMA-pool allocation context
 * (initializer list not visible in this view — see full file). */
static unsigned long s5p_mem_base_align[] = {
54 static unsigned long s5p_mem_bank_align[] = {
55 MFC_BANK_A_ALIGN_ORDER,
56 MFC_BANK_B_ALIGN_ORDER,
/* Pool size for each DMA-pool allocation context
 * (initializer list not visible in this view — see full file). */
static unsigned long s5p_mem_sizes[] = {
64 struct vb2_mem_ops *s5p_mfc_mem_ops(void)
66 return (struct vb2_mem_ops *)&vb2_dma_pool_memops;
/*
 * Create one vb2 DMA-pool allocator context per MFC allocation context.
 * NOTE(review): the trailing arguments of this call (presumably the
 * alignment/size tables defined above) are on lines missing from this
 * view — confirm against the full file.
 */
void **s5p_mfc_mem_init_multi(struct device *dev)
	return (void **)vb2_dma_pool_init_multi(dev, MFC_ALLOC_CTX_NUM,
77 void s5p_mfc_mem_cleanup_multi(void **alloc_ctxes)
79 vb2_dma_pool_cleanup_multi(alloc_ctxes, MFC_ALLOC_CTX_NUM);
81 #elif defined(CONFIG_S5P_MFC_VB2_SDVMM)
82 struct vb2_mem_ops *s5p_mfc_mem_ops(void)
84 return (struct vb2_mem_ops *)&vb2_sdvmm_memops;
/*
 * Set up SDVMM-backed vb2 allocator contexts for the MFC device.
 * NOTE(review): this view is missing the opening brace, the declarations
 * of 'alloc_ctxes' and 'vcm', further 'vcm'/'vb2_drv' field assignments,
 * the tail of the vb2_sdvmm_init_multi() call, and the return statement
 * — review against the full file before changing anything here.
 */
void **s5p_mfc_mem_init_multi(struct device *dev)
	struct vb2_drv vb2_drv;

	/* Identify this client to the VCM (virtual memory manager). */
	vcm.vcm_id = VCM_DEV_MFC;
	/* FIXME: check port count */

	/* Device addresses are remapped; buffers are not CPU-cacheable. */
	vb2_drv.remap_dva = true;
	vb2_drv.cacheable = false;

	alloc_ctxes = (void **)vb2_sdvmm_init_multi(MFC_ALLOC_CTX_NUM, &vcm,
/* Release the SDVMM allocator contexts created at init time. */
void s5p_mfc_mem_cleanup_multi(void **alloc_ctxes)
{
	vb2_sdvmm_cleanup_multi(alloc_ctxes);
}
114 #if defined(CONFIG_S5P_MFC_VB2_SDVMM)
/*
 * s5p_mfc_cache_clean - write back CPU caches over a vmalloc-mapped
 * buffer so the MFC hardware reads up-to-date data (DMA_TO_DEVICE).
 *
 * NOTE(review): several lines are missing from this view — the opening
 * brace, a declaration of 'paddr', the initialization of 'remain', loop
 * closing braces, and what appears to be an #if/#else selecting between
 * the two variants below. Confirm against the full file.
 */
void s5p_mfc_cache_clean(const void *start_addr, unsigned long size)
	/* Variant 1: clean the range one whole page at a time. */
	void *cur_addr, *end_addr;

	/* Write back the inner (L1) cache for the whole virtual range. */
	dmac_map_area(start_addr, size, DMA_TO_DEVICE);

	/* vmalloc memory is not physically contiguous, so the outer (L2)
	 * cache must be cleaned page by page via a physical lookup. */
	cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
	end_addr = cur_addr + PAGE_ALIGN(size);

	while (cur_addr < end_addr) {
		paddr = page_to_pfn(vmalloc_to_page(cur_addr));
		paddr <<= PAGE_SHIFT;
		/* Clean the outer cache for this one page. */
		outer_clean_range(paddr, paddr + PAGE_SIZE);
		cur_addr += PAGE_SIZE;

	/* FIXME: L2 operation optimization */
	/* Variant 2 (presumably the other preprocessor branch): clean a
	 * leading partial page, then whole pages, then the tail. */
	unsigned long start, end, unitsize;
	unsigned long cur_addr, remain;

	dmac_map_area(start_addr, size, DMA_TO_DEVICE);

	cur_addr = (unsigned long)start_addr;

	start = page_to_pfn(vmalloc_to_page(cur_addr));
	start <<= PAGE_SHIFT;
	/* NOTE(review): 'start' is page-aligned at this point, so
	 * 'start & PAGE_MASK' does not look like a partial-page test —
	 * an in-page offset may be added on a missing line, or the mask
	 * may be inverted in the full file; verify before relying on it. */
	if (start & PAGE_MASK) {
		unitsize = min((start | PAGE_MASK) - start + 1, remain);
		end = start + unitsize;
		outer_clean_range(start, end);

		cur_addr += unitsize;

	/* Clean every remaining whole page. */
	while (remain >= PAGE_SIZE) {
		start = page_to_pfn(vmalloc_to_page(cur_addr));
		start <<= PAGE_SHIFT;
		end = start + PAGE_SIZE;
		outer_clean_range(start, end);

		cur_addr += PAGE_SIZE;

	/* Clean the trailing partial page, if any bytes remain. */
	start = page_to_pfn(vmalloc_to_page(cur_addr));
	start <<= PAGE_SHIFT;
	end = start + remain;
	outer_clean_range(start, end);
/*
 * s5p_mfc_cache_inv - invalidate CPU caches over a vmalloc-mapped buffer
 * after the MFC hardware has written it (DMA_FROM_DEVICE), so the CPU
 * does not read stale cached data. Outer (L2) cache is invalidated
 * first, then the inner (L1) via dmac_unmap_area — the required order
 * for invalidation.
 *
 * NOTE(review): mirror of s5p_mfc_cache_clean(); the same lines are
 * missing from this view (opening brace, 'paddr'/'remain' setup, loop
 * closing braces, and an apparent #if/#else between the two variants).
 */
void s5p_mfc_cache_inv(const void *start_addr, unsigned long size)
	/* Variant 1: invalidate the range one whole page at a time. */
	void *cur_addr, *end_addr;

	cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
	end_addr = cur_addr + PAGE_ALIGN(size);

	while (cur_addr < end_addr) {
		paddr = page_to_pfn(vmalloc_to_page(cur_addr));
		paddr <<= PAGE_SHIFT;
		/* Invalidate the outer cache for this one page. */
		outer_inv_range(paddr, paddr + PAGE_SIZE);
		cur_addr += PAGE_SIZE;

	/* Inner cache invalidation for the whole virtual range. */
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);

	/* FIXME: L2 operation optimization */
	/* Variant 2: invalidate a leading partial page, then whole pages,
	 * then the tail. */
	unsigned long start, end, unitsize;
	unsigned long cur_addr, remain;

	cur_addr = (unsigned long)start_addr;

	start = page_to_pfn(vmalloc_to_page(cur_addr));
	start <<= PAGE_SHIFT;
	/* NOTE(review): same concern as in s5p_mfc_cache_clean() —
	 * 'start' is page-aligned here, so 'start & PAGE_MASK' does not
	 * obviously test for a partial page; verify in the full file. */
	if (start & PAGE_MASK) {
		unitsize = min((start | PAGE_MASK) - start + 1, remain);
		end = start + unitsize;
		outer_inv_range(start, end);

		cur_addr += unitsize;

	/* Invalidate every remaining whole page. */
	while (remain >= PAGE_SIZE) {
		start = page_to_pfn(vmalloc_to_page(cur_addr));
		start <<= PAGE_SHIFT;
		end = start + PAGE_SIZE;
		outer_inv_range(start, end);

		cur_addr += PAGE_SIZE;

	/* Invalidate the trailing partial page, if any bytes remain. */
	start = page_to_pfn(vmalloc_to_page(cur_addr));
	start <<= PAGE_SHIFT;
	end = start + remain;
	outer_inv_range(start, end);

	/* Inner cache invalidation last. */
	dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
/*
 * Power-management suspend hook: let the SDVMM allocator save its state.
 * NOTE(review): one or more lines between the signature and this call
 * are missing from this view — confirm nothing else happens here.
 */
void s5p_mfc_mem_suspend(void *alloc_ctx)
	vb2_sdvmm_suspend(alloc_ctx);
/*
 * Power-management resume hook: let the SDVMM allocator restore its
 * state. NOTE(review): lines between the signature and this call are
 * missing from this view — confirm nothing else happens here.
 */
void s5p_mfc_mem_resume(void *alloc_ctx)
	vb2_sdvmm_resume(alloc_ctx);
242 void s5p_mfc_cache_clean(const void *start_addr, unsigned long size)
246 dmac_map_area(start_addr, size, DMA_TO_DEVICE);
247 paddr = __pa((unsigned long)start_addr);
248 outer_clean_range(paddr, paddr + size);
251 void s5p_mfc_cache_inv(const void *start_addr, unsigned long size)
255 paddr = __pa((unsigned long)start_addr);
256 outer_inv_range(paddr, paddr + size);
257 dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
/* Suspend hook for the non-SDVMM configurations
 * (body not visible in this view — presumably a no-op; confirm). */
void s5p_mfc_mem_suspend(void *alloc_ctx)
/* Resume hook for the non-SDVMM configurations
 * (body not visible in this view — presumably a no-op; confirm). */
void s5p_mfc_mem_resume(void *alloc_ctx)