/*
 * linux device driver for VPU.
 *
 * Copyright (C) 2006 - 2013 CHIPS&MEDIA INC.
 *
 * This library is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2.1 of the License, or (at your option)
 * any later version.
 *
 * This library is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
 * details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
\r
25 #include <linux/kernel.h>
\r
26 #include <linux/mm.h>
\r
27 #include <linux/interrupt.h>
\r
28 #include <linux/ioport.h>
\r
29 #include <linux/module.h>
\r
30 #include <linux/platform_device.h>
\r
31 #include <linux/dma-mapping.h>
\r
32 #include <linux/wait.h>
\r
33 #include <linux/list.h>
\r
34 #include <linux/clk.h>
\r
35 #include <linux/delay.h>
\r
36 #include <linux/uaccess.h>
\r
37 #include <linux/cdev.h>
\r
38 #include <linux/slab.h>
\r
39 #include <linux/sched.h>
\r
40 #include <linux/miscdevice.h>
\r
41 #include <linux/of_device.h>
\r
42 #include <linux/of_address.h>
\r
43 #include <linux/of_irq.h>
\r
44 //#include <mach/hardware.h>
\r
45 //#include <mach/sci_glb_regs.h>
\r
46 #include <linux/sprd_iommu.h>
\r
48 #include <soc/sprd/sci.h>
\r
49 #include <soc/sprd/sci_glb_regs.h>
\r
51 #include "vpuconfig.h"
\r
53 #include "TShark2_CODEC_AHB_Control_Register.h"
\r
55 #define LOG_TAG "CNM_VPU_DRV"
\r
58 /* definitions to be changed as customer configuration */
\r
59 /* if you want to have clock gating scheme frame by frame */
\r
60 /* #define VPU_SUPPORT_CLOCK_CONTROL */
\r
62 /* if the driver want to use interrupt service from kernel ISR */
\r
63 #define VPU_SUPPORT_ISR
\r
64 #ifdef VPU_SUPPORT_ISR
\r
65 /* if the driver want to disable and enable IRQ whenever interrupt asserted. */
\r
66 //#define VPU_IRQ_CONTROL
\r
69 /* if the platform driver knows the name of this driver */
\r
70 /* VPU_PLATFORM_DEVICE_NAME */
\r
72 #define VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
\r
74 /* if this driver knows the dedicated video memory address */
\r
75 //#define VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
77 #define VPU_PLATFORM_DEVICE_NAME "vdec"
\r
78 #define VPU_CLK_NAME "vcodec"
\r
79 #define VPU_DEV_NAME "vpu"
\r
81 /* if the platform driver knows this driver */
\r
82 /* the definition of VPU_REG_BASE_ADDR and VPU_REG_SIZE are not meaningful */
\r
84 //Set register 0x3000_0110(phy_addr)
\r
85 #define VPU_AXI_CLK_ADDR (0x62000004)
\r
86 #define VPU_AXI_CLK_ENABLE 0x0A04060A
\r
87 #define VPU_AXI_CLK_DISABLE 0x0A04040A
\r
89 #define VPU_REG_BASE_ADDR 0x62100000
\r
90 #define VPU_REG_SIZE (0x4000*MAX_NUM_VPU_CORE)
\r
92 #ifdef VPU_SUPPORT_ISR
\r
93 #define VPU_IRQ_NUM (23+32)
\r
96 /* this definition is only for chipsnmedia FPGA board env */
\r
97 /* so for SOC env of customers can be ignored */
\r
100 #ifndef VM_RESERVED /*for kernel up to 3.7.0 version*/
\r
101 # define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
\r
104 #define VPU_MINOR MISC_DYNAMIC_MINOR
\r
107 typedef struct vpu_drv_context_t {
\r
108 struct fasync_struct *async_queue;
\r
109 u32 open_count; /*!<< device reference count. Not instance count */
\r
111 unsigned int freq_div;
\r
113 struct semaphore deint_mutex;
\r
115 struct clk *clk_coda7_axi;
\r
116 struct clk *clk_coda7_cc;
\r
117 struct clk *clk_coda7_apb;
\r
119 struct clk *clk_parent_axi;
\r
120 struct clk *clk_parent_cc;
\r
121 struct clk *clk_parent_apb;
\r
123 struct clk *clk_parent;
\r
124 struct clk *clk_mm_i;
\r
128 //struct deint_fh *deint_fp;
\r
129 struct device_node *dev_np;
\r
130 } vpu_drv_context_t;
\r
132 /* To track the allocated memory buffer */
\r
133 typedef struct vpudrv_buffer_pool_t {
\r
134 struct list_head list;
\r
135 struct vpudrv_buffer_t vb;
\r
137 } vpudrv_buffer_pool_t;
\r
139 /* To track the instance index and buffer in instance pool */
\r
140 typedef struct vpudrv_instanace_list_t {
\r
141 struct list_head list;
\r
142 unsigned long inst_idx;
\r
143 unsigned long core_idx;
\r
145 } vpudrv_instanace_list_t;
\r
147 #ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
149 #define VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE (62*1024*1024)
\r
150 #define VPU_DRAM_PHYSICAL_BASE 0x86C00000
\r
153 static video_mm_t s_vmem;
\r
154 static vpudrv_buffer_t s_video_memory = {0};
\r
156 #endif /*VPU_SUPPORT_RESERVED_VIDEO_MEMORY*/
\r
158 typedef struct vpudrv_instance_pool_t {
\r
159 unsigned char codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE];
\r
160 int vpu_instance_num;
\r
161 } vpudrv_instance_pool_t;
\r
163 static int vpu_hw_reset(void);
\r
165 static void vpu_clk_disable(struct clk *clk);
\r
166 static int vpu_clk_enable(struct clk *clk);
\r
167 static struct clk *vpu_clk_get(struct device *dev);
\r
168 static void vpu_clk_put(struct clk *clk);
\r
171 /* end customer definition */
\r
172 static vpudrv_buffer_t s_instance_pool = {0};
\r
173 static vpudrv_buffer_t s_common_memory = {0};
\r
174 static vpu_drv_context_t s_vpu_drv_context;
\r
175 //static int s_vpu_major;
\r
176 //static struct cdev s_vpu_cdev;
\r
178 static struct clk *s_vpu_clk;
\r
180 static int s_vpu_open_ref_count;
\r
182 #ifdef VPU_SUPPORT_ISR
\r
183 static int s_vpu_irq = VPU_IRQ_NUM;
\r
186 static unsigned long s_vpu_reg_phy_addr = VPU_REG_BASE_ADDR;
\r
188 static void __iomem *s_vpu_reg_virt_addr;
\r
190 static int s_interrupt_flag;
\r
191 static wait_queue_head_t s_interrupt_wait_q;
\r
193 static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock);
\r
194 static DEFINE_SEMAPHORE(s_vpu_sem);
\r
195 static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head);
\r
196 static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);
\r
199 static vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE];
\r
200 static struct vpu_dev vpu_hw_dev;
\r
203 #define BIT_BASE 0x0000
\r
204 #define BIT_CODE_RUN (BIT_BASE + 0x000)
\r
205 #define BIT_CODE_DOWN (BIT_BASE + 0x004)
\r
206 #define BIT_INT_CLEAR (BIT_BASE + 0x00C)
\r
207 #define BIT_INT_STS (BIT_BASE + 0x010)
\r
208 #define BIT_CODE_RESET (BIT_BASE + 0x014)
\r
209 #define BIT_BUSY_FLAG (BIT_BASE + 0x160)
\r
210 #define BIT_RUN_COMMAND (BIT_BASE + 0x164)
\r
211 #define BIT_RUN_INDEX (BIT_BASE + 0x168)
\r
212 #define BIT_RUN_COD_STD (BIT_BASE + 0x16C)
\r
213 #define BIT_CUR_PC (BIT_BASE + 0x018)
\r
214 #define BIT_INT_REASON (BIT_BASE + 0x174)
\r
215 #define VPU_PRODUCT_CODE_REGISTER (BIT_BASE + 0x1044)
\r
217 #if defined(CONFIG_ARCH_SCX35LT8)
\r
218 #define REG_PMU_APB_CODEC_CFG REG_PMU_APB_PD_MM_CODEC_CFG
\r
219 #define AUTO_SHUTDOWN_EN BIT_PD_MM_CODEC_AUTO_SHUTDOWN_EN
\r
220 #define FORCE_SHUTDOWN BIT_PD_MM_CODEC_FORCE_SHUTDOWN
\r
222 #define REG_PMU_APB_CODEC_CFG REG_PMU_APB_PD_CODEC_TOP_CFG
\r
223 #define AUTO_SHUTDOWN_EN BIT_PD_CODEC_TOP_AUTO_SHUTDOWN_EN
\r
224 #define FORCE_SHUTDOWN BIT_PD_CODEC_TOP_FORCE_SHUTDOWN
\r
228 /* implement to power management functions */
\r
229 static u32 s_vpu_reg_store[MAX_NUM_VPU_CORE][64];
\r
230 static u32 s_run_index;
\r
231 static u32 s_run_codstd;
\r
234 static int vpu_resume(struct platform_device *pdev);
\r
235 static int vpu_suspend(struct platform_device *pdev, pm_message_t state);
\r
236 static int vpu_set_mm_clk(void);
\r
237 static int vpu_set_clk_by_register(void);
\r
238 static int vpu_clk_free(vpu_drv_context_t* vpu_context);
\r
239 static int vpu_power_on();
\r
240 static int vpu_power_shutdown();
\r
242 #define ReadVpuRegister(addr) *(volatile unsigned int *)(s_vpu_reg_virt_addr + s_bit_firmware_info[core].reg_base_offset + addr)
\r
243 #define WriteVpuRegister(addr, val) *(volatile unsigned int *)(s_vpu_reg_virt_addr + s_bit_firmware_info[core].reg_base_offset + addr) = (unsigned int)val
\r
244 #define WriteVpu(addr, val) *(volatile unsigned int *)(addr) = (unsigned int)val
\r
246 #define DEFAULT_FREQ_DIV 0x0
\r
/* Maps a clock frequency (Hz) to the clk-framework source name for it. */
struct clock_name_map_t {
	unsigned long freq;	/* frequency in Hz */
	char *name;		/* clock source name, e.g. "clk_192m" */
};
\r
253 static struct clock_name_map_t clock_coda7l_axi_map[] = {
\r
254 {192000000,"clk_192m"},
\r
255 {153600000,"clk_153m6"},
\r
256 {128000000,"clk_128m"},
\r
257 {76800000,"clk_76m8"}
\r
260 static struct clock_name_map_t clock_coda7l_cc_map[] = {
\r
261 {192000000,"clk_192m"},
\r
262 {153600000,"clk_153m6"},
\r
263 {128000000,"clk_128m"},
\r
264 {76800000,"clk_76m8"}
\r
267 static struct clock_name_map_t clock_coda7l_apb_map[] = {
\r
268 {128000000,"clk_128m"},
\r
269 {96000000,"clk_96m"},
\r
270 {76800000,"clk_76m8"},
\r
271 {26000000,"ext_26m"}
\r
274 static int max_freq_level = ARRAY_SIZE(clock_coda7l_axi_map);
\r
276 static char *vpu_get_clk_src_name(unsigned int freq_level, struct clock_name_map_t clk_map[])
\r
278 if (freq_level >= max_freq_level ) {
\r
279 printk(KERN_INFO "set freq_level to 0");
\r
283 return clk_map[freq_level].name;
\r
286 static int find_vpu_freq_level(unsigned long freq, struct clock_name_map_t clk_map[])
\r
290 for (i = 0; i < max_freq_level; i++) {
\r
291 if (clk_map[i].freq == freq) {
\r
299 static int vpu_power_on()
\r
301 __raw_writel(AUTO_SHUTDOWN_EN | __raw_readl(REG_PMU_APB_CODEC_CFG), REG_PMU_APB_CODEC_CFG);
\r
302 __raw_writel((~FORCE_SHUTDOWN) & __raw_readl(REG_PMU_APB_CODEC_CFG), REG_PMU_APB_CODEC_CFG);
\r
306 static int vpu_power_shutdown()
\r
308 __raw_writel(FORCE_SHUTDOWN | __raw_readl(REG_PMU_APB_CODEC_CFG), REG_PMU_APB_CODEC_CFG);
\r
309 __raw_writel((~AUTO_SHUTDOWN_EN) & __raw_readl(REG_PMU_APB_CODEC_CFG), REG_PMU_APB_CODEC_CFG);
\r
313 static int vpu_alloc_dma_buffer(vpudrv_buffer_t *vb)
\r
318 #ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
319 vb->phys_addr = (unsigned long)vmem_alloc(&s_vmem, vb->size, 0);
\r
320 if ((unsigned long)vb->phys_addr == (unsigned long)-1) {
\r
321 vpu_loge("reserved Physical memory allocation error size=%d, base_addr=0x%x, mem_size=%d\n", vb->size, (int)s_vmem.base_addr, (int)s_vmem.mem_size);
\r
325 vb->base = (unsigned long)(s_video_memory.base + (vb->phys_addr - s_video_memory.phys_addr));
\r
327 vb->base = (unsigned long)dma_alloc_coherent(NULL, PAGE_ALIGN(vb->size), (dma_addr_t *) (&vb->phys_addr), GFP_DMA | GFP_KERNEL);
\r
328 if ((void *)(vb->base) == NULL) {
\r
329 vpu_loge("dynamic Physical memory allocation error size=%d\n", vb->size);
\r
338 static void vpu_free_dma_buffer(vpudrv_buffer_t *vb)
\r
343 #ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
345 vmem_free(&s_vmem, vb->phys_addr, 0);
\r
348 dma_free_coherent(0, PAGE_ALIGN(vb->size), (void *)vb->base, vb->phys_addr);
\r
353 static int vpu_free_instances(struct file *filp)
\r
355 vpudrv_instanace_list_t *vil, *n;
\r
356 vpudrv_instance_pool_t *vip;
\r
357 unsigned char *vip_base;
\r
358 int instance_pool_size_per_core;
\r
359 unsigned char *vdi_mutexes_base;
\r
360 const int PTHREAD_MUTEX_T_DESTROY_VALUE = 0xdead10cc;
\r
362 vpu_logd("vpu_free_instances inter. sizeof(vpudrv_instance_pool_t)=%d \n", sizeof(vpudrv_instance_pool_t));
\r
364 instance_pool_size_per_core = (s_instance_pool.size/MAX_NUM_VPU_CORE); /* s_instance_pool.size assigned to the size of all core once call VDI_IOCTL_GET_INSTANCE_POOL by user. */
\r
366 list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
\r
368 if (vil->filp == filp) {
\r
369 s_vpu_open_ref_count--;
\r
370 vip_base = (unsigned char *)(s_instance_pool.base + (instance_pool_size_per_core*vil->core_idx));
\r
371 vpu_logd("vpu_free_instances detect instance crash\n");
\r
372 vpu_logd("instIdx=%d, coreIdx=%d, vip_base=%p, instance_pool_size_per_core=%d\n", (int)vil->inst_idx, (int)vil->core_idx, vip_base, (int)instance_pool_size_per_core);
\r
373 vip = (vpudrv_instance_pool_t *)vip_base;
\r
375 memset(&vip->codecInstPool[vil->inst_idx], 0x00, 4); /* only first 4 byte is key point to free the corresponding instance. */
\r
376 vip->vpu_instance_num = s_vpu_open_ref_count;
\r
377 #define PTHREAD_MUTEX_T_HANDLE_SIZE 4
\r
378 vdi_mutexes_base = (unsigned char *)(vip_base + (instance_pool_size_per_core - PTHREAD_MUTEX_T_HANDLE_SIZE*4));
\r
379 vpu_logd("vpu_free_instances : force to destroy vdi_mutexes_base=%p in userspace, vip->vpu_instance_num=%d\n",
\r
380 vdi_mutexes_base, vip->vpu_instance_num);
\r
381 if (vdi_mutexes_base) {
\r
383 for (i=0; i < 4; i++) {
\r
384 memcpy(vdi_mutexes_base, &PTHREAD_MUTEX_T_DESTROY_VALUE, PTHREAD_MUTEX_T_HANDLE_SIZE);
\r
385 vdi_mutexes_base += PTHREAD_MUTEX_T_HANDLE_SIZE;
\r
389 list_del(&vil->list);
\r
397 static int vpu_free_buffers(struct file *filp)
\r
399 vpudrv_buffer_pool_t *pool, *n;
\r
400 vpudrv_buffer_t vb;
\r
402 vpu_logd("vpu_free_buffers\n");
\r
404 list_for_each_entry_safe(pool, n, &s_vbp_head, list)
\r
406 if (pool->filp == filp) {
\r
409 vpu_free_dma_buffer(&vb);
\r
410 list_del(&pool->list);
\r
420 static irqreturn_t vpu_irq_handler(int irq, void *dev_id)
\r
422 vpu_drv_context_t *dev = (vpu_drv_context_t *)dev_id;
\r
424 /* this can be removed. it also work in VPU_WaitInterrupt of API function */
\r
427 if (s_vpu_drv_context.open_count <= 0)
\r
429 //printk(KERN_ERR "This interrupt signal is not for VPU\n");
\r
433 #ifdef VPU_IRQ_CONTROL
\r
434 disable_irq_nosync(s_vpu_irq);
\r
437 for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
\r
438 /*it means that we didn't get an information the current core from API layer. No core activated.*/
\r
439 if (s_bit_firmware_info[core].size == 0)
\r
442 if (ReadVpuRegister(BIT_INT_STS))
\r
444 WriteVpuRegister(BIT_INT_CLEAR, 0x1);
\r
452 if (dev->async_queue)
\r
453 kill_fasync(&dev->async_queue, SIGIO, POLL_IN); /* notify the interrupt to user space */
\r
455 s_interrupt_flag = 1;
\r
457 wake_up_interruptible(&s_interrupt_wait_q);
\r
458 return IRQ_HANDLED;
\r
461 static int vpu_open(struct inode *inode, struct file *filp)
\r
465 spin_lock(&s_vpu_lock);
\r
466 vpu_logi("[VPUDRV] vpu_open\n");
\r
468 s_vpu_drv_context.open_count++;
\r
469 filp->private_data = (void *)(&s_vpu_drv_context);
\r
472 ret = vpu_set_mm_clk();
\r
475 vpu_logi("[VPUDRV] REG_PMU_APB_PD_MM_TOP_CFG : 0x%x\n", __raw_readl(REG_PMU_APB_PD_MM_TOP_CFG));
\r
476 vpu_logi("[VPUDRV] REG_AON_APB_APB_EB0 : 0x%x\n", __raw_readl(REG_AON_APB_APB_EB0));
\r
477 vpu_logi("[VPUDRV] REG_AON_APB_APB_EB1 : 0x%x\n", __raw_readl(REG_AON_APB_APB_EB1));
\r
478 vpu_logi("[VPUDRV] REG_CODEC_AHB_CLOCK_SEL : 0x%x\n", __raw_readl(REG_CODEC_AHB_CLOCK_SEL));
\r
479 vpu_logi("[VPUDRV] REG_CODEC_AHB_CKG_ENABLE : 0x%x\n", __raw_readl(REG_CODEC_AHB_CKG_ENABLE));
\r
482 #if defined(CONFIG_SPRD_IOMMU)
\r
483 sprd_iommu_module_enable(IOMMU_MM);
\r
486 spin_unlock(&s_vpu_lock);
\r
491 /*static int vpu_ioctl(struct inode *inode, struct file *filp, u_int cmd, u_long arg) // for kernel 2.6.9 of C&M*/
\r
492 static long vpu_ioctl(struct file *filp, u_int cmd, u_long arg)
\r
496 case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
\r
498 vpudrv_buffer_pool_t *vbp;
\r
502 vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
\r
508 ret = copy_from_user(&(vbp->vb), (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
\r
515 ret = vpu_alloc_dma_buffer(&(vbp->vb));
\r
522 ret = copy_to_user((void __user *)arg, &(vbp->vb), sizeof(vpudrv_buffer_t));
\r
531 spin_lock(&s_vpu_lock);
\r
532 list_add(&vbp->list, &s_vbp_head);
\r
533 spin_unlock(&s_vpu_lock);
\r
538 case VDI_IOCTL_FREE_PHYSICALMEMORY:
\r
540 vpudrv_buffer_pool_t *vbp, *n;
\r
541 vpudrv_buffer_t vb;
\r
545 ret = copy_from_user(&vb, (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
\r
552 vpu_free_dma_buffer(&vb);
\r
554 spin_lock(&s_vpu_lock);
\r
555 list_for_each_entry_safe(vbp, n, &s_vbp_head, list)
\r
557 if (vbp->vb.base == vb.base) {
\r
558 list_del(&vbp->list);
\r
563 spin_unlock(&s_vpu_lock);
\r
569 case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
\r
571 #ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
572 if (s_video_memory.base != 0) {
\r
573 ret = copy_to_user((void __user *)arg, &s_video_memory, sizeof(vpudrv_buffer_t));
\r
583 case VDI_IOCTL_WAIT_INTERRUPT:
\r
585 u32 timeout = (u32) arg;
\r
587 ret = wait_event_interruptible_timeout(s_interrupt_wait_q, s_interrupt_flag != 0, msecs_to_jiffies(timeout));
\r
593 if (signal_pending(current)) {
\r
594 ret = -ERESTARTSYS;
\r
599 s_interrupt_flag = 0;
\r
600 #ifdef VPU_IRQ_CONTROL
\r
601 enable_irq(s_vpu_irq);
\r
606 case VDI_IOCTL_SET_CLOCK_GATE:
\r
610 //vpu_logi("[VPUDRV] VDI_IOCTL_SET_CLOCK_GATE s_vpu_clk = %x\n", s_vpu_clk);
\r
612 if (get_user(clkgate, (u32 __user *) arg))
\r
614 #ifdef VPU_SUPPORT_CLOCK_CONTROL
\r
616 vpu_clk_enable(s_vpu_clk);
\r
618 vpu_clk_disable(s_vpu_clk);
\r
621 //vpu_logi("[VPUDRV] VDI_IOCTL_SET_CLOCK_GATE sucessfully \n");
\r
625 case VDI_IOCTL_GET_INSTANCE_POOL:
\r
629 vpu_logi("[VPUDRV] VDI_IOCTL_GET_INSTANCE_POOL\n");
\r
631 if (s_instance_pool.base != 0) {
\r
632 ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(vpudrv_buffer_t));
\r
636 ret = copy_from_user(&s_instance_pool, (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
\r
638 if (vpu_alloc_dma_buffer(&s_instance_pool) != -1)
\r
640 vpu_logi("[VPUDRV] vpu_alloc_dma_buffer sucessfully\n");
\r
641 memset((void *)s_instance_pool.base, 0x0, s_instance_pool.size); /*clearing memory*/
\r
642 ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(vpudrv_buffer_t));
\r
644 /* success to get memory for instance pool */
\r
657 case VDI_IOCTL_GET_COMMON_MEMORY:
\r
659 if (s_common_memory.base != 0) {
\r
660 ret = copy_to_user((void __user *)arg, &s_common_memory, sizeof(vpudrv_buffer_t));
\r
664 ret = copy_from_user(&s_common_memory, (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
\r
666 if (vpu_alloc_dma_buffer(&s_common_memory) != -1) {
\r
667 ret = copy_to_user((void __user *)arg, &s_common_memory, sizeof(vpudrv_buffer_t));
\r
669 /* success to get memory for common memory */
\r
679 case VDI_IOCTL_OPEN_INSTANCE:
\r
681 vpudrv_inst_info_t inst_info;
\r
682 vpudrv_instanace_list_t *vil, *n;
\r
684 vil = kzalloc(sizeof(*vil), GFP_KERNEL);
\r
688 if (copy_from_user(&inst_info, (vpudrv_inst_info_t *)arg, sizeof(vpudrv_inst_info_t))) {
\r
693 vil->inst_idx = inst_info.inst_idx;
\r
694 vil->core_idx = inst_info.core_idx;
\r
696 spin_lock(&s_vpu_lock);
\r
697 list_add(&vil->list, &s_inst_list_head);
\r
699 inst_info.inst_open_count = 0; /* counting the current open instance number */
\r
700 list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
\r
702 if (vil->core_idx == inst_info.core_idx)
\r
703 inst_info.inst_open_count++;
\r
705 spin_unlock(&s_vpu_lock);
\r
706 s_vpu_open_ref_count++; /* flag just for that vpu is in opened or closed */
\r
708 if (copy_to_user((void __user *)arg, &inst_info, sizeof(vpudrv_inst_info_t))) {
\r
713 vpu_logd("[VPUDRV] VDI_IOCTL_OPEN_INSTANCE core_idx=%d, inst_idx=%d, s_vpu_open_ref_count=%d, inst_open_count=%d\n", (int)inst_info.core_idx, (int)inst_info.inst_idx, s_vpu_open_ref_count, inst_info.inst_open_count);
\r
716 case VDI_IOCTL_CLOSE_INSTANCE:
\r
718 vpudrv_inst_info_t inst_info;
\r
719 vpudrv_instanace_list_t *vil, *n;
\r
721 if (copy_from_user(&inst_info, (vpudrv_inst_info_t *)arg, sizeof(vpudrv_inst_info_t)))
\r
723 spin_lock(&s_vpu_lock);
\r
724 list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
\r
726 if (vil->inst_idx == inst_info.inst_idx && vil->core_idx == inst_info.core_idx) {
\r
727 list_del(&vil->list);
\r
733 inst_info.inst_open_count = 0; /* counting the current open instance number */
\r
734 list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
\r
736 if (vil->core_idx == inst_info.core_idx)
\r
737 inst_info.inst_open_count++;
\r
739 spin_unlock(&s_vpu_lock);
\r
740 s_vpu_open_ref_count--; /* flag just for that vpu is in opened or closed */
\r
742 if (copy_to_user((void __user *)arg, &inst_info, sizeof(vpudrv_inst_info_t)))
\r
745 vpu_logd("[VPUDRV] VDI_IOCTL_CLOSE_INSTANCE core_idx=%d, inst_idx=%d, s_vpu_open_ref_count=%d, inst_open_count=%d\n", (int)inst_info.core_idx, (int)inst_info.inst_idx, s_vpu_open_ref_count, inst_info.inst_open_count);
\r
748 case VDI_IOCTL_GET_INSTANCE_NUM:
\r
750 vpudrv_inst_info_t inst_info;
\r
751 vpudrv_instanace_list_t *vil, *n;
\r
753 ret = copy_from_user(&inst_info, (vpudrv_inst_info_t *)arg, sizeof(vpudrv_inst_info_t));
\r
757 inst_info.inst_open_count = 0;
\r
758 spin_lock(&s_vpu_lock);
\r
759 list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
\r
761 if (vil->core_idx == inst_info.core_idx)
\r
762 inst_info.inst_open_count++;
\r
764 spin_unlock(&s_vpu_lock);
\r
765 ret = copy_to_user((void __user *)arg, &inst_info, sizeof(vpudrv_inst_info_t));
\r
767 vpu_logd("[VPUDRV] VDI_IOCTL_GET_INSTANCE_NUM core_idx=%d, inst_idx=%d, open_count=%d\n", (int)inst_info.core_idx, (int)inst_info.inst_idx, inst_info.inst_open_count);
\r
771 case VDI_IOCTL_RESET:
\r
778 printk(KERN_ERR "[VPUDRV] No such IOCTL, cmd is %d\n", cmd);
\r
786 static ssize_t vpu_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
\r
792 static ssize_t vpu_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
\r
795 /* vpu_logi("[VPUDRV] vpu_write len=%d\n", (int)len); */
\r
797 vpu_loge("vpu_write buf = NULL error \n");
\r
801 if (len == sizeof(vpu_bit_firmware_info_t)) {
\r
802 vpu_bit_firmware_info_t *bit_firmware_info;
\r
804 bit_firmware_info = kzalloc(sizeof(vpu_bit_firmware_info_t), GFP_KERNEL);
\r
805 if (!bit_firmware_info) {
\r
806 vpu_loge("vpu_write bit_firmware_info allocation error \n");
\r
810 if (copy_from_user(bit_firmware_info, buf, len)) {
\r
811 vpu_loge("vpu_write copy_from_user error for bit_firmware_info\n");
\r
812 kfree(bit_firmware_info);
\r
816 if (bit_firmware_info->size == sizeof(vpu_bit_firmware_info_t)) {
\r
817 vpu_logd("vpu_write set bit_firmware_info coreIdx=0x%x, reg_base_offset=0x%x size=0x%x, bit_code[0]=0x%x\n", bit_firmware_info->core_idx, (int)bit_firmware_info->reg_base_offset, bit_firmware_info->size, bit_firmware_info->bit_code[0]);
\r
819 if (bit_firmware_info->core_idx > MAX_NUM_VPU_CORE) {
\r
820 vpu_loge("vpu_write coreIdx[%d] is exceeded than MAX_NUM_VPU_CORE[%d]\n", bit_firmware_info->core_idx, MAX_NUM_VPU_CORE);
\r
821 kfree(bit_firmware_info);
\r
825 memcpy((void *)&s_bit_firmware_info[bit_firmware_info->core_idx], bit_firmware_info, sizeof(vpu_bit_firmware_info_t));
\r
826 kfree(bit_firmware_info);
\r
830 kfree(bit_firmware_info);
\r
839 static int vpu_release(struct inode *inode, struct file *filp)
\r
843 spin_lock(&s_vpu_lock);
\r
845 vpu_logi("[VPUDRV] vpu_release, open_count= %d\n", s_vpu_drv_context.open_count);
\r
847 /* found and free the not handled buffer by user applications */
\r
848 vpu_free_buffers(filp);
\r
850 /* found and free the not closed instance by user applications */
\r
851 vpu_free_instances(filp);
\r
852 s_vpu_drv_context.open_count--;
\r
853 if (s_vpu_drv_context.open_count == 0) {
\r
854 if (s_instance_pool.base) {
\r
855 vpu_logi("[VPUDRV] free instance pool\n");
\r
856 vpu_free_dma_buffer(&s_instance_pool);
\r
857 s_instance_pool.base = 0;
\r
860 if (s_common_memory.base) {
\r
861 vpu_logi("[VPUDRV] free common memory\n");
\r
862 vpu_free_dma_buffer(&s_common_memory);
\r
863 s_common_memory.base = 0;
\r
867 vpu_clk_free(&s_vpu_drv_context);
\r
869 if(s_vpu_drv_context.open_count == 0) {
\r
870 vpu_logi("%s vpu_power_shutdown\n", __func__);
\r
871 vpu_power_shutdown();
\r
874 #if defined(CONFIG_SPRD_IOMMU)
\r
875 sprd_iommu_module_disable(IOMMU_MM);
\r
878 spin_unlock(&s_vpu_lock);
\r
886 static int vpu_fasync(int fd, struct file *filp, int mode)
\r
888 struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)filp->private_data;
\r
889 return fasync_helper(fd, filp, mode, &dev->async_queue);
\r
893 static int vpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
\r
897 printk("[%s]\n", __FUNCTION__);
\r
899 vm->vm_flags |= VM_IO | VM_RESERVED;
\r
900 vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
\r
901 pfn = s_vpu_reg_phy_addr >> PAGE_SHIFT;
\r
903 return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end-vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
\r
906 static int vpu_map_to_physical_memory(struct file *fp, struct vm_area_struct *vm)
\r
908 printk("[%s]\n", __FUNCTION__);
\r
910 vm->vm_flags |= VM_IO | VM_RESERVED;
\r
911 vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
\r
913 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, vm->vm_end-vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
\r
915 static int vpu_map_to_instance_pool_memory(struct file *fp, struct vm_area_struct *vm)
\r
917 printk("[%s]\n", __FUNCTION__);
\r
919 return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, vm->vm_end-vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
\r
923 * @brief memory map interface for vpu file operation
\r
924 * @return 0 on success or negative error code on error
\r
926 static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
\r
928 printk("[%s], vm_pgoff = %ld \n", __FUNCTION__, vm->vm_pgoff);
\r
930 if (vm->vm_pgoff) {
\r
931 if (vm->vm_pgoff == (s_instance_pool.phys_addr>>PAGE_SHIFT))
\r
932 return vpu_map_to_instance_pool_memory(fp, vm);
\r
934 return vpu_map_to_physical_memory(fp, vm);
\r
936 return vpu_map_to_register(fp, vm);
\r
940 struct file_operations vpu_fops = {
\r
941 .owner = THIS_MODULE,
\r
944 .write = vpu_write,
\r
945 .unlocked_ioctl = vpu_ioctl,
\r
946 .release = vpu_release,
\r
947 .fasync = vpu_fasync,
\r
949 #ifdef CONFIG_COMPAT
\r
950 .compat_ioctl = vpu_ioctl,
\r
954 static struct miscdevice vpu_dev = {
\r
955 .minor = VPU_MINOR,
\r
956 .name = "sprd_coda7l",
\r
962 static const struct of_device_id of_match_table_coda7l[] = {
\r
963 { .compatible = "sprd,sprd_coda7l", },
\r
967 static int vpu_parse_dt(struct device *dev)
\r
969 struct device_node *np = dev->of_node;
\r
970 struct resource res;
\r
973 ret = of_address_to_resource(np, 0, &res);
\r
975 dev_err(dev, "no reg of property specified\n");
\r
976 printk(KERN_ERR "vsp: failed to parse_dt!\n");
\r
980 s_vpu_reg_phy_addr = res.start;
\r
981 s_vpu_reg_virt_addr = ioremap_nocache(res.start, resource_size(&res));
\r
982 if(!s_vpu_reg_virt_addr)
\r
985 s_vpu_drv_context.irq = irq_of_parse_and_map(np, 0);
\r
986 s_vpu_drv_context.dev_np = np;
\r
988 printk(KERN_INFO "vsp_parse_dt s_vpu_drv_context.irq = %d !\n", s_vpu_drv_context.irq);
\r
990 printk(KERN_INFO "deint_parse_dt , SPRD_VPP_PHYS = %p, SPRD_VPP_BASE = %p\n", (void*)s_vpu_reg_phy_addr, (void*)s_vpu_reg_virt_addr);
\r
995 static int vpu_parse_dt(
\r
996 struct device *dev)
\r
998 //vsp_hw_dev.irq = IRQ_VSP_INT;
\r
1004 static int vpu_probe(struct platform_device *pdev)
\r
1009 struct resource *res = NULL;
\r
1011 vpu_logi("[VPUDRV] vpu_probe\n");
\r
1014 if (pdev->dev.of_node) {
\r
1015 ret = vpu_parse_dt(&pdev->dev);
\r
1018 ret = vpu_parse_dt(&pdev->dev);
\r
1021 s_vpu_drv_context.freq_div = DEFAULT_FREQ_DIV;
\r
1022 s_vpu_drv_context.clk_mm_i= NULL;
\r
1023 s_vpu_drv_context.clk_parent_axi= NULL;
\r
1024 s_vpu_drv_context.clk_parent_cc= NULL;
\r
1025 s_vpu_drv_context.clk_parent_apb= NULL;
\r
1026 s_vpu_drv_context.clk_coda7_apb= NULL;
\r
1027 s_vpu_drv_context.clk_coda7_axi= NULL;
\r
1028 s_vpu_drv_context.clk_coda7_cc= NULL;
\r
1030 ret = misc_register(&vpu_dev);
\r
1032 printk(KERN_ERR "cannot register miscdev on minor=%d (%d)\n",
\r
1034 goto ERROR_PROVE_DEVICE;
\r
1037 #ifdef VPU_SUPPORT_ISR
\r
1039 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
\r
1040 if (res) {// if platform driver is implemented
\r
1041 s_vpu_irq = res->start;
\r
1042 vpu_logi("[VPUDRV] : vpu irq number get from platform driver irq=0x%x\n", s_vpu_irq);
\r
1044 vpu_logi("[VPUDRV] : vpu irq number get from defined value irq=0x%x\n", s_vpu_irq);
\r
1048 err = request_irq(s_vpu_irq, vpu_irq_handler, IRQF_SHARED, "VPU_CODEC_IRQ", (void *)(&s_vpu_drv_context));
\r
1050 printk(KERN_ERR "[VPUDRV] : fail to register interrupt handler\n");
\r
1051 goto ERROR_PROVE_DEVICE;
\r
1056 #ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
1057 s_video_memory.size = VPU_INIT_VIDEO_MEMORY_SIZE_IN_BYTE;
\r
1058 s_video_memory.phys_addr = VPU_DRAM_PHYSICAL_BASE;
\r
1059 s_video_memory.base = (unsigned long)ioremap(s_video_memory.phys_addr, PAGE_ALIGN(s_video_memory.size));
\r
1060 if (!s_video_memory.base) {
\r
1061 printk(KERN_ERR "[VPUDRV] : fail to remap video memory physical phys_addr=0x%x, base=0x%x, size=%d\n",
\r
1062 (int)s_video_memory.phys_addr, (int)s_video_memory.base, (int)s_video_memory.size);
\r
1063 goto ERROR_PROVE_DEVICE;
\r
1066 if (vmem_init(&s_vmem, s_video_memory.phys_addr, s_video_memory.size) < 0) {
\r
1067 printk(KERN_ERR "[VPUDRV] : fail to init vmem system\n");
\r
1068 goto ERROR_PROVE_DEVICE;
\r
1070 vpu_logi("[VPUDRV] success to probe vpu device with reserved video memory phys_addr=0x%x, base = 0x%x\n",
\r
1071 (int) s_video_memory.phys_addr, (int)s_video_memory.base);
\r
1073 vpu_logi("[VPUDRV] success to probe vpu device with non reserved video memory\n");
\r
1079 ERROR_PROVE_DEVICE:
\r
1081 misc_deregister(&vpu_dev);
\r
1083 if (s_vpu_reg_virt_addr)
\r
1084 iounmap(s_vpu_reg_virt_addr);
\r
1089 static int vpu_remove(struct platform_device *pdev)
\r
1091 vpu_logd("vpu_remove\n");
\r
1092 #ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
\r
1094 misc_deregister(&vpu_dev);
\r
1096 if (s_instance_pool.base) {
\r
1097 vpu_free_dma_buffer(&s_instance_pool);
\r
1098 s_instance_pool.base = 0;
\r
1101 if (s_common_memory.base) {
\r
1102 vpu_free_dma_buffer(&s_common_memory);
\r
1103 s_common_memory.base = 0;
\r
1107 #ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
1108 if (s_video_memory.base) {
\r
1109 iounmap((void *)s_video_memory.base);
\r
1110 s_video_memory.base = 0;
\r
1112 vmem_exit(&s_vmem);
\r
1116 #ifdef VPU_SUPPORT_ISR
\r
1117 if (s_vpu_drv_context.irq)
\r
1118 free_irq(s_vpu_drv_context.irq, &s_vpu_drv_context);
\r
1121 vpu_clk_put(s_vpu_clk);
\r
1125 #endif /*VPU_SUPPORT_PLATFORM_DRIVER_REGISTER*/
\r
1132 static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
\r
1134 vpu_logd("vpu_suspend\n");
\r
/* PM resume hook: currently only logs; firmware reload is driven from userspace. */
static int vpu_resume(struct platform_device *pdev)
{
	vpu_logd("vpu_resume\n");
	return 0;
}
\r
1143 #define vpu_suspend NULL
\r
1144 #define vpu_resume NULL
\r
1145 #endif /* !CONFIG_PM */
\r
1148 static struct platform_driver vpu_driver = {
\r
1149 .probe = vpu_probe,
\r
1150 .remove = vpu_remove,
\r
1151 .suspend = vpu_suspend,
\r
1152 .resume = vpu_resume,
\r
1154 .owner = THIS_MODULE,
\r
1155 .name = "sprd_coda7l",
\r
1157 .of_match_table = of_match_ptr(of_match_table_coda7l) ,
\r
1162 static int __init vpu_init(void)
\r
1166 vpu_logd("vpu_init, REG_AON_APB_BOND_OPT0 = 0x%x\n", __raw_readl(REG_AON_APB_BOND_OPT0));
\r
1168 if(__raw_readl(REG_AON_APB_BOND_OPT0) & (1<<12)) {
\r
1172 init_waitqueue_head(&s_interrupt_wait_q);
\r
1173 s_common_memory.base = 0;
\r
1174 s_instance_pool.base = 0;
\r
1175 #ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
\r
1176 res = platform_driver_register(&vpu_driver);
\r
1178 res = platform_driver_register(&vpu_driver);
\r
1179 res = vpu_probe(NULL);
\r
1181 vpu_power_shutdown();
\r
1182 vpu_logd("end vpu_init result=0x%x\n", res);
\r
1187 static void __exit vpu_exit(void)
\r
1189 #ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
\r
1190 vpu_logd("vpu_exit\n");
\r
1192 if(__raw_readl(REG_AON_APB_BOND_OPT0) & (1<<12)) {
\r
1196 platform_driver_unregister(&vpu_driver);
\r
1200 vpu_clk_put(s_vpu_clk);
\r
1202 if (s_instance_pool.base) {
\r
1203 vpu_free_dma_buffer(&s_instance_pool);
\r
1204 s_instance_pool.base = 0;
\r
1207 if (s_common_memory.base) {
\r
1208 vpu_free_dma_buffer(&s_common_memory);
\r
1209 s_common_memory.base = 0;
\r
1212 #ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
\r
1213 if (s_video_memory.base) {
\r
1214 iounmap((void *)s_video_memory.base);
\r
1215 s_video_memory.base = 0;
\r
1217 vmem_exit(&s_vmem);
\r
1221 if (s_vpu_major > 0) {
\r
1222 cdev_del(&s_vpu_cdev);
\r
1223 unregister_chrdev_region(s_vpu_major, 1);
\r
1227 #ifdef VPU_SUPPORT_ISR
\r
1229 free_irq(s_vpu_irq, &s_vpu_drv_context);
\r
/* Module metadata and init/exit entry-point registration. */
1239 MODULE_AUTHOR("A customer using C&M VPU, Inc.");
\r
1240 MODULE_DESCRIPTION("VPU linux driver");
\r
/* NOTE(review): the file header states LGPL-2.1-or-later but the module
 * tag declares "GPL" -- confirm the intended license. */
1241 MODULE_LICENSE("GPL");
\r
1243 module_init(vpu_init);
\r
1244 module_exit(vpu_exit);
\r
/*
 * Hardware reset entry point, invoked on request from the application.
 * NOTE(review): the body is truncated in this listing (only the trace log
 * is visible; the actual reset sequence and return are in missing lines).
 */
1246 int vpu_hw_reset(void)
\r
1248 vpu_logd("request vpu reset from application. \n");
\r
/*
 * Bring up the CODEC (VPU) clocks by writing PMU/AON/CODEC-AHB registers
 * directly, bypassing the common clock framework.
 * NOTE(review): declared with an empty parameter list; in C this should be
 * (void) to give the function a proper prototype. The opening brace and
 * return statement are in lines missing from this listing.
 */
1253 static int vpu_set_clk_by_register()
\r
/* Clear the MM-top power-down bit so the multimedia domain is powered. */
1255 __raw_writel((~(1<<25))&__raw_readl(REG_PMU_APB_PD_MM_TOP_CFG), REG_PMU_APB_PD_MM_TOP_CFG); //0x402b_001c (&0xfdff_ffff)
\r
/* Enable the MM bus clock in AON APB_EB0 (bit 25). */
1256 __raw_writel((0x02000000)|__raw_readl(REG_AON_APB_APB_EB0), REG_AON_APB_APB_EB0); //0x402e_0000 (|0x0200_0000)
\r
/* Enable the CODEC block clock (BIT_CODEC_EB) in AON APB_EB1. */
1258 __raw_writel(__raw_readl(REG_AON_APB_APB_EB1) | BIT_CODEC_EB, REG_AON_APB_APB_EB1); //0x402e_0004 (|0x0000_4000)
\r
/* Select clock sources inside the CODEC AHB block, then ungate them. */
1259 __raw_writel(__raw_readl(REG_CODEC_AHB_CLOCK_SEL) | 0x0333, REG_CODEC_AHB_CLOCK_SEL); //6200_0008 (|0x0333)
\r
1260 __raw_writel(__raw_readl(REG_CODEC_AHB_CKG_ENABLE) | 0x03, REG_CODEC_AHB_CKG_ENABLE); //6200_0004 (|0x03)
\r
/*
 * Release the clocks acquired in vpu_set_mm_clk(): disable each coda7l
 * clock, then unprepare it, then finally disable+unprepare the parent
 * MM clock. Each step is guarded so a partially-initialized context
 * (e.g. the error path of vpu_set_mm_clk) is handled safely.
 * NOTE(review): closing braces and the return statement are in lines
 * missing from this listing.
 */
1266 static int vpu_clk_free(vpu_drv_context_t* vpu_context)
\r
/* Phase 1: disable all coda7l clocks. The combined
 * clk_disable_unprepare() calls were deliberately split into separate
 * disable and unprepare phases (see the commented-out originals). */
1268 if (vpu_context->clk_coda7_apb) {
\r
1269 //clk_disable_unprepare(vpu_context->clk_coda7_apb);
\r
1270 clk_disable(vpu_context->clk_coda7_apb);
\r
1273 if (vpu_context->clk_coda7_axi) {
\r
1274 //clk_disable_unprepare(vpu_context->clk_coda7_axi);
\r
1275 clk_disable(vpu_context->clk_coda7_axi);
\r
1278 if (vpu_context->clk_coda7_cc) {
\r
1279 //clk_disable_unprepare(vpu_context->clk_coda7_cc);
\r
1280 clk_disable(vpu_context->clk_coda7_cc);
\r
/* Phase 2: unprepare the same three clocks. */
1283 if (vpu_context->clk_coda7_apb) {
\r
1284 clk_unprepare(vpu_context->clk_coda7_apb);
\r
1287 if (vpu_context->clk_coda7_axi) {
\r
1288 clk_unprepare(vpu_context->clk_coda7_axi);
\r
1291 if (vpu_context->clk_coda7_cc) {
\r
1292 clk_unprepare(vpu_context->clk_coda7_cc);
\r
/* The MM domain clock goes down last, after all child clocks. */
1295 if (vpu_context->clk_mm_i) {
\r
1296 clk_disable_unprepare(vpu_context->clk_mm_i);
\r
/*
 * Reparent the three coda7l clocks (axi, cc, apb). For each clock the same
 * two-step sequence runs: first switch to the source at fixed index 3,
 * then switch to the source selected by s_vpu_drv_context.freq_div.
 * NOTE(review): declared with an empty parameter list; should be (void).
 * Error-path control flow ('ret' declaration, gotos, closing braces, and
 * the return) is in lines missing from this listing. Also note that
 * clk_get() references taken here are never clk_put() in the visible code
 * -- possible reference leak on repeated calls; verify in full source.
 */
1302 static int vpu_set_parent_for_coda7l_clk()
\r
1304 struct clk *clk_parent;
\r
1305 char *name_parent;
\r
/* ---- clk_coda7_axi: temporary parent (source index 3) ---- */
1308 name_parent = vpu_get_clk_src_name(3, clock_coda7l_axi_map);
\r
1309 clk_parent = clk_get(NULL, name_parent);
\r
/* NOTE(review): this printk runs unconditionally (not only on failure),
 * unlike the cc/apb sections below -- likely leftover debug output. */
1310 printk(KERN_ERR "clock[%s]: get parent in probe[%s] by clk_get()!\n", "clk_coda7_axi", name_parent);
\r
1311 if ((!clk_parent )|| IS_ERR(clk_parent) ) {
\r
1312 printk(KERN_ERR "clock[%s]: failed to get parent in probe[%s] by clk_get()!\n", "clk_coda7_axi", name_parent);
\r
1315 s_vpu_drv_context.clk_parent_axi= clk_parent;
\r
1318 ret = clk_set_parent(s_vpu_drv_context.clk_coda7_axi, s_vpu_drv_context.clk_parent_axi);
\r
1320 printk(KERN_ERR "clock[%s]: clk_set_parent() failed in probe!", "clk_coda7_axi");
\r
/* ---- clk_coda7_axi: final parent selected by freq_div ---- */
1324 name_parent = vpu_get_clk_src_name(s_vpu_drv_context.freq_div, clock_coda7l_axi_map);
\r
1325 clk_parent = clk_get(NULL, name_parent);
\r
1326 printk(KERN_ERR "clock[%s]: get parent in probe[%s] by clk_get()!\n", "clk_coda7_axi", name_parent);
\r
1327 if ((!clk_parent )|| IS_ERR(clk_parent) ) {
\r
1328 printk(KERN_ERR "clock[%s]: failed to get parent in probe[%s] by clk_get()!\n", "clk_coda7_axi", name_parent);
\r
1331 s_vpu_drv_context.clk_parent_axi= clk_parent;
\r
1334 ret = clk_set_parent(s_vpu_drv_context.clk_coda7_axi, s_vpu_drv_context.clk_parent_axi);
\r
1336 printk(KERN_ERR "clock[%s]: clk_set_parent() failed in probe!", "clk_coda7_axi");
\r
1340 printk(KERN_ERR "vpu parent clock name %s, freq: %dHz\n", name_parent, (int)clk_get_rate(s_vpu_drv_context.clk_coda7_axi));
\r
/* ---- clk_coda7_cc: temporary parent (index 3), then freq_div parent ---- */
1342 name_parent = vpu_get_clk_src_name(3, clock_coda7l_cc_map);
\r
1343 clk_parent = clk_get(NULL, name_parent);
\r
1344 if ((!clk_parent )|| IS_ERR(clk_parent) ) {
\r
1345 printk(KERN_ERR "clock[%s]: failed to get parent in probe[%s] by clk_get()!\n", "clk_coda7_cc", name_parent);
\r
1348 s_vpu_drv_context.clk_parent_cc= clk_parent;
\r
1351 ret = clk_set_parent(s_vpu_drv_context.clk_coda7_cc, s_vpu_drv_context.clk_parent_cc);
\r
1353 printk(KERN_ERR "clock[%s]: clk_set_parent() failed in probe!", "clk_coda7_cc");
\r
1357 name_parent = vpu_get_clk_src_name(s_vpu_drv_context.freq_div, clock_coda7l_cc_map);
\r
1358 clk_parent = clk_get(NULL, name_parent);
\r
1359 if ((!clk_parent )|| IS_ERR(clk_parent) ) {
\r
1360 printk(KERN_ERR "clock[%s]: failed to get parent in probe[%s] by clk_get()!\n", "clk_coda7_cc", name_parent);
\r
1363 s_vpu_drv_context.clk_parent_cc= clk_parent;
\r
1366 ret = clk_set_parent(s_vpu_drv_context.clk_coda7_cc, s_vpu_drv_context.clk_parent_cc);
\r
1368 printk(KERN_ERR "clock[%s]: clk_set_parent() failed in probe!", "clk_coda7_cc");
\r
1372 printk(KERN_ERR "vpu parent clock name %s, freq: %dHz\n", name_parent, (int)clk_get_rate(s_vpu_drv_context.clk_coda7_cc));
\r
/* ---- clk_coda7_apb: temporary parent (index 3), then freq_div parent ---- */
1374 name_parent = vpu_get_clk_src_name(3, clock_coda7l_apb_map);
\r
1375 clk_parent = clk_get(NULL, name_parent);
\r
1376 if ((!clk_parent )|| IS_ERR(clk_parent) ) {
\r
1377 printk(KERN_ERR "clock[%s]: failed to get parent in probe[%s] by clk_get()!\n", "clk_coda7_apb", name_parent);
\r
1380 s_vpu_drv_context.clk_parent_apb= clk_parent;
\r
1383 ret = clk_set_parent(s_vpu_drv_context.clk_coda7_apb, s_vpu_drv_context.clk_parent_apb);
\r
1385 printk(KERN_ERR "clock[%s]: clk_set_parent() failed in probe!", "clk_coda7_apb");
\r
1389 name_parent = vpu_get_clk_src_name(s_vpu_drv_context.freq_div, clock_coda7l_apb_map);
\r
1390 clk_parent = clk_get(NULL, name_parent);
\r
1391 if ((!clk_parent )|| IS_ERR(clk_parent) ) {
\r
1392 printk(KERN_ERR "clock[%s]: failed to get parent in probe[%s] by clk_get()!\n", "clk_coda7_apb", name_parent);
\r
1395 s_vpu_drv_context.clk_parent_apb= clk_parent;
\r
1398 ret = clk_set_parent(s_vpu_drv_context.clk_coda7_apb, s_vpu_drv_context.clk_parent_apb);
\r
1400 printk(KERN_ERR "clock[%s]: clk_set_parent() failed in probe!", "clk_coda7_apb");
\r
1404 printk(KERN_ERR "vpu parent clock name %s, freq: %dHz\n", name_parent, (int)clk_get_rate(s_vpu_drv_context.clk_coda7_apb));
\r
/*
 * Acquire and enable the MM-domain and coda7l clocks (clk_mm_i first, then
 * axi, cc, apb), then reparent them via vpu_set_parent_for_coda7l_clk().
 * Each clock is looked up either from the device-tree node
 * (of_clk_get_by_name) or globally (clk_get) -- the #if/#else that selects
 * between them is in lines missing from this listing, as are the error
 * gotos and the return; the visible failure path ends in vpu_clk_free().
 */
1408 static int vpu_set_mm_clk(void)
\r
1411 struct clk *clk_mm_i;
\r
1412 struct clk *clk_coda7_axi;
\r
1413 struct clk *clk_coda7_cc;
\r
1414 struct clk *clk_coda7_apb;
\r
1416 #if defined(CONFIG_ARCH_SCX35)
\r
/* --- clk_mm_i: MM domain clock; must be enabled before child clocks --- */
1418 //Config for clk_mm_i
\r
1420 clk_mm_i = of_clk_get_by_name(s_vpu_drv_context.dev_np, "clk_mm_i");
\r
1422 clk_mm_i = clk_get(NULL, "clk_mm_i");
\r
/* NOTE(review): "[%s}" in the error format strings below is a typo for
 * "[%s]" (harmless, cosmetic). */
1425 if (IS_ERR(clk_mm_i) || (!clk_mm_i)) {
\r
1426 printk(KERN_ERR "###: Failed : Can't get clock [%s}!\n",
\r
1428 printk(KERN_ERR "###: clk_mm_i = %p\n", clk_mm_i);
\r
1432 s_vpu_drv_context.clk_mm_i= clk_mm_i;
\r
1435 ret = clk_prepare_enable(s_vpu_drv_context.clk_mm_i);
\r
1437 printk(KERN_ERR "###:s_vpu_drv_context.clk_mm_i: clk_enable() failed!\n");
\r
/* --- clk_coda7_axi --- */
1442 //Config for clk_coda7_axi
\r
1444 clk_coda7_axi = of_clk_get_by_name(s_vpu_drv_context.dev_np, "clk_coda7_axi");
\r
1446 clk_coda7_axi = clk_get(NULL, "clk_coda7_axi");
\r
1449 if (IS_ERR(clk_coda7_axi) || (!clk_coda7_axi)) {
\r
1450 printk(KERN_ERR "###: Failed : Can't get clock [%s}!\n", "clk_coda7_axi");
\r
1451 printk(KERN_ERR "###: clk_coda7_axi = %p\n", clk_coda7_axi);
\r
1455 s_vpu_drv_context.clk_coda7_axi = clk_coda7_axi;
\r
1458 ret = clk_prepare_enable(s_vpu_drv_context.clk_coda7_axi);
\r
1460 printk(KERN_ERR "###: clk_coda7_axi: clk_enable() failed!\n");
\r
/* --- clk_coda7_cc --- */
1464 //Config for clk_coda7_cc
\r
1466 clk_coda7_cc = of_clk_get_by_name(s_vpu_drv_context.dev_np, "clk_coda7_cc");
\r
1468 clk_coda7_cc = clk_get(NULL, "clk_coda7_cc");
\r
1471 if (IS_ERR(clk_coda7_cc) || (!clk_coda7_cc)) {
\r
1472 printk(KERN_ERR "###: Failed : Can't get clock [%s}!\n", "clk_coda7_cc");
\r
1473 printk(KERN_ERR "###: clk_coda7_cc = %p\n", clk_coda7_cc);
\r
1477 s_vpu_drv_context.clk_coda7_cc = clk_coda7_cc;
\r
1480 ret = clk_prepare_enable(s_vpu_drv_context.clk_coda7_cc);
\r
1482 printk(KERN_ERR "###: clk_coda7_cc: clk_enable() failed!\n");
\r
/* --- clk_coda7_apb --- */
1486 //Config for clk_coda7_apb
\r
1488 clk_coda7_apb = of_clk_get_by_name(s_vpu_drv_context.dev_np, "clk_coda7_apb");
\r
1490 clk_coda7_apb = clk_get(NULL, "clk_coda7_apb");
\r
1493 if (IS_ERR(clk_coda7_apb) || (!clk_coda7_apb)) {
\r
/* NOTE(review): copy/paste defect -- these two messages name
 * "clk_coda7_cc" but report the apb clock; should say "clk_coda7_apb". */
1494 printk(KERN_ERR "###: Failed : Can't get clock [%s}!\n", "clk_coda7_cc");
\r
1495 printk(KERN_ERR "###: clk_coda7_cc = %p\n", clk_coda7_apb);
\r
1499 s_vpu_drv_context.clk_coda7_apb = clk_coda7_apb;
\r
1502 ret = clk_prepare_enable(s_vpu_drv_context.clk_coda7_apb);
\r
1504 printk(KERN_ERR "###: clk_coda7_apb: clk_enable() failed!\n");
\r
/* All clocks enabled; now select the configured parents. */
1508 ret = vpu_set_parent_for_coda7l_clk();
\r
1510 printk(KERN_ERR "###:vpu set parent failed!\n");
\r
/* Visible error path: unwind everything acquired above. */
1518 vpu_clk_free(&s_vpu_drv_context);
\r
1523 struct clk *vpu_clk_get(struct device *dev)
\r
1525 return clk_get(dev, VPU_CLK_NAME);
\r
/*
 * Release a clock reference obtained from vpu_clk_get().
 * The guard skips NULL and ERR_PTR handles; the clk_put() call itself is
 * in a line missing from this listing (original line ~1530) -- TODO confirm.
 */
1527 void vpu_clk_put(struct clk *clk)
\r
1529 if (!(clk == NULL || IS_ERR(clk)))
\r
/*
 * Enable the VPU clock. On the C&M EVB reference board it additionally
 * grabs and enables the external "vcore" and "vbus" clocks before enabling
 * the main clock.
 * NOTE(review): lines are missing from this listing -- the conditionals
 * around the vcore/vbus handling (note the vbus log at original line 1547
 * appearing before the vbus clk_get at 1550) and the function's final
 * return are not visible; verify ordering against the full source.
 */
1532 int vpu_clk_enable(struct clk *clk)
\r
1536 if (!(clk == NULL || IS_ERR(clk))) {
\r
1537 /* the below is for C&M EVB.*/
\r
1539 struct clk *s_vpuext_clk = NULL;
\r
/* Bring up the external core voltage-domain clock first. */
1540 s_vpuext_clk = clk_get(NULL, "vcore");
\r
1543 vpu_logi("[VPUDRV] vcore clk=%p\n", s_vpuext_clk);
\r
1544 clk_enable(s_vpuext_clk);
\r
1547 vpu_logi("[VPUDRV] vbus clk=%p\n", s_vpuext_clk);
\r
/* Then the external bus clock. */
1550 s_vpuext_clk = clk_get(NULL, "vbus");
\r
1551 clk_enable(s_vpuext_clk);
\r
1555 /* for C&M EVB. */
\r
/* Finally enable the VPU clock itself and propagate its status. */
1556 return clk_enable(clk);
\r
1563 void vpu_clk_disable(struct clk *clk)
\r
1566 if (!(clk == NULL || IS_ERR(clk))) {
\r
1567 vpu_logd("[VPUDRV] vpu_clk_disable\n");
\r