/*
 * Copyright (C) 2012 Spreadtrum Communications Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/miscdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/proc_fs.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <asm/uaccess.h>
22 #include <linux/math64.h>
23 #include <linux/types.h>
24 #include <linux/interrupt.h>
25 #include <linux/errno.h>
26 #include <linux/irq.h>
27 #include <linux/kthread.h>
28 #include <linux/io.h>//for ioremap
29 #include <linux/pid.h>
33 #include <linux/of_fdt.h>
34 #include <linux/of_irq.h>
35 #include <linux/of_address.h>
36 #include <linux/device.h>
39 #include <soc/sprd/arch_misc.h>// get chip id
40 #include <video/ion_sprd.h>
41 //#include "gsp_drv.h"
42 #include "gsp_config_if.h"
43 #include "scaler_coef_cal.h"
45 #include "gsp_debug.h"
/* Module-scope state: OF device handle and ioremapped register bases, all
 * filled in by gsp_drv_probe(). g_gsp_* are non-static because gsp_config_if
 * macros (GSP_REG_*) presumably reference them -- TODO confirm. */
50 static struct device *s_gsp_of_dev = NULL;
51 ulong g_gsp_base_addr = 0;
52 ulong g_gsp_mmu_ctrl_addr = 0;
54 gsp_context_t *g_gspCtx = NULL;// only used to debug when kernel crash, not used to pass parameters
/*
 * gsp_get_user() - return the per-process user slot for @user_pid.
 *
 * First scans gsp_user_array[] for a slot already bound to the pid; if none
 * is found, claims the first free slot (pid == INVALID_USER_ID) and binds it.
 * Returns NULL when every one of the GSP_MAX_USER slots is taken -- callers
 * (gsp_drv_open) must check for that.
 */
56 static gsp_user* gsp_get_user(pid_t user_pid, gsp_context_t *gspCtx)
58 gsp_user* ret_user = NULL;
/* pass 1: look for an existing binding for this pid */
61 for (i = 0; i < GSP_MAX_USER; i ++) {
62 if ((gspCtx->gsp_user_array + i)->pid == user_pid) {
63 ret_user = gspCtx->gsp_user_array + i;
/* pass 2: not found -- claim the first unused slot for this pid */
68 if (ret_user == NULL) {
69 for (i = 0; i < GSP_MAX_USER; i ++) {
70 if ((gspCtx->gsp_user_array + i)->pid == INVALID_USER_ID) {
71 ret_user = gspCtx->gsp_user_array + i;
72 ret_user->pid = user_pid;
/*
 * gsp_drv_open() - misc-device open: bind the calling pid to a user slot.
 *
 * file->private_data initially holds the struct miscdevice (set by the misc
 * core); we recover the enclosing gsp_context_t via container_of(), reserve a
 * gsp_user slot for current->pid, take that slot's open semaphore, and then
 * repoint file->private_data at the gsp_user so the other fops see the
 * per-process state.
 *
 * Returns GSP_NO_ERR on success, GSP_KERNEL_FULL when all slots are used,
 * GSP_KERNEL_OPEN_INTR when the semaphore wait is interrupted by a signal.
 */
82 static int32_t gsp_drv_open(struct inode *node, struct file *file)
84 int32_t ret = GSP_NO_ERR;
85 gsp_user *pUserdata = NULL;
86 struct miscdevice *miscdev = NULL;
87 gsp_context_t *gspCtx = NULL;
88 GSP_TRACE("gsp_drv_open:pid:0x%08x enter.\n",current->pid);
90 miscdev = file->private_data;
91 if (NULL == miscdev) {
/* recover the driver context that embeds the miscdevice */
94 gspCtx = container_of(miscdev, gsp_context_t , dev);
99 pUserdata = (gsp_user *)gsp_get_user(current->pid, gspCtx);
101 if (NULL == pUserdata) {
102 printk("gsp_drv_open:pid:0x%08x user cnt full.\n",current->pid);
103 ret = GSP_KERNEL_FULL;
106 GSP_TRACE("gsp_drv_open:pid:0x%08x bf wait open sema.\n",current->pid);
/* serialize opens of the same slot; interruptible so a signal can abort */
107 ret = down_interruptible(&pUserdata->sem_open);
108 if(!ret) { //ret==0, wait success
109 GSP_TRACE("gsp_drv_open:pid:0x%08x wait open sema success.\n",current->pid);
110 pUserdata->priv = (void*)gspCtx;
111 file->private_data = pUserdata;
112 } else { //ret == -EINTR
113 ret = GSP_KERNEL_OPEN_INTR;
114 printk("gsp_drv_open:pid:0x%08x wait open sema failed.\n",current->pid);
/*
 * gsp_drv_release() - misc-device release: free the caller's user slot.
 *
 * Marks the slot unused and re-arms its open semaphore. If the closing
 * process still owns the GSP hardware (it was killed mid-operation), waits
 * for the engine to go idle and releases the hardware semaphore so other
 * clients are not deadlocked.
 */
122 static int32_t gsp_drv_release(struct inode *node, struct file *file)
124 gsp_user* pUserdata = file->private_data;
126 GSP_TRACE("gsp_drv_release:pid:0x%08x.\n\n",current->pid);
127 if(pUserdata == NULL) {
128 printk("gsp_drv_release:error--pUserdata is null!, pid-0x%08x \n\n",current->pid);
131 pUserdata->pid = INVALID_USER_ID;
132 sema_init(&pUserdata->sem_open, 1);
134 //if caller thread hold gsp_hw_resource_sem,but was terminated,we release hw semaphore here
135 if(gsp_cur_client_pid == current->pid)
137 GSP_Wait_Finish();//wait busy-bit down
139 //pUserdata->own_gsp_flag = 0;
140 gsp_cur_client_pid = INVALID_USER_ID;
141 sema_init(&gsp_wait_interrupt_sem,0);
/* NOTE(review): pUserdata->pid was already reset above, so this trace logs
 * INVALID_USER_ID rather than the releasing pid. */
142 GSP_TRACE("%s:pid:0x%08x, release gsp-hw sema, L%d \n",__func__,pUserdata->pid,__LINE__);
143 up(&gsp_hw_resource_sem);
146 file->private_data = NULL;
/*
 * gsp_drv_write() - write fop repurposed as a "force exit" request.
 *
 * Any write on the fd sets is_exit_force on the caller's user slot; the
 * ioctl wait path checks this flag after its done-semaphore wait and returns
 * GSP_KERNEL_FORCE_EXIT. The payload itself is ignored.
 */
152 ssize_t gsp_drv_write(struct file *file, const char __user * u_data, size_t cnt, loff_t *cnt_ret)
154 gsp_user* pUserdata = file->private_data;
156 if(pUserdata == NULL) {
157 printk("%s:error--pUserdata is null!, pid-0x%08x \n\n",__func__,current->pid);
161 GSP_TRACE("gsp_drv_write:pid:0x%08x.\n",current->pid);
163 pUserdata->is_exit_force = 1;
/* (original design note, kept verbatim:) */
165 nomater the target thread "pUserdata->pid" wait on done-sema or hw resource sema,
166 send a signal to resume it and make it return from ioctl(),we does not up a sema to make target
167 thread can distinguish GSP done and signal interrupt.
169 //send_sig(SIGABRT, (struct task_struct *)pUserdata->pid, 0);
174 ssize_t gsp_drv_read(struct file *file, char __user *u_data, size_t cnt, loff_t *cnt_ret)
176 char rt_word[32]= {0};
177 gsp_user* pUserdata = file->private_data;
179 if(pUserdata == NULL) {
180 printk("%s:error--pUserdata is null!, pid-0x%08x \n\n",__func__,current->pid);
185 *cnt_ret += sprintf(rt_word + *cnt_ret, "gsp read %zd\n",cnt);
186 return copy_to_user(u_data, (void*)rt_word, (ulong)*cnt_ret);
/*
 * GSP_Coef_Tap_Convert() - translate filter tap counts (h_tap/v_tap) into the
 * 2-bit row/col tap-mode register encodings stored in layer0_info.
 *
 * NOTE(review): the switch/case framing selecting among these assignments is
 * not visible in this fragment; the visible mapping appears to be
 * tap-count -> {0,1,2,3} with 0 as the fallback -- confirm against full source.
 */
190 static void GSP_Coef_Tap_Convert(uint8_t h_tap,uint8_t v_tap, gsp_context_t *gspCtx)
194 gspCtx->gsp_cfg.layer0_info.row_tap_mode = 0;
198 gspCtx->gsp_cfg.layer0_info.row_tap_mode = 1;
202 gspCtx->gsp_cfg.layer0_info.row_tap_mode = 2;
206 gspCtx->gsp_cfg.layer0_info.row_tap_mode = 3;
210 gspCtx->gsp_cfg.layer0_info.row_tap_mode = 0;
216 gspCtx->gsp_cfg.layer0_info.col_tap_mode = 0;
220 gspCtx->gsp_cfg.layer0_info.col_tap_mode = 1;
224 gspCtx->gsp_cfg.layer0_info.col_tap_mode = 2;
228 gspCtx->gsp_cfg.layer0_info.col_tap_mode = 3;
232 gspCtx->gsp_cfg.layer0_info.col_tap_mode = 0;
/* keep the encodings within the 2-bit register field */
235 gspCtx->gsp_cfg.layer0_info.row_tap_mode &= 0x3;
236 gspCtx->gsp_cfg.layer0_info.col_tap_mode &= 0x3;
/*
 * GSP_Scaling_Coef_Gen_And_Config() - generate and program layer0 scaling
 * coefficients when scaling is enabled.
 *
 * Steps visible in this fragment:
 *  1. reject destination rects smaller than 4x4 (GSP_KERNEL_GEN_OUT_RANG);
 *  2. derive the post-rotation source size (90/270 swap w/h);
 *  3. reject scale-down factors above 16x; factors above 4x/8x select coarser
 *     tap counts (selection lines not all visible here);
 *  4. skip regeneration when in/out sizes and tap counts match the cached
 *     static *_last values and no force recalc is requested;
 *  5. kmalloc a scratch buffer, run GSP_Gen_Block_Ccaler_Coef(), write the
 *     coefficient matrix via GSP_Scale_Coef_Tab_Config(), update the cache;
 *  6. program the tap-mode register fields.
 *
 * Returns GSP_NO_ERR-style 0 on success or a GSP_KERNEL_GEN_* error code.
 */
240 static int32_t GSP_Scaling_Coef_Gen_And_Config(ulong* force_calc, gsp_context_t *gspCtx)
244 uint32_t *tmp_buf = NULL;
245 uint32_t *h_coeff = NULL;
246 uint32_t *v_coeff = NULL;
247 uint32_t coef_factor_w = 0;
248 uint32_t coef_factor_h = 0;
249 uint32_t after_rotate_w = 0;
250 uint32_t after_rotate_h = 0;
251 uint32_t coef_in_w = 0;
252 uint32_t coef_in_h = 0;
253 uint32_t coef_out_w = 0;
254 uint32_t coef_out_h = 0;
/* cache of the last-programmed geometry/taps; function-static so repeated
 * identical jobs skip the expensive coefficient regeneration */
255 static volatile uint32_t coef_in_w_last = 0;//if the new in w h out w h equal last params, don't need calc again
256 static volatile uint32_t coef_in_h_last = 0;
257 static volatile uint32_t coef_out_w_last = 0;
258 static volatile uint32_t coef_out_h_last = 0;
259 static volatile uint32_t coef_h_tap_last = 0;
260 static volatile uint32_t coef_v_tap_last = 0;
262 if(gspCtx->gsp_cfg.layer0_info.scaling_en == 1) {
263 if(gspCtx->gsp_cfg.layer0_info.des_rect.rect_w < 4
264 ||gspCtx->gsp_cfg.layer0_info.des_rect.rect_h < 4) {
265 return GSP_KERNEL_GEN_OUT_RANG;
/* 0/180 (and mirrors) keep the clip w/h; 90/270 (and mirrors) swap them */
268 if(gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_0
269 ||gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_180
270 ||gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_0_M
271 ||gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_180_M) {
272 after_rotate_w = gspCtx->gsp_cfg.layer0_info.clip_rect.rect_w;
273 after_rotate_h = gspCtx->gsp_cfg.layer0_info.clip_rect.rect_h;
274 } else if(gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_90
275 ||gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_270
276 ||gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_90_M
277 ||gspCtx->gsp_cfg.layer0_info.rot_angle == GSP_ROT_ANGLE_270_M) {
278 after_rotate_w = gspCtx->gsp_cfg.layer0_info.clip_rect.rect_h;
279 after_rotate_h = gspCtx->gsp_cfg.layer0_info.clip_rect.rect_w;
282 coef_factor_w = CEIL(after_rotate_w,gspCtx->gsp_cfg.layer0_info.des_rect.rect_w);
283 coef_factor_h = CEIL(after_rotate_h,gspCtx->gsp_cfg.layer0_info.des_rect.rect_h);
/* hardware limit: at most 16x down-scale in either direction */
285 if(coef_factor_w > 16 || coef_factor_h > 16) {
286 return GSP_KERNEL_GEN_OUT_RANG;
289 if(coef_factor_w > 8) {
291 } else if(coef_factor_w > 4) {
297 if(coef_factor_h > 8) {
299 } else if(coef_factor_h > 4) {
305 coef_in_w = CEIL(after_rotate_w,coef_factor_w);
306 coef_in_h = CEIL(after_rotate_h,coef_factor_h);
307 coef_out_w = gspCtx->gsp_cfg.layer0_info.des_rect.rect_w;
308 coef_out_h = gspCtx->gsp_cfg.layer0_info.des_rect.rect_h;
/* YUV video formats being scaled down get reduced taps to soften artifacts */
310 if(GSP_SRC_FMT_RGB565 < gspCtx->gsp_cfg.layer0_info.img_format
311 && gspCtx->gsp_cfg.layer0_info.img_format < GSP_SRC_FMT_8BPP
312 && (coef_in_w>coef_out_w||coef_in_h>coef_out_h)) { //video scaling down
315 if(coef_in_w*3 <= coef_in_h*2) { // height is larger than 1.5*width
318 if(coef_in_h*3 <= coef_in_w*2) { // width is larger than 1.5*height
321 //GSP_TRACE("GSP, for video scaling down, we change tap to 2.\n");
324 //give hal a chance to set tap number
325 if((gspCtx->gsp_cfg.layer0_info.row_tap_mode>0) || (gspCtx->gsp_cfg.layer0_info.col_tap_mode>0)) {
326 //GSP_TRACE("GSP, hwc set tap: %dx%d-> ",h_tap,v_tap);
327 h_tap = (gspCtx->gsp_cfg.layer0_info.row_tap_mode>0)?gspCtx->gsp_cfg.layer0_info.row_tap_mode:h_tap;
328 v_tap = (gspCtx->gsp_cfg.layer0_info.col_tap_mode>0)?gspCtx->gsp_cfg.layer0_info.col_tap_mode:v_tap;
329 //GSP_TRACE("%dx%d\n",h_tap,v_tap);
/* regenerate only when geometry or taps changed (or a recalc is forced) */
333 ||coef_in_w_last != coef_in_w
334 || coef_in_h_last != coef_in_h
335 || coef_out_w_last != coef_out_w
336 || coef_out_h_last != coef_out_h
337 || coef_h_tap_last != h_tap
338 || coef_v_tap_last != v_tap) {
339 tmp_buf = (uint32_t *)kmalloc(GSP_COEFF_BUF_SIZE, GFP_KERNEL);
340 if (NULL == tmp_buf) {
341 printk("SCALE DRV: No mem to alloc coeff buffer! \n");
342 return GSP_KERNEL_GEN_ALLOC_ERR;
345 v_coeff = tmp_buf + (GSP_COEFF_COEF_SIZE/4);
347 if (!(GSP_Gen_Block_Ccaler_Coef(coef_in_w,
355 tmp_buf + (GSP_COEFF_COEF_SIZE/2),
356 GSP_COEFF_POOL_SIZE, gspCtx))) {
358 printk("GSP DRV: GSP_Gen_Block_Ccaler_Coef error! \n");
/* NOTE(review): this error path appears to return without kfree(tmp_buf);
 * if no kfree exists in the lines missing from this fragment, the scratch
 * buffer leaks on every coefficient-generation failure -- confirm. */
359 return GSP_KERNEL_GEN_COMMON_ERR;
361 GSP_Scale_Coef_Tab_Config(h_coeff,v_coeff);//write coef-metrix to register
362 coef_in_w_last = coef_in_w;
363 coef_in_h_last = coef_in_h;
364 coef_out_w_last = coef_out_w;
365 coef_out_h_last = coef_out_h;
366 coef_h_tap_last = h_tap;
367 coef_v_tap_last = v_tap;
371 GSP_Coef_Tap_Convert(coef_h_tap_last,coef_v_tap_last, gspCtx);
372 GSP_L0_SCALETAPMODE_SET(gspCtx->gsp_cfg.layer0_info.row_tap_mode,gspCtx->gsp_cfg.layer0_info.col_tap_mode);
373 GSP_TRACE("GSP DRV: GSP_Gen_Block_Ccaler_Coef, register: r_tap%d,c_tap %d \n",
374 ((volatile GSP_REG_T*)GSP_REG_BASE)->gsp_layer0_cfg_u.mBits.row_tap_mod,
375 ((volatile GSP_REG_T*)GSP_REG_BASE)->gsp_layer0_cfg_u.mBits.col_tap_mod);//
383 desc:config info to register
/*
 * GSP_Info_Config() - push the validated gsp_cfg into hardware, layer by
 * layer, then return the accumulated hardware error code (0 == success).
 * NOTE(review): the GSP_MODULE_ID_MAX call between LAYER1 and DST looks like
 * it configures a "common/misc" stage despite the _MAX name -- verify against
 * GSP_ConfigLayer()'s switch in gsp_config_if.
 */
385 static uint32_t GSP_Info_Config(gsp_context_t *gspCtx)
387 GSP_ConfigLayer(GSP_MODULE_LAYER0,gspCtx);
388 GSP_ConfigLayer(GSP_MODULE_LAYER1,gspCtx);
389 GSP_ConfigLayer(GSP_MODULE_ID_MAX,gspCtx);
390 GSP_ConfigLayer(GSP_MODULE_DST,gspCtx);
391 return GSP_ERRCODE_GET();
/* GSP_Cache_Flush() / GSP_Cache_Invalidate() - cache maintenance hooks around
 * a blit. Their bodies are empty (or not visible in this fragment); kept as
 * placeholders for platforms that need explicit cache ops. */
398 static void GSP_Cache_Flush(void)
404 func:GSP_Cache_Invalidate
407 static void GSP_Cache_Invalidate(void)
413 func:GSP_Release_HWSema
/*
 * GSP_Release_HWSema() - reclaim the hardware semaphore from a client that
 * died while owning the GSP (called from the ioctl path when the recorded
 * owner pid no longer maps to a live task). Frees the dead client's user
 * slot, waits for the engine to idle, clears the owner pid, drains the
 * done-semaphore and releases the hw-resource semaphore.
 *
 * NOTE(review): pTempUserdata is dereferenced without a visible NULL check;
 * gsp_get_user() can return NULL when all slots are busy -- confirm the
 * missing lines guard this.
 */
416 static void GSP_Release_HWSema(gsp_context_t *gspCtx)
418 gsp_user *pTempUserdata = NULL;
420 GSP_TRACE("%s:pid:0x%08x, was killed without release GSP hw semaphore, L%d \n",__func__,gspCtx->gsp_cur_client_pid,__LINE__);
422 pTempUserdata = (gsp_user *)gsp_get_user(gspCtx->gsp_cur_client_pid, gspCtx);
423 pTempUserdata->pid = INVALID_USER_ID;
424 sema_init(&pTempUserdata->sem_open, 1);
426 GSP_Wait_Finish();//wait busy-bit down
428 gspCtx->gsp_cur_client_pid = INVALID_USER_ID;
429 sema_init(&gspCtx->gsp_wait_interrupt_sem,0);
430 up(&gspCtx->gsp_hw_resource_sem);
/*
 * GSP_Map() - resolve ION share_fds into device-visible Y/UV/V addresses for
 * layer0, layer1 and the destination layer.
 *
 * For each layer whose addr_y is still 0 but which carries a share_fd, asks
 * the ION driver (sprd_ion_get_gsp_addr) for either an IOMMU virtual address
 * (iova_enabled) or a physical address, then derives addr_uv/addr_v from the
 * caller-supplied uv_offset/v_offset. Layers with addr_y already set, or no
 * share_fd, are left untouched.
 */
433 static int GSP_Map(gsp_context_t *gspCtx)
435 struct ion_addr_data data;
/* layer0 */
437 if(gspCtx->gsp_cfg.layer0_info.src_addr.addr_y == 0
438 &&gspCtx->gsp_cfg.layer0_info.mem_info.share_fd) {
439 data.fd_buffer = gspCtx->gsp_cfg.layer0_info.mem_info.share_fd;
440 if(sprd_ion_get_gsp_addr(&data)) {
441 printk("%s, L%d, error!\n",__func__,__LINE__);
444 if(data.iova_enabled)
445 gspCtx->gsp_cfg.layer0_info.src_addr.addr_y = data.iova_addr;
447 gspCtx->gsp_cfg.layer0_info.src_addr.addr_y = data.phys_addr;
448 gspCtx->gsp_cfg.layer0_info.src_addr.addr_uv = gspCtx->gsp_cfg.layer0_info.src_addr.addr_y + gspCtx->gsp_cfg.layer0_info.mem_info.uv_offset;
449 gspCtx->gsp_cfg.layer0_info.src_addr.addr_v = gspCtx->gsp_cfg.layer0_info.src_addr.addr_y + gspCtx->gsp_cfg.layer0_info.mem_info.v_offset;
/* layer1 -- same resolution logic as layer0 */
452 if(gspCtx->gsp_cfg.layer1_info.src_addr.addr_y == 0
453 &&gspCtx->gsp_cfg.layer1_info.mem_info.share_fd) {
454 data.fd_buffer = gspCtx->gsp_cfg.layer1_info.mem_info.share_fd;
455 if(sprd_ion_get_gsp_addr(&data)) {
456 printk("%s, L%d, error!\n",__func__,__LINE__);
459 if(data.iova_enabled)
460 gspCtx->gsp_cfg.layer1_info.src_addr.addr_y = data.iova_addr;
462 gspCtx->gsp_cfg.layer1_info.src_addr.addr_y = data.phys_addr;
463 gspCtx->gsp_cfg.layer1_info.src_addr.addr_uv = gspCtx->gsp_cfg.layer1_info.src_addr.addr_y + gspCtx->gsp_cfg.layer1_info.mem_info.uv_offset;
464 gspCtx->gsp_cfg.layer1_info.src_addr.addr_v = gspCtx->gsp_cfg.layer1_info.src_addr.addr_y + gspCtx->gsp_cfg.layer1_info.mem_info.v_offset;
/* destination layer -- same resolution logic */
468 if(gspCtx->gsp_cfg.layer_des_info.src_addr.addr_y == 0
469 &&gspCtx->gsp_cfg.layer_des_info.mem_info.share_fd) {
470 data.fd_buffer = gspCtx->gsp_cfg.layer_des_info.mem_info.share_fd;
471 if(sprd_ion_get_gsp_addr(&data)) {
472 printk("%s, L%d, error!\n",__func__,__LINE__);
475 if(data.iova_enabled)
476 gspCtx->gsp_cfg.layer_des_info.src_addr.addr_y = data.iova_addr;
478 gspCtx->gsp_cfg.layer_des_info.src_addr.addr_y = data.phys_addr;
479 gspCtx->gsp_cfg.layer_des_info.src_addr.addr_uv = gspCtx->gsp_cfg.layer_des_info.src_addr.addr_y + gspCtx->gsp_cfg.layer_des_info.mem_info.uv_offset;
480 gspCtx->gsp_cfg.layer_des_info.src_addr.addr_v = gspCtx->gsp_cfg.layer_des_info.src_addr.addr_y + gspCtx->gsp_cfg.layer_des_info.mem_info.v_offset;
/*
 * GSP_Unmap() - undo GSP_Map(): release the ION/IOMMU mappings taken for any
 * layer that carried a share_fd. Safe to call after every job.
 */
487 static int GSP_Unmap(gsp_context_t *gspCtx)
489 if(gspCtx->gsp_cfg.layer0_info.mem_info.share_fd)
490 sprd_ion_free_gsp_addr(gspCtx->gsp_cfg.layer0_info.mem_info.share_fd);
492 if(gspCtx->gsp_cfg.layer1_info.mem_info.share_fd)
493 sprd_ion_free_gsp_addr(gspCtx->gsp_cfg.layer1_info.mem_info.share_fd);
495 if(gspCtx->gsp_cfg.layer_des_info.mem_info.share_fd)
496 sprd_ion_free_gsp_addr(gspCtx->gsp_cfg.layer_des_info.mem_info.share_fd);
/*
 * sci_get_chip_id() - weak fallback used when the SoC support code does not
 * provide one: reads the chip-id register at SPRD_AONAPB_BASE+0xFC directly.
 */
501 uint32_t __attribute__((weak)) sci_get_chip_id(void)
503 printk("GSP local read chip id, *(%p) == %x \n",(void*)(SPRD_AONAPB_BASE+0xFC),GSP_REG_READ(SPRD_AONAPB_BASE+0xFC));
504 return GSP_REG_READ(SPRD_AONAPB_BASE+0xFC);
/*
 * gsp_get_chip_id() - return the A-die chip id, retrying with a direct
 * register read when sci_get_chip_id() yields an implausible value (high
 * halfword below 0x5000). The value gates address-type and capability
 * selection elsewhere in this file.
 */
507 uint32_t gsp_get_chip_id(void)
509 uint32_t adie_chip_id = sci_get_chip_id();
510 if((adie_chip_id & 0xffff0000) < 0x50000000) {
511 printk("%s[%d]:warning, chip id 0x%08x is invalidate, try to get it by reading reg directly!\n", __func__, __LINE__ , adie_chip_id);
512 adie_chip_id = GSP_REG_READ(SPRD_AONAPB_BASE+0xFC);
513 if((adie_chip_id & 0xffff0000) < 0x50000000) {
514 printk("%s[%d]:warning, chip id 0x%08x from reg is invalidate too!\n", __func__, __LINE__ , adie_chip_id);
517 printk("%s[%d] return chip id 0x%08x \n", __func__, __LINE__, adie_chip_id);
/*
 * GSP_Get_Addr_Type() - decide (once, cached in a function-static) whether
 * userspace must hand the GSP physical or IOMMU-virtual buffer addresses.
 *
 * Without CONFIG_SPRD_IOMMU the answer is always physical. With IOMMU, the
 * chip id is checked against a list of parts whose IOMMU control registers
 * are broken: those fall back to physical (or IOVIRTUAL under the
 * GSP_IOMMU_WORKAROUND1 build); everything else uses IOVIRTUAL.
 */
521 static GSP_ADDR_TYPE_E GSP_Get_Addr_Type(void)
523 static volatile GSP_ADDR_TYPE_E s_gsp_addr_type = GSP_ADDR_TYPE_INVALUE;
524 static uint32_t s_iommuCtlBugChipList[]= {0x7715a000,0x7715a001,0x8815a000}; //bug chip list
529 #ifndef CONFIG_SPRD_IOMMU // shark or (dolphin/tshark not define IOMMU)
530 s_gsp_addr_type = GSP_ADDR_TYPE_PHYSICAL;
531 #else // (dolphin/tshark defined IOMMU)
532 if(s_gsp_addr_type == GSP_ADDR_TYPE_INVALUE) {
533 uint32_t adie_chip_id = 0;
535 /*set s_gsp_addr_type according to the chip id*/
536 //adie_chip_id = sci_get_ana_chip_id();
537 //printk("GSPa : get chip id :0x%08x \n", adie_chip_id);
538 adie_chip_id = gsp_get_chip_id();
540 if((adie_chip_id & 0xffff0000) > 0x50000000) {
541 printk("GSP : get chip id :%08x is validate, scan bugchip list.\n", adie_chip_id);
542 for (i=0; i<ARRAY_SIZE(s_iommuCtlBugChipList); i++) {
543 if(s_iommuCtlBugChipList[i] == adie_chip_id) {
544 printk("GSP : match bug chip id :%08x == [%d]\n", adie_chip_id,i);
545 #ifdef GSP_IOMMU_WORKAROUND1
546 s_gsp_addr_type = GSP_ADDR_TYPE_IOVIRTUAL;
548 s_gsp_addr_type = GSP_ADDR_TYPE_PHYSICAL;
/* chip id valid but not on the bug list: normal IOMMU operation */
553 if(s_gsp_addr_type == GSP_ADDR_TYPE_INVALUE) {
554 printk("GSP : mismatch bug chip id.\n");
555 s_gsp_addr_type = GSP_ADDR_TYPE_IOVIRTUAL;
556 printk("dolphin tshark GSP : gsp address type :%d \n", s_gsp_addr_type);
559 printk("GSP : get chip id :%08x is invalidate,set address type as physical.\n", adie_chip_id);
560 s_gsp_addr_type = GSP_ADDR_TYPE_PHYSICAL;
564 printk("GSP [%d]: gsp address type :%d ,\n", __LINE__, s_gsp_addr_type);
565 return s_gsp_addr_type;
/*
 * GSP_Config_Capability() - build (once) and return the static capability
 * descriptor reported to userspace via GSP_IO_GET_CAPABILITY.
 *
 * Baseline capabilities are set first, then refined per chip family (high
 * halfword of the chip id) and per stepping: version code, whether video
 * must be copied, layer-count limits and the max up-scale ratio. The magic
 * field marks the struct initialized so later calls return the cached copy.
 * NOTE(review): the function-static initialization is not visibly locked;
 * concurrent first callers could race -- confirm serialization at call sites.
 */
571 static GSP_CAPABILITY_T* GSP_Config_Capability(void)
573 uint32_t adie_chip_id = 0;
574 static GSP_CAPABILITY_T s_gsp_capability;
576 if(s_gsp_capability.magic != CAPABILITY_MAGIC_NUMBER) { // not initialized
577 memset((void*)&s_gsp_capability,0,sizeof(s_gsp_capability));
/* conservative defaults, overridden per chip below */
578 s_gsp_capability.max_layer_cnt = 1;
579 s_gsp_capability.blend_video_with_OSD=0;
580 s_gsp_capability.max_videoLayer_cnt = 1;
581 s_gsp_capability.max_layer_cnt_with_video = 1;
582 s_gsp_capability.scale_range_up=64;
583 s_gsp_capability.scale_range_down=1;
584 s_gsp_capability.scale_updown_sametime=0;
585 s_gsp_capability.OSD_scaling=0;
587 adie_chip_id = gsp_get_chip_id();
588 switch(adie_chip_id&0xFFFF0000) {
589 case 0x83000000:/*shark-0x8300a001 & 9620*/
590 s_gsp_capability.version = 0x00;
591 s_gsp_capability.scale_range_up=256;
/* dolphin family: stepping decides iommu-ctl / black-line workarounds */
595 if(adie_chip_id == 0x7715a000
596 ||adie_chip_id == 0x7715a001
597 ||adie_chip_id == 0x8815a000) { /*dolphin iommu ctl reg access err*/
598 s_gsp_capability.version = 0x01;
599 s_gsp_capability.video_need_copy = 1;
600 s_gsp_capability.max_video_size = 1;
601 } else if(adie_chip_id == 0x7715a002
602 ||adie_chip_id == 0x7715a003
603 ||adie_chip_id == 0x8815a001
604 ||adie_chip_id == 0x8815a002) { /*dolphin iommu ctl reg access ok, but with black line bug*/
605 s_gsp_capability.version = 0x02;
606 s_gsp_capability.video_need_copy = 1;
607 s_gsp_capability.max_video_size = 1;
608 } else { /*adie_chip_id > 0x7715a003 || adie_chip_id > 0x8815a002, dolphin black line bug fixed*/
609 s_gsp_capability.version = 0x03;
610 s_gsp_capability.max_video_size = 1;
611 s_gsp_capability.scale_range_up=256;
612 printk("%s[%d]: info:a new chip id, treated as newest dolphin that without any bugs!\n",__func__,__LINE__);
/* tshark family */
616 if(adie_chip_id == 0x8730b000) { /*tshark, with black line bug*/
617 s_gsp_capability.version = 0x04;
618 s_gsp_capability.video_need_copy = 1;
619 } else { /*tshark-0x8730b001 & tshark2-? & pike-?, black line bug fixed*/
620 s_gsp_capability.version = 0x05;
621 s_gsp_capability.max_layer_cnt = 2;
622 s_gsp_capability.scale_range_up=256;
625 #ifdef CONFIG_ARCH_SCX20
626 case 0x88600000:/*pike, same with tshark*/
627 s_gsp_capability.version = 0x05;
628 s_gsp_capability.max_layer_cnt = 2;
629 s_gsp_capability.scale_range_up=256;
/* sharkL and anything newer: full feature set */
633 if(adie_chip_id != 0x96300000/*SharkL, with YCbCr->RGB888*/
634 && adie_chip_id != 0x96310000/*SharkL64*/) {
637 after sharkL, gsp will not update any more,so these late-comers are same with sharkL.
639 printk("%s[%d]: info:a new chip id, be treated as sharkL!\n",__func__,__LINE__);
641 s_gsp_capability.version = 0x06;
642 s_gsp_capability.blend_video_with_OSD=1;
643 s_gsp_capability.max_layer_cnt_with_video = 3;
644 s_gsp_capability.max_layer_cnt = 2;
645 s_gsp_capability.scale_range_up=256;
/* family-independent limits */
649 s_gsp_capability.buf_type_support=GSP_Get_Addr_Type();
651 s_gsp_capability.yuv_xywh_even = 1;
652 s_gsp_capability.crop_min.w=s_gsp_capability.crop_min.h=4;
653 s_gsp_capability.out_min.w=s_gsp_capability.out_min.h=4;
654 s_gsp_capability.crop_max.w=s_gsp_capability.crop_max.h=4095;
655 s_gsp_capability.out_max.w=s_gsp_capability.out_max.h=4095;
656 s_gsp_capability.magic = CAPABILITY_MAGIC_NUMBER;
658 return &s_gsp_capability;
/*
 * gsp_drv_ioctl() - main userspace entry point.
 *
 * Commands visible in this fragment:
 *  GSP_IO_GET_CAPABILITY: copy the cached GSP_CAPABILITY_T to userspace.
 *  GSP_IO_SET_PARAM: reclaim the hw semaphore from a dead owner if needed,
 *    acquire it, copy the job config from userspace, resolve ION addresses
 *    (GSP_Map), program the hardware (GSP_Info_Config), generate scaling
 *    coefficients, trigger the blit, then wait (500 ms timeout) on the
 *    done-semaphore raised by gsp_irq_handler before releasing the hw.
 *
 * Returns GSP_KERNEL_* codes (positive driver-defined errors) or 0.
 */
660 static long gsp_drv_ioctl(struct file *file,
664 int32_t ret = -GSP_NO_ERR;
665 size_t param_size = _IOC_SIZE(cmd);
666 gsp_user* pUserdata = file->private_data;
667 gsp_context_t *gspCtx = NULL;
668 struct timespec start_time;
669 struct timespec end_time;
671 memset(&start_time,0,sizeof(start_time));
672 memset(&end_time,0,sizeof(end_time));
674 GSP_TRACE("%s:pid:0x%08x,cmd:0x%08x, io number 0x%x, param_size %zu \n",
681 if (NULL == pUserdata || NULL == pUserdata->priv) {
684 gspCtx = (gsp_context_t*)pUserdata->priv;
/* reject work while suspended; resume clears the flag */
685 if(gspCtx->suspend_resume_flag==1) {
686 printk("%s[%d]: in suspend, ioctl just return!\n",__func__,__LINE__);
691 case GSP_IO_GET_CAPABILITY:
692 if (param_size>=sizeof(GSP_CAPABILITY_T)) {
693 GSP_CAPABILITY_T *cap=GSP_Config_Capability();
694 if(arg & MEM_OPS_ADDR_ALIGN_MASK || (ulong)cap & MEM_OPS_ADDR_ALIGN_MASK) {
695 GSP_TRACE("%s[%d] copy_to_user use none 8B alignment address!",__func__,__LINE__);
697 ret=copy_to_user((void __user *)arg,(const void*)cap,sizeof(GSP_CAPABILITY_T));
699 printk("%s[%d] err:get gsp capability failed in copy_to_user !\n",__func__,__LINE__);
700 ret = GSP_KERNEL_COPY_ERR;
703 GSP_TRACE("%s[%d]: get gsp capability success in copy_to_user \n",__func__,__LINE__);
705 printk("%s[%d] err:get gsp capability, buffer is too small,come:%zu,need:%zu!",__func__, __LINE__,
706 param_size,sizeof(GSP_CAPABILITY_T));
707 ret = GSP_KERNEL_COPY_ERR;
712 case GSP_IO_SET_PARAM: {
714 GSP_TRACE("%s:pid:0x%08x, bf wait gsp-hw sema, L%d \n",__func__,pUserdata->pid,__LINE__);
716 // the caller thread was killed without release GSP hw semaphore
/* if a recorded owner exists, verify its task is still alive; otherwise
 * force-release the hw semaphore it was holding */
717 if( gspCtx->gsp_cur_client_pid != INVALID_USER_ID) {
718 struct pid * __pid = NULL;
719 struct task_struct *__task = NULL;
720 pid_t temp_pid = INVALID_USER_ID;
722 GSP_TRACE("%sL%d current:%08x store_pid:0x%08x, \n",__func__,__LINE__,current->pid, gspCtx->gsp_cur_client_pid);
724 temp_pid = gspCtx->gsp_cur_client_pid;
725 __pid = find_get_pid(temp_pid);
727 __task = get_pid_task(__pid,PIDTYPE_PID);
731 if(__task->pid != gspCtx->gsp_cur_client_pid) {
732 GSP_Release_HWSema(gspCtx);
735 GSP_Release_HWSema(gspCtx);
738 GSP_Release_HWSema(gspCtx);
/* serialize hardware ownership; a signal aborts the wait */
744 ret = down_interruptible(&gspCtx->gsp_hw_resource_sem);
746 printk("%s:pid:0x%08x, wait gsp-hw sema interrupt by signal,return, L%d \n",__func__,pUserdata->pid,__LINE__);
748 ret = GSP_KERNEL_CFG_INTR;
751 GSP_TRACE("%s:pid:0x%08x, wait gsp-hw sema success, L%d \n",__func__,pUserdata->pid,__LINE__);
752 gspCtx->gsp_cur_client_pid = pUserdata->pid;
753 if(arg & MEM_OPS_ADDR_ALIGN_MASK || (ulong)&gspCtx->gsp_cfg & MEM_OPS_ADDR_ALIGN_MASK) {
754 GSP_TRACE("%s[%d] copy_from_user use none 8B alignment address!",__func__,__LINE__);
756 ret=copy_from_user((void*)&gspCtx->gsp_cfg, (void*)arg, param_size);
758 printk("%s:pid:0x%08x, copy_params_from_user failed! \n",__func__,pUserdata->pid);
759 ret = GSP_KERNEL_COPY_ERR;
762 GSP_TRACE("%s:pid:0x%08x, copy_params_from_user success!, L%d \n",__func__,pUserdata->pid,__LINE__);
763 ret = GSP_Init(gspCtx);
767 // if the y u v address is virtual, should be converted to phy address here!!!
/* derive scaling_en: any clip/dest size mismatch (w/h swapped for 90/270)
 * forces the scaler on */
768 if(gspCtx->gsp_cfg.layer0_info.layer_en == 1) {
769 if(gspCtx->gsp_cfg.layer0_info.rot_angle & 0x1) { //90 270
770 if((gspCtx->gsp_cfg.layer0_info.clip_rect.rect_w != gspCtx->gsp_cfg.layer0_info.des_rect.rect_h) ||
771 (gspCtx->gsp_cfg.layer0_info.clip_rect.rect_h != gspCtx->gsp_cfg.layer0_info.des_rect.rect_w)) {
772 gspCtx->gsp_cfg.layer0_info.scaling_en = 1;
775 if((gspCtx->gsp_cfg.layer0_info.clip_rect.rect_w != gspCtx->gsp_cfg.layer0_info.des_rect.rect_w) ||
776 (gspCtx->gsp_cfg.layer0_info.clip_rect.rect_h != gspCtx->gsp_cfg.layer0_info.des_rect.rect_h)) {
777 gspCtx->gsp_cfg.layer0_info.scaling_en = 1;
782 if(GSP_Map(gspCtx)) {
783 ret = GSP_KERNEL_ADDR_MAP_ERR;
787 gspCtx->gsp_cfg.misc_info.gsp_clock = GSP_CLOCK;
788 if(GSP_GAP & 0x100) {
789 gspCtx->gsp_cfg.misc_info.gsp_gap = (GSP_GAP & 0xff);
791 ret = GSP_Info_Config(gspCtx);
792 GSP_TRACE("%s:pid:0x%08x, config hw %s!, L%d \n",__func__,pUserdata->pid,(ret>0)?"failed":"success",__LINE__);
794 printk("%s%d:pid:0x%08x, gsp config err:%d, release hw sema.\n",__func__,__LINE__,pUserdata->pid,ret);
800 GSP_TRACE("%s:pid:0x%08x, in trigger to run , L%d \n",__func__,pUserdata->pid,__LINE__);
801 if(gspCtx->gsp_cur_client_pid == pUserdata->pid) {
802 GSP_TRACE("%s:pid:0x%08x, calc coef and trigger to run , L%d \n",__func__,pUserdata->pid,__LINE__);
804 ret = GSP_Scaling_Coef_Gen_And_Config(&gspCtx->gsp_coef_force_calc, gspCtx);
810 GSP_TRACE("%sL%d:pid:0x%08x, trigger %s!\n",__func__,__LINE__,pUserdata->pid,(ret)?"failed":"success");
812 printk("%s%d:pid:0x%08x, trigger failed!! err_code:%d \n",__func__,__LINE__,pUserdata->pid,ret);
813 printCfgInfo(gspCtx);
/* snapshot the register file into the error-record ring for post-mortem */
815 ERR_RECORD_ADD(*(GSP_REG_T *)GSP_REG_BASE);
816 ERR_RECORD_INDEX_ADD_WP();
817 if (ERR_RECORD_FULL()) {
818 ERR_RECORD_INDEX_ADD_RP();
820 GSP_TRACE("%s:pid:0x%08x, release hw sema, L%d \n",__func__,pUserdata->pid,__LINE__);
824 GSP_TRACE("%s:pid:0x%08x,exit L%d \n",__func__,pUserdata->pid,__LINE__);
825 ret = GSP_KERNEL_CALLER_NOT_OWN_HW;
/* wait for completion: gsp_irq_handler ups gsp_wait_interrupt_sem */
829 if(gspCtx->gsp_cur_client_pid == pUserdata->pid) {
830 GSP_TRACE("%s:pid:0x%08x, bf wait done sema, L%d \n",__func__,pUserdata->pid,__LINE__);
831 //ret = down_interruptible(&gsp_wait_interrupt_sem);//interrupt lose
832 ret = down_timeout(&gspCtx->gsp_wait_interrupt_sem,msecs_to_jiffies(500));//for interrupt lose, timeout return -ETIME,
833 if (ret == 0) { //gsp process over
834 GSP_TRACE("%s:pid:0x%08x, wait done sema success, L%d \n",__func__,pUserdata->pid,__LINE__);
835 } else if (ret == -ETIME) {
836 printk("%s%d:pid:0x%08x, wait done sema 500-ms-timeout,it's abnormal!!!!!!!! \n",__func__,__LINE__,pUserdata->pid);
837 GPSTimeoutPrint(gspCtx);
838 ret = GSP_KERNEL_WAITDONE_TIMEOUT;
839 } else if (ret) { // == -EINTR
840 printk("%s:pid:0x%08x, wait done sema interrupted by a signal, L%d \n",__func__,pUserdata->pid,__LINE__);
841 ret = GSP_KERNEL_WAITDONE_INTR;
/* write() set this flag to abort the caller */
844 if (pUserdata->is_exit_force) {
845 pUserdata->is_exit_force = 0;
846 ret = GSP_KERNEL_FORCE_EXIT;
849 GSP_Wait_Finish();//wait busy-bit down
851 GSP_Cache_Invalidate();
853 gspCtx->gsp_cur_client_pid = INVALID_USER_ID;
854 sema_init(&gspCtx->gsp_wait_interrupt_sem,0);
855 GSP_TRACE("%s:pid:0x%08x, release gsp-hw sema, L%d \n",__func__,pUserdata->pid,__LINE__);
856 up(&gspCtx->gsp_hw_resource_sem);
862 ret = GSP_KERNEL_CTL_CMD_ERR;
/* common error exit: drop ownership and release the hw semaphore */
873 gspCtx->gsp_cur_client_pid = INVALID_USER_ID;
874 up(&gspCtx->gsp_hw_resource_sem);
877 printk("%s:pid:0x%08x, error code 0x%x \n", __func__,pUserdata->pid,ret);
/* File operations for the /dev/sprd_gsp misc device. compat_ioctl reuses the
 * native handler (guarded by a CONFIG_COMPAT-style #ifdef not visible here). */
883 static struct file_operations gsp_drv_fops = {
884 .owner = THIS_MODULE,
885 .open = gsp_drv_open,
886 .write = gsp_drv_write,
887 .read = gsp_drv_read,
888 .unlocked_ioctl = gsp_drv_ioctl,
890 .compat_ioctl = gsp_drv_ioctl,
892 .release = gsp_drv_release
/*
 * gsp_irq_handler() - GSP completion interrupt: ack and mask the IRQ, then
 * wake the ioctl thread blocked on gsp_wait_interrupt_sem. dev_id is the
 * gsp_context_t registered in probe.
 */
895 static irqreturn_t gsp_irq_handler(int32_t irq, void *dev_id)
897 gsp_context_t *gspCtx = (gsp_context_t *)dev_id;
899 GSP_TRACE("%s enter!\n",__func__);
901 if (NULL == gspCtx) {
904 GSP_IRQSTATUS_CLEAR();
905 GSP_IRQENABLE_SET(GSP_IRQ_TYPE_DISABLE);
906 up(&gspCtx->gsp_wait_interrupt_sem);
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * gsp_early_suspend() - Android early-suspend hook: mark the driver
 * suspended (ioctl rejects new work), then wait up to 500 ms for any
 * in-flight job to release the hw semaphore before (conceptually) gating
 * the module.
 */
913 static void gsp_early_suspend(struct early_suspend* es)
915 gsp_context_t *gspCtx = NULL;
916 int32_t ret = -GSP_NO_ERR;
918 printk("%s%d\n",__func__,__LINE__);
920 gspCtx = container_of(es, gsp_context_t, earlysuspend);
921 if (NULL == gspCtx) {
925 gspCtx->suspend_resume_flag = 1;
927 //in case of GSP is processing now, wait it finish and then disable
928 ret = down_timeout(&gspCtx->gsp_hw_resource_sem,msecs_to_jiffies(500));
930 printk("%s[%d]: wait gsp-hw sema failed, ret: %d\n",__func__,__LINE__,ret);
931 GPSTimeoutPrint(gspCtx);
933 printk("%s[%d]: wait gsp-hw sema success. \n",__func__,__LINE__);
934 up(&gspCtx->gsp_hw_resource_sem);
935 //GSP_module_disable(gspCtx);
/*
 * gsp_late_resume() - counterpart of the above: force a coefficient recalc
 * (register state was lost), re-enable auto clock gating and clear the
 * suspend flag so ioctl accepts work again.
 */
939 static void gsp_late_resume(struct early_suspend* es)
941 gsp_context_t *gspCtx = NULL;
943 printk("%s%d\n",__func__,__LINE__);
945 gspCtx = container_of(es, gsp_context_t, earlysuspend);
946 if (NULL == gspCtx) {
950 gspCtx->gsp_coef_force_calc = 1;
951 //GSP_module_enable(gspCtx);//
953 GSP_AUTO_GATE_ENABLE();//bug 198152
954 gspCtx->suspend_resume_flag = 0;
/*
 * gsp_suspend() - platform PM suspend (used when CONFIG_HAS_EARLYSUSPEND is
 * off): same logic as gsp_early_suspend but driven by the platform bus.
 */
958 static int gsp_suspend(struct platform_device *pdev,pm_message_t state)
960 gsp_context_t *gspCtx = NULL;
961 int32_t ret = -GSP_NO_ERR;
962 printk("%s%d\n",__func__,__LINE__);
964 gspCtx = platform_get_drvdata(pdev);
965 if (NULL == gspCtx) {
969 gspCtx->suspend_resume_flag = 1;
971 //in case of GSP is processing now, wait it finish and then disable
972 ret = down_timeout(&gspCtx->gsp_hw_resource_sem,msecs_to_jiffies(500));
974 printk("%s[%d]: wait gsp-hw sema failed, ret: %d\n",__func__,__LINE__,ret);
975 GPSTimeoutPrint(gspCtx);
978 printk("%s[%d]: wait gsp-hw sema success. \n",__func__,__LINE__);
979 up(&gspCtx->gsp_hw_resource_sem);
980 //GSP_module_disable(gspCtx);
/*
 * gsp_resume() - platform PM resume: force coefficient recalculation,
 * re-enable auto clock gating, and allow ioctl traffic again.
 */
985 static int gsp_resume(struct platform_device *pdev)
987 gsp_context_t *gspCtx = NULL;
989 gspCtx = platform_get_drvdata(pdev);
990 if (NULL == gspCtx) {
994 printk("%s%d\n",__func__,__LINE__);
995 gspCtx->gsp_coef_force_calc = 1;
996 //GSP_module_enable(gspCtx);//
998 GSP_AUTO_GATE_ENABLE();//bug 198152
999 gspCtx->suspend_resume_flag = 0;
/*
 * gsp_clock_init() - look up the EMC and GSP clocks (via the device-tree node
 * when available, falling back to global clk_get) and reparent each to its
 * configured parent. Stores the clk handles in gspCtx for later enable/
 * disable. Returns 0 on success, negative on any lookup/reparent failure.
 */
1006 static int32_t gsp_clock_init(gsp_context_t *gspCtx)
1008 struct clk *emc_clk_parent = NULL;
1009 struct clk *gsp_clk_parent = NULL;
/* EMC clock parent */
1013 emc_clk_parent = of_clk_get_by_name(s_gsp_of_dev->of_node, GSP_EMC_CLOCK_PARENT_NAME);
1015 emc_clk_parent = clk_get(NULL, GSP_EMC_CLOCK_PARENT_NAME);
1017 if (IS_ERR(emc_clk_parent)) {
1018 printk(KERN_ERR "gsp: get emc clk_parent failed!\n");
1021 printk(KERN_INFO "gsp: get emc clk_parent ok!\n");//pr_debug
/* EMC clock */
1025 gspCtx->gsp_emc_clk = of_clk_get_by_name(s_gsp_of_dev->of_node, GSP_EMC_CLOCK_NAME);
1027 gspCtx->gsp_emc_clk = clk_get(NULL, GSP_EMC_CLOCK_NAME);
1029 if (IS_ERR(gspCtx->gsp_emc_clk)) {
1030 printk(KERN_ERR "gsp: get emc clk failed!\n");
1033 printk(KERN_INFO "gsp: get emc clk ok!\n");//pr_debug
1036 ret = clk_set_parent(gspCtx->gsp_emc_clk, emc_clk_parent);
1038 printk(KERN_ERR "gsp: gsp set emc clk parent failed!\n");
1041 printk(KERN_INFO "gsp: gsp set emc clk parent ok!\n");//pr_debug
/* GSP functional clock parent */
1045 gsp_clk_parent = of_clk_get_by_name(s_gsp_of_dev->of_node, GSP_CLOCK_PARENT3);
1047 gsp_clk_parent = clk_get(NULL, GSP_CLOCK_PARENT3);
1049 if (IS_ERR(gsp_clk_parent)) {
1050 printk(KERN_ERR "gsp: get clk_parent failed!\n");
1053 printk(KERN_INFO "gsp: get clk_parent ok!\n");
/* GSP functional clock */
1057 gspCtx->gsp_clk = of_clk_get_by_name(s_gsp_of_dev->of_node, GSP_CLOCK_NAME);
1059 gspCtx->gsp_clk = clk_get(NULL, GSP_CLOCK_NAME);
1061 if (IS_ERR(gspCtx->gsp_clk)) {
1062 printk(KERN_ERR "gsp: get clk failed!\n");
1065 printk(KERN_INFO "gsp: get clk ok!\n");
1068 ret = clk_set_parent(gspCtx->gsp_clk, gsp_clk_parent);
1070 printk(KERN_ERR "gsp: gsp set clk parent failed!\n");
1073 printk(KERN_INFO "gsp: gsp set clk parent ok!\n");
/*
 * gsp_drv_probe() - platform probe: allocate the driver context, map
 * registers from the DT node, set up clocks, register the misc device,
 * request the completion IRQ, and initialize all per-user slots and
 * semaphores. On misc-register failure the device is deregistered via the
 * (partially visible) error path.
 */
1079 int32_t gsp_drv_probe(struct platform_device *pdev)
1083 gsp_context_t *gspCtx;
1089 gspCtx =kzalloc(sizeof(gsp_context_t), GFP_KERNEL);
1091 if (NULL == gspCtx) {
1092 dev_err(&pdev->dev, "Can't alloc memory for module data.\n");
1096 GSP_TRACE("gsp_probe enter .\n");
1097 printk("%s,AHB clock :%d\n", __func__,GSP_AHB_CLOCK_GET());
/* resolve IRQ and register window from the device tree */
1100 s_gsp_of_dev = &(pdev->dev);
1102 gspCtx->gsp_irq_num = irq_of_parse_and_map(s_gsp_of_dev->of_node, 0);
1104 if(0 != of_address_to_resource(s_gsp_of_dev->of_node, 0, &r)) {
1105 printk(KERN_ERR "gsp probe fail. (can't get register base address)\n");
1108 g_gsp_base_addr = (unsigned long)ioremap_nocache(r.start, resource_size(&r));
1109 if(!g_gsp_base_addr)
/* optional MMU-control window on IOMMU-capable chips */
1112 #ifndef GSP_IOMMU_WORKAROUND1
1113 #if defined(CONFIG_ARCH_SCX15) || defined(CONFIG_ARCH_SCX30G) || defined(CONFIG_ARCH_SCX35L)
1114 ret = of_property_read_u32(s_gsp_of_dev->of_node, "gsp_mmu_ctrl_base", (u32*)&g_gsp_mmu_ctrl_addr);
1117 printk("%s: read gsp_mmu_ctrl_addr fail (%d)\n", __func__, ret);
1120 g_gsp_mmu_ctrl_addr = (ulong)ioremap_nocache(g_gsp_mmu_ctrl_addr,sizeof(uint32_t));
1121 if(!g_gsp_mmu_ctrl_addr)
1126 printk("gsp: irq = %d, g_gsp_base_addr = 0x%lx,\n", gspCtx->gsp_irq_num, g_gsp_base_addr);
1128 gspCtx->gsp_irq_num = TB_GSP_INT;
1131 GSP_AUTO_GATE_ENABLE();
1132 ret = gsp_clock_init(gspCtx);
1134 printk(KERN_ERR "gsp emc clock init failed. \n");
1137 //GSP_module_enable(gspCtx);
/* expose /dev/sprd_gsp */
1139 gspCtx->dev.minor = MISC_DYNAMIC_MINOR;
1140 gspCtx->dev.name = "sprd_gsp";
1141 gspCtx->dev.fops = &gsp_drv_fops;
1142 ret = misc_register(&gspCtx->dev);
1144 printk(KERN_ERR "gsp cannot register miscdev (%d)\n", ret);
1148 ret = request_irq(gspCtx->gsp_irq_num,//
1155 printk("could not request irq %d\n", gspCtx->gsp_irq_num);
/* initialize per-user slots and locking state */
1159 gspCtx->gsp_cur_client_pid = INVALID_USER_ID;
1160 for (i=0; i<GSP_MAX_USER; i++) {
1161 gspCtx->gsp_user_array[i].pid = INVALID_USER_ID;
1162 gspCtx->gsp_user_array[i].is_exit_force = 0;
1163 sema_init(&(gspCtx->gsp_user_array[i].sem_open), 1);
1166 /* initialize locks*/
1167 memset(&gspCtx->gsp_cfg,0,sizeof(gspCtx->gsp_cfg));
1168 sema_init(&gspCtx->gsp_hw_resource_sem, 1);
1169 sema_init(&gspCtx->gsp_wait_interrupt_sem, 0);
1170 /*initialize gsp_perf*/
1172 gspCtx->cache_coef_init_flag = 0;
1174 #ifdef CONFIG_HAS_EARLYSUSPEND
1175 memset(& gspCtx->earlysuspend,0,sizeof( gspCtx->earlysuspend));
1176 gspCtx->earlysuspend.suspend = gsp_early_suspend;
1177 gspCtx->earlysuspend.resume = gsp_late_resume;
1178 gspCtx->earlysuspend.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING;
1179 register_early_suspend(&gspCtx->earlysuspend);
1182 platform_set_drvdata(pdev, gspCtx);
/* error path: undo misc registration */
1187 misc_deregister(&gspCtx->dev);
/*
 * gsp_drv_remove() - platform remove: free the IRQ and deregister the misc
 * device. NOTE(review): free_irq's second argument must match the dev_id
 * passed to request_irq (likely gspCtx, not the handler) -- confirm against
 * the request_irq arguments hidden in this fragment.
 */
1192 static int32_t gsp_drv_remove(struct platform_device *dev)
1194 gsp_context_t *gspCtx;
1195 GSP_TRACE( "gsp_remove called !\n");
1197 gspCtx = platform_get_drvdata(dev);
1198 if (NULL == gspCtx) {
1201 free_irq(gspCtx->gsp_irq_num, gsp_irq_handler);
1202 misc_deregister(&gspCtx->dev);
/* Device-tree match table and platform driver definition. PM callbacks are
 * wired only when CONFIG_HAS_EARLYSUSPEND is off (the early-suspend hooks
 * cover that case instead). */
1207 static const struct of_device_id sprdgsp_dt_ids[] = {
1208 { .compatible = "sprd,gsp", },
1213 static struct platform_driver gsp_driver = {
1214 .probe = gsp_drv_probe,
1215 .remove = gsp_drv_remove,
1216 #ifndef CONFIG_HAS_EARLYSUSPEND
1217 .suspend = gsp_suspend,
1218 .resume = gsp_resume,
1222 .owner = THIS_MODULE,
1225 .of_match_table = of_match_ptr(sprdgsp_dt_ids),
/* gsp_drv_init() - module entry: register the platform driver. */
1230 int32_t __init gsp_drv_init(void)
1232 printk("gsp_drv_init enter! \n");
1234 if (platform_driver_register(&gsp_driver) != 0) {
1235 printk("gsp platform driver register Failed! \n");
1238 GSP_TRACE("gsp platform driver registered successful! \n");
/* gsp_drv_exit() - module exit: unregister the platform driver. */
1244 void gsp_drv_exit(void)
1246 platform_driver_unregister(&gsp_driver);
1247 GSP_TRACE("gsp platform driver unregistered! \n");
/* Module registration boilerplate. */
1250 module_init(gsp_drv_init);
1251 module_exit(gsp_drv_exit);
1253 MODULE_DESCRIPTION("GSP Driver");
1254 MODULE_LICENSE("GPL");