2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
4 * Jinyoung Jeon <jy0.jeon@samsung.com>
5 * Vijayakumar <vijay.bvb@samsung.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/miscdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/proc_fs.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <asm/uaccess.h>
22 #include <linux/math64.h>
23 #include <linux/types.h>
24 #include <linux/interrupt.h>
25 #include <linux/errno.h>
26 #include <linux/irq.h>
27 #include <linux/kthread.h>
29 #include <linux/pid.h>
30 #include <linux/pm_runtime.h>
31 #include <soc/sprd/hardware.h>
34 #include <drm/sprd_drm.h>
36 #include "sprd_drm_ipp.h"
37 #include "drm_fourcc.h"
38 #include "sprd_drm_gsp.h"
40 #ifdef GSP_WORK_AROUND1
41 #include <linux/dma-mapping.h>
46 #include <linux/of_fdt.h>
47 #include <linux/of_irq.h>
48 #include <linux/of_address.h>
49 #include <linux/device.h>
/* Helper macros: fetch the driver context stashed in the platform device,
 * and 32-bit MMIO accessors relative to the mapped GSP register base.
 * NOTE(review): gsp_read()/gsp_write() expand an implicit local `ctx` —
 * every caller must have a `struct gsp_context *ctx` in scope. */
52 #define get_gsp_context(dev) platform_get_drvdata(to_platform_device(dev))
53 #define gsp_read(offset) readl(ctx->regs + (offset))
54 #define gsp_write(cfg, offset) writel(cfg, ctx->regs + (offset))
57 #if defined(CONFIG_ARCH_SCX15) || defined(CONFIG_ARCH_SCX30G)
58 uint32_t gsp_mmu_ctrl_addr = 0;
63 * A structure of gsp context.
65 * @ippdrv: prepare initialization using ippdrv.
66 * @lock: locking of operations.
67 * @regs: GSP iomapped register.
68 * @reg_size: register size.
69 * @gsp_clk: gsp clock.
70 * @emc_clk: emc clock.
75 struct sprd_drm_ippdrv ippdrv;
78 GSP_CONFIG_INFO_T gsp_cfg;
84 int cur_buf_id[SPRD_DRM_OPS_MAX];
86 volatile u32 coef_force_calc;
88 struct device *gsp_of_dev;
/* Scale ratio in 16.16 fixed point (65536 == 1.0): GSP_RATIO(src, dst).
 * GSP_UP_MAX (1/4) caps upscaling at 4x; GSP_DOWN_MIN (4/1) caps
 * downscaling at 4x — both are checked in gsp_check_limit().
 * NOTE(review): macro arguments are not parenthesized; fine for the
 * simple uses in this file, fragile for expression arguments. */
92 #define GSP_RATIO(x, y) ((65536 * x) / y)
93 #define GSP_UP_MAX GSP_RATIO(1, 4)
94 #define GSP_DOWN_MIN GSP_RATIO(4, 1)
97 typedef struct _coef_entry {
98 struct _coef_entry* prev;
99 struct _coef_entry* next;
104 uint32_t coef[COEF_MATRIX_ENTRY_SIZE];
107 Coef_Entry *Coef_Entry_List_Head = NULL;
109 static uint32_t s_cache_coef_init_flag = 0;
/* Initialise a simple bump allocator (GSC_MEM_POOL) over a caller-supplied
 * scratch buffer. Rejects NULL/zero arguments and buffers smaller than
 * MIN_POOL_SIZE; the corresponding early-return statements are on lines
 * dropped from this excerpt — TODO confirm against the full source.
 * NOTE(review): the buffer pointer is stored in a uint32_t begin_addr,
 * which is not 64-bit safe. */
112 static uint8_t _init_pool(void *buffer_ptr, uint32_t buffer_size,
113 GSC_MEM_POOL * pool_ptr)
115 if (NULL == buffer_ptr || 0 == buffer_size || NULL == pool_ptr)
118 if (buffer_size < MIN_POOL_SIZE)
121 pool_ptr->begin_addr = (uint32_t) buffer_ptr;
122 pool_ptr->total_size = buffer_size;
123 pool_ptr->used_size = 0;
124 DRM_DEBUG("GSP_init_pool:begin_addr:0x%08x,total_size:%d,used_size:%d\n",
125 pool_ptr->begin_addr, pool_ptr->total_size,
126 pool_ptr->used_size);
/* Carve `size` bytes from the pool, aligned up to (1 << align_shift),
 * zero the region, and return it. On pool exhaustion an error is logged;
 * the failure-return statements sit on lines dropped from this excerpt.
 * Ownership: memory lives until the pool's backing buffer is discarded —
 * there is no per-allocation free. */
130 static void *_allocate(uint32_t size, uint32_t align_shift,
131 GSC_MEM_POOL * pool_ptr)
133 uint32_t begin_addr = 0;
134 uint32_t temp_addr = 0;
135 if (NULL == pool_ptr) {
136 DRM_ERROR("GSP_Allocate:_Allocate error! \n");
139 begin_addr = pool_ptr->begin_addr;
140 temp_addr = begin_addr + pool_ptr->used_size;
/* round the cursor up to the requested power-of-two alignment */
141 temp_addr = (((temp_addr + (1UL << align_shift) - 1) >> align_shift)
143 if (temp_addr + size > begin_addr + pool_ptr->total_size) {
144 DRM_ERROR("GSP_Allocate err:temp_addr:0x%08x,size:%d,begin_addr:0x%08x,"
145 "total_size:%d,used_size:%d\n", temp_addr, size, begin_addr,
146 pool_ptr->total_size, pool_ptr->used_size);
149 pool_ptr->used_size = (temp_addr + size) - begin_addr;
150 SCI_MEMSET((void *) temp_addr, 0, size);
151 DRM_DEBUG("GSP_Allocate:_Allocate success!%08x \n",temp_addr);
152 return (void *) temp_addr;
/* Signed 64/64 division implemented by dividing magnitudes with the
 * kernel's div64_s64(). The sign of the result is presumably restored on
 * lines dropped from this excerpt (a sign flag is implied by the two
 * negation branches) — TODO confirm against the full source. */
155 static int64_t div64_s64_s64(int64_t dividend, int64_t divisor)
158 int64_t dividend_tmp = dividend;
159 int64_t divisor_tmp = divisor;
/* take |dividend|: bit 63 set means negative */
164 if ((dividend >> 63) & 0x1) {
166 dividend_tmp = dividend * (-1);
/* take |divisor| likewise */
168 if ((divisor >> 63) & 0x1) {
170 divisor_tmp = divisor * (-1);
172 ret = div64_s64(dividend_tmp, divisor_tmp);
/* Normalise `ilen` 64-bit filter taps so the int16 outputs sum to 256.
 * If the input sums to zero, every output gets the flat value 256/ilen.
 * NOTE(review): tmp_data is initialised to NULL here yet dereferenced in
 * the loops below — it is presumably assigned from `data` on a line
 * dropped from this excerpt; verify before relying on this reading. */
177 static void normalize_inter(int64_t * data, int16_t * int_data, uint8_t ilen)
181 int64_t *tmp_data = NULL;
182 int64_t tmp_sum_val = 0;
185 for (it = 0; it < ilen; it++)
186 tmp_sum_val += tmp_data[it];
188 if (0 == tmp_sum_val) {
189 uint8_t value = 256 / ilen;
190 for (it = 0; it < ilen; it++) {
192 int_data[it] = (int16_t) tmp_d;
195 for (it = 0; it < ilen; it++) {
/* scale each tap so the row total becomes 256 */
196 tmp_d = div64_s64_s64(tmp_data[it] * (int64_t) 256, tmp_sum_val);
197 int_data[it] = (uint16_t) tmp_d;
202 /* ------------------------------------------ */
/* Sum the first `ilen` taps of `data` (the accumulation statement and
 * return are on lines dropped from this excerpt). */
203 static int16_t sum_fun(int16_t *data, int8_t ilen)
209 for (i = 0; i < ilen; i++)
/* Adjust the taps so they sum to exactly 256: compute the residual
 * (sum - 256), take its sign as the +/-1 correction step, and spread the
 * correction symmetrically around the middle tap; finally clamp the
 * centre tap to 255, pushing any excess into its left neighbour.
 * Several statements (midi setup, loop bodies) are on dropped lines. */
215 static void adjust_filter_inter(int16_t *filter, uint8_t ilen)
221 tmpi = sum_fun(filter, ilen) - 256;
223 GSC_SIGN2(tmp_val, tmpi);
225 if ((tmpi & 1) == 1) // tmpi is odd
227 filter[midi] = filter[midi] - tmp_val;
231 tmp_S = GSC_ABS(tmpi / 2);
233 if ((ilen & 1) == 1) // ilen is odd
235 for (i = 0; i < tmp_S; i++) {
236 filter[midi - (i + 1)] = filter[midi - (i + 1)] - tmp_val;
237 filter[midi + (i + 1)] = filter[midi + (i + 1)] - tmp_val;
239 } else { /* ilen is even */
240 for (i = 0; i < tmp_S; i++) {
241 filter[midi - (i + 1)] = filter[midi - (i + 1)] - tmp_val;
242 filter[midi + i] = filter[midi + i] - tmp_val;
/* clamp the centre tap to the 255 hardware ceiling */
246 if (filter[midi] > 255) {
247 tmp_val = filter[midi];
249 filter[midi - 1] = filter[midi - 1] + tmp_val - 255;
/* Build the 8-phase luma scaling coefficient table for an N:M
 * (interpolation:decimation) ratio. The sinc-like prototype filter is
 * evaluated in fixed point via sin_32/cos_32, shaped by a raised-cosine
 * window (the 9059697/7717519 constants look like ~0.54/0.46 Hamming
 * weights in fixed point — TODO confirm scale), then split into 8
 * polyphase sub-filters that are normalised to sum 256 and range-checked.
 * Scratch memory comes from the caller's pool; `coef_lenght` (sic, the
 * file-wide spelling) is the total tap count. Returns an int16_t status
 * whose value lies on dropped lines. */
253 static int16_t cal_y_mode_l_coef(int16_t coef_lenght, int16_t * coef_data_ptr,
254 int16_t N, int16_t M, GSC_MEM_POOL * pool_ptr)
257 int16_t i, mid_i, kk, j, sum_val;
258 int64_t *filter = _allocate(GSC_COUNT * sizeof(int64_t), 3, pool_ptr);
259 int64_t *tmp_filter = _allocate(GSC_COUNT * sizeof(int64_t), 3, pool_ptr);
260 int16_t *normal_filter = _allocate(GSC_COUNT * sizeof(int16_t), 2,
263 if (NULL == filter || NULL == tmp_filter || NULL == normal_filter)
/* centre tap: N << GSC_FIX scaled by max(M, N) */
266 mid_i = coef_lenght >> 1;
267 filter[mid_i] = div64_s64_s64((int64_t)((int64_t) N << GSC_FIX),
268 (int64_t) MAX(M, N));
/* symmetric sinc evaluation outward from the centre */
269 for (i = 0; i < mid_i; i++) {
270 int64_t angle_x = div64_s64_s64(
271 (int64_t) ARC_32_COEF * (int64_t)(i + 1) * (int64_t) N,
274 int64_t angle_y = div64_s64_s64(
275 (int64_t) ARC_32_COEF * (int64_t)(i + 1) * (int64_t) N,
276 (int64_t)(M * N) * (int64_t) 8);
277 int32_t value_x = sin_32((int32_t) angle_x);
278 int32_t value_y = sin_32((int32_t) angle_y);
279 filter[mid_i + i + 1] = div64_s64_s64(
280 (int64_t)((int64_t) value_x * (int64_t)(1 << GSC_FIX)),
281 (int64_t)((int64_t) M * (int64_t) value_y));
282 filter[mid_i - (i + 1)] = filter[mid_i + i + 1];
/* apply the cosine window symmetrically */
284 for (i = -1; i < mid_i; i++) {
285 int32_t angle_32 = (int32_t) div64_s64_s64(
287 (int64_t) 2 * (int64_t)(mid_i - i - 1) *
288 (int64_t) ARC_32_COEF), (int64_t) coef_lenght);
289 int64_t a = (int64_t) 9059697;
290 int64_t b = (int64_t) 7717519;
291 int64_t t = a - ((b * cos_32(angle_32)) >> 30);
292 filter[mid_i + i + 1] = (t * filter[mid_i + i + 1]) >> GSC_FIX;
293 filter[mid_i - (i + 1)] = filter[mid_i + i + 1];
/* split into 8 polyphase rows, normalise, and write back interleaved */
295 for (i = 0; i < 8; i++) {
297 for (j = i; j < coef_lenght; j += 8) {
298 tmp_filter[mount] = filter[j];
301 normalize_inter(tmp_filter, normal_filter, (int8_t) mount);
302 sum_val = sum_fun(normal_filter, mount);
304 adjust_filter_inter(normal_filter, mount);
307 for (kk = i; kk < coef_lenght; kk += 8) {
308 coef_data_ptr[kk] = normal_filter[mount];
/* Luma wrapper: zero the output buffer and generate tap*8 coefficients
 * for decimation D -> interpolation I via cal_y_mode_l_coef(). The
 * `dir` parameter is unused in the visible lines; the return statement
 * (presumably coef_lenght) is on a dropped line — TODO confirm. */
316 static int16_t cal_y_scaling_coef(int16_t tap, int16_t D, int16_t I,
317 int16_t * y_coef_data_ptr, int16_t dir, GSC_MEM_POOL * pool_ptr)
319 uint16_t coef_lenght;
321 coef_lenght = (uint16_t) (tap * 8);
322 SCI_MEMSET(y_coef_data_ptr, 0, coef_lenght * sizeof(int16_t));
323 cal_y_mode_l_coef(coef_lenght, y_coef_data_ptr, I, D, pool_ptr);
/* Chroma wrapper: generate UV coefficients with the same generator as
 * luma. Two alternative tap-count computations are visible (tap*8 vs a
 * fixed 2*8); the branch conditions selecting between them sit on lines
 * dropped from this excerpt. Returns the tap count used. */
327 static int16_t cal_uv_scaling_coef(int16_t tap, int16_t D, int16_t I,
328 int16_t * uv_coef_data_ptr, int16_t dir, GSC_MEM_POOL * pool_ptr)
330 int16_t uv_coef_lenght;
333 uv_coef_lenght = (int16_t) (tap * 8);
334 cal_y_mode_l_coef(uv_coef_lenght, uv_coef_data_ptr, I, D, pool_ptr);
337 uv_coef_lenght = (int16_t) (tap * 8);
339 uv_coef_lenght = (int16_t) (2 * 8);
341 cal_y_mode_l_coef(uv_coef_lenght, uv_coef_data_ptr, I, D, pool_ptr);
343 return uv_coef_lenght;
/* De-interleave the flat coefficient array into iI_hor phase-major rows
 * in out_filter, starting each phase at a rotated position; per-phase
 * tap counts are recorded in filter_len (the increment/wrap statements
 * are on dropped lines — TODO confirm). */
346 static void get_filter(int16_t * coef_data_ptr, int16_t * out_filter,
347 int16_t iI_hor, int16_t coef_len, int16_t * filter_len)
349 int16_t i, pos_start;
351 pos_start = coef_len / 2;
/* wrap the start position into [0, iI_hor) */
353 while (pos_start >= iI_hor)
356 for (i = 0; i < iI_hor; i++) {
359 int16_t pos = pos_start + i;
360 while (pos >= iI_hor)
363 for (j = 0; j < coef_len; j += iI_hor) {
364 *out_filter++ = coef_data_ptr[j + pos];
/* Copy each of the 8 phase rows into the destination with the tap order
 * reversed (source row read back-to-front), advancing the destination by
 * dst_pitch after every phase row. */
371 static void write_scalar_coef(int16_t * dst_coef_ptr, int16_t * coef_ptr,
372 int16_t dst_pitch, int16_t src_pitch)
376 for (i = 0; i < 8; i++) {
377 for (j = 0; j < src_pitch; j++) {
378 *(dst_coef_ptr + j) =
379 *(coef_ptr + i * src_pitch + src_pitch - 1 - j);
381 dst_coef_ptr += dst_pitch;
/* Clamp any tap exceeding 255 to 255 and redistribute the overflow
 * (diff) evenly to the two neighbouring taps so each row's sum is kept.
 * Row pointer setup (the per-row stride added to coef_ptr) is on a
 * dropped line. NOTE(review): the j-1 / j+1 neighbour writes appear
 * unguarded at the row edges — verify bounds in the full source. */
385 static void check_coef_range(int16_t * coef_ptr, int16_t rows, int16_t columns,
388 int16_t value, diff, sign;
389 int16_t *coef_arr[COEF_ARR_ROWS] = { NULL };
391 for (i = 0; i < COEF_ARR_ROWS; i++) {
392 coef_arr[i] = coef_ptr;
395 for (i = 0; i < rows; i++) {
396 for (j = 0; j < columns; j++) {
397 value = coef_arr[i][j];
400 coef_arr[i][j] = 255;
401 sign = GSC_ABS(diff);
402 if ((sign & 1) == 1) { /* overflow is odd: split unevenly */
403 coef_arr[i][j + 1] = coef_arr[i][j + 1] + (diff + 1) / 2;
404 coef_arr[i][j - 1] = coef_arr[i][j - 1] + (diff - 1) / 2;
405 } else { /* overflow is even: split in half */
406 coef_arr[i][j + 1] = coef_arr[i][j + 1] + (diff) / 2;
407 coef_arr[i][j - 1] = coef_arr[i][j - 1] + (diff) / 2;
/* Repack a tap-by-8 coefficient matrix into the zero-padded 8x8 layout
 * the GSP hardware expects. Two copy variants are visible — one shifted
 * right by a column, one aligned — with the selecting condition (likely
 * on tap parity or count) on lines dropped from this excerpt. */
414 static void gsp_rearrange_coef(void* src, void*dst, int32_t tap)
417 int16_t *src_ptr, *dst_ptr;
419 src_ptr = (int16_t*) src;
420 dst_ptr = (int16_t*) dst;
421 if (src_ptr == NULL || dst_ptr == NULL)
425 memset((void*) dst_ptr, 0x00, 8 * 8 * sizeof(int16_t));
430 for (i = 0; i < 8; i++)
431 for (j = 0; j < tap; j++)
432 *(dst_ptr + i * 8 + 1 + j) = *(src_ptr + i * 8 + j);
436 for (i = 0; i < 8; i++)
437 for (j = 0; j < tap; j++)
438 *(dst_ptr + i * 8 + j) = *(src_ptr + i * 8 + j);
/* One-time allocation of the coefficient cache: CACHED_COEF_CNT_MAX
 * entries linked into a circular doubly linked list used as an LRU
 * (head = most recently used). Guarded by s_cache_coef_init_flag.
 * NOTE(review): kmalloc+memset could be kzalloc; no locking is visible
 * around the flag/list — presumably callers are serialised elsewhere. */
444 static int32_t cache_coef_init(void)
446 Coef_Entry *Coef_Entry_Array = NULL;
448 DRM_DEBUG("GSP_CACHE_COEF:init\n");
450 if (s_cache_coef_init_flag == 0) {
451 Coef_Entry_Array = (Coef_Entry *) kmalloc(
452 sizeof(Coef_Entry) * CACHED_COEF_CNT_MAX, GFP_KERNEL);
454 if (Coef_Entry_Array) {
455 memset((void*) Coef_Entry_Array, 0,
456 sizeof(Coef_Entry) * CACHED_COEF_CNT_MAX);
/* seed the circular list with entry 0 pointing at itself */
458 Coef_Entry_List_Head = &Coef_Entry_Array[0];
459 Coef_Entry_Array[0].prev = &Coef_Entry_Array[0];
460 Coef_Entry_Array[0].next = &Coef_Entry_Array[0];
463 while (i < CACHED_COEF_CNT_MAX) {
464 LIST_ADD_TO_LIST_HEAD(&Coef_Entry_Array[i]);
467 s_cache_coef_init_flag = 1;
475 func:cache_coef_hit_check
476 desc:find the entry that has the same in_w, in_h, out_w and out_h
477 return:the matching entry pointer on a hit; NULL otherwise
/* Walk the cache list for an entry matching the (in_w, in_h, out_w,
 * out_h) key; entries with in_w == 0 terminate the used portion.
 * Keeps static hit/total counters for the debug hit-ratio log.
 * The hit-return and walk-advance statements sit on dropped lines;
 * returns NULL on a miss. NOTE(review): static counters are unguarded —
 * presumably fine if calls are serialised; verify. */
479 static Coef_Entry* cache_coef_hit_check(uint16_t in_w, uint16_t in_h,
480 uint16_t out_w, uint16_t out_h) {
481 static uint32_t total_cnt = 0;
482 static uint32_t hit_cnt = 0;
484 Coef_Entry* walk = Coef_Entry_List_Head;
487 while (walk->in_w != 0) {
488 if (walk->in_w == in_w && walk->in_h == in_h &&
489 walk->out_w == out_w && walk->out_h == out_h) {
491 DRM_DEBUG("GSP_CACHE_COEF:hit, hit_ratio:%d percent\n",
492 hit_cnt * 100 / total_cnt);
495 if (walk->next == Coef_Entry_List_Head)
500 DRM_DEBUG("GSP_CACHE_COEF:miss\n");
/* LRU bump: unlink pEntry from its current position and re-insert it at
 * the head of the cache list; returns the (new) list head. */
504 static Coef_Entry* cache_coef_move_entry_to_list_head(Coef_Entry* pEntry) {
505 LIST_FETCH_FROM_LIST(pEntry);
506 LIST_ADD_TO_LIST_HEAD(pEntry);
507 return Coef_Entry_List_Head;
512 /**---------------------------------------------------------------------------*
513 ** Public Functions *
514 **---------------------------------------------------------------------------*/
515 /****************************************************************************/
516 /* Purpose: generate scale factor */
519 /* i_w: source image width */
520 /* i_h: source image height */
521 /* o_w: target image width */
522 /* o_h: target image height */
524 /* coeff_h_ptr: pointer of horizontal coefficient buffer, the size of which must be at */
525 /* least SCALER_COEF_TAP_NUM_HOR * 4 bytes */
526 /* the output coefficient will be located in coeff_h_ptr[0], ......, */
527 /* coeff_h_ptr[SCALER_COEF_TAP_NUM_HOR-1] */
528 /* coeff_v_ptr: pointer of vertical coefficient buffer, the size of which must be at */
529 /* least (SCALER_COEF_TAP_NUM_VER + 1) * 4 bytes */
530 /* the output coefficient will be located in coeff_v_ptr[0], ......, */
531 /* coeff_h_ptr[SCALER_COEF_TAP_NUM_VER-1] and the tap number */
532 /* will be located in coeff_h_ptr[SCALER_COEF_TAP_NUM_VER] */
533 temp_buf_ptr: temp buffer used while generating the coefficients
534 temp_buf_size: temp buffer size; 6 KB is the suggested size
537 /****************************************************************************/
/* Generate (or fetch from cache) the packed horizontal and vertical
 * scaler coefficient tables for an i_w x i_h -> o_w x o_h operation.
 * On a cache hit the stored 32-bit words are copied straight into
 * coeff_h_ptr; on a miss the pool-backed pipeline runs:
 * cal_*_scaling_coef -> get_filter -> write_scalar_coef ->
 * check_coef_range -> gsp_rearrange_coef, then pairs of 9-bit taps are
 * packed into 32-bit register words, and the result is installed in the
 * LRU tail entry. Several control statements (cache-hit return, cnts
 * increments, success return) are on lines dropped from this excerpt.
 * NOTE(review): only coeff_h_ptr appears to be cached — presumably
 * coeff_v_ptr is cached on a dropped line or derived; verify. */
538 static uint8_t gsp_gen_block_ccaler_coef(uint32_t i_w, uint32_t i_h, uint32_t o_w,
539 uint32_t o_h, uint32_t hor_tap, uint32_t ver_tap,
540 uint32_t *coeff_h_ptr, uint32_t *coeff_v_ptr,
541 void *temp_buf_ptr, uint32_t temp_buf_size)
543 int16_t D_hor = i_w; /* decimation at horizontal */
544 int16_t I_hor = o_w; /* interpolation at horizontal */
545 int16_t *cong_com_hor = 0;
546 int16_t *cong_com_ver = 0;
547 int16_t *coeff_array = 0;
549 uint32_t coef_buf_size = 0;
550 int16_t *temp_filter_ptr = NULL;
551 int16_t *filter_ptr = NULL;
552 uint32_t filter_buf_size = GSC_COUNT * sizeof(int16_t);
553 int16_t filter_len[COEF_ARR_ROWS] = { 0 };
554 int16_t coef_len = 0;
555 GSC_MEM_POOL pool = { 0 };
559 Coef_Entry* pEntry = NULL;
561 if (s_cache_coef_init_flag == 0)
/* fast path: reuse a previously computed table if the key matches */
564 if (s_cache_coef_init_flag == 1) {
565 pEntry = cache_coef_hit_check(i_w, i_h, o_w, o_h);
566 if (pEntry) { /* hit */
567 memcpy((void*) coeff_h_ptr, (void*) pEntry->coef,
568 COEF_MATRIX_ENTRY_SIZE * 4);
569 cache_coef_move_entry_to_list_head(pEntry);
575 /* init pool and allocate static array */
576 if (!_init_pool(temp_buf_ptr, temp_buf_size, &pool)) {
577 DRM_ERROR("GSP_Gen_Block_Ccaler_Coef: _init_pool error! \n");
581 coef_buf_size = COEF_ARR_ROWS * COEF_ARR_COL_MAX * sizeof(int16_t);
582 cong_com_hor = (int16_t*) _allocate(coef_buf_size, 2, &pool);
583 cong_com_ver = (int16_t*) _allocate(coef_buf_size, 2, &pool);
584 coeff_array = (int16_t*) _allocate(8 * 8, 2, &pool);
586 if (NULL == cong_com_hor || NULL == cong_com_ver || NULL == coeff_array) {
587 DRM_ERROR("GSP_Gen_Block_Ccaler_Coef:_Allocate error!%08x,%08x,%08x\n",
588 cong_com_hor, cong_com_ver, coeff_array);
592 temp_filter_ptr = _allocate(filter_buf_size, 2, &pool);
593 filter_ptr = _allocate(filter_buf_size, 2, &pool);
594 if (NULL == temp_filter_ptr || NULL == filter_ptr) {
595 DRM_ERROR("GSP_Gen_Block_Ccaler_Coef:_Allocate error! \n");
599 /* calculate coefficients of Y component in horizontal direction */
600 coef_len = cal_y_scaling_coef(hor_tap, D_hor, I_hor, temp_filter_ptr, 1,
602 get_filter(temp_filter_ptr, filter_ptr, 8, coef_len, filter_len);
603 write_scalar_coef(cong_com_hor, filter_ptr, 8, hor_tap);
604 check_coef_range(cong_com_hor, 8, hor_tap, 8);
605 gsp_rearrange_coef(cong_com_hor, coeff_array, hor_tap);
607 uint32_t cnts = 0, reg = 0;
/* pack 8 taps per phase as four words of two 9-bit fields each */
609 for (i = 0; i < 8; i++) {
610 p0 = (uint16_t) (*(coeff_array + i * 8 + 0));
611 p1 = (uint16_t) (*(coeff_array + i * 8 + 1));
612 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
613 coeff_h_ptr[cnts + 0] = reg;
615 p0 = (uint16_t) (*(coeff_array + i * 8 + 2));
616 p1 = (uint16_t) (*(coeff_array + i * 8 + 3));
617 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
618 coeff_h_ptr[cnts + 1] = reg;
620 p0 = (uint16_t) (*(coeff_array + i * 8 + 4));
621 p1 = (uint16_t) (*(coeff_array + i * 8 + 5));
622 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
623 coeff_h_ptr[cnts + 2] = reg;
625 p0 = (uint16_t) (*(coeff_array + i * 8 + 6));
626 p1 = (uint16_t) (*(coeff_array + i * 8 + 7));
627 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
628 coeff_h_ptr[cnts + 3] = reg;
634 /* calculate coefficients of UV component in horizontal direction */
635 coef_len = cal_uv_scaling_coef(ver_tap, D_hor, I_hor, temp_filter_ptr, 1,
637 get_filter(temp_filter_ptr, filter_ptr, 8, coef_len, filter_len);
638 write_scalar_coef(cong_com_ver, filter_ptr, 8, ver_tap);
639 check_coef_range(cong_com_ver, 8, ver_tap, 8);
640 memset(coeff_array, 0x00, 8 * 8 * sizeof(int16_t));
641 gsp_rearrange_coef(cong_com_ver, coeff_array, ver_tap);
643 uint32_t cnts = 0, reg = 0;
645 for (i = 0; i < 8; i++) {
646 p0 = (uint16_t) (*(coeff_array + i * 8 + 0));
647 p1 = (uint16_t) (*(coeff_array + i * 8 + 1));
648 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
649 coeff_v_ptr[cnts + 0] = reg;
651 p0 = (uint16_t) (*(coeff_array + i * 8 + 2));
652 p1 = (uint16_t) (*(coeff_array + i * 8 + 3));
653 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
654 coeff_v_ptr[cnts + 1] = reg;
656 p0 = (uint16_t) (*(coeff_array + i * 8 + 4));
657 p1 = (uint16_t) (*(coeff_array + i * 8 + 5));
658 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
659 coeff_v_ptr[cnts + 2] = reg;
661 p0 = (uint16_t) (*(coeff_array + i * 8 + 6));
662 p1 = (uint16_t) (*(coeff_array + i * 8 + 7));
663 reg = (p0 & 0x1ff) | ((p1 & 0x1ff) << 16);
664 coeff_v_ptr[cnts + 3] = reg;
/* install the fresh table in the LRU tail (evicting its old key) */
671 if (s_cache_coef_init_flag == 1) {
672 pEntry = LIST_GET_THE_TAIL_ENTRY();
673 if (pEntry->in_w == 0)
674 DRM_DEBUG("GSP_CACHE_COEF:add\n");
676 DRM_DEBUG("GSP_CACHE_COEF:swap\n");
678 memcpy((void*) pEntry->coef, (void*) coeff_h_ptr,
679 COEF_MATRIX_ENTRY_SIZE * 4);
680 cache_coef_move_entry_to_list_head(pEntry);
681 LIST_SET_ENTRY_KEY(pEntry, i_w, i_h, o_w, o_h);
/* Write the 8x4-word horizontal and vertical coefficient tables into
 * the GSP coefficient register windows (GSP_HOR_COEF_BASE onward) using
 * raw volatile stores, 4 bytes per word.
 * NOTE(review): this bypasses writel()/the ctx->regs mapping used
 * elsewhere — presumably the *_COEF_BASE macros are already virtual
 * addresses; verify. */
688 static void gsp_scale_coef_tab_config(uint32_t *p_h_coeff, uint32_t *p_v_coeff)
690 uint32_t i = 0, j = 0;
691 uint32_t *s_scaling_reg_hor_ptr = 0, *s_scaling_reg_ver_ptr = 0;
692 uint32_t scale_h_coef_addr = GSP_HOR_COEF_BASE, scale_v_coef_addr =
695 s_scaling_reg_hor_ptr = p_h_coeff;
697 for (i = 0; i < 8; i++) {
698 for (j = 0; j < 4; j++) {
699 *(volatile uint32_t*) scale_h_coef_addr = *s_scaling_reg_hor_ptr;
700 scale_h_coef_addr += 4;
701 s_scaling_reg_hor_ptr++;
705 s_scaling_reg_ver_ptr = p_v_coeff;
706 for (i = 0; i < 8; i++) {
707 for (j = 0; j < 4; j++) {
708 *(volatile uint32_t*) scale_v_coef_addr = *s_scaling_reg_ver_ptr;
709 scale_v_coef_addr += 4;
710 s_scaling_reg_ver_ptr++;
716 * M2M operation : supports crop/scale/rotation/csc so on.
717 * Memory ----> GSP H/W ----> Memory.
718 * Writeback operation : supports cloned screen with FIMD.
719 * FIMD ----> GSP H/W ----> Memory.
720 * Output operation : supports direct display using local path.
721 * Memory ----> GSP H/W ----> FIMD.
/* Compute per-plane offsets for the given fourcc and frame size, validate
 * the supplied buffer sizes against them, and — on a path whose guarding
 * condition is dropped from this excerpt — derive base[1]/base[2] from
 * base[0] for single-allocation buffers. Error paths dump all planes.
 * NOTE(review): NV16/NV61 share the NV12 ofs[1] = ofs[0] >> 1 chroma
 * size, but 4:2:2 two-plane chroma is normally full-size — verify against
 * the hardware's expectation. */
724 static int gsp_set_planar_addr(struct drm_sprd_ipp_buf_info *buf_info,
725 u32 fmt, struct drm_sprd_sz *sz)
727 dma_addr_t *base[SPRD_DRM_PLANAR_MAX];
728 uint64_t size[SPRD_DRM_PLANAR_MAX];
729 uint64_t ofs[SPRD_DRM_PLANAR_MAX];
734 for_each_ipp_planar(i) {
735 base[i] = &buf_info->base[i];
736 size[i] = buf_info->size[i];
740 DRM_DEBUG_KMS("%s:base[%d][0x%x]s[%d][%llu]\n", __func__,
741 i, *base[i], i, size[i]);
745 DRM_INFO("%s:failed to get buffer size.\n", __func__);
750 case DRM_FORMAT_NV12:
751 case DRM_FORMAT_NV21:
752 case DRM_FORMAT_NV16:
753 case DRM_FORMAT_NV61:
754 ofs[0] = (uint64_t)sz->hsize * sz->vsize;
755 ofs[1] = ofs[0] >> 1;
756 if (*base[0] && *base[1]) {
757 if (size[0] + size[1] < ofs[0] + ofs[1])
762 case DRM_FORMAT_NV12MT:
/* macro-tile format: 128x32 tiles, rounded to 8K per plane */
763 ofs[0] = ALIGN(ALIGN(sz->hsize, 128) *
764 ALIGN(sz->vsize, 32), SZ_8K);
765 ofs[1] = ALIGN(ALIGN(sz->hsize, 128) *
766 ALIGN(sz->vsize >> 1, 32), SZ_8K);
767 if (*base[0] && *base[1]) {
768 if (size[0] + size[1] < ofs[0] + ofs[1])
773 case DRM_FORMAT_YUV410:
774 case DRM_FORMAT_YVU410:
775 case DRM_FORMAT_YUV411:
776 case DRM_FORMAT_YVU411:
777 case DRM_FORMAT_YUV420:
778 case DRM_FORMAT_YVU420:
779 case DRM_FORMAT_YUV422:
780 case DRM_FORMAT_YVU422:
781 case DRM_FORMAT_YUV444:
782 case DRM_FORMAT_YVU444:
783 ofs[0] = (uint64_t)sz->hsize * sz->vsize;
784 ofs[1] = ofs[2] = ofs[0] >> 2;
785 if (*base[0] && *base[1] && *base[2]) {
786 if (size[0]+size[1]+size[2] < ofs[0]+ofs[1]+ofs[2])
791 case DRM_FORMAT_XRGB8888:
792 ofs[0] = (uint64_t)sz->hsize * sz->vsize << 2;
794 if (size[0] < ofs[0])
/* derive chroma plane addresses from the luma base */
805 *base[1] = *base[0] + ofs[0];
806 if (ofs[1] && ofs[2])
807 *base[2] = *base[1] + ofs[1];
810 DRM_DEBUG_KMS("%s:y[0x%x],cb[0x%x],cr[0x%x]\n", __func__,
811 *base[0], *base[1], *base[2]);
816 DRM_ERROR("invalid size for fmt[0x%x]\n", fmt);
818 for_each_ipp_planar(i) {
819 base[i] = &buf_info->base[i];
820 size[i] = buf_info->size[i];
822 DRM_ERROR("base[%d][0x%x]s[%d][%llu]ofs[%d][%llu]\n",
823 i, *base[i], i, size[i], i, ofs[i]);
/* ippdrv src op: translate a DRM fourcc into the layer-0 source format,
 * program it into SPRD_LAYER0_CFG, and write the word-endian mode
 * register. ARGB8888 additionally flips y_word_endn to GSP_WORD_ENDN_1.
 * The switch header, break statements and returns are on dropped lines;
 * unknown formats log an error and set GSP_SRC_FMT_MAX_NUM. */
829 static int gsp_src_set_fmt(struct device *dev, u32 fmt)
831 struct gsp_context *ctx = get_gsp_context(dev);
832 struct sprd_drm_ippdrv *ippdrv = &ctx->ippdrv;
835 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
838 case DRM_FORMAT_RGB565:
839 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_RGB565;
841 case DRM_FORMAT_RGB888:
842 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_RGB888;
844 case DRM_FORMAT_YUV422:
845 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_YUV422_2P;
847 case DRM_FORMAT_NV12:
848 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_YUV420_2P;
850 case DRM_FORMAT_YUV420:
851 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_YUV420_3P;
853 case DRM_FORMAT_XRGB8888:
854 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_ARGB888;
856 case DRM_FORMAT_ARGB8888:
857 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_ARGB888;
858 ctx->gsp_cfg.layer0_info.endian_mode.y_word_endn = GSP_WORD_ENDN_1;
861 ctx->gsp_cfg.layer0_info.img_format = GSP_SRC_FMT_MAX_NUM;
862 dev_err(ippdrv->dev, "invalid target format 0x%x.\n", fmt);
/* read-modify-write the format field, then the endian register */
866 cfg = gsp_read(SPRD_LAYER0_CFG);
868 cfg &= ~(SPRD_LAYER0_CFG_IMG_FORMAT_L0_MASK);
869 cfg |= SPRD_LAYER0_CFG_IMG_FORMAT_L0_SET(
870 ctx->gsp_cfg.layer0_info.img_format);
872 gsp_write(cfg, SPRD_LAYER0_CFG);
873 gsp_write(ctx->gsp_cfg.layer0_info.endian_mode.y_word_endn, SPRD_LAYER0_ENDIAN);
/* ippdrv src op: rotation/flip on the source side — currently a stub
 * that only logs; the rotation is applied on the destination side in
 * gsp_dst_set_transf(). */
878 static int gsp_src_set_transf(struct device *dev,
879 enum drm_sprd_degree degree,
880 enum drm_sprd_flip flip, bool *swap)
882 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
885 /* ToDo: need to implement */
/* ippdrv src op: program the layer-0 clip rectangle (start x/y in the
 * low/high halves of one register, w/h in another) and set the source
 * pitch to the full frame width. */
890 static int gsp_src_set_size(struct device *dev, int swap,
891 struct drm_sprd_pos *pos, struct drm_sprd_sz *sz)
893 struct gsp_context *ctx = get_gsp_context(dev);
896 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
897 __func__, swap, sz->hsize, sz->vsize);
899 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
900 pos->x, pos->y, pos->w, pos->h);
902 ctx->gsp_cfg.layer0_info.clip_rect.st_x = pos->x;
903 ctx->gsp_cfg.layer0_info.clip_rect.st_y = pos->y;
904 ctx->gsp_cfg.layer0_info.clip_rect.rect_w = pos->w;
905 ctx->gsp_cfg.layer0_info.clip_rect.rect_h = pos->h;
907 cfg = (ctx->gsp_cfg.layer0_info.clip_rect.st_x
908 | (ctx->gsp_cfg.layer0_info.clip_rect.st_y << 16));
909 gsp_write(cfg, SPRD_LAYER0_CLIP_START);
911 cfg = (ctx->gsp_cfg.layer0_info.clip_rect.rect_w
912 | (ctx->gsp_cfg.layer0_info.clip_rect.rect_h << 16));
913 gsp_write(cfg, SPRD_LAYER0_CLIP_SIZE);
915 ctx->gsp_cfg.layer0_info.pitch = sz->hsize;
916 gsp_write(ctx->gsp_cfg.layer0_info.pitch, SPRD_LAYER0_PITCH);
/* ippdrv src op: on IPP_BUF_ENQUEUE, resolve the per-plane DMA addresses
 * via gsp_set_planar_addr() for the source config, remember the buffer
 * id, and program the layer-0 Y/UV/V address registers. DEQUEUE path and
 * the switch header/returns are on dropped lines. */
921 static int gsp_src_set_addr(struct device *dev,
922 struct drm_sprd_ipp_buf_info *buf_info, u32 buf_id,
923 enum drm_sprd_ipp_buf_type buf_type)
925 struct gsp_context *ctx = get_gsp_context(dev);
926 struct sprd_drm_ippdrv *ippdrv = &ctx->ippdrv;
927 struct drm_sprd_ipp_cmd_node *c_node = ippdrv->c_node;
928 struct drm_sprd_ipp_property *property;
929 struct drm_sprd_ipp_config *config;
933 DRM_ERROR("failed to get c_node.\n");
937 property = &c_node->property;
939 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
940 property->prop_id, buf_id, buf_type);
942 /* address register set */
944 case IPP_BUF_ENQUEUE:
945 ctx->cur_buf_id[SPRD_DRM_OPS_SRC] = buf_id;
946 config = &property->config[SPRD_DRM_OPS_SRC];
947 ret = gsp_set_planar_addr(buf_info, config->fmt, &config->sz);
949 dev_err(dev, "failed to set plane src addr.\n");
953 ctx->gsp_cfg.layer0_info.src_addr.addr_y =
954 buf_info->base[SPRD_DRM_PLANAR_Y];
955 ctx->gsp_cfg.layer0_info.src_addr.addr_uv =
956 buf_info->base[SPRD_DRM_PLANAR_CB];
957 ctx->gsp_cfg.layer0_info.src_addr.addr_v =
958 buf_info->base[SPRD_DRM_PLANAR_CR];
960 gsp_write(ctx->gsp_cfg.layer0_info.src_addr.addr_y, SPRD_LAYER0_Y_ADDR);
961 gsp_write(ctx->gsp_cfg.layer0_info.src_addr.addr_uv,
962 SPRD_LAYER0_UV_ADDR);
963 gsp_write(ctx->gsp_cfg.layer0_info.src_addr.addr_v,
964 SPRD_LAYER0_VA_ADDR);
966 case IPP_BUF_DEQUEUE:
/* Source-side operation table registered with the IPP core. */
975 static struct sprd_drm_ipp_ops gsp_src_ops = {
976 .set_fmt = gsp_src_set_fmt,
977 .set_transf = gsp_src_set_transf,
978 .set_size = gsp_src_set_size,
979 .set_addr = gsp_src_set_addr,
/* ippdrv dst op: translate a DRM fourcc into the destination-layer
 * format, program SPRD_DES_DATA_CFG, and write the destination endian
 * register; ARGB8888 also flips y_word_endn. Switch header, breaks and
 * returns are on dropped lines; unknown formats log an error. */
982 static int gsp_dst_set_fmt(struct device *dev, u32 fmt)
984 struct gsp_context *ctx = get_gsp_context(dev);
985 struct sprd_drm_ippdrv *ippdrv = &ctx->ippdrv;
988 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
991 case DRM_FORMAT_RGB565:
992 ctx->gsp_cfg.layer_des_info.img_format = GSP_DST_FMT_RGB565;
994 case DRM_FORMAT_RGB888:
995 ctx->gsp_cfg.layer_des_info.img_format = GSP_DST_FMT_RGB888;
997 case DRM_FORMAT_YUV422:
998 ctx->gsp_cfg.layer_des_info.img_format = GSP_DST_FMT_YUV422_2P;
1000 case DRM_FORMAT_NV12:
1001 ctx->gsp_cfg.layer_des_info.img_format = GSP_DST_FMT_YUV420_2P;
1003 case DRM_FORMAT_YUV420:
1004 ctx->gsp_cfg.layer_des_info.img_format = GSP_DST_FMT_YUV420_3P;
1006 case DRM_FORMAT_XRGB8888:
1007 ctx->gsp_cfg.layer_des_info.img_format = GSP_DST_FMT_ARGB888;
1009 case DRM_FORMAT_ARGB8888:
1010 ctx->gsp_cfg.layer_des_info.img_format = GSP_DST_FMT_ARGB888;
1011 ctx->gsp_cfg.layer_des_info.endian_mode.y_word_endn = GSP_WORD_ENDN_1;
1014 dev_err(ippdrv->dev, "invalid target format 0x%x.\n", fmt);
/* read-modify-write the destination format field */
1018 cfg = gsp_read(SPRD_DES_DATA_CFG);
1020 cfg &= ~(SPRD_DEST_DATA_CFG_IMG_FORMAT_MASK);
1022 (SPRD_DEST_DATA_CFG_IMG_FORMAT_SET(ctx->gsp_cfg.layer_des_info.img_format));
1024 gsp_write(cfg, SPRD_DES_DATA_CFG);
1025 gsp_write(ctx->gsp_cfg.layer_des_info.endian_mode.y_word_endn, SPRD_DES_DATA_ENDIAN);
/* ippdrv dst op: map the requested degree + flip combination onto the
 * GSP layer-0 rotation angle (the *_M variants are mirrored forms), then
 * program it into SPRD_LAYER0_CFG. The switch header, breaks, the *swap
 * output assignment and returns are on lines dropped from this excerpt.
 * Note the 90/270 mapping is intentionally inverted (90 -> ANGLE_270):
 * the HW angle is presumably defined in the opposite direction — verify. */
1030 static int gsp_dst_set_transf(struct device *dev,
1031 enum drm_sprd_degree degree,
1032 enum drm_sprd_flip flip, bool *swap)
1034 struct gsp_context *ctx = get_gsp_context(dev);
1035 struct sprd_drm_ippdrv *ippdrv = &ctx->ippdrv;
1038 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
1042 case SPRD_DRM_DEGREE_0:
1043 if (flip & SPRD_DRM_FLIP_VERTICAL)
1044 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_0_M;
1045 else if (flip & SPRD_DRM_FLIP_HORIZONTAL)
1046 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_180_M;
1048 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_0;
1050 case SPRD_DRM_DEGREE_90:
1051 if (flip & SPRD_DRM_FLIP_VERTICAL)
1052 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_270_M;
1053 else if (flip & SPRD_DRM_FLIP_HORIZONTAL)
1054 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_90_M;
1056 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_270;
1058 case SPRD_DRM_DEGREE_180:
1059 if (flip & SPRD_DRM_FLIP_VERTICAL)
1060 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_180_M;
1061 else if (flip & SPRD_DRM_FLIP_HORIZONTAL)
1062 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_0_M;
1064 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_180;
1066 case SPRD_DRM_DEGREE_270:
1067 if (flip & SPRD_DRM_FLIP_VERTICAL)
1068 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_90_M;
1069 else if (flip & SPRD_DRM_FLIP_HORIZONTAL)
1070 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_270_M;
1072 ctx->gsp_cfg.layer0_info.rot_angle = GSP_ROT_ANGLE_90;
1075 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
/* program the selected angle into the layer-0 config register */
1079 cfg = gsp_read(SPRD_LAYER0_CFG);
1081 cfg &= ~(SPRD_LAYER0_CFG_ROT_ANGLE_MASK);
1082 cfg |= SPRD_LAYER0_CFG_ROT_ANGLE_SET(ctx->gsp_cfg.layer0_info.rot_angle);
1084 gsp_write(cfg, SPRD_LAYER0_CFG);
/* ippdrv dst op: program the destination rectangle (start and size, each
 * packed x|y<<16 / w|h<<16) and the destination pitch (frame width). */
1089 static int gsp_dst_set_size(struct device *dev, int swap,
1090 struct drm_sprd_pos *pos, struct drm_sprd_sz *sz)
1092 struct gsp_context *ctx = get_gsp_context(dev);
1095 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
1096 __func__, swap, sz->hsize, sz->vsize);
1098 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
1099 __func__, pos->x, pos->y, pos->w, pos->h);
1101 ctx->gsp_cfg.layer0_info.des_rect.st_x = pos->x;
1102 ctx->gsp_cfg.layer0_info.des_rect.st_y = pos->y;
1103 ctx->gsp_cfg.layer0_info.des_rect.rect_w = pos->w;
1104 ctx->gsp_cfg.layer0_info.des_rect.rect_h = pos->h;
1106 cfg = (ctx->gsp_cfg.layer0_info.des_rect.st_x
1107 | (ctx->gsp_cfg.layer0_info.des_rect.st_y << 16));
1108 gsp_write(cfg, SPRD_LAYER0_DES_START);
1110 cfg = (ctx->gsp_cfg.layer0_info.des_rect.rect_w
1111 | (ctx->gsp_cfg.layer0_info.des_rect.rect_h << 16));
1112 gsp_write(cfg, SPRD_LAYER0_DES_SIZE);
1114 ctx->gsp_cfg.layer_des_info.pitch = sz->hsize;
1116 gsp_write(ctx->gsp_cfg.layer_des_info.pitch, SPRD_DES_PITCH);
/* ippdrv dst op: mirror of gsp_src_set_addr() for the destination layer —
 * on ENQUEUE, resolve plane addresses for the dst config, remember the
 * buffer id, and program the destination Y/UV/V address registers.
 * Switch header, DEQUEUE body and returns are on dropped lines. */
1121 static int gsp_dst_set_addr(struct device *dev,
1122 struct drm_sprd_ipp_buf_info *buf_info, u32 buf_id,
1123 enum drm_sprd_ipp_buf_type buf_type)
1125 struct gsp_context *ctx = get_gsp_context(dev);
1126 struct sprd_drm_ippdrv *ippdrv = &ctx->ippdrv;
1127 struct drm_sprd_ipp_cmd_node *c_node = ippdrv->c_node;
1128 struct drm_sprd_ipp_property *property;
1129 struct drm_sprd_ipp_config *config;
1133 DRM_ERROR("failed to get c_node.\n");
1137 property = &c_node->property;
1139 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1140 property->prop_id, buf_id, buf_type);
1142 /* address register set */
1144 case IPP_BUF_ENQUEUE:
1145 ctx->cur_buf_id[SPRD_DRM_OPS_DST] = buf_id;
1146 config = &property->config[SPRD_DRM_OPS_DST];
1147 ret = gsp_set_planar_addr(buf_info, config->fmt, &config->sz);
1149 dev_err(dev, "failed to set plane dst addr.\n");
1153 ctx->gsp_cfg.layer_des_info.src_addr.addr_y =
1154 buf_info->base[SPRD_DRM_PLANAR_Y];
1155 ctx->gsp_cfg.layer_des_info.src_addr.addr_uv =
1156 buf_info->base[SPRD_DRM_PLANAR_CB];
1157 ctx->gsp_cfg.layer_des_info.src_addr.addr_v =
1158 buf_info->base[SPRD_DRM_PLANAR_CR];
1160 gsp_write(ctx->gsp_cfg.layer_des_info.src_addr.addr_y, SPRD_DES_Y_ADDR);
1161 gsp_write(ctx->gsp_cfg.layer_des_info.src_addr.addr_uv,
1163 gsp_write(ctx->gsp_cfg.layer_des_info.src_addr.addr_v, SPRD_DES_V_ADDR);
1165 case IPP_BUF_DEQUEUE:
/* Destination-side operation table registered with the IPP core. */
1175 static struct sprd_drm_ipp_ops gsp_dst_ops = {
1176 .set_fmt = gsp_dst_set_fmt,
1177 .set_transf = gsp_dst_set_transf,
1178 .set_size = gsp_dst_set_size,
1179 .set_addr = gsp_dst_set_addr,
/* Gate the GSP and EMC clocks. Enabling also forces a coefficient
 * recalculation (register state is lost across the clock gate) and
 * clears/sets the suspended flag accordingly. Note: enable order is
 * gsp then emc, disable is the reverse. */
1182 static int gsp_clk_ctrl(struct gsp_context *ctx, bool enable)
1184 DRM_INFO("%s:enable[%d]\n", __func__, enable);
1187 clk_prepare_enable(ctx->gsp_clk);
1188 clk_prepare_enable(ctx->emc_clk);
1189 ctx->coef_force_calc = 1;
1190 ctx->suspended = false;
1192 clk_disable_unprepare(ctx->emc_clk);
1193 clk_disable_unprepare(ctx->gsp_clk);
1194 ctx->suspended = true;
/* GSP completion interrupt: acknowledge the IRQ (set then clear the
 * INT_CLR bit), disable further interrupts, and hand the finished
 * destination buffer id to the IPP core via sched_event().
 * NOTE(review): c_node/event are dereferenced without a NULL check
 * visible here — guards may sit on dropped lines; verify. */
1200 static irqreturn_t gsp_irq_handler(int irq, void *dev_id)
1202 struct gsp_context *ctx = dev_id;
1203 struct sprd_drm_ippdrv *ippdrv = &ctx->ippdrv;
1204 struct drm_sprd_ipp_cmd_node *c_node = ippdrv->c_node;
1205 struct drm_sprd_ipp_event_info *event = c_node->event;
1206 enum drm_sprd_ops_id ops_id = SPRD_DRM_OPS_DST;
1209 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1210 ctx->cur_buf_id[SPRD_DRM_OPS_DST]);
/* pulse INT_CLR to acknowledge the interrupt */
1212 cfg = gsp_read(SPRD_GSP_INT_CFG);
1213 cfg |= (SPRD_GSP_INT_CFG_INT_CLR_SET(1));
1214 gsp_write(cfg, SPRD_GSP_INT_CFG);
1218 cfg = gsp_read(SPRD_GSP_INT_CFG);
1219 cfg &= ~(SPRD_GSP_INT_CFG_INT_CLR_MASK);
1220 gsp_write(cfg, SPRD_GSP_INT_CFG);
/* mask the interrupt until the next job is kicked */
1222 cfg = gsp_read(SPRD_GSP_INT_CFG);
1223 cfg |= SPRD_GSP_INT_CFG_INT_EN_SET(GSP_IRQ_TYPE_DISABLE);
1224 gsp_write(cfg, SPRD_GSP_INT_CFG);
1226 event->ippdrv = ippdrv;
1227 event->buf_id[ops_id] = ctx->cur_buf_id[SPRD_DRM_OPS_DST];
1229 ippdrv->sched_event(event);
/* Advertise the GSP's capabilities to the IPP core: all four rotation
 * degrees, CSC, crop and scale supported; no writeback. The capability
 * struct is devm-allocated, so it is freed with the device. */
1234 static int gsp_init_prop_list(struct sprd_drm_ippdrv *ippdrv)
1236 struct drm_sprd_ipp_prop_list *capability;
1238 DRM_DEBUG_KMS("%s\n", __func__);
1240 capability = devm_kzalloc(ippdrv->dev, sizeof(*capability), GFP_KERNEL);
1242 DRM_ERROR("failed to alloc capability.\n");
1246 capability->writeback = 0;
1247 capability->degree = (1 << SPRD_DRM_DEGREE_0) | (1 << SPRD_DRM_DEGREE_90)
1248 | (1 << SPRD_DRM_DEGREE_180) | (1 << SPRD_DRM_DEGREE_270);
1249 capability->csc = 1;
1250 capability->crop = 1;
1251 capability->scale = 1;
1253 ippdrv->prop_list = capability;
/*
 * gsp_check_limit - validate the scaling constraints of an IPP property.
 * @property: property holding the src/dst configurations to check.
 *
 * Width and height are swapped for 90/270-degree configurations before
 * the checks.  Two rules are enforced:
 *  - mixed scaling (upscale on one axis, downscale on the other) is
 *    rejected outright;
 *  - each axis ratio (src/dst in 16.16 fixed point, see GSP_RATIO)
 *    must lie within [GSP_UP_MAX, GSP_DOWN_MIN], i.e. between 1/4x
 *    upscale and 4x downscale.
 */
1258 static inline bool gsp_check_limit(struct drm_sprd_ipp_property *property)
1260 struct drm_sprd_ipp_config *src_config =
1261 &property->config[SPRD_DRM_OPS_SRC];
1262 struct drm_sprd_ipp_config *dst_config =
1263 &property->config[SPRD_DRM_OPS_DST];
1264 struct drm_sprd_pos src_pos = src_config->pos;
1265 struct drm_sprd_pos dst_pos = dst_config->pos;
1266 unsigned int h_ratio, v_ratio;
/* normalize rotated rectangles so w/h compare like-for-like */
1268 if (src_config->degree == SPRD_DRM_DEGREE_90 ||
1269 src_config->degree == SPRD_DRM_DEGREE_270)
1270 swap(src_pos.w, src_pos.h);
1272 if (dst_config->degree == SPRD_DRM_DEGREE_90 ||
1273 dst_config->degree == SPRD_DRM_DEGREE_270)
1274 swap(dst_pos.w, dst_pos.h);
/* up on one axis and down on the other is unsupported */
1276 if ((src_pos.w > dst_pos.w && src_pos.h < dst_pos.h) ||
1277 (src_pos.w < dst_pos.w && src_pos.h > dst_pos.h)) {
1278 DRM_ERROR("unsupported scale[%d %d->%d %d]\n",
1279 src_pos.w, src_pos.h, dst_pos.w, dst_pos.h);
1283 h_ratio = GSP_RATIO(src_pos.w, dst_pos.w);
1284 v_ratio = GSP_RATIO(src_pos.h, dst_pos.h);
1286 if ((h_ratio > GSP_DOWN_MIN) ||
1287 (h_ratio < GSP_UP_MAX)) {
1288 DRM_ERROR("h_ratio[%d]out of range\n", h_ratio);
1292 if ((v_ratio > GSP_DOWN_MIN) ||
1293 (v_ratio < GSP_UP_MAX)) {
1294 DRM_ERROR("v_ratio[%d]out of range\n", v_ratio);
1298 /* ToDo: need to add more check routine */
/*
 * gsp_ippdrv_check_property - ippdrv->check_property hook.
 *
 * Accepts only memory-to-memory commands, and only if the scaling
 * limits enforced by gsp_check_limit() hold; otherwise the property
 * is rejected as invalid.
 */
1302 static int gsp_ippdrv_check_property(struct device *dev,
1303 struct drm_sprd_ipp_property *property)
1307 if (!ipp_is_m2m_cmd(property->cmd))
1310 if (!gsp_check_limit(property))
1314 DRM_ERROR("invalid property\n");
/*
 * gsp_ippdrv_reset - ippdrv->reset hook: put GSP into a known state.
 *
 * Clears the cached software configuration, enables layer0, soft-resets
 * the hardware module (gsp-iommu bug workaround; skipped when
 * GSP_IOMMU_WORKAROUND1 is defined), enables automatic clock gating,
 * selects level-triggered interrupt mode and programs the layer0
 * enable bit into SPRD_GSP_CFG.
 */
1319 static int gsp_ippdrv_reset(struct device *dev)
1321 struct gsp_context *ctx = get_gsp_context(dev);
1324 DRM_DEBUG_KMS("%s\n", __func__);
1326 memset(&ctx->gsp_cfg, 0x0, sizeof(ctx->gsp_cfg));
1328 ctx->gsp_cfg.layer0_info.layer_en = 1;
1330 #ifndef GSP_IOMMU_WORKAROUND1
1331 GSP_HWMODULE_SOFTRESET(); //workaround gsp-iommu bug
1333 GSP_AUTO_GATE_ENABLE();
/* level-triggered interrupt mode */
1335 cfg = gsp_read(SPRD_GSP_INT_CFG);
1336 cfg |= SPRD_GSP_INT_CFG_INT_MODE_SET(GSP_IRQ_MODE_LEVEL);
1337 gsp_write(cfg, SPRD_GSP_INT_CFG);
/* mirror layer0_info.layer_en into the L0_EN field */
1339 cfg = gsp_read(SPRD_GSP_CFG);
1340 cfg &= ~(SPRD_GSP_CFG_L0_EN_MASK);
1341 cfg |= SPRD_GSP_CFG_L0_EN_SET(ctx->gsp_cfg.layer0_info.layer_en);
1342 gsp_write(cfg, SPRD_GSP_CFG);
/*
 * gsp_coef_tap_convert - encode scaler filter tap counts into the
 * 2-bit row_tap_mode / col_tap_mode fields of the layer0 config.
 *
 * Each tap count selects one of modes 0..3, with 0 as the fallback;
 * both fields are masked to 2 bits at the end.  (The selecting
 * switch/case on the h/v tap values sits between the assignments.)
 */
1347 static void gsp_coef_tap_convert(GSP_CONFIG_INFO_T* gsp_cfg,
1352 gsp_cfg->layer0_info.row_tap_mode = 0;
1355 gsp_cfg->layer0_info.row_tap_mode = 1;
1358 gsp_cfg->layer0_info.row_tap_mode = 2;
1361 gsp_cfg->layer0_info.row_tap_mode = 3;
1364 gsp_cfg->layer0_info.row_tap_mode = 0;
1370 gsp_cfg->layer0_info.col_tap_mode = 0;
1373 gsp_cfg->layer0_info.col_tap_mode = 1;
1376 gsp_cfg->layer0_info.col_tap_mode = 2;
1379 gsp_cfg->layer0_info.col_tap_mode = 3;
1382 gsp_cfg->layer0_info.col_tap_mode = 0;
/* hardware fields are only 2 bits wide */
1386 gsp_cfg->layer0_info.row_tap_mode &= 0x3;
1387 gsp_cfg->layer0_info.col_tap_mode &= 0x3;
/*
 * gsp_scaling_coef_gen_and_config - generate and program the layer0
 * scaling coefficient tables.
 * @dev: owning device (not used in the visible body).
 * @ctx: GSP context holding the current job configuration.
 *
 * When scaling is enabled: derives the effective input size from the
 * clip rectangle (swapping w/h for 90/270-type rotations), rejects
 * destination rectangles smaller than 4x4 and decimation factors above
 * 16, halves the tap count as the decimation factor grows, and only
 * recomputes/programs the coefficient tables when the in/out geometry
 * differs from the last run or ctx->coef_force_calc is set.  Finally
 * writes the converted row/col tap modes into SPRD_LAYER0_CFG.
 *
 * Returns a GSP_KERNEL_GEN_* error code on failure.
 *
 * NOTE(review): the last-geometry cache lives in function-static
 * variables, so it is shared across all contexts — not safe if more
 * than one GSP instance ever exists; confirm single-instance usage.
 * NOTE(review): verify tmp_buf is kfree()d on both the
 * gsp_gen_block_ccaler_coef() failure path and after a successful
 * table upload (the free is not visible in this excerpt).
 */
1390 static int32_t gsp_scaling_coef_gen_and_config(struct device *dev,
1391 struct gsp_context *ctx)
1393 GSP_CONFIG_INFO_T* gsp_cfg = &ctx->gsp_cfg;
1394 u8 h_tap = 8, v_tap = 8;
1395 u32 *tmp_buf = NULL, *h_coeff = NULL, *v_coeff = NULL;
1396 u32 coef_factor_w = 0, coef_factor_h = 0;
1397 u32 after_rotate_w = 0, after_rotate_h = 0;
1398 u32 coef_in_w = 0, coef_in_h = 0;
1399 u32 coef_out_w = 0, coef_out_h = 0;
/* cache of the geometry used for the last coefficient generation */
1400 static volatile u32 coef_in_w_last = 0, coef_in_h_last = 0;
1401 static volatile u32 coef_out_w_last = 0, coef_out_h_last = 0;
1404 if (gsp_cfg->layer0_info.scaling_en == 1) {
1405 if (gsp_cfg->layer0_info.des_rect.rect_w < 4
1406 || gsp_cfg->layer0_info.des_rect.rect_h < 4) {
1407 return GSP_KERNEL_GEN_OUT_RANG;
/* input size as seen after rotation: 90/270 variants swap w/h */
1410 if (gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_0
1411 || gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_180
1412 || gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_0_M
1413 || gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_180_M) {
1414 after_rotate_w = gsp_cfg->layer0_info.clip_rect.rect_w;
1415 after_rotate_h = gsp_cfg->layer0_info.clip_rect.rect_h;
1416 } else if (gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_90
1417 || gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_270
1418 || gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_90_M
1419 || gsp_cfg->layer0_info.rot_angle == GSP_ROT_ANGLE_270_M) {
1420 after_rotate_w = gsp_cfg->layer0_info.clip_rect.rect_h;
1421 after_rotate_h = gsp_cfg->layer0_info.clip_rect.rect_w;
1425 CEIL(after_rotate_w,gsp_cfg->layer0_info.des_rect.rect_w);
1427 CEIL(after_rotate_h,gsp_cfg->layer0_info.des_rect.rect_h);
/* hardware cannot decimate by more than 16 per axis */
1429 if (coef_factor_w > 16 || coef_factor_h > 16)
1430 return GSP_KERNEL_GEN_OUT_RANG;
/* fewer taps for stronger decimation */
1432 if (coef_factor_w > 8)
1434 else if (coef_factor_w > 4)
1439 if (coef_factor_h > 8)
1441 else if (coef_factor_h > 4)
1446 coef_in_w = CEIL(after_rotate_w,coef_factor_w);
1447 coef_in_h = CEIL(after_rotate_h,coef_factor_h);
1448 coef_out_w = gsp_cfg->layer0_info.des_rect.rect_w;
1449 coef_out_h = gsp_cfg->layer0_info.des_rect.rect_h;
/* regenerate tables only when forced or the geometry changed */
1450 if (ctx->coef_force_calc ||coef_in_w_last != coef_in_w
1451 || coef_in_h_last != coef_in_h || coef_out_w_last != coef_out_w
1452 || coef_out_h_last != coef_out_h) {
1453 tmp_buf = (u32 *) kmalloc(GSP_COEFF_BUF_SIZE, GFP_KERNEL);
1454 if (NULL == tmp_buf) {
1455 DRM_ERROR("SCALE DRV: No mem to alloc coeff buffer! \n");
1456 return GSP_KERNEL_GEN_ALLOC_ERR;
1460 v_coeff = tmp_buf + (GSP_COEFF_COEF_SIZE / 4);
1462 if (!(gsp_gen_block_ccaler_coef(coef_in_w, coef_in_h, coef_out_w,
1463 coef_out_h, h_tap, v_tap, h_coeff, v_coeff,
1464 tmp_buf + (GSP_COEFF_COEF_SIZE / 2),
1465 GSP_COEFF_POOL_SIZE))) {
1467 DRM_ERROR("GSP DRV: GSP_Gen_Block_Ccaler_Coef error! \n");
1468 return GSP_KERNEL_GEN_COMMON_ERR;
/* upload the tables and remember the geometry that produced them */
1471 gsp_scale_coef_tab_config(h_coeff, v_coeff);
1472 coef_in_w_last = coef_in_w;
1473 coef_in_h_last = coef_in_h;
1474 coef_out_w_last = coef_out_w;
1475 coef_out_h_last = coef_out_h;
1476 ctx->coef_force_calc = 0;
1479 gsp_coef_tap_convert(gsp_cfg, h_tap, v_tap);
/* program the tap modes into the layer0 register */
1481 cfg = gsp_read(SPRD_LAYER0_CFG);
1482 cfg &= ~((SPRD_LAYER0_CFG_ROW_TAP_MODE_MASK)
1483 | (SPRD_LAYER0_CFG_COL_TAP_MODE_MASK));
1485 SPRD_LAYER0_CFG_ROW_TAP_MODE_SET(gsp_cfg->layer0_info.row_tap_mode);
1487 SPRD_LAYER0_CFG_COL_TAP_MODE_SET(gsp_cfg->layer0_info.col_tap_mode);
1488 gsp_write(cfg, SPRD_LAYER0_CFG);
/*
 * gsp_ippdrv_start - ippdrv->start hook: finish programming and kick
 * the GSP for the given command.
 *
 * Decides whether scaling is required by comparing the clip and
 * destination rectangles (cross-compared for odd, i.e. 90/270-type,
 * rotation angles), pulses the scaler status-clear bit when scaling,
 * programs SCALE_EN, sets the GSP clock, generates/uploads the scaling
 * coefficients, checks the hardware error flag/code registers, enables
 * the completion interrupt and finally sets the RUN bit.
 */
1496 static int gsp_ippdrv_start(struct device *dev, enum drm_sprd_ipp_cmd cmd)
1498 struct gsp_context *ctx = get_gsp_context(dev);
1502 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1507 ctx->gsp_cfg.misc_info.ahb_clock = 2;
1508 ctx->gsp_cfg.misc_info.gsp_clock = 3;
/* odd rot_angle values swap w/h, so compare clip.w vs des.h etc. */
1510 if (ctx->gsp_cfg.layer0_info.rot_angle & 0x1) {
1511 if ((ctx->gsp_cfg.layer0_info.clip_rect.rect_w
1512 != ctx->gsp_cfg.layer0_info.des_rect.rect_h)
1513 || (ctx->gsp_cfg.layer0_info.clip_rect.rect_h
1514 != ctx->gsp_cfg.layer0_info.des_rect.rect_w))
1515 ctx->gsp_cfg.layer0_info.scaling_en = 1;
1517 if ((ctx->gsp_cfg.layer0_info.clip_rect.rect_w
1518 != ctx->gsp_cfg.layer0_info.des_rect.rect_w)
1519 || (ctx->gsp_cfg.layer0_info.clip_rect.rect_h
1520 != ctx->gsp_cfg.layer0_info.des_rect.rect_h))
1521 ctx->gsp_cfg.layer0_info.scaling_en = 1;
/* pulse the scaler status-clear bit (set, then clear) */
1524 if (ctx->gsp_cfg.layer0_info.scaling_en == 1) {
1525 cfg = gsp_read(SPRD_GSP_CFG);
1526 cfg |= (SPRD_GSP_CFG_SCALE_STATUS_CLEAR_SET(1));
1527 gsp_write(cfg, SPRD_GSP_CFG);
1531 cfg = gsp_read(SPRD_GSP_CFG);
1532 cfg &= ~(SPRD_GSP_CFG_SCALE_STATUS_CLEAR_MASK);
1533 gsp_write(cfg, SPRD_GSP_CFG);
1536 cfg = gsp_read(SPRD_GSP_CFG);
1537 cfg &= ~(SPRD_GSP_CFG_SCALE_EN_MASK);
1538 cfg |= (SPRD_GSP_CFG_SCALE_EN_SET(ctx->gsp_cfg.layer0_info.scaling_en));
1539 gsp_write(cfg, SPRD_GSP_CFG);
1541 /* enable bypass/split bit for iommu issue - 14th bit */
1542 cfg = gsp_read(SPRD_GSP_CFG);
1544 gsp_write(cfg, SPRD_GSP_CFG);
1546 GSP_CLOCK_SET(ctx->gsp_cfg.misc_info.gsp_clock);
1548 ret = gsp_scaling_coef_gen_and_config(dev, ctx);
1550 DRM_ERROR("%s:gsp config err:%d\n", __func__, ret);
/* hardware reports configuration problems via ERR_FLAG / ERR_CODE */
1554 cfg = gsp_read(SPRD_GSP_CFG);
1555 cfg &= SPRD_GSP_CFG_ERR_FLAG_MASK;
1556 if (SPRD_GSP_CFG_ERR_FLAG_GET(cfg)) {
1557 cfg = gsp_read(SPRD_GSP_CFG);
1558 cfg &= SPRD_GSP_CFG_ERR_CODE_MASK;
1560 DRM_ERROR("%s:GSP configuration error[%u]\n", __func__, cfg);
1562 return SPRD_GSP_CFG_ERR_CODE_GET(cfg);
/* unmask the completion interrupt and start the engine */
1565 cfg = gsp_read(SPRD_GSP_INT_CFG);
1566 cfg |= SPRD_GSP_INT_CFG_INT_EN_SET(GSP_IRQ_TYPE_ENABLE);
1567 gsp_write(cfg, SPRD_GSP_INT_CFG);
1569 cfg = gsp_read(SPRD_GSP_CFG);
1570 cfg |= SPRD_GSP_CFG_RUN_SET(1);
1571 gsp_write(cfg, SPRD_GSP_CFG);
1575 dev_err(dev, "Invalid operations.\n");
1580 DRM_DEBUG_KMS("%s:cmd[%d]done\n", __func__, cmd);
/*
 * gsp_ippdrv_stop - ippdrv->stop hook: quiesce the GSP for a command.
 *
 * Acknowledges any pending interrupt (pulse INT_CLR high then low) and
 * masks GSP interrupts; mirrors the teardown half of gsp_irq_handler().
 */
1585 static void gsp_ippdrv_stop(struct device *dev, enum drm_sprd_ipp_cmd cmd)
1587 struct gsp_context *ctx = get_gsp_context(dev);
1590 DRM_INFO("%s:cmd[%d]\n", __func__, cmd);
/* ack: pulse INT_CLR high then low */
1594 cfg = gsp_read(SPRD_GSP_INT_CFG);
1595 cfg |= (SPRD_GSP_INT_CFG_INT_CLR_SET(1));
1596 gsp_write(cfg, SPRD_GSP_INT_CFG);
1600 cfg = gsp_read(SPRD_GSP_INT_CFG);
1601 cfg &= ~(SPRD_GSP_INT_CFG_INT_CLR_MASK);
1602 gsp_write(cfg, SPRD_GSP_INT_CFG);
/* mask GSP interrupts */
1604 cfg = gsp_read(SPRD_GSP_INT_CFG);
1605 cfg |= SPRD_GSP_INT_CFG_INT_EN_SET(GSP_IRQ_TYPE_DISABLE);
1606 gsp_write(cfg, SPRD_GSP_INT_CFG);
1610 dev_err(dev, "Invalid operations.\n");
/*
 * gsp_clock_init - look up the EMC and GSP clocks and reparent them.
 * @ctx: context whose emc_clk / gsp_clk handles are filled in.
 *
 * Fetches each clock and its intended parent by global name and calls
 * clk_set_parent() for both; the clocks are enabled later by
 * gsp_clk_ctrl().
 *
 * NOTE(review): no clk_put() on the two parent clocks is visible in
 * this excerpt — confirm the references are balanced elsewhere.
 */
1615 static int32_t gsp_clock_init(struct gsp_context *ctx)
1617 struct clk *emc_clk_parent = NULL;
1618 struct clk *gsp_clk_parent = NULL;
/* EMC clock: get parent, get clock, reparent */
1621 emc_clk_parent = clk_get(NULL, GSP_EMC_CLOCK_PARENT_NAME);
1622 if (IS_ERR(emc_clk_parent)) {
1623 DRM_ERROR("gsp: get emc clk_parent failed!\n");
1626 DRM_DEBUG("gsp: get emc clk_parent ok!\n");
1629 ctx->emc_clk = clk_get(NULL, GSP_EMC_CLOCK_NAME);
1630 if (IS_ERR(ctx->emc_clk)) {
1631 DRM_ERROR("gsp: get emc clk failed!\n");
1634 DRM_DEBUG("gsp: get emc clk ok!\n");
1637 ret = clk_set_parent(ctx->emc_clk, emc_clk_parent);
1639 DRM_ERROR("gsp: gsp set emc clk parent failed!\n");
1642 DRM_DEBUG("gsp: gsp set emc clk parent ok!\n");
/* GSP core clock: get parent, get clock, reparent */
1645 gsp_clk_parent = clk_get(NULL, GSP_CLOCK_PARENT3);
1646 if (IS_ERR(gsp_clk_parent)) {
1647 DRM_ERROR("gsp: get clk_parent failed!\n");
1650 DRM_DEBUG("gsp: get clk_parent ok!\n");
1653 ctx->gsp_clk = clk_get(NULL, GSP_CLOCK_NAME);
1654 if (IS_ERR(ctx->gsp_clk)) {
1655 DRM_ERROR("gsp: get clk failed!\n");
1658 DRM_DEBUG("gsp: get clk ok!\n");
1661 ret = clk_set_parent(ctx->gsp_clk, gsp_clk_parent);
1663 DRM_ERROR("gsp: gsp set clk parent failed!\n");
1666 DRM_DEBUG("gsp: gsp set clk parent ok!\n");
/*
 * gsp_probe - platform driver probe: map registers, acquire clocks and
 * IRQ, wire up the ippdrv callbacks and register with the IPP core.
 *
 * Two resource-discovery paths exist: the device-tree path (resource
 * by name + irq_of_parse_and_map + gsp_mmu_ctrl_base property) and a
 * legacy fixed-address path (GSP_REG_BASE / TB_GSP_INT).  The device
 * starts powered off (ctx->suspended = true) with runtime PM enabled.
 */
1672 static int gsp_probe(struct platform_device *pdev)
1674 struct device *dev = &pdev->dev;
1675 struct gsp_context *ctx;
1676 struct sprd_drm_ippdrv *ippdrv;
1678 struct resource *res;
1682 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
/* NOTE(review): these macros duplicate the GSP_RATIO definitions near
 * the top of the file and appear mid-function in this excerpt — likely
 * a merge/placement artifact worth confirming against the full file. */
1686 #define GSP_RATIO(x, y) ((65536 * x) / y)
1687 #define GSP_UP_MAX GSP_RATIO(1, 4)
1688 #define GSP_DOWN_MIN GSP_RATIO(4, 1)
1692 /* we use Least Recently Used(LRU) to implement the coef-matrix cache policy */
1693 #define COEF_MATRIX_ENTRY_SIZE (GSP_COEFF_COEF_SIZE/2)
1694 #define CACHED_COEF_CNT_MAX 32
1696 #define LIST_ADD_TO_LIST_HEAD(pEntry)\
1698 Coef_Entry_List_Head->prev->next = (pEntry);\
1699 (pEntry)->prev = Coef_Entry_List_Head->prev;\
1700 (pEntry)->next = Coef_Entry_List_Head;\
1701 Coef_Entry_List_Head->prev = (pEntry);\
1702 Coef_Entry_List_Head = (pEntry);\
1705 #define LIST_FETCH_FROM_LIST(pEntry)\
1707 pEntry->prev->next = pEntry->next;\
1708 pEntry->next->prev = pEntry->prev;\
1711 #define LIST_SET_ENTRY_KEY(pEntry,i_w,i_h,o_w,o_h)\
1713 pEntry->in_w = i_w;\
1714 pEntry->in_h = i_h;\
1715 pEntry->out_w = o_w;\
1716 pEntry->out_h = o_h;\
1719 #define LIST_GET_THE_TAIL_ENTRY() (Coef_Entry_List_Head->prev)
/* device-tree path: named MEM resource, mapped via devm */
1723 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sprdgsp");
1725 DRM_ERROR("failed to get GSP base address\n");
1729 ctx->suspended = true; /* gsp device in off state */
1730 ctx->reg_size = resource_size(res);
1731 ctx->regs = devm_ioremap(&pdev->dev, res->start,
1733 if (unlikely(!ctx->regs)) {
1734 DRM_ERROR("failed to map GSP base\n");
1738 ctx->gsp_of_dev = &(pdev->dev);
1739 ctx->irq = irq_of_parse_and_map(ctx->gsp_of_dev->of_node, 0);
1741 ret = of_property_read_u32(ctx->gsp_of_dev->of_node, "gsp_mmu_ctrl_base",
1742 &gsp_mmu_ctrl_addr);
1744 DRM_ERROR("read gsp_mmu_ctrl_addr failed:ret[%d]\n", ret);
/* NOTE(review): storing an __iomem pointer in a uint32_t only works on
 * 32-bit kernels — confirm this build target is 32-bit only. */
1748 gsp_mmu_ctrl_addr = (uint32_t)ioremap_nocache(gsp_mmu_ctrl_addr, sizeof(gsp_mmu_ctrl_addr));
1750 if(!gsp_mmu_ctrl_addr)
/* legacy non-DT path: fixed register base and interrupt number */
1753 ctx->regs = (void __iomem*) GSP_REG_BASE;
1755 dev_err(dev, "failed to map registers.\n");
1759 ctx->irq = TB_GSP_INT;
1762 GSP_AUTO_GATE_ENABLE();
1765 ret = gsp_clock_init(ctx);
1767 dev_err(dev, "gsp emc clock init failed. \n");
/* completion IRQ runs in a thread context (handler does register I/O) */
1771 ret = request_threaded_irq(ctx->irq, NULL, gsp_irq_handler,
1772 IRQF_ONESHOT, "drm_gsp", ctx);
1774 dev_err(dev, "failed to request irq.\n");
/* hook up the IPP driver callbacks */
1778 ippdrv = &ctx->ippdrv;
1780 ippdrv->ops[SPRD_DRM_OPS_SRC] = &gsp_src_ops;
1781 ippdrv->ops[SPRD_DRM_OPS_DST] = &gsp_dst_ops;
1782 ippdrv->check_property = gsp_ippdrv_check_property;
1783 ippdrv->reset = gsp_ippdrv_reset;
1784 ippdrv->start = gsp_ippdrv_start;
1785 ippdrv->stop = gsp_ippdrv_stop;
1787 ret = gsp_init_prop_list(ippdrv);
1789 dev_err(dev, "failed to init property list.\n");
/* NOTE(review): (int) cast of a pointer truncates on 64-bit; %p would
 * be the portable form for this log. */
1793 DRM_INFO("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id, (int) ippdrv);
1795 mutex_init(&ctx->lock);
1796 platform_set_drvdata(pdev, ctx);
1798 pm_runtime_set_active(dev);
1799 pm_runtime_enable(dev);
1801 ret = sprd_drm_ippdrv_register(ippdrv);
1803 dev_err(dev, "failed to register drm gsp device.\n");
1804 goto err_ippdrv_register;
1807 memset(&ctx->gsp_cfg, 0, sizeof(ctx->gsp_cfg));
1809 dev_info(dev, "drm gsp registered successfully.\n");
/* error unwind: release in reverse order of acquisition */
1813 err_ippdrv_register:
1814 devm_kfree(dev, ippdrv->prop_list);
1815 pm_runtime_disable(dev);
1818 free_irq(ctx->irq, ctx);
/*
 * gsp_remove - platform driver remove: undo gsp_probe().
 *
 * Frees the capability list, unregisters from the IPP core, destroys
 * the context mutex, disables runtime PM and releases the IRQ.
 * (Register mapping and the context itself are devm-managed.)
 */
1823 static int gsp_remove(struct platform_device *pdev)
1825 struct device *dev = &pdev->dev;
1826 struct gsp_context *ctx = get_gsp_context(dev);
1827 struct sprd_drm_ippdrv *ippdrv = &ctx->ippdrv;
1829 devm_kfree(dev, ippdrv->prop_list);
1830 sprd_drm_ippdrv_unregister(ippdrv);
1831 mutex_destroy(&ctx->lock);
1833 pm_runtime_set_suspended(dev);
1834 pm_runtime_disable(dev);
1836 free_irq(ctx->irq, ctx);
1841 #ifdef CONFIG_PM_SLEEP
/*
 * gsp_suspend - system-sleep suspend: gate the clocks off, unless the
 * device is already runtime-suspended (nothing left to do then).
 */
1842 static int gsp_suspend(struct device *dev)
1844 struct gsp_context *ctx = get_gsp_context(dev);
1846 DRM_INFO("%s\n", __func__);
1848 if (pm_runtime_suspended(dev))
1851 return gsp_clk_ctrl(ctx, false);
/*
 * gsp_resume - system-sleep resume: re-enable the clocks, but only if
 * the device is not runtime-suspended (runtime PM will power it up on
 * first use otherwise).
 */
1854 static int gsp_resume(struct device *dev)
1856 struct gsp_context *ctx = get_gsp_context(dev);
1858 DRM_INFO("%s\n", __func__);
1860 if (!pm_runtime_suspended(dev))
1861 return gsp_clk_ctrl(ctx, true);
1867 #ifdef CONFIG_PM_RUNTIME
/*
 * gsp_runtime_suspend - runtime PM suspend: gate the clocks off unless
 * already suspended (checked via both the PM core state and the
 * driver's own ctx->suspended flag).
 */
1868 static int gsp_runtime_suspend(struct device *dev)
1870 struct gsp_context *ctx = get_gsp_context(dev);
1872 DRM_INFO("%s\n", __func__);
1874 if (pm_runtime_suspended(dev) || ctx->suspended)
1877 return gsp_clk_ctrl(ctx, false);
/*
 * gsp_runtime_resume - runtime PM resume: enable the clocks when the
 * PM core no longer considers the device suspended.
 */
1880 static int gsp_runtime_resume(struct device *dev)
1882 struct gsp_context *ctx = get_gsp_context(dev);
1884 DRM_INFO("%s\n", __func__);
1886 if (!pm_runtime_suspended(dev))
1887 return gsp_clk_ctrl(ctx, true);
/* System-sleep and runtime PM hooks, referenced by the driver struct. */
1893 static const struct dev_pm_ops gsp_pm_ops = {
1894 SET_SYSTEM_SLEEP_PM_OPS(gsp_suspend, gsp_resume)
1895 SET_RUNTIME_PM_OPS(gsp_runtime_suspend, gsp_runtime_resume, NULL)
/* Device-tree match table; binds this driver to "sprd,sprd_drm_gsp". */
1899 static const struct of_device_id sprd_drm_gsp_dt_match[] = {
1900 { .compatible = "sprd,sprd_drm_gsp",},
1903 MODULE_DEVICE_TABLE(of, sprd_drm_gsp_dt_match);
1906 struct platform_driver gsp_driver = {
1908 .remove = gsp_remove,
1910 .name = "sprd-drm-gsp",
1911 .owner = THIS_MODULE,
1914 .of_match_table = sprd_drm_gsp_dt_match,