soc: Add SpacemiT K1-X SoC drivers 38/316538/1
authorMichal Wilczynski <m.wilczynski@samsung.com>
Thu, 22 Aug 2024 07:50:38 +0000 (09:50 +0200)
committerMichal Wilczynski <m.wilczynski@samsung.com>
Thu, 22 Aug 2024 11:53:44 +0000 (13:53 +0200)
SoC drivers are important dependents for the other hardware on the SoC.
Port them from the vendor kernel [1]. Some of those drivers will not be
used just yet, but the important ones are in pm_domain and
k1x-dma-range. Port the rest nonetheless as they may be useful in the
future.

[1] - https://github.com/BPI-SINOVOIP/pi-linux.git

Change-Id: If80669df0bb3f613593358bc5e99533f20a66c5f
Signed-off-by: Michal Wilczynski <m.wilczynski@samsung.com>
36 files changed:
drivers/soc/Kconfig
drivers/soc/Makefile
drivers/soc/spacemit/Kconfig [new file with mode: 0644]
drivers/soc/spacemit/Makefile [new file with mode: 0644]
drivers/soc/spacemit/jpu/Kconfig [new file with mode: 0644]
drivers/soc/spacemit/jpu/Makefile [new file with mode: 0644]
drivers/soc/spacemit/jpu/config.h [new file with mode: 0644]
drivers/soc/spacemit/jpu/jmm.h [new file with mode: 0644]
drivers/soc/spacemit/jpu/jpu.c [new file with mode: 0644]
drivers/soc/spacemit/jpu/jpu.h [new file with mode: 0644]
drivers/soc/spacemit/jpu/jpu_export.c [new file with mode: 0644]
drivers/soc/spacemit/jpu/jpu_export.h [new file with mode: 0644]
drivers/soc/spacemit/jpu/jpuconfig.h [new file with mode: 0644]
drivers/soc/spacemit/jpu/regdefine.h [new file with mode: 0644]
drivers/soc/spacemit/k1x-dma-range.c [new file with mode: 0644]
drivers/soc/spacemit/pm_domain/Makefile [new file with mode: 0644]
drivers/soc/spacemit/pm_domain/atomic_qos.c [new file with mode: 0644]
drivers/soc/spacemit/pm_domain/atomic_qos.h [new file with mode: 0644]
drivers/soc/spacemit/pm_domain/k1x-pm_domain.c [new file with mode: 0644]
drivers/soc/spacemit/spacemit-rf/Kconfig [new file with mode: 0755]
drivers/soc/spacemit/spacemit-rf/Makefile [new file with mode: 0755]
drivers/soc/spacemit/spacemit-rf/spacemit-bt.c [new file with mode: 0755]
drivers/soc/spacemit/spacemit-rf/spacemit-pwrseq.c [new file with mode: 0755]
drivers/soc/spacemit/spacemit-rf/spacemit-pwrseq.h [new file with mode: 0755]
drivers/soc/spacemit/spacemit-rf/spacemit-wlan.c [new file with mode: 0755]
drivers/soc/spacemit/spacemit_reboot.c [new file with mode: 0755]
drivers/soc/spacemit/v2d/Kconfig [new file with mode: 0644]
drivers/soc/spacemit/v2d/Makefile [new file with mode: 0644]
drivers/soc/spacemit/v2d/csc_matrix.h [new file with mode: 0644]
drivers/soc/spacemit/v2d/v2d_drv.c [new file with mode: 0644]
drivers/soc/spacemit/v2d/v2d_drv.h [new file with mode: 0644]
drivers/soc/spacemit/v2d/v2d_hw.c [new file with mode: 0644]
drivers/soc/spacemit/v2d/v2d_iommu.c [new file with mode: 0644]
drivers/soc/spacemit/v2d/v2d_priv.h [new file with mode: 0644]
drivers/soc/spacemit/v2d/v2d_reg.h [new file with mode: 0644]
include/dt-bindings/pmu/k1x_pmu.h [new file with mode: 0644]

index d21e75d69294307c82300e72020b2da205557848..9ccd5a0050236e425347967bfc6ba38ada2420d3 100644 (file)
@@ -26,6 +26,7 @@ source "drivers/soc/samsung/Kconfig"
 source "drivers/soc/sifive/Kconfig"
 source "drivers/soc/starfive/Kconfig"
 source "drivers/soc/sunxi/Kconfig"
+source "drivers/soc/spacemit/Kconfig"
 source "drivers/soc/tegra/Kconfig"
 source "drivers/soc/ti/Kconfig"
 source "drivers/soc/ux500/Kconfig"
index 0706a27d13bef3c39e81ee53b496dee587c75cf9..78b3a68d937c80032b6a0e54d2d6d3c6c0758a98 100644 (file)
@@ -30,6 +30,7 @@ obj-y                         += rockchip/
 obj-$(CONFIG_SOC_SAMSUNG)      += samsung/
 obj-y                          += sifive/
 obj-y                          += sunxi/
+obj-$(CONFIG_ARCH_SPACEMIT)    += spacemit/
 obj-$(CONFIG_ARCH_TEGRA)       += tegra/
 obj-y                          += ti/
 obj-$(CONFIG_ARCH_U8500)       += ux500/
diff --git a/drivers/soc/spacemit/Kconfig b/drivers/soc/spacemit/Kconfig
new file mode 100644 (file)
index 0000000..d3e29d2
--- /dev/null
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if ARCH_SPACEMIT
+
+#
+# spacemit Soc drivers
+#
+config SPACEMIT_PM_DOMAINS
+       bool "Spacemit generic power domain"
+       depends on PM
+       select PM_GENERIC_DOMAINS
+       help
+         Say y here to enable power domain support.
+         In order to meet high performance and low power requirements, a power
+         management unit is designed for saving power.
+
+endif
+
+# NOTE(review): the entries below live outside the "if ARCH_SPACEMIT" block
+# that ends above and guard themselves with explicit "depends on" lines; the
+# three "source" statements at the bottom are included unconditionally.
+config SPACEMIT_REBOOT_CONTROL
+       tristate "Spacemit k1x reboot handler"
+       depends on ARCH_SPACEMIT
+       help
+         Spacemit reboot into fastboot mode
+
+# NOTE(review): "SPACEMI" (missing T) is intentional here only insofar as the
+# Makefile references the same misspelled symbol; renaming needs both changed.
+config SPACEMI_K1X_DMA_RANGE
+       tristate "Spacemit dram range driver for k1x"
+       depends on ARCH_SPACEMIT
+       help
+         This driver is an empty shell, in order to make the dma-ranges function
+          effective
+
+source "drivers/soc/spacemit/jpu/Kconfig"
+source "drivers/soc/spacemit/v2d/Kconfig"
+source "drivers/soc/spacemit/spacemit-rf/Kconfig"
diff --git a/drivers/soc/spacemit/Makefile b/drivers/soc/spacemit/Makefile
new file mode 100644 (file)
index 0000000..5d865c2
--- /dev/null
@@ -0,0 +1,6 @@
+obj-$(CONFIG_SPACEMIT_PM_DOMAINS) += pm_domain/
+obj-$(CONFIG_CHIP_MEDIA_JPU)    += jpu/
+obj-$(CONFIG_SPACEMIT_V2D)    += v2d/
+obj-$(CONFIG_SPACEMIT_RFKILL)   += spacemit-rf/
+obj-$(CONFIG_SPACEMIT_REBOOT_CONTROL)  += spacemit_reboot.o
+obj-$(CONFIG_SPACEMI_K1X_DMA_RANGE) += k1x-dma-range.o
diff --git a/drivers/soc/spacemit/jpu/Kconfig b/drivers/soc/spacemit/jpu/Kconfig
new file mode 100644 (file)
index 0000000..c31eb50
--- /dev/null
@@ -0,0 +1,11 @@
+config CHIP_MEDIA_JPU
+       tristate "chip media jpu driver"
+       help
+               This enables the chip media jpu driver
+
+config JPU_ENABLE_DEBUG_MSG
+       depends on CHIP_MEDIA_JPU
+       bool "chip media jpu driver debug"
+       default n
+       help
+               This enabled debug message output
diff --git a/drivers/soc/spacemit/jpu/Makefile b/drivers/soc/spacemit/jpu/Makefile
new file mode 100644 (file)
index 0000000..afdfed7
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CHIP_MEDIA_JPU) += jpu.o jpu_export.o
+ccflags-${CONFIG_JPU_ENABLE_DEBUG_MSG} += -DENABLE_DEBUG_MSG
diff --git a/drivers/soc/spacemit/jpu/config.h b/drivers/soc/spacemit/jpu/config.h
new file mode 100644 (file)
index 0000000..5522f5c
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CONFIG_H__
+#define __CONFIG_H__
+
+/* Derive a platform define from predefined compiler macros. */
+#if defined(_WIN32) || defined(__WIN32__) || defined(_WIN64) || defined(WIN32) || defined(__MINGW32__)
+#      define PLATFORM_WIN32
+#elif defined(linux) || defined(__linux) || defined(ANDROID)
+#      define PLATFORM_LINUX
+#else
+#      define PLATFORM_NON_OS
+#endif
+
+/* NOTE(review): empty conditional kept from vendor code; it defines nothing. */
+#if defined(CNM_FPGA_PLATFORM) || defined(CNM_SIM_PLATFORM)
+#ifdef ANDROID
+#else
+#endif
+#endif
+
+#define API_VERSION 0x124
+
+#endif /* __CONFIG_H__ */
diff --git a/drivers/soc/spacemit/jpu/jmm.h b/drivers/soc/spacemit/jpu/jmm.h
new file mode 100644 (file)
index 0000000..2b07173
--- /dev/null
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __CNM_JPU_MM_H__
+#define __CNM_JPU_MM_H__
+
+/*
+ * Page-granular allocator for JPU device memory.  Free and allocated
+ * regions are tracked in two AVL trees whose 64-bit keys pack two
+ * 32-bit values (see MAKE_KEY below).
+ */
+typedef unsigned long long jmem_key_t;
+
+#define JMEM_PAGE_SIZE           (16*1024)
+/* Pack two 32-bit values into one 64-bit key; _a occupies the high half. */
+#define MAKE_KEY(_a, _b)        (((jmem_key_t)_a)<<32 | _b)
+#define KEY_TO_VALUE(_key)      (_key>>32)
+
+/* Book-keeping for one JMEM_PAGE_SIZE page of device memory. */
+typedef struct page_struct {
+       int pageno;             /* index of this page in page_list */
+       unsigned long addr;     /* device address of the page */
+       int used;               /* non-zero while part of an allocation */
+       int alloc_pages;        /* on a block's first page: block length */
+       int first_pageno;       /* on a block's last page: first page index */
+} page_t;
+
+/* Node of the AVL trees used for free/allocated block tracking. */
+typedef struct avl_node_struct {
+       jmem_key_t key;
+       int height;             /* leaf height is 0; empty subtree is -1 */
+       page_t *page;
+       struct avl_node_struct *left;
+       struct avl_node_struct *right;
+} avl_node_t;
+
+/* Allocator state covering [base_addr, base_addr + mem_size). */
+typedef struct _jpu_mm_struct {
+       avl_node_t *free_tree;  /* free blocks keyed by (npages, pageno) */
+       avl_node_t *alloc_tree; /* live blocks keyed by (address, 0) */
+       page_t *page_list;      /* one entry per page */
+       int num_pages;
+       unsigned long base_addr;
+       unsigned long mem_size;
+       int free_page_count;
+       int alloc_page_count;
+} jpu_mm_t;
+
+#define VMEM_P_ALLOC(_x)         kmalloc(_x, GFP_KERNEL)
+#define VMEM_P_FREE(_x)          kfree(_x)
+
+#define VMEM_ASSERT(_exp)        if (!(_exp)) { printk(KERN_INFO "VMEM_ASSERT at %s:%d\n", __FILE__, __LINE__); /*while(1);*/ }
+#define VMEM_HEIGHT(_tree)       (_tree==NULL ? -1 : _tree->height)
+
+/* NOTE(review): evaluates both arguments twice; avoid side effects. */
+#define MAX(_a, _b)         (_a >= _b ? _a : _b)
+
+typedef enum {
+       LEFT,
+       RIGHT
+} rotation_dir_t;
+
+/* NOTE(review): appears unused within this header; vendor-code leftover. */
+typedef struct avl_node_data_struct {
+       int key;
+       page_t *page;
+} avl_node_data_t;
+
+/* Allocate and initialise a detached AVL node holding @page under @key. */
+static avl_node_t *make_avl_node(jmem_key_t key, page_t *page)
+{
+       avl_node_t *node = (avl_node_t *) VMEM_P_ALLOC(sizeof(avl_node_t));
+       /* NOTE(review): kmalloc result is not checked; a failed allocation
+        * would be dereferenced immediately below.
+        */
+       node->key = key;
+       node->page = page;
+       node->height = 0;
+       node->left = NULL;
+       node->right = NULL;
+
+       return node;
+}
+
+/* AVL balance factor: height(right) - height(left); 0 for an empty tree. */
+static int get_balance_factor(avl_node_t *tree)
+{
+       int factor = 0;
+       if (tree)
+               factor = VMEM_HEIGHT(tree->right) - VMEM_HEIGHT(tree->left);
+
+       return factor;
+}
+
+/*
+ * Left Rotation
+ *
+ *      A                      B
+ *       \                    / \
+ *        B         =>       A   C
+ *       /  \                 \
+ *      D    C                 D
+ *
+ */
+/*
+ * Rotate @tree left around its root and return the new root.
+ * Returns NULL for an empty tree and the unchanged tree when there is
+ * no right child to rotate through.
+ */
+static avl_node_t *rotation_left(avl_node_t *tree)
+{
+       avl_node_t *rchild;
+       avl_node_t *lchild;
+
+       if (tree == NULL)
+               return NULL;
+
+       rchild = tree->right;
+       if (rchild == NULL) {
+               return tree;
+       }
+
+       lchild = rchild->left;
+       rchild->left = tree;
+       tree->right = lchild;
+
+       /* Recompute heights bottom-up: old root first, then the new root. */
+       tree->height = MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+       rchild->height = MAX(VMEM_HEIGHT(rchild->left), VMEM_HEIGHT(rchild->right)) + 1;
+
+       return rchild;
+}
+
+/*
+ * Right Rotation
+ *
+ *         A                  B
+ *       \                  /  \
+ *      B         =>       D    A
+ *    /  \                     /
+ *   D    C                   C
+ *
+ */
+/*
+ * Rotate @tree right around its root and return the new root.
+ * NOTE(review): unlike rotation_left(), this returns NULL (not @tree)
+ * when there is no left child — asymmetry kept from vendor code.
+ */
+static avl_node_t *rotation_right(avl_node_t *tree)
+{
+       avl_node_t *rchild;
+       avl_node_t *lchild;
+
+       if (tree == NULL)
+               return NULL;
+
+       lchild = tree->left;
+       if (lchild == NULL)
+               return NULL;
+
+       rchild = lchild->right;
+       lchild->right = tree;
+       tree->left = rchild;
+
+       /* Recompute heights bottom-up: old root first, then the new root. */
+       tree->height = MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+       lchild->height = MAX(VMEM_HEIGHT(lchild->left), VMEM_HEIGHT(lchild->right)) + 1;
+
+       return lchild;
+}
+
+/*
+ * Rebalance @tree when its balance factor leaves [-1, 1], applying the
+ * standard single or double AVL rotations.  Returns the (possibly new)
+ * root, or NULL on an inconsistent child balance factor.
+ */
+static avl_node_t *do_balance(avl_node_t *tree)
+{
+       int bfactor = 0, child_bfactor; /* balancing factor */
+
+       bfactor = get_balance_factor(tree);
+
+       if (bfactor >= 2) {
+               /* Right-heavy: single left or right-left double rotation. */
+               child_bfactor = get_balance_factor(tree->right);
+               if (child_bfactor == 1 || child_bfactor == 0) {
+                       tree = rotation_left(tree);
+               } else if (child_bfactor == -1) {
+                       tree->right = rotation_right(tree->right);
+                       tree = rotation_left(tree);
+               } else {
+                       printk(KERN_INFO "invalid balancing factor: %d\n", child_bfactor);
+                       VMEM_ASSERT(0);
+                       return NULL;
+               }
+       } else if (bfactor <= -2) {
+               /* Left-heavy: single right or left-right double rotation. */
+               child_bfactor = get_balance_factor(tree->left);
+               if (child_bfactor == -1 || child_bfactor == 0) {
+                       tree = rotation_right(tree);
+               } else if (child_bfactor == 1) {
+                       tree->left = rotation_left(tree->left);
+                       tree = rotation_right(tree);
+               } else {
+                       printk(KERN_INFO "invalid balancing factor: %d\n", child_bfactor);
+                       VMEM_ASSERT(0);
+                       return NULL;
+               }
+       }
+
+       return tree;
+}
+
+/*
+ * Detach the leftmost (@dir == LEFT) or rightmost (@dir == RIGHT) node
+ * of @tree.  The detached node is returned through @found_node with both
+ * child links cleared; the function returns the rebalanced remainder of
+ * the subtree (NULL when @tree itself was the end node).
+ */
+static avl_node_t *unlink_end_node(avl_node_t *tree, int dir, avl_node_t **found_node)
+{
+       avl_node_t *node;
+       *found_node = NULL;
+
+       if (tree == NULL)
+               return NULL;
+
+       /* Root is already the end node in the requested direction. */
+       if (dir == LEFT) {
+               if (tree->left == NULL) {
+                       *found_node = tree;
+                       return NULL;
+               }
+       } else {
+               if (tree->right == NULL) {
+                       *found_node = tree;
+                       return NULL;
+               }
+       }
+
+       if (dir == LEFT) {
+               node = tree->left;
+               tree->left = unlink_end_node(tree->left, LEFT, found_node);
+               if (tree->left == NULL) {
+                       /* Child was the end node: adopt its other subtree. */
+                       tree->left = (*found_node)->right;
+                       (*found_node)->left = NULL;
+                       (*found_node)->right = NULL;
+               }
+       } else {
+               node = tree->right;
+               tree->right = unlink_end_node(tree->right, RIGHT, found_node);
+               if (tree->right == NULL) {
+                       tree->right = (*found_node)->left;
+                       (*found_node)->left = NULL;
+                       (*found_node)->right = NULL;
+               }
+       }
+
+       tree->height = MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+
+       return do_balance(tree);
+}
+
+/*
+ * Insert (@key, @page) into @tree and return the rebalanced root.
+ * Duplicate keys are placed in the right subtree.
+ * NOTE(review): relies on make_avl_node(), whose allocation is unchecked.
+ */
+static avl_node_t *avltree_insert(avl_node_t *tree, jmem_key_t key, page_t *page)
+{
+       if (tree == NULL) {
+               tree = make_avl_node(key, page);
+       } else {
+               if (key >= tree->key) {
+                       tree->right = avltree_insert(tree->right, key, page);
+               } else {
+                       tree->left = avltree_insert(tree->left, key, page);
+               }
+       }
+
+       tree = do_balance(tree);
+
+       tree->height = MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+
+       return tree;
+}
+
+/*
+ * Remove @tree's root from the tree: splice in its in-order successor
+ * (leftmost of the right subtree) or, failing that, its in-order
+ * predecessor, and return the replacement root (NULL if none).
+ */
+static avl_node_t *do_unlink(avl_node_t *tree)
+{
+       avl_node_t *node;
+       avl_node_t *end_node;
+       node = unlink_end_node(tree->right, LEFT, &end_node);
+       if (node) {
+               tree->right = node;
+       } else {
+               node = unlink_end_node(tree->left, RIGHT, &end_node);
+               if (node)
+                       tree->left = node;
+       }
+
+       if (node == NULL) {
+               /* A child itself was the end node (one-node subtree). */
+               node = tree->right ? tree->right : tree->left;
+               end_node = node;
+       }
+
+       if (end_node) {
+               /* Replacement inherits the removed root's children. */
+               end_node->left = (tree->left != end_node) ? tree->left : end_node->left;
+               end_node->right = (tree->right != end_node) ? tree->right : end_node->right;
+               end_node->height =
+                   MAX(VMEM_HEIGHT(end_node->left), VMEM_HEIGHT(end_node->right)) + 1;
+       }
+
+       tree = end_node;
+
+       return tree;
+}
+
+/*
+ * Unlink the node with exactly @key from @tree.  The unlinked node is
+ * returned through @found_node (NULL if the key is absent; the caller
+ * frees it) and the rebalanced root is returned.
+ */
+static avl_node_t *avltree_remove(avl_node_t *tree, avl_node_t **found_node, jmem_key_t key)
+{
+       *found_node = NULL;
+       if (tree == NULL) {
+               printk(KERN_INFO "failed to find key %d\n", (int)key);
+               return NULL;
+       }
+
+       if (key == tree->key) {
+               *found_node = tree;
+               tree = do_unlink(tree);
+       } else if (key > tree->key) {
+               tree->right = avltree_remove(tree->right, found_node, key);
+       } else {
+               tree->left = avltree_remove(tree->left, found_node, key);
+       }
+
+       if (tree)
+               tree->height = MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+
+       tree = do_balance(tree);
+
+       return tree;
+}
+
+/*
+ * Recursively free every node of @tree.
+ *
+ * Fix: the vendor implementation freed leaf nodes only and returned from
+ * internal nodes without releasing them, leaking one avl_node_t per
+ * interior node on every teardown.  A plain post-order traversal frees
+ * the whole tree.
+ */
+void avltree_free(avl_node_t *tree)
+{
+       if (tree == NULL)
+               return;
+
+       avltree_free(tree->left);
+       avltree_free(tree->right);
+       VMEM_P_FREE(tree);
+}
+
+/*
+ * Best-fit lookup: unlink the node with the smallest key >= @key and
+ * return it through @found (NULL if no such node).  Returns the
+ * rebalanced root.  Used to find the smallest adequate free block.
+ */
+static avl_node_t *remove_approx_value(avl_node_t *tree, avl_node_t **found, jmem_key_t key)
+{
+       *found = NULL;
+       if (tree == NULL) {
+               return NULL;
+       }
+
+       if (key == tree->key) {
+               *found = tree;
+               tree = do_unlink(tree);
+       } else if (key > tree->key) {
+               tree->right = remove_approx_value(tree->right, found, key);
+       } else {
+               tree->left = remove_approx_value(tree->left, found, key);
+               /* Nothing smaller fits in the left subtree: take this node. */
+               if (*found == NULL) {
+                       *found = tree;
+                       tree = do_unlink(tree);
+               }
+       }
+       if (tree)
+               tree->height = MAX(VMEM_HEIGHT(tree->left), VMEM_HEIGHT(tree->right)) + 1;
+       tree = do_balance(tree);
+
+       return tree;
+}
+
+/*
+ * Mark pages [pageno, pageno + npages) free and insert the block into
+ * the free tree keyed by (npages, pageno).  The first page records the
+ * block length; the last page records the first page's index.
+ */
+static void set_blocks_free(jpu_mm_t *mm, int pageno, int npages)
+{
+       int last_pageno = pageno + npages - 1;
+       int i;
+       page_t *page;
+       page_t *last_page;
+
+       VMEM_ASSERT(npages);
+
+       if (last_pageno >= mm->num_pages) {
+               printk(KERN_INFO "set_blocks_free: invalid last page number: %d\n", last_pageno);
+               VMEM_ASSERT(0);
+               return;
+       }
+
+       for (i = pageno; i <= last_pageno; i++) {
+               mm->page_list[i].used = 0;
+               mm->page_list[i].alloc_pages = 0;
+               mm->page_list[i].first_pageno = -1;
+       }
+
+       page = &mm->page_list[pageno];
+       page->alloc_pages = npages;
+       last_page = &mm->page_list[last_pageno];
+       last_page->first_pageno = pageno;
+
+       mm->free_tree = avltree_insert(mm->free_tree, MAKE_KEY(npages, pageno), page);
+}
+
+/*
+ * Mark pages [pageno, pageno + npages) allocated and insert the block
+ * into the allocated tree, keyed by its device address.
+ */
+static void set_blocks_alloc(jpu_mm_t *mm, int pageno, int npages)
+{
+       int last_pageno = pageno + npages - 1;
+       int i;
+       page_t *page;
+       page_t *last_page;
+
+       if (last_pageno >= mm->num_pages) {
+               /* Fix: message previously said "set_blocks_free" (copy-paste). */
+               printk(KERN_INFO "set_blocks_alloc: invalid last page number: %d\n", last_pageno);
+               VMEM_ASSERT(0);
+               return;
+       }
+
+       for (i = pageno; i <= last_pageno; i++) {
+               mm->page_list[i].used = 1;
+               mm->page_list[i].alloc_pages = 0;
+               mm->page_list[i].first_pageno = -1;
+       }
+
+       page = &mm->page_list[pageno];
+       page->alloc_pages = npages;
+
+       last_page = &mm->page_list[last_pageno];
+       last_page->first_pageno = pageno;
+
+       mm->alloc_tree = avltree_insert(mm->alloc_tree, MAKE_KEY(page->addr, 0), page);
+}
+
+/*
+ * Initialise allocator @mm over @size bytes of device memory at @addr.
+ * The base address is rounded up and the size rounded down to whole
+ * JMEM_PAGE_SIZE pages.  Returns 0 on success, -1 on allocation failure.
+ */
+int jmem_init(jpu_mm_t *mm, unsigned long addr, unsigned long size)
+{
+       int i;
+
+       mm->base_addr = (addr + (JMEM_PAGE_SIZE - 1)) & ~(JMEM_PAGE_SIZE - 1);
+       /*
+        * Fix: the vendor code masked with ~JMEM_PAGE_SIZE, which clears
+        * only bit 14 instead of aligning down — e.g. size == 16 KiB
+        * produced mem_size == 0.  Use the proper alignment mask.
+        */
+       mm->mem_size = size & ~(JMEM_PAGE_SIZE - 1);
+       mm->num_pages = mm->mem_size / JMEM_PAGE_SIZE;
+       mm->page_list = (page_t *) VMEM_P_ALLOC(mm->num_pages * sizeof(page_t));
+       if (mm->page_list == NULL)
+               return -1;      /* previously unchecked kmalloc */
+       mm->free_tree = NULL;
+       mm->alloc_tree = NULL;
+       mm->free_page_count = mm->num_pages;
+       mm->alloc_page_count = 0;
+
+       for (i = 0; i < mm->num_pages; i++) {
+               mm->page_list[i].pageno = i;
+               mm->page_list[i].addr = mm->base_addr + i * JMEM_PAGE_SIZE;
+               mm->page_list[i].alloc_pages = 0;
+               mm->page_list[i].used = 0;
+               mm->page_list[i].first_pageno = -1;
+       }
+
+       set_blocks_free(mm, 0, mm->num_pages);
+
+       return 0;
+}
+
+/*
+ * Tear down allocator @mm: free both AVL trees and the page table and
+ * zero all state.  Returns 0 on success, -1 when @mm is NULL.
+ */
+int jmem_exit(jpu_mm_t *mm)
+{
+       if (mm == NULL) {
+               printk(KERN_INFO "vmem_exit: invalid handle\n");
+               return -1;
+       }
+
+       /* NULL guards are redundant (avltree_free handles NULL) but harmless. */
+       if (mm->free_tree) {
+               avltree_free(mm->free_tree);
+       }
+       if (mm->alloc_tree) {
+               avltree_free(mm->alloc_tree);
+       }
+
+       VMEM_P_FREE(mm->page_list);
+
+       mm->base_addr = 0;
+       mm->mem_size = 0;
+       mm->num_pages = 0;
+       mm->page_list = NULL;
+       mm->free_tree = NULL;
+       mm->alloc_tree = NULL;
+       mm->free_page_count = 0;
+       mm->alloc_page_count = 0;
+       return 0;
+}
+
+/*
+ * Allocate @size bytes (rounded up to whole pages) from @mm using a
+ * best-fit search of the free tree, splitting the surplus back into the
+ * free tree.  Returns the device address of the block.
+ *
+ * NOTE(review): failure is signalled by returning -1 cast to the
+ * unsigned return type; callers must compare against (unsigned long)-1.
+ * @pid is accepted but unused here.
+ */
+unsigned long jmem_alloc(jpu_mm_t *mm, int size, unsigned long pid)
+{
+       avl_node_t *node;
+       page_t *free_page;
+       int npages, free_size;
+       int alloc_pageno;
+       unsigned long ptr;
+
+       if (mm == NULL) {
+               printk(KERN_INFO "vmem_alloc: invalid handle\n");
+               return -1;
+       }
+
+       if (size <= 0)
+               return -1;
+
+       npages = (size + JMEM_PAGE_SIZE - 1) / JMEM_PAGE_SIZE;
+
+       /* Smallest free block with at least npages pages. */
+       mm->free_tree = remove_approx_value(mm->free_tree, &node, MAKE_KEY(npages, 0));
+       if (node == NULL) {
+               return -1;
+       }
+       free_page = node->page;
+       free_size = KEY_TO_VALUE(node->key);
+
+       alloc_pageno = free_page->pageno;
+       set_blocks_alloc(mm, alloc_pageno, npages);
+       /* Return the unused tail of the block to the free tree. */
+       if (npages != free_size) {
+               int free_pageno = alloc_pageno + npages;
+               set_blocks_free(mm, free_pageno, (free_size - npages));
+       }
+
+       VMEM_P_FREE(node);
+
+       ptr = mm->page_list[alloc_pageno].addr;
+       mm->alloc_page_count += npages;
+       mm->free_page_count -= npages;
+
+       return ptr;
+}
+
+/*
+ * Return the block at device address @ptr to @mm, coalescing it with
+ * any adjacent free blocks before re-inserting into the free tree.
+ * Returns 0 on success, -1 on a bad handle or unknown address.
+ * @pid is accepted but unused here.
+ */
+int jmem_free(jpu_mm_t *mm, unsigned long ptr, unsigned long pid)
+{
+       unsigned long addr;
+       avl_node_t *found;
+       page_t *page;
+       int pageno, prev_free_pageno, next_free_pageno;
+       int prev_size, next_size;
+       int merge_page_no, merge_page_size, free_page_size;
+
+       if (mm == NULL) {
+               printk(KERN_INFO "vmem_free: invalid handle\n");
+               return -1;
+       }
+
+       addr = ptr;
+
+       mm->alloc_tree = avltree_remove(mm->alloc_tree, &found, MAKE_KEY(addr, 0));
+       if (found == NULL) {
+               /* NOTE(review): %08x with an int cast truncates 64-bit addresses. */
+               printk(KERN_INFO "vmem_free: 0x%08x not found\n", (int)addr);
+               VMEM_ASSERT(0);
+               return -1;
+       }
+
+       /* find previous free block */
+       page = found->page;
+       pageno = page->pageno;
+       free_page_size = page->alloc_pages;
+       prev_free_pageno = pageno - 1;
+       prev_size = -1;
+       if (prev_free_pageno >= 0) {
+               if (mm->page_list[prev_free_pageno].used == 0) {
+                       /* Last page of the previous block names its first page. */
+                       prev_free_pageno = mm->page_list[prev_free_pageno].first_pageno;
+                       prev_size = mm->page_list[prev_free_pageno].alloc_pages;
+               }
+       }
+
+       /* find next free block */
+       next_free_pageno = pageno + page->alloc_pages;
+       next_free_pageno = (next_free_pageno == mm->num_pages) ? -1 : next_free_pageno;
+       next_size = -1;
+       if (next_free_pageno >= 0) {
+               if (mm->page_list[next_free_pageno].used == 0) {
+                       next_size = mm->page_list[next_free_pageno].alloc_pages;
+               }
+       }
+       VMEM_P_FREE(found);
+
+       /* merge with the free neighbours found above */
+       merge_page_no = page->pageno;
+       merge_page_size = page->alloc_pages;
+       if (prev_size >= 0) {
+               mm->free_tree =
+                   avltree_remove(mm->free_tree, &found, MAKE_KEY(prev_size, prev_free_pageno));
+               if (found == NULL) {
+                       VMEM_ASSERT(0);
+                       return -1;
+               }
+               /* Merged block now starts at the previous block's first page. */
+               merge_page_no = found->page->pageno;
+               merge_page_size += found->page->alloc_pages;
+               VMEM_P_FREE(found);
+       }
+       if (next_size >= 0) {
+               mm->free_tree =
+                   avltree_remove(mm->free_tree, &found, MAKE_KEY(next_size, next_free_pageno));
+               if (found == NULL) {
+                       VMEM_ASSERT(0);
+                       return -1;
+               }
+               merge_page_size += found->page->alloc_pages;
+               VMEM_P_FREE(found);
+       }
+
+       page->alloc_pages = 0;
+       page->first_pageno = -1;
+
+       set_blocks_free(mm, merge_page_no, merge_page_size);
+
+       mm->alloc_page_count -= free_page_size;
+       mm->free_page_count += free_page_size;
+
+       return 0;
+}
+
+#endif /* __CNM_JPU_MM_H__ */
diff --git a/drivers/soc/spacemit/jpu/jpu.c b/drivers/soc/spacemit/jpu/jpu.c
new file mode 100644 (file)
index 0000000..93dc44d
--- /dev/null
@@ -0,0 +1,1806 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+//#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/pm_runtime.h>
+#include <asm/io.h>
+#include <linux/pm_qos.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include "jpuconfig.h"
+#include "regdefine.h"
+#include "jpu.h"
+#include "jpu_export.h"
+
+#define JPU_TBU_BASE_VA (0x80000000)
+#define JPU_TBU_VA_STEP (0x2000000)
+#define DDR_QOS_ENABLE
+#ifdef ENABLE_DEBUG_MSG
+#define JLOG(dev, fmt, args...) dev_info(dev, "JPU: " fmt,  ## args);
+#else
+#define JLOG(args...)
+#endif
+jpu_dma_buf_info buf_inf[2];
+#if defined (CONFIG_PM) && defined (DDR_QOS_ENABLE)
+//static struct freq_qos_request jpu_ddrfreq_qos_rreq_sum;
+//static struct freq_qos_request jpu_ddrfreq_qos_wreq_sum;
+#endif
+/* Per-driver state shared by all open file handles. */
+typedef struct jpu_drv_context_t {
+       struct fasync_struct *async_queue;
+       u32 open_count;                         /* number of openers */
+       u32 interrupt_reason[MAX_NUM_INSTANCE]; /* latched IRQ cause per instance */
+} jpu_drv_context_t;
+
+/* Direction selector for the JPU's IOMMU TBUs. */
+enum spacemit_iommu_type {
+       TBU_INPUT = 0,
+       TBU_OUTPUT = 1,
+};
+
+/* One driver-managed buffer, linked into s_jbp_head and owned by @filp. */
+typedef struct jpudrv_buffer_pool_t {
+       struct list_head list;
+       struct jpudrv_buffer_t jb;
+       struct file *filp;
+} jpudrv_buffer_pool_t;
+
+/* One codec instance, linked into s_inst_list_head and owned by @filp. */
+typedef struct jpudrv_instance_list_t {
+       struct list_head list;
+       unsigned long inst_idx;
+       struct file *filp;
+} jpudrv_instance_list_t;
+
+/* Opaque per-instance handle storage shared with user space. */
+typedef struct jpudrv_instance_pool_t {
+       unsigned char codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE];
+} jpudrv_instance_pool_t;
+
+/* Device-instance state for one JPU, allocated at probe time. */
+struct jpu_device {
+       struct device *jdev;
+
+       /* Character-device plumbing. */
+       struct device *jpu_device;
+       struct cdev s_jpu_cdev;
+       struct class *jpu_class;
+       dev_t s_jpu_major;
+
+       jpudrv_buffer_t s_instance_pool;
+       jpu_drv_context_t s_jpu_drv_context;
+
+       int s_jpu_open_ref_count;
+       int s_jpu_irq;
+
+       /* Core clock plus per-clock enable refcount and frequency limits.
+        * NOTE(review): aclk/iclk max use int64_t while the rest are
+        * int32_t — looks unintentional, confirm against vendor tree.
+        */
+       struct clk *cclk;
+       atomic_t cclk_enable_count;
+       int32_t cclk_max_frequency;
+       int32_t cclk_min_frequency;
+       int32_t cclk_cur_frequency;
+       int32_t cclk_default_frequency;
+       struct clk *aclk;
+       atomic_t aclk_enable_count;
+       int64_t aclk_max_frequency;
+       int32_t aclk_min_frequency;
+       int32_t aclk_cur_frequency;
+       int32_t aclk_default_frequency;
+
+       struct clk *iclk;
+       atomic_t iclk_enable_count;
+       int64_t iclk_max_frequency;
+       int32_t iclk_min_frequency;
+       int32_t iclk_cur_frequency;
+       int32_t iclk_default_frequency;
+
+       /* Reset lines with matching assert/deassert refcounts. */
+       struct reset_control *jpg_reset;
+       atomic_t jpg_reset_enable_count;
+
+       struct reset_control *lcd_mclk_reset;
+       atomic_t lcd_mclk_reset_enable_count;
+
+       struct reset_control *isp_ci_reset;
+       atomic_t isp_ci_reset_enable_count;
+
+       struct reset_control *freset;
+       atomic_t freset_enable_count;
+
+       struct reset_control *sreset;
+       atomic_t sreset_enable_count;
+
+       /* MMIO register window. */
+       jpudrv_buffer_t s_jpu_register;
+       void __iomem *reg;
+
+       /* Per-instance IRQ completion flags and wait queues. */
+       int s_interrupt_flag[MAX_NUM_INSTANCE];
+       wait_queue_head_t s_interrupt_wait_q[MAX_NUM_INSTANCE];
+
+       spinlock_t s_jpu_lock;
+       struct semaphore s_jpu_sem;
+       struct list_head s_jbp_head;            /* buffer pool list */
+       struct list_head s_inst_list_head;      /* instance list */
+       u32 time_out_cycs;
+       /* IOMMU TBU bookkeeping: VA window and instance bitmap. */
+       u32 page_size;
+       u64 va_base;
+       u64 va_end;
+       struct tbu_instance tbu_ins[TBU_INSTANCES_NUM];
+       unsigned long tbu_ins_bitmap;
+       spinlock_t tbu_ins_bitmap_lock;
+       int tbu_ins_map;
+       bool is_hw_enable;
+       spinlock_t hw_access_lock;
+       struct semaphore tbu_ins_free_cnt;
+#ifdef CONFIG_PM
+#ifdef DDR_QOS_ENABLE
+       /* DDR frequency QoS constraints (read/write requests). */
+       struct freq_constraints *ddr_qos_cons;
+       struct freq_qos_request *ddr_qos_rreq;
+       struct freq_qos_request *ddr_qos_wreq;
+#endif
+#endif
+};
+
+/* Write @val to the JPU MMIO register at byte @offset. */
+static void jpu_writel(struct jpu_device *dev, int offset, u32 val)
+{
+       writel(val, dev->reg + offset);
+}
+
+/* Read the JPU MMIO register at byte @offset. */
+static u32 jpu_readl(struct jpu_device *dev, int offset)
+{
+       return readl(dev->reg + offset);
+}
+
+/* Read-modify-write: set @bits in the register at @offset. */
+static void jpu_set_reg_bits(struct jpu_device *dev, u64 offset, u32 bits)
+{
+       jpu_writel(dev, offset, (jpu_readl(dev, offset) | bits));
+}
+
+/* Read-modify-write: clear @bits in the register at @offset. */
+static void jpu_clear_reg_bits(struct jpu_device *dev, u64 offset, u32 bits)
+{
+       jpu_writel(dev, offset, (jpu_readl(dev, offset) & ~bits));
+}
+
+/*
+ * Deassert the JPEG core reset, bumping its refcount.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the vendor code validated jdev->freset here (copy-paste from
+ * jpu_freset_deassert) while operating on jdev->jpg_reset.
+ */
+static int jpu_jpg_reset_deassert(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->jpg_reset)) {
+               return -EINVAL;
+       }
+       atomic_inc(&jdev->jpg_reset_enable_count);
+       JLOG(jdev->jdev, "deassert jpg_reset\n");
+       return reset_control_deassert(jdev->jpg_reset);
+}
+
+/* Deassert the LCD mclk reset, bumping its refcount; -EINVAL if absent. */
+static int jpu_lcd_mclk_reset_deassert(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->lcd_mclk_reset)) {
+               return -EINVAL;
+       }
+       atomic_inc(&jdev->lcd_mclk_reset_enable_count);
+       JLOG(jdev->jdev, "deassert lcd_mclk_reset\n");
+       return reset_control_deassert(jdev->lcd_mclk_reset);
+}
+
+/* Deassert the ISP CI reset, bumping its refcount; -EINVAL if absent. */
+static int jpu_isp_ci_reset_deassert(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->isp_ci_reset)) {
+               return -EINVAL;
+       }
+       atomic_inc(&jdev->isp_ci_reset_enable_count);
+       JLOG(jdev->jdev, "deassert isp_ci_reset\n");
+       return reset_control_deassert(jdev->isp_ci_reset);
+}
+
+/* Deassert freset, bumping its refcount; -EINVAL if absent. */
+static int jpu_freset_deassert(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->freset)) {
+               return -EINVAL;
+       }
+       atomic_inc(&jdev->freset_enable_count);
+       JLOG(jdev->jdev, "deassert freset\n");
+       return reset_control_deassert(jdev->freset);
+}
+
+/* Deassert sreset, bumping its refcount; -EINVAL if absent. */
+static int jpu_sreset_deassert(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->sreset)) {
+               return -EINVAL;
+       }
+       atomic_inc(&jdev->sreset_enable_count);
+       JLOG(jdev->jdev, "deassert sreset\n");
+       return reset_control_deassert(jdev->sreset);
+}
+
+/* Assert jpg_reset if held and the deassert refcount is positive. */
+static void jpu_jpg_reset_assert(struct jpu_device *jdev)
+{
+       if (!IS_ERR_OR_NULL(jdev->jpg_reset)
+           && atomic_read(&jdev->jpg_reset_enable_count) >= 1) {
+               JLOG(jdev->jdev, "assert jpg_reset\n");
+               atomic_dec(&jdev->jpg_reset_enable_count);
+               reset_control_assert(jdev->jpg_reset);
+       }
+}
+
+/* Assert lcd_mclk_reset if held and its deassert refcount is positive. */
+static void jpu_lcd_mclk_reset_assert(struct jpu_device *jdev)
+{
+       if (!IS_ERR_OR_NULL(jdev->lcd_mclk_reset)
+           && atomic_read(&jdev->lcd_mclk_reset_enable_count) >= 1) {
+               JLOG(jdev->jdev, "assert lcd_mclk_reset\n");
+               atomic_dec(&jdev->lcd_mclk_reset_enable_count);
+               reset_control_assert(jdev->lcd_mclk_reset);
+       }
+}
+
+/* Assert isp_ci_reset if held and its deassert refcount is positive. */
+static void jpu_isp_ci_reset_assert(struct jpu_device *jdev)
+{
+       if (!IS_ERR_OR_NULL(jdev->isp_ci_reset)
+           && atomic_read(&jdev->isp_ci_reset_enable_count) >= 1) {
+               JLOG(jdev->jdev, "assert isp_ci_reset\n");
+               atomic_dec(&jdev->isp_ci_reset_enable_count);
+               reset_control_assert(jdev->isp_ci_reset);
+       }
+}
+
+/* Assert freset if held and its deassert refcount is positive. */
+static void jpu_freset_assert(struct jpu_device *jdev)
+{
+       if (!IS_ERR_OR_NULL(jdev->freset)
+           && atomic_read(&jdev->freset_enable_count) >= 1) {
+               JLOG(jdev->jdev, "assert freset\n");
+               atomic_dec(&jdev->freset_enable_count);
+               reset_control_assert(jdev->freset);
+       }
+}
+
+/* Assert sreset if held and its deassert refcount is positive. */
+static void jpu_sreset_assert(struct jpu_device *jdev)
+{
+       if (!IS_ERR_OR_NULL(jdev->sreset)
+           && atomic_read(&jdev->sreset_enable_count) >= 1) {
+               JLOG(jdev->jdev, "assert sreset\n");
+               atomic_dec(&jdev->sreset_enable_count);
+               reset_control_assert(jdev->sreset);
+       }
+}
+
+
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+/* Prepare and enable the AXI clock; the grab is counted so the disable
+ * path can balance it. Returns -EINVAL if the clock is absent.
+ */
+static int jpu_aclk_enable(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->aclk))
+               return -EINVAL;
+
+       atomic_inc(&jdev->aclk_enable_count);
+       JLOG(jdev->jdev, "enable aclk\n");
+       return clk_prepare_enable(jdev->aclk);
+}
+
+/* Prepare and enable the interface clock; the grab is counted so the
+ * disable path can balance it. Returns -EINVAL if the clock is absent.
+ */
+static int jpu_iclk_enable(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->iclk))
+               return -EINVAL;
+
+       atomic_inc(&jdev->iclk_enable_count);
+       JLOG(jdev->jdev, "enable iclk\n");
+       return clk_prepare_enable(jdev->iclk);
+}
+
+/* Prepare and enable the core clock; the grab is counted so the disable
+ * path can balance it. Returns -EINVAL if the clock is absent.
+ */
+static int jpu_cclk_enable(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->cclk))
+               return -EINVAL;
+
+       atomic_inc(&jdev->cclk_enable_count);
+       JLOG(jdev->jdev, "enable cclk\n");
+       return clk_prepare_enable(jdev->cclk);
+}
+
+/* Disable the AXI clock. A no-op when the clock is missing, already off,
+ * or was never enabled through this driver (count check).
+ */
+static void jpu_aclk_disable(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->aclk) || !__clk_is_enabled(jdev->aclk))
+               return;
+       if (atomic_read(&jdev->aclk_enable_count) < 1)
+               return;
+
+       JLOG(jdev->jdev, "disable aclk\n");
+       atomic_dec(&jdev->aclk_enable_count);
+       clk_disable_unprepare(jdev->aclk);
+}
+
+/* Disable the core clock. A no-op when the clock is missing, already off,
+ * or was never enabled through this driver (count check).
+ */
+static void jpu_cclk_disable(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->cclk) || !__clk_is_enabled(jdev->cclk))
+               return;
+       if (atomic_read(&jdev->cclk_enable_count) < 1)
+               return;
+
+       JLOG(jdev->jdev, "disable cclk\n");
+       atomic_dec(&jdev->cclk_enable_count);
+       clk_disable_unprepare(jdev->cclk);
+}
+
+/* Disable the interface clock. A no-op when the clock is missing, already
+ * off, or was never enabled through this driver (count check).
+ */
+static void jpu_iclk_disable(struct jpu_device *jdev)
+{
+       if (IS_ERR_OR_NULL(jdev->iclk) || !__clk_is_enabled(jdev->iclk))
+               return;
+       if (atomic_read(&jdev->iclk_enable_count) < 1)
+               return;
+
+       JLOG(jdev->jdev, "disable iclk\n");
+       atomic_dec(&jdev->iclk_enable_count);
+       clk_disable_unprepare(jdev->iclk);
+}
+
+/* Enable the JPU functional clocks (cclk, aclk, iclk) and release every
+ * reset line, in that order. The original left partially-enabled clocks
+ * and deasserted resets behind on failure; any error now unwinds through
+ * jpu_clk_disable(), whose helpers are enable-count guarded and therefore
+ * only undo what actually succeeded. Dead "#if 0" clk_set_rate code was
+ * removed. Returns 0 on success or the first failing helper's error code.
+ */
+static int jpu_clk_enable(struct jpu_device *jdev)
+{
+       int ret;
+
+       ret = jpu_cclk_enable(jdev);
+       if (ret)
+               goto err;
+       ret = jpu_aclk_enable(jdev);
+       if (ret)
+               goto err;
+       ret = jpu_iclk_enable(jdev);
+       if (ret)
+               goto err;
+       ret = jpu_jpg_reset_deassert(jdev);
+       if (ret)
+               goto err;
+       ret = jpu_lcd_mclk_reset_deassert(jdev);
+       if (ret)
+               goto err;
+       ret = jpu_isp_ci_reset_deassert(jdev);
+       if (ret)
+               goto err;
+       ret = jpu_freset_deassert(jdev);
+       if (ret)
+               goto err;
+       ret = jpu_sreset_deassert(jdev);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       jpu_clk_disable(jdev);
+       return ret;
+}
+
+/* Gate all JPU clocks and assert every reset line. Each helper below checks
+ * its own enable count (and, for clocks, __clk_is_enabled()), so this is
+ * safe to call regardless of how far a preceding jpu_clk_enable() got.
+ * The clocks-then-resets ordering mirrors the enable sequence in reverse
+ * and is deliberately kept as-is.
+ */
+static void jpu_clk_disable(struct jpu_device *jdev)
+{
+       jpu_cclk_disable(jdev);
+       jpu_aclk_disable(jdev);
+       jpu_iclk_disable(jdev);
+       jpu_jpg_reset_assert(jdev);
+       jpu_lcd_mclk_reset_assert(jdev);
+       jpu_isp_ci_reset_assert(jdev);
+       jpu_freset_assert(jdev);
+       jpu_sreset_assert(jdev);
+}
+
+/* Userspace-requested hardware reset. The original duplicated the body of
+ * jpu_clk_disable() line for line; call it instead so the shutdown sequence
+ * lives in one place. Always returns 0.
+ */
+static int jpu_hw_reset(struct jpu_device *jdev)
+{
+       JLOG(jdev->jdev, "request jpu reset from application.\n");
+
+       jpu_clk_disable(jdev);
+
+       return 0;
+}
+#endif
+
+/* Initialise the JPU MMU: reset the software state of every TBU instance,
+ * program the IOVA base and timeout registers, enable the MMU interrupt
+ * and mark the hardware as enabled. Always returns 0.
+ */
+static int jpu_enable_jpu_mmu_hw(struct jpu_device *jpu_device)
+{
+       int i;
+       struct tbu_instance *tbu;
+
+       /* Reset per-instance bookkeeping to the default TBU configuration. */
+       for (i = 0; i < TBU_INSTANCES_NUM; i++) {
+               tbu = &jpu_device->tbu_ins[i];
+               tbu->ttb_size = 0;
+               tbu->always_preload = false;
+               tbu->enable_preload = true;
+               tbu->nsaid = 0;
+               tbu->qos = 2;
+               tbu->secure_enable = false;
+       }
+       jpu_device->tbu_ins_map = -1;
+       //jpu_device->tbu_ins_bitmap = 0;
+       sema_init(&jpu_device->tbu_ins_free_cnt, TBU_INSTANCES_NUM);
+
+       /* Set MJPEG_MMU iova base */
+       jpu_writel(jpu_device, MJPEG_MMU_BVA_LO, jpu_device->va_base & 0xFFFFFFFF);
+       jpu_writel(jpu_device, MJPEG_MMU_BVA_HI, jpu_device->va_base >> 32);
+
+       /* Set MJPEG_MMU timeout cycles */
+       jpu_writel(jpu_device, MJPEG_MMU_TIMEOUT_VALUE, jpu_device->time_out_cycs);
+
+       /* Enable MJPEG_MMU irq */
+       /* NOTE(review): a mask of 0 sets no bits, while the disable path
+        * clears 0x1FF. Presumably this should enable the same 0x1FF mask —
+        * confirm against the MJPEG MMU register spec before changing.
+        */
+       jpu_set_reg_bits(jpu_device, MJPEG_MMU_IRQ_ENABLE, 0);
+
+       jpu_device->is_hw_enable = true;
+       return 0;
+}
+
+/* Quiesce the JPU MMU: clear is_hw_enable under hw_access_lock (so a
+ * concurrent jpu_mmu_post() observes the flag and bails out before touching
+ * registers), reset every TBU's table size, switch all TBUs off and mask
+ * the MMU interrupt sources.
+ */
+static void jpu_disable_jpu_mmu_hw(struct jpu_device *jpu_device)
+{
+       int i;
+       struct tbu_instance *tbu;
+
+       /* Waiting for post done. */
+       spin_lock(&jpu_device->hw_access_lock);
+       jpu_device->is_hw_enable = false;
+       spin_unlock(&jpu_device->hw_access_lock);
+
+       for (i = 0; i < TBU_INSTANCES_NUM; i++) {
+               tbu = &jpu_device->tbu_ins[i];
+               tbu->ttb_size = 0;
+       }
+       /* Disable all TBUs. */
+       for (i = 0; i < TBU_NUM; i++)
+               jpu_writel(jpu_device, MJPEG_MMU_TCR0_BASE + MJPEG_MMU_TBUx_STEP * i, 0);
+
+       /* Disable MJPEG_MMU irq. */
+       jpu_clear_reg_bits(jpu_device, MJPEG_MMU_IRQ_ENABLE, 0x1FF);
+
+}
+
+/* Populate @tbu's translation table for the IOVA range [@iova, @iova+@size)
+ * with 32-bit entries derived from @paddr. The physical address is truncated
+ * to 32 bits each step and shifted down by TTB_ENTRY_SHIFT to form a page
+ * number.
+ * NOTE(review): the loop assumes @size is a non-zero multiple of page_size;
+ * otherwise "size -= page_size" wraps past zero. Callers appear to validate
+ * the range but not the length's alignment — confirm before reusing.
+ */
+static void jpu_write_tbu_table(struct jpu_device *jpu_device, struct tbu_instance *tbu,
+                               unsigned long iova, phys_addr_t paddr, size_t size)
+{
+       u32 *ttb_entry;
+       uint64_t mask = 0;
+       uint32_t val;
+
+       /* Page-number mask for 4 KiB pages, else the 64 KiB variant. */
+       mask = (jpu_device->page_size == 4096) ? 0xFFFFFFFFFFFFF000 : 0xFFFFFFFFFFFF0000;
+       /* First entry covering @iova within this TBU's table. */
+       ttb_entry = tbu->ttb_va + (iova - tbu->va_base) / jpu_device->page_size;
+       while (size != 0) {
+               paddr = paddr & 0xFFFFFFFF;
+               val = ((paddr & mask) >> TTB_ENTRY_SHIFT) & 0x1FFFFF;
+               *ttb_entry = val;
+               size -= jpu_device->page_size;
+               ttb_entry++;
+               paddr += jpu_device->page_size;
+       }
+}
+
+/* Commit @num TBU instances (ids listed in @ins_id) to their hardware slots.
+ * Each instance's slot index is derived from the offset of its va_base
+ * inside the per-TBU VA window. Silently returns when the MMU hardware is
+ * disabled; that check and the register writes are done under
+ * hw_access_lock so they cannot race jpu_disable_jpu_mmu_hw().
+ */
+static void jpu_mmu_post(struct jpu_device *jpu_device, int *ins_id, int num)
+{
+       u32 reg;
+       struct tbu_instance *tbu;
+       int i, tbu_slot[TBU_NUM];
+
+       /* Start with every hardware slot unassigned. */
+       for (i = 0; i < TBU_NUM; i++)
+               tbu_slot[i] = -1;
+
+       /* Map each requested instance to its hardware slot. */
+       for (i = 0; i < num; i++) {
+               int index;
+               tbu = &jpu_device->tbu_ins[ins_id[i]];
+               index = (tbu->va_base - jpu_device->va_base) / VA_STEP_PER_TBU;
+               tbu_slot[index] = ins_id[i];
+       }
+
+       spin_lock(&jpu_device->hw_access_lock);
+       if (!jpu_device->is_hw_enable) {
+               spin_unlock(&jpu_device->hw_access_lock);
+               return;
+       }
+
+       for (i = 0; i < TBU_NUM; i++) {
+               if (tbu_slot[i] != -1) {
+                       tbu = &jpu_device->tbu_ins[tbu_slot[i]];
+                       if (tbu->ttb_size == 0) {
+                               /* Empty table: switch this TBU off. */
+                               jpu_writel(jpu_device,
+                                          MJPEG_MMU_TCR0_BASE + i * MJPEG_MMU_TBUx_STEP, 0);
+                       } else {
+                               /* Program the table base (64-bit, split LO/HI). */
+                               jpu_writel(jpu_device,
+                                          MJPEG_MMU_TTBLR_BASE +
+                                          i * MJPEG_MMU_TBUx_STEP, tbu->ttb_pa & 0xFFFFFFFF);
+                               jpu_writel(jpu_device,
+                                          MJPEG_MMU_TTBHR_BASE +
+                                          i * MJPEG_MMU_TBUx_STEP, tbu->ttb_pa >> 32);
+
+                               /* TCR: size-1 in [31:16], preload flags, page
+                                * size select, enable bit.
+                                */
+                               reg = (tbu->ttb_size - 1) << 16;
+                               if (tbu->always_preload)
+                                       reg |= BIT(3);
+                               if (tbu->enable_preload)
+                                       reg |= BIT(2);
+                               if (jpu_device->page_size == SZ_64K)
+                                       reg |= BIT(1);
+                               reg |= BIT(0);
+                               jpu_writel(jpu_device,
+                                          MJPEG_MMU_TCR0_BASE + i * MJPEG_MMU_TBUx_STEP, reg);
+                       }
+               }
+       }
+       spin_unlock(&jpu_device->hw_access_lock);
+}
+
+/* Map a scatter-gather list into the JPU MMU starting at @iova and post the
+ * updated TBU to hardware. Stores the number of bytes mapped in *@mapped.
+ * When @need_append is set for the input TBU, the tail of the buffer
+ * (@append_buf_size bytes of @data_size) is additionally mapped first and
+ * the returned value is the byte offset of that appended region; otherwise
+ * 0 is returned.
+ * NOTE(review): the return value doubles as an error marker (0), so callers
+ * must also check *@mapped to distinguish failure from a zero offset — the
+ * caller in get_addr_from_fd() does exactly that.
+ */
+static int jpu_mmu_map_sg(struct jpu_device *jpu_device, unsigned long iova,
+                         struct scatterlist *sg, unsigned int nents, size_t *mapped,
+                         u32 data_size, u32 append_buf_size, u32 need_append)
+{
+       struct tbu_instance *tbu;
+       struct scatterlist *s;
+       unsigned int i;
+       unsigned int j;
+       phys_addr_t paddr;
+       size_t size;
+       unsigned long orig_iova = iova;
+       unsigned int offset = 0;
+       unsigned int find = 0;
+       unsigned int bottom_buf_size = 0;
+
+       int invaild_data_size = data_size - append_buf_size;
+       /* Addresses above the MMU window with a single entry bypass mapping. */
+       if ((iova >= jpu_device->va_end) && (nents == 1))
+               return sg->length;
+
+       /* Select the TBU instance that owns this VA window. */
+       jpu_device->tbu_ins_map = (iova - BASE_VIRTUAL_ADDRESS) / VA_STEP_PER_TBU;
+
+       if (jpu_device->tbu_ins_map < 0 || jpu_device->tbu_ins_map >= TBU_INSTANCES_NUM)
+               goto out_id_err;
+       tbu = &jpu_device->tbu_ins[jpu_device->tbu_ins_map];
+
+       /* First use of this instance: anchor its VA window. */
+       if (tbu->ttb_size == 0) {
+               int index;
+               if (iova < jpu_device->va_base || iova >= jpu_device->va_end)
+                       goto out_iova_err;
+               index = (iova - jpu_device->va_base) / VA_STEP_PER_TBU;
+               tbu->va_base = jpu_device->va_base + index * VA_STEP_PER_TBU;
+               tbu->va_end = tbu->va_base + VA_STEP_PER_TBU;
+       }
+
+       if (iova < tbu->va_base || iova >= tbu->va_end)
+               goto out_iova_err;
+       /* Optional pre-pass: map the appended tail region and compute its
+        * offset relative to the requested append size.
+        */
+       if (append_buf_size && need_append) {
+               for_each_sg(sg, s, nents, i) {
+                       paddr = page_to_phys(sg_page(s)) + s->offset;
+                       size = s->length;
+                       if (!IS_ALIGNED(s->offset, jpu_device->page_size)) {
+                               dev_warn(jpu_device->jdev,
+                                        "paddr not aligned: iova %lx, paddr %llx, size %lx\n",
+                                        iova, paddr, size);
+                               goto out_region_err;
+                       }
+                       invaild_data_size -= size;
+                       /* Entries past data_size - append_buf_size belong to
+                        * the appended tail.
+                        */
+                       if (invaild_data_size < 0) {
+                               if (!find) {
+                                       find = 1;
+                               }
+                               if (find) {
+                                       bottom_buf_size += size;
+                               }
+                               if (iova + size > tbu->va_end || size == 0)
+                                       goto out_region_err;
+                               jpu_write_tbu_table(jpu_device, tbu, iova, paddr, size);
+                               iova += size;
+                       }
+               }
+               if (append_buf_size)
+                       offset = bottom_buf_size - append_buf_size;
+       }
+       /* Main pass: map every segment contiguously from the current iova. */
+       for_each_sg(sg, s, nents, j) {
+               paddr = page_to_phys(sg_page(s)) + s->offset;
+               size = s->length;
+               if (!IS_ALIGNED(s->offset, jpu_device->page_size)) {
+                       dev_warn(jpu_device->jdev,
+                                "paddr not aligned: iova %lx, paddr %llx, size %lx\n",
+                                iova, paddr, size);
+                       goto out_region_err;
+               }
+               if (iova + size > tbu->va_end || size == 0)
+                       goto out_region_err;
+
+               jpu_write_tbu_table(jpu_device, tbu, iova, paddr, size);
+               iova += size;
+       }
+
+       /* Grow the table size if the mapping extended past the old end. */
+       if (iova > tbu->va_base + jpu_device->page_size * tbu->ttb_size)
+               tbu->ttb_size = (iova - tbu->va_base) / jpu_device->page_size;
+
+       *mapped = iova - orig_iova;
+
+       /* Push the updated table to hardware. */
+       jpu_mmu_post(jpu_device, &jpu_device->tbu_ins_map, 1);
+       return offset;
+
+out_region_err:
+       dev_err(jpu_device->jdev, "Map_sg is wrong: iova %lx, paddr %llx, size %lx\n",
+               iova, paddr, size);
+       return 0;
+out_iova_err:
+       dev_err(jpu_device->jdev, "Map_sg is wrong: iova %lx", iova);
+       return 0;
+out_id_err:
+       dev_err(jpu_device->jdev, "TBU ins_id is wrong: %d\n", jpu_device->tbu_ins_map);
+       return 0;
+}
+
+/* Resolve a dma-buf file descriptor into a device address usable by the JPU.
+ * A single-entry sg table is used directly via its DMA address; multi-entry
+ * tables are mapped through the JPU MMU (which is enabled on demand) at a
+ * fixed per-TBU virtual address. The dmabuf/attachment/sgtable handles are
+ * stored in @pInfo for later release by the ioctl completion path.
+ * Returns the device address (plus append offset for the input TBU), or 0
+ * on failure after releasing everything acquired here.
+ */
+static dma_addr_t get_addr_from_fd(struct jpu_device *jpu_dev, int fd,
+                                  struct jpu_dma_buf_info *pInfo, u32 data_size,
+                                  u32 append_buf_size)
+{
+       struct device *dev = jpu_dev->jdev;
+       struct sg_table *sgt;
+       dma_addr_t addr;
+       int offset = 0;
+       size_t mapped_size = 0;
+       u32 need_append = 0;
+
+       pInfo->buf_fd = fd;
+       pInfo->dmabuf = dma_buf_get(fd);
+       if (IS_ERR(pInfo->dmabuf)) {
+               pr_err("jpu get dmabuf fail fd:%d\n", fd);
+               return 0;
+       }
+       pInfo->attach = dma_buf_attach(pInfo->dmabuf, dev);
+       if (IS_ERR(pInfo->attach)) {
+               pr_err("jpu get dma buf attach fail\n");
+               goto err_dmabuf_put;
+       }
+       pInfo->sgtable = dma_buf_map_attachment(pInfo->attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(pInfo->sgtable)) {
+               pr_err("jpu get dma buf map attachment fail\n");
+               goto err_dmabuf_detach;
+       }
+       sgt = pInfo->sgtable;
+       if (sgt->nents == 1) {
+               /* Physically contiguous: no MMU mapping required. */
+               addr = sg_dma_address(sgt->sgl);
+       } else {
+               if (!jpu_dev->is_hw_enable)
+                       jpu_enable_jpu_mmu_hw(jpu_dev);
+               /* Fixed VA per TBU slot inside the MMU window. */
+               addr = JPU_TBU_BASE_VA + (pInfo->tbu_id) * JPU_TBU_VA_STEP;
+               if (pInfo->tbu_id == TBU_INPUT) {
+                       need_append = 1;
+               }
+               offset =
+                   jpu_mmu_map_sg(jpu_dev, addr, sgt->sgl, sgt->nents, &mapped_size,
+                                  data_size, append_buf_size, need_append);
+               /* mapped_size == 0 means jpu_mmu_map_sg() failed. */
+               if (!mapped_size) {
+                       pr_err("jpu iommu map sgtable fail\n");
+                       goto err_dmabuf_unmap;
+               }
+       }
+       return addr + offset;
+err_dmabuf_unmap:
+       dma_buf_unmap_attachment(pInfo->attach, pInfo->sgtable, DMA_BIDIRECTIONAL);
+err_dmabuf_detach:
+       dma_buf_detach(pInfo->dmabuf, pInfo->attach);
+err_dmabuf_put:
+       dma_buf_put(pInfo->dmabuf);
+       return 0;
+}
+
+/* Allocate a coherent DMA buffer of jb->size bytes (page aligned). The CPU
+ * virtual address lands in jb->base and the bus address in jb->phys_addr.
+ * Returns 0 on success, -EINVAL for a NULL descriptor, -ENOMEM on failure.
+ */
+static int jpu_alloc_dma_buffer(struct jpu_device *jdev, jpudrv_buffer_t *jb)
+{
+       void *va;
+
+       if (!jb)
+               return -EINVAL;
+
+       va = dma_alloc_coherent(jdev->jdev, PAGE_ALIGN(jb->size),
+                               (dma_addr_t *) (&jb->phys_addr), GFP_DMA | GFP_KERNEL);
+       jb->base = (unsigned long)va;
+       if (!va) {
+               dev_err(jdev->jdev, "Physical memory allocation error size=%d\n", jb->size);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Release a buffer obtained from jpu_alloc_dma_buffer(). A no-op for a
+ * NULL descriptor or one that was never allocated (base == 0).
+ */
+static void jpu_free_dma_buffer(struct jpu_device *jdev, jpudrv_buffer_t *jb)
+{
+       if (!jb || !jb->base)
+               return;
+
+       dma_free_coherent(jdev->jdev, PAGE_ALIGN(jb->size), (void *)jb->base,
+                         jb->phys_addr);
+}
+
+/* Reclaim every codec instance registered by @filp (called when a client
+ * exits, possibly after crashing): zero the inUse word of the instance's
+ * slot in the shared instance pool, stamp the userspace jdi mutexes with a
+ * "destroyed" marker so surviving processes do not deadlock on a mutex the
+ * dead process held, then drop the bookkeeping node and the open refcount.
+ * Always returns 0.
+ */
+static int jpu_free_instances(struct file *filp)
+{
+       struct jpu_device *jdev = filp->private_data;
+       jpudrv_instance_list_t *vil, *n;
+       jpudrv_instance_pool_t *vip;
+       void *vip_base;
+       int instance_pool_size_per_core;
+       void *jdi_mutexes_base;
+       const int PTHREAD_MUTEX_T_DESTROY_VALUE = 0xdead10cc;
+
+       JLOG(jdev->jdev, "free instance\n");
+
+       // s_instance_pool.size  assigned to the size of all core once call JDI_IOCTL_GET_INSTANCE_POOL by user.
+       instance_pool_size_per_core = (jdev->s_instance_pool.size / MAX_NUM_JPU_CORE);
+
+       list_for_each_entry_safe(vil, n, &jdev->s_inst_list_head, list) {
+               if (vil->filp == filp) {
+                       /* NOTE(review): this offsets by one core slice rather
+                        * than indexing by core id — presumably correct for
+                        * this SoC's core layout; confirm against jdi.
+                        */
+                       vip_base =
+                           (void *)(jdev->s_instance_pool.base + instance_pool_size_per_core);
+                       JLOG(jdev->jdev,
+                            "jpu_free_instances detect instance crash instIdx=%d, vip_base=%p, instance_pool_size_per_core=%d\n",
+                            (int)vil->inst_idx, vip_base, (int)instance_pool_size_per_core);
+                       vip = (jpudrv_instance_pool_t *) vip_base;
+                       if (vip) {
+                               // only first 4 byte is key point(inUse of CodecInst in jpuapi) to free the corresponding instance
+                               memset(&vip->codecInstPool[vil->inst_idx], 0x00, 4);
+#define PTHREAD_MUTEX_T_HANDLE_SIZE 4
+                               /* The 4 jdi mutexes live at the tail of this
+                                * core's pool slice.
+                                */
+                               jdi_mutexes_base =
+                                   (vip_base + (instance_pool_size_per_core - PTHREAD_MUTEX_T_HANDLE_SIZE * 4));
+                               JLOG(jdev->jdev,
+                                    "force to destroy jdi_mutexes_base=%p in userspace \n", jdi_mutexes_base);
+                               if (jdi_mutexes_base) {
+                                       int i;
+                                       for (i = 0; i < 4; i++) {
+                                               memcpy(jdi_mutexes_base,
+                                                      &PTHREAD_MUTEX_T_DESTROY_VALUE, PTHREAD_MUTEX_T_HANDLE_SIZE);
+                                               jdi_mutexes_base += PTHREAD_MUTEX_T_HANDLE_SIZE;
+                                       }
+                               }
+                       }
+
+                       jdev->s_jpu_open_ref_count--;
+                       list_del(&vil->list);
+                       kfree(vil);
+               }
+       }
+
+       return 0;
+}
+
+/* Release every DMA buffer still owned by @filp and drop its bookkeeping
+ * node. Nodes whose buffer was never allocated (base == 0) are left on the
+ * list, matching the original behaviour. Always returns 0.
+ */
+static int jpu_free_buffers(struct file *filp)
+{
+       struct jpu_device *jdev = filp->private_data;
+       jpudrv_buffer_pool_t *pool, *n;
+       jpudrv_buffer_t jb;
+
+       JLOG(jdev->jdev, "jpu free buffers\n");
+
+       list_for_each_entry_safe(pool, n, &jdev->s_jbp_head, list) {
+               if (pool->filp != filp)
+                       continue;
+               jb = pool->jb;
+               if (!jb.base)
+                       continue;
+               jpu_free_dma_buffer(jdev, &jb);
+               list_del(&pool->list);
+               kfree(pool);
+       }
+
+       return 0;
+}
+
+/* Interrupt handler for the JPEG codec and its MMU. Only instance 0 is
+ * serviced: its picture status is latched for the JDI_IOCTL_WAIT_INTERRUPT
+ * waiter and acknowledged; any MMU fault is logged (fault addresses are
+ * 33-bit, reassembled from the HI/LO register pair) and cleared; finally
+ * the waiting process is woken.
+ * Fix: the per-TBU status loop tested BIT(i) with i fixed at 0, so only
+ * TBU0's fault bit was ever examined (and reported for every TBU); it now
+ * tests BIT(j) for each TBU index j.
+ */
+static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
+{
+       struct jpu_device *jdev = (struct jpu_device *)dev_id;
+       jpu_drv_context_t *dev = &jdev->s_jpu_drv_context;
+       int i = 0;      /* instance index: only instance 0 is handled */
+       unsigned long flags;
+       u32 int_reason;
+       u64 last_va, last_pa;
+       u32 mmu_irq_status;
+       u32 reg;
+       int j;
+
+       spin_lock_irqsave(&jdev->s_jpu_lock, flags);
+       // suppose instance 0 irq handle
+       int_reason = jpu_readl(jdev, MJPEG_PIC_STATUS_REG);
+       if (int_reason != 0) {
+               jdev->s_interrupt_flag[i] = 1;
+               if (int_reason & (1 << INT_JPU_DONE)) {
+                       jpu_writel(jdev, MJPEG_BBC_FLUSH_CMD_REG, 0);
+               }
+               jpu_writel(jdev, MJPEG_PIC_STATUS_REG, int_reason);     // clear JPEG register
+       }
+       mmu_irq_status = jpu_readl(jdev, MJPEG_MMU_IRQ_STATUS);
+
+       if (mmu_irq_status != 0) {
+               /* Reassemble the 33-bit last-access fault addresses. */
+               reg = jpu_readl(jdev, MJPEG_MMU_LAST_PA_ADDR_HI);
+               last_pa = reg & 0x1;
+               reg = jpu_readl(jdev, MJPEG_MMU_LAST_PA_ADDR_LO);
+               last_pa = (last_pa << 32) | reg;
+               reg = jpu_readl(jdev, MJPEG_MMU_LAST_VA_ADDR_HI);
+               last_va = reg & 0x1;
+               reg = jpu_readl(jdev, MJPEG_MMU_LAST_VA_ADDR_LO);
+               last_va = (last_va << 32) | reg;
+
+               /* Print IRQ status. */
+               dev_err_ratelimited(jdev->jdev,
+                                   "Unexpected fault: IRQ status 0x%x, last PA 0x%09llx, last VA 0x%09llx\n",
+                                   mmu_irq_status, last_pa, last_va);
+
+               if (mmu_irq_status & BIT(8)) {
+                       u64 timeout_va_addr;
+                       reg = jpu_readl(jdev, MJPEG_MMU_TIMEOUT_VA_ADDR_HI);
+                       timeout_va_addr = reg & 0x1;
+                       reg = jpu_readl(jdev, MJPEG_MMU_TIMEOUT_VA_ADDR_LO);
+                       timeout_va_addr = (timeout_va_addr << 32) | reg;
+                       dev_err_ratelimited(jdev->jdev,
+                                           "timeout error: timeout_va 0x%09llx\n",
+                                           timeout_va_addr);
+               }
+
+               for (j = 0; j < TBU_NUM; j++) {
+                       /* Fixed: per-TBU fault bit is BIT(j), not BIT(i). */
+                       if (mmu_irq_status & BIT(j)) {
+                               reg = jpu_readl(jdev, MJPEG_MMU_TBU_STATUS_BASE + j * MJPEG_MMU_TBUx_STEP);
+                               dev_err_ratelimited(jdev->jdev,
+                                                   "TBU%d error: read addr 0x%08x, write addr 0x%08x\n",
+                                                   j, ((reg >> 16) & 0xFFF), reg & 0x1FFF);
+                       }
+               }
+
+               /* clear DMA error */
+               if (mmu_irq_status & 0xFF)
+                       jpu_set_reg_bits(jdev, MJPEG_MMU_ERROR_CLEAR, BIT(1));
+
+               /* reset IRQ status */
+               jpu_writel(jdev, MJPEG_MMU_IRQ_STATUS, mmu_irq_status);
+       }
+       dev->interrupt_reason[i] = int_reason;
+       spin_unlock_irqrestore(&jdev->s_jpu_lock, flags);
+
+       JLOG(jdev->jdev, "JPU: instance no %d, INTERRUPT FLAG: %08x, %08x\n",
+            i, dev->interrupt_reason[i], MJPEG_PIC_STATUS_REG);
+
+       if (dev->async_queue)
+               kill_fasync(&dev->async_queue, SIGIO, POLL_IN); // notify the interrupt to userspace
+
+       wake_up_interruptible(&jdev->s_interrupt_wait_q[i]);
+       return IRQ_HANDLED;
+}
+
+/* Open the JPU character device. Exclusive-open: a second concurrent
+ * opener gets -EBUSY. On success the device is runtime-resumed and the
+ * jpu_device pointer is stashed in filp->private_data.
+ */
+static int jpu_open(struct inode *inode, struct file *filp)
+{
+       struct jpu_device *jdev = container_of(inode->i_cdev, struct jpu_device, s_jpu_cdev);
+
+       spin_lock(&jdev->s_jpu_lock);
+       if (jdev->s_jpu_drv_context.open_count) {
+               spin_unlock(&jdev->s_jpu_lock);
+               return -EBUSY;
+       }
+       jdev->s_jpu_drv_context.open_count++;
+       filp->private_data = jdev;
+       spin_unlock(&jdev->s_jpu_lock);
+
+       pm_runtime_get_sync(jdev->jdev);
+       return 0;
+}
+
+/* .read() stub: the JPU interface is ioctl/mmap based; reads return 0 (EOF). */
+static ssize_t jpu_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
+{
+       return 0;
+}
+
+/* .write() stub: the JPU interface is ioctl/mmap based; writes accept nothing. */
+static ssize_t jpu_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
+{
+       return 0;
+}
+
+static long jpu_ioctl(struct file *filp, u_int cmd, u_long arg)
+{
+       struct jpu_device *jdev = filp->private_data;
+       jpudrv_buffer_pool_t *jbp, *n;
+       jpudrv_buffer_t jb;
+       jpudrv_intr_info_t info;
+       jpudrv_inst_info_t inst_info;
+       struct jpu_drv_context_t *dev_ctx;
+       u32 instance_no;
+       JPU_DMA_CFG cfg;
+       int i;
+       struct dma_buf *dmabuf;
+       struct dma_buf_attachment *attach;
+       struct sg_table *sg_table;
+       jpu_dma_buf_info pInfo;
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+       u32 clkgate;
+#endif
+       int ret = 0;
+
+       switch (cmd) {
+       case JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
+               ret = down_interruptible(&jdev->s_jpu_sem);
+               if (ret) {
+                       return -EAGAIN;
+               }
+
+               jbp = kzalloc(sizeof(jpudrv_buffer_pool_t), GFP_KERNEL);
+               if (!jbp) {
+                       up(&jdev->s_jpu_sem);
+                       return -ENOMEM;
+               }
+
+               ret = copy_from_user(&(jbp->jb), (jpudrv_buffer_t *) arg, sizeof(jpudrv_buffer_t));
+               if (ret) {
+                       kfree(jbp);
+                       up(&jdev->s_jpu_sem);
+                       return -EFAULT;
+               }
+
+               ret = jpu_alloc_dma_buffer(jdev, &(jbp->jb));
+               if (ret) {
+                       kfree(jbp);
+                       up(&jdev->s_jpu_sem);
+                       return -ENOMEM;
+               }
+
+               ret = copy_to_user((void __user *)arg, &(jbp->jb), sizeof(jpudrv_buffer_t));
+               if (ret) {
+                       kfree(jbp);
+                       up(&jdev->s_jpu_sem);
+                       return -EFAULT;
+               }
+
+               jbp->filp = filp;
+
+               spin_lock(&jdev->s_jpu_lock);
+               list_add(&jbp->list, &jdev->s_jbp_head);
+               spin_unlock(&jdev->s_jpu_lock);
+
+               up(&jdev->s_jpu_sem);
+
+               break;
+       case JDI_IOCTL_FREE_PHYSICALMEMORY:
+               ret = down_interruptible(&jdev->s_jpu_sem);
+               if (ret) {
+                       return -EAGAIN;
+               }
+
+               ret = copy_from_user(&jb, (jpudrv_buffer_t *) arg, sizeof(jpudrv_buffer_t));
+               if (ret) {
+                       up(&jdev->s_jpu_sem);
+                       return -EACCES;
+               }
+
+               if (jb.base) {
+                       jpu_free_dma_buffer(jdev, &jb);
+               }
+
+               spin_lock(&jdev->s_jpu_lock);
+
+               list_for_each_entry_safe(jbp, n, &jdev->s_jbp_head, list) {
+                       if (jbp->jb.base == jb.base) {
+                               list_del(&jbp->list);
+                               kfree(jbp);
+                               break;
+                       }
+               }
+
+               spin_unlock(&jdev->s_jpu_lock);
+
+               up(&jdev->s_jpu_sem);
+
+               break;
+       case JDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
+               ret = -EFAULT;
+               break;
+       case JDI_IOCTL_WAIT_INTERRUPT:
+               dev_ctx = &jdev->s_jpu_drv_context;
+               ret = copy_from_user(&info, (jpudrv_intr_info_t *) arg, sizeof(jpudrv_intr_info_t));
+               if (ret) {
+                       return -EFAULT;
+               }
+
+               instance_no = info.inst_idx;
+               ret =
+                   wait_event_interruptible_timeout(jdev->s_interrupt_wait_q[instance_no],
+                                                    jdev->s_interrupt_flag[instance_no] != 0,
+                                                    msecs_to_jiffies(info.timeout));
+               if (!ret) {
+                       dev_err(jdev->jdev,
+                               "instance no %d ETIME, s_interrupt_flag(%d), reason(0x%08x)\n",
+                               instance_no, jdev->s_interrupt_flag[instance_no],
+                               dev_ctx->interrupt_reason[instance_no]);
+                       ret = -ETIME;
+                       break;
+               }
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       dev_err(jdev->jdev, "instance no: %d ERESTARTSYS\n", instance_no);
+                       break;
+               }
+
+               JLOG(jdev->jdev, "INST(%d) s_interrupt_flag(%d), reason(0x%08x)\n",
+                    instance_no, jdev->s_interrupt_flag[instance_no],
+                    dev_ctx->interrupt_reason[instance_no]);
+
+               spin_lock(&jdev->s_jpu_lock);
+               info.intr_reason = dev_ctx->interrupt_reason[instance_no];
+               jdev->s_interrupt_flag[instance_no] = 0;
+               dev_ctx->interrupt_reason[instance_no] = 0;
+               spin_unlock(&jdev->s_jpu_lock);
+
+               for (i = 0; i < TBU_INSTANCES_NUM; i++) {
+                       pInfo = buf_inf[i];
+                       dmabuf = pInfo.dmabuf;
+                       attach = pInfo.attach;
+                       sg_table = pInfo.sgtable;
+
+                       if (dmabuf && attach && sg_table) {
+                               dma_buf_unmap_attachment(attach, sg_table, DMA_BIDIRECTIONAL);
+                               dma_buf_detach(dmabuf, attach);
+                               dma_buf_put(dmabuf);
+                       }
+               }
+               if (jdev->is_hw_enable) {
+                       jpu_disable_jpu_mmu_hw(jdev);
+               }
+               ret = copy_to_user((void __user *)arg, &info, sizeof(jpudrv_intr_info_t));
+               if (ret) {
+                       return -EFAULT;
+               }
+#if defined (CONFIG_PM) && defined (DDR_QOS_ENABLE)
+               //freq_qos_update_request(jdev->ddr_qos_rreq, PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+               //freq_qos_update_request(jdev->ddr_qos_wreq, PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+#endif
+               break;
+       case JDI_IOCTL_SET_CLOCK_GATE:;
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+               ret = down_interruptible(&jdev->s_jpu_sem);
+               if (ret) {
+                       return -EAGAIN;
+               }
+
+               if (get_user(clkgate, (u32 __user *) arg)) {
+                       up(&jdev->s_jpu_sem);
+                       return -EFAULT;
+               }
+
+               if (clkgate) {
+                       pm_runtime_get_sync(jdev->jdev);
+                       jpu_clk_enable(jdev);
+               } else {
+                       jpu_clk_disable(jdev);
+                       pm_runtime_put_sync(jdev->jdev);
+               }
+
+               up(&jdev->s_jpu_sem);
+#endif
+               break;
+       case JDI_IOCTL_GET_INSTANCE_POOL:
+               ret = down_interruptible(&jdev->s_jpu_sem);
+               if (ret) {
+                       return -EAGAIN;
+               }
+
+               if (jdev->s_instance_pool.base) {
+                       ret = copy_to_user((void __user *)arg, &jdev->s_instance_pool,
+                                          sizeof(jpudrv_buffer_t));
+               } else {
+                       ret = copy_from_user(&jdev->s_instance_pool,
+                                            (jpudrv_buffer_t *) arg, sizeof(jpudrv_buffer_t));
+                       if (!ret) {
+                               jdev->s_instance_pool.size = PAGE_ALIGN(jdev->s_instance_pool.size);
+                               jdev->s_instance_pool.base =
+                                   (unsigned long)vmalloc(jdev->s_instance_pool.size);
+                               jdev->s_instance_pool.phys_addr = jdev->s_instance_pool.base;
+
+                               if (jdev->s_instance_pool.base != 0) {
+                                       memset((void *)jdev->s_instance_pool.base, 0x0, jdev->s_instance_pool.size);    /*clearing memory */
+                                       ret = copy_to_user((void __user *)arg,
+                                                          &jdev->s_instance_pool, sizeof(jpudrv_buffer_t));
+                                       if (ret == 0) {
+                                               /* success to get memory for instance pool */
+                                               up(&jdev->s_jpu_sem);
+                                               break;
+                                       }
+                               }
+                               ret = -EFAULT;
+                       }
+
+               }
+
+               up(&jdev->s_jpu_sem);
+
+               JLOG(jdev->jdev,
+                    "JDI_IOCTL_GET_INSTANCE_POOL: %s base: %lx, size: %d\n",
+                    (ret == 0 ? "OK" : "NG"), jdev->s_instance_pool.base,
+                    jdev->s_instance_pool.size);
+
+               break;
+       case JDI_IOCTL_OPEN_INSTANCE:
+               if (copy_from_user(&inst_info, (jpudrv_inst_info_t *) arg, sizeof(jpudrv_inst_info_t)))
+                       return -EFAULT;
+
+               spin_lock(&jdev->s_jpu_lock);
+               jdev->s_jpu_open_ref_count++;   /* flag just for that jpu is in opened or closed */
+               inst_info.inst_open_count = jdev->s_jpu_open_ref_count;
+               spin_unlock(&jdev->s_jpu_lock);
+
+               if (copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t))) {
+                       return -EFAULT;
+               }
+
+               JLOG(jdev->jdev,
+                    "JDI_IOCTL_OPEN_INSTANCE inst_idx=%d, s_jpu_open_ref_count=%d, inst_open_count=%d\n",
+                    (int)inst_info.inst_idx, jdev->s_jpu_open_ref_count, inst_info.inst_open_count);
+
+               break;
+       case JDI_IOCTL_CLOSE_INSTANCE:
+               if (copy_from_user(&inst_info, (jpudrv_inst_info_t *) arg, sizeof(jpudrv_inst_info_t)))
+                       return -EFAULT;
+
+               spin_lock(&jdev->s_jpu_lock);
+               jdev->s_jpu_open_ref_count--;   /* flag just for that jpu is in opened or closed */
+               inst_info.inst_open_count = jdev->s_jpu_open_ref_count;
+               spin_unlock(&jdev->s_jpu_lock);
+
+               if (copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t)))
+                       return -EFAULT;
+
+               JLOG(jdev->jdev,
+                    "JDI_IOCTL_CLOSE_INSTANCE inst_idx=%d, s_jpu_open_ref_count=%d, inst_open_count=%d\n",
+                    (int)inst_info.inst_idx, jdev->s_jpu_open_ref_count, inst_info.inst_open_count);
+
+               break;
+       case JDI_IOCTL_GET_INSTANCE_NUM:
+               ret = copy_from_user(&inst_info, (jpudrv_inst_info_t *) arg, sizeof(jpudrv_inst_info_t));
+               if (ret != 0)
+                       break;
+
+               spin_lock(&jdev->s_jpu_lock);
+               inst_info.inst_open_count = jdev->s_jpu_open_ref_count;
+               spin_unlock(&jdev->s_jpu_lock);
+
+               ret = copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t));
+
+               JLOG(jdev->jdev,
+                    "JDI_IOCTL_GET_INSTANCE_NUM inst_idx=%d, open_count=%d\n",
+                    (int)inst_info.inst_idx, inst_info.inst_open_count);
+               break;
+       case JDI_IOCTL_RESET:
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+
+               ret = down_interruptible(&jdev->s_jpu_sem);
+               if (ret) {
+                       return -EAGAIN;
+               }
+
+               jpu_hw_reset(jdev);
+
+               up(&jdev->s_jpu_sem);
+#endif
+               break;
+       case JDI_IOCTL_GET_REGISTER_INFO:
+               ret = copy_to_user((void __user *)arg, &jdev->s_jpu_register, sizeof(jpudrv_buffer_t));
+               if (ret != 0) {
+                       ret = -EFAULT;
+               }
+               JLOG(jdev->jdev, "JDI_IOCTL_GET_REGISTER_INFO s_jpu_register.phys_addr=0x%lx, s_jpu_register.virt_addr=0x%lx, s_jpu_register.size=%d\n",
+                                jdev->s_jpu_register.phys_addr, jdev->s_jpu_register.virt_addr, jdev->s_jpu_register.size);
+               break;
+       case JDI_IOCTL_CFG_MMU:
+               //JLOG(jdev->jdev, "JDI_IOCTL_CFG_MMU \n");
+               ret = copy_from_user(&cfg, (JPU_DMA_CFG *) arg, sizeof(JPU_DMA_CFG));
+               if (ret != 0) {
+                       ret = -EFAULT;
+               }
+               //JLOG(jdev->jdev, "JDI_IOCTL_CFG_MMU input fd:%d output:%d\n",cfg.intput_buf_fd,cfg.output_buf_fd);
+               buf_inf[0].tbu_id = TBU_INPUT;
+               buf_inf[1].tbu_id = TBU_OUTPUT;
+               cfg.intput_virt_addr =
+                   get_addr_from_fd(jdev, cfg.intput_buf_fd, &buf_inf[0], cfg.data_size, cfg.append_buf_size);
+               cfg.output_virt_addr =
+                   get_addr_from_fd(jdev, cfg.output_buf_fd, &buf_inf[1], cfg.data_size, cfg.append_buf_size);
+               ret = copy_to_user((void __user *)arg, &cfg, sizeof(JPU_DMA_CFG));
+               if (ret != 0) {
+                       ret = -EFAULT;
+               }
+               jpu_writel(jdev, JPU_MMU_TRI, 0x01);
+#if defined (CONFIG_PM) && defined (DDR_QOS_ENABLE)
+               //_update_request(jdev->ddr_qos_rreq,160000);
+               //freq_qos_update_request(jdev->ddr_qos_wreq,8000);
+#endif
+               JLOG(jdev->jdev, "JDI_IOCTL_CFG_MMU DONE!\n");
+               break;
+       default:
+               dev_err(jdev->jdev, "No such IOCTL, cmd is %d\n", cmd);
+               break;
+       }
+
+       return ret;
+}
+
+/* Hook the caller's file onto the driver-wide async (SIGIO) queue. */
+static int jpu_fasync(int fd, struct file *filp, int mode)
+{
+	struct jpu_device *dev = filp->private_data;
+
+	return fasync_helper(fd, filp, mode, &dev->s_jpu_drv_context.async_queue);
+}
+
+/*
+ * jpu_release() - last file-level cleanup for a closing /dev/jpuN user.
+ *
+ * Frees buffers and codec instances the process left behind; when the
+ * device-wide open count drops to zero, also releases the shared instance
+ * pool and drops the clock/runtime-PM reference.
+ */
+static int jpu_release(struct inode *inode, struct file *filp)
+{
+	struct jpu_device *jdev = filp->private_data;
+	int ret = 0;
+	u32 open_count;
+
+	ret = down_interruptible(&jdev->s_jpu_sem);
+	if (ret) {
+		/* Interrupted by a signal before the semaphore was acquired. */
+		return -EAGAIN;
+	}
+
+	/* found and free the not handled buffer by user applications */
+	jpu_free_buffers(filp);
+
+	/* found and free the not closed instance by user applications */
+	jpu_free_instances(filp);
+	JLOG(jdev->jdev, "open_count: %d\n", jdev->s_jpu_drv_context.open_count);
+
+	/* Decrement under the spinlock; snapshot so the test below is racefree. */
+	spin_lock(&jdev->s_jpu_lock);
+	jdev->s_jpu_drv_context.open_count--;
+	open_count = jdev->s_jpu_drv_context.open_count;
+	spin_unlock(&jdev->s_jpu_lock);
+
+	if (open_count == 0) {
+		if (jdev->s_instance_pool.base) {
+			JLOG(jdev->jdev, "free instance pool\n");
+			vfree((const void *)jdev->s_instance_pool.base);
+			jdev->s_instance_pool.base = 0;
+		}
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+		/* Drop the clock/power reference held while the device was in
+		 * use (presumably taken in jpu_open — confirm the pairing). */
+		jpu_clk_disable(jdev);
+		pm_runtime_put_sync(jdev->jdev);
+
+#endif
+	}
+
+	up(&jdev->s_jpu_sem);
+
+	JLOG(jdev->jdev, "released\n");
+
+	return 0;
+}
+
+/* mmap backend: expose the JPU MMIO register window to user space, uncached. */
+static int jpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
+{
+	struct jpu_device *jdev = fp->private_data;
+	unsigned long reg_pfn = jdev->s_jpu_register.phys_addr >> PAGE_SHIFT;
+	unsigned long len = vm->vm_end - vm->vm_start;
+
+	vm->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
+
+	if (remap_pfn_range(vm, vm->vm_start, reg_pfn, len, vm->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+/* mmap backend: map the physical pages named by vm_pgoff, uncached. */
+static int jpu_map_to_physical_memory(struct file *fp, struct vm_area_struct *vm)
+{
+	unsigned long len = vm->vm_end - vm->vm_start;
+
+	vm->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
+
+	if (remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, len, vm->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * mmap backend: map the vmalloc'ed instance pool into user space.
+ *
+ * vmalloc memory is not physically contiguous, so it has to be remapped
+ * one page at a time with vmalloc_to_pfn().
+ */
+static int jpu_map_to_instance_pool_memory(struct file *fp, struct vm_area_struct *vm)
+{
+	struct jpu_device *jdev = fp->private_data;
+	int ret;
+	long length = vm->vm_end - vm->vm_start;
+	unsigned long start = vm->vm_start;
+	char *vmalloc_area_ptr = (char *)jdev->s_instance_pool.base;
+	unsigned long pfn;
+
+	/* Refuse to map before the pool was allocated (vmalloc_to_pfn on a
+	 * NULL pointer would oops) or beyond the pool's end. */
+	if (!jdev->s_instance_pool.base || length > jdev->s_instance_pool.size)
+		return -EINVAL;
+
+	vm->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
+	/* loop over all pages, map it page individually */
+	while (length > 0) {
+		pfn = vmalloc_to_pfn(vmalloc_area_ptr);
+		ret = remap_pfn_range(vm, start, pfn, PAGE_SIZE, PAGE_SHARED);
+		if (ret < 0)
+			return ret;
+		start += PAGE_SIZE;
+		vmalloc_area_ptr += PAGE_SIZE;
+		length -= PAGE_SIZE;
+	}
+
+	return 0;
+}
+
+/*
+ * mmap dispatcher.  Offset 0 selects the instance pool, the register
+ * window has its own well-known page offset, and any other offset is
+ * treated as raw physical memory.
+ */
+static int jpu_mmap(struct file *fp, struct vm_area_struct *vm)
+{
+	struct jpu_device *jdev = fp->private_data;
+	unsigned long reg_pgoff = jdev->s_jpu_register.phys_addr >> PAGE_SHIFT;
+
+	if (vm->vm_pgoff == 0)
+		return jpu_map_to_instance_pool_memory(fp, vm);
+	if (vm->vm_pgoff == reg_pgoff)
+		return jpu_map_to_register(fp, vm);
+	return jpu_map_to_physical_memory(fp, vm);
+}
+
+/*
+ * File operations wired into the JPU cdev by jpu_probe().
+ * NOTE(review): not static — confirm whether any other translation unit
+ * references jpu_fops before narrowing its linkage to static const.
+ */
+struct file_operations jpu_fops = {
+	.owner = THIS_MODULE,
+	.open = jpu_open,
+	.read = jpu_read,
+	.write = jpu_write,
+	.unlocked_ioctl = jpu_ioctl,
+	.release = jpu_release,
+	.fasync = jpu_fasync,
+	.mmap = jpu_mmap,
+};
+
+/* sysfs: report the maximum core-clock frequency read from the DT. */
+static ssize_t cclk_max_frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct jpu_device *jdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", jdev->cclk_max_frequency);
+}
+
+/* sysfs: report the minimum core-clock frequency read from the DT. */
+static ssize_t cclk_min_frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct jpu_device *jdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", jdev->cclk_min_frequency);
+}
+
+/* sysfs: report the currently selected core-clock frequency. */
+static ssize_t cclk_cur_frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct jpu_device *jdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", jdev->cclk_cur_frequency);
+}
+
+/*
+ * sysfs: select a new core-clock frequency.  Only values inside the
+ * advertised [min, max] window are accepted; the value is recorded here
+ * and applied by the clock-enable path.
+ */
+static ssize_t cclk_cur_frequency_store(struct device *dev,
+					struct device_attribute *attr, const char *buf,
+					size_t count)
+{
+	struct jpu_device *jdev = dev_get_drvdata(dev);
+	unsigned long freq = 0;
+	int err;
+
+	err = kstrtoul(buf, 0, &freq);
+	if (err < 0)
+		return err;
+
+	if (freq < jdev->cclk_min_frequency || freq > jdev->cclk_max_frequency)
+		return -EINVAL;
+
+	jdev->cclk_cur_frequency = freq;
+
+	return count;
+}
+
+/* Core-clock attributes: max/min are read-only, current is read-write. */
+static DEVICE_ATTR_RO(cclk_max_frequency);
+static DEVICE_ATTR_RO(cclk_min_frequency);
+static DEVICE_ATTR_RW(cclk_cur_frequency);
+
+static struct attribute *cclk_frequency_attrs[] = {
+	&dev_attr_cclk_max_frequency.attr,
+	&dev_attr_cclk_min_frequency.attr,
+	&dev_attr_cclk_cur_frequency.attr,
+	NULL,
+};
+
+/* Exposed as the "cclk" sysfs group on the JPU platform device. */
+static const struct attribute_group cclk_frequency_group = {
+	.name = "cclk",
+	.attrs = cclk_frequency_attrs,
+};
+
+/* sysfs: report the maximum AXI-clock frequency. */
+static ssize_t aclk_max_frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct jpu_device *jdev = platform_get_drvdata(pdev);
+
+	/* Cast explicitly so "%llu" matches its argument regardless of the
+	 * declared width of aclk_max_frequency; the sibling aclk attributes
+	 * print with "%u", so a bare "%llu" here risked a format/argument
+	 * mismatch (undefined behavior). */
+	return sprintf(buf, "%llu\n", (unsigned long long)jdev->aclk_max_frequency);
+}
+
+/* sysfs: report the minimum AXI-clock frequency. */
+static ssize_t aclk_min_frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct jpu_device *jdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", jdev->aclk_min_frequency);
+}
+
+/* sysfs: report the currently selected AXI-clock frequency. */
+static ssize_t aclk_cur_frequency_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct jpu_device *jdev = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", jdev->aclk_cur_frequency);
+}
+
+/*
+ * sysfs: select a new AXI-clock frequency.  Only values inside the
+ * advertised [min, max] window are accepted.
+ */
+static ssize_t aclk_cur_frequency_store(struct device *dev,
+					struct device_attribute *attr, const char *buf,
+					size_t count)
+{
+	struct jpu_device *jdev = dev_get_drvdata(dev);
+	unsigned long freq = 0;
+	int err;
+
+	err = kstrtoul(buf, 0, &freq);
+	if (err < 0)
+		return err;
+
+	if (freq < jdev->aclk_min_frequency || freq > jdev->aclk_max_frequency)
+		return -EINVAL;
+
+	jdev->aclk_cur_frequency = freq;
+
+	return count;
+}
+
+/* AXI-clock attributes: max/min are read-only, current is read-write. */
+static DEVICE_ATTR_RO(aclk_max_frequency);
+static DEVICE_ATTR_RO(aclk_min_frequency);
+static DEVICE_ATTR_RW(aclk_cur_frequency);
+
+static struct attribute *aclk_frequency_attrs[] = {
+	&dev_attr_aclk_max_frequency.attr,
+	&dev_attr_aclk_min_frequency.attr,
+	&dev_attr_aclk_cur_frequency.attr,
+	NULL,
+};
+
+/* Exposed as the "aclk" sysfs group on the JPU platform device. */
+static const struct attribute_group aclk_frequency_group = {
+	.name = "aclk",
+	.attrs = aclk_frequency_attrs
+};
+
+/* All frequency groups registered together in jpu_probe(). */
+static const struct attribute_group *jpu_frequency_group[] = {
+	&cclk_frequency_group,
+	&aclk_frequency_group,
+	NULL,
+};
+
+/* Device-tree match table.  The compatible string is kept byte-for-byte as
+ * the vendor device trees spell it (note the embedded space). */
+static const struct of_device_id jpu_dt_match[] = {
+	{ .compatible = "chip-media, jpu", },
+	{ /* sentinel */ }
+};
+
+/* 40-bit DMA address mask installed on the JPU device in jpu_probe(). */
+static u64 jpu_dmamask = 0xffffffffffUL;
+
+static int jpu_probe(struct platform_device *pdev)
+{
+       struct jpu_device *jdev;
+       struct resource *res;
+       const struct of_device_id *of_id;
+       uint32_t device_id = 0;
+       char cdev_name[32] = { 0 };
+       int err, i;
+       struct cpumask mask = { CPU_BITS_NONE };
+       int cpuid = 0;
+       void *va_temp;
+       dma_addr_t pa_temp;
+       jdev = devm_kzalloc(&pdev->dev, sizeof(*jdev), GFP_KERNEL);
+       if (!jdev) {
+               return -ENOMEM;
+       }
+
+       jdev->jdev = &pdev->dev;
+       jdev->jdev->dma_mask = &jpu_dmamask;
+       jdev->jdev->coherent_dma_mask = 0xffffffffffull;
+       platform_set_drvdata(pdev, jdev);
+       INIT_LIST_HEAD(&jdev->s_jbp_head);
+       INIT_LIST_HEAD(&jdev->s_inst_list_head);
+       sema_init(&jdev->s_jpu_sem, 1);
+       spin_lock_init(&jdev->s_jpu_lock);
+       for (i = 0; i < MAX_NUM_INSTANCE; i++) {
+               init_waitqueue_head(&jdev->s_interrupt_wait_q[i]);
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (IS_ERR_OR_NULL(res)) {
+               dev_err(jdev->jdev, "No I/O registers defined");
+               return -ENXIO;
+       }
+
+       jdev->reg = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(jdev->reg)) {
+               dev_err(jdev->jdev, "Failed to map JPU registers.\n");
+               return PTR_ERR(jdev->reg);
+       }
+
+       jdev->s_jpu_register.phys_addr = res->start;
+       jdev->s_jpu_register.virt_addr = (unsigned long)jdev->reg;
+       jdev->s_jpu_register.size = resource_size(res);
+
+       jdev->s_jpu_irq = platform_get_irq(pdev, 0);
+       if (jdev->s_jpu_irq < 0) {
+               dev_err(jdev->jdev, "No irq defined\n");
+               return -ENXIO;
+       }
+       err = of_property_read_u32(pdev->dev.of_node, "page-size", &jdev->page_size);
+       if (err == 0)
+               jdev->page_size *= SZ_1K;
+       else
+               jdev->page_size = SZ_4K;
+
+       jdev->is_hw_enable = false;
+       va_temp = dma_alloc_coherent(jdev->jdev, MAX_SIZE_PER_TTB * TBU_INSTANCES_NUM,
+                                    &pa_temp, GFP_KERNEL | GFP_DMA);
+       if (!va_temp) {
+               dev_err(jdev->jdev, "No memory for %d tbu_ins!\n", TBU_INSTANCES_NUM);
+               goto err0;
+       }
+       for (i = 0; i < TBU_INSTANCES_NUM; i++) {
+               struct tbu_instance *tbu = &jdev->tbu_ins[i];
+               tbu->ttb_va = va_temp + i * MAX_SIZE_PER_TTB;
+               tbu->ttb_pa = pa_temp + i * MAX_SIZE_PER_TTB;
+               tbu->ins_id = i;
+       }
+       spin_lock_init(&jdev->tbu_ins_bitmap_lock);
+
+       jdev->va_base = BASE_VIRTUAL_ADDRESS;
+       jdev->va_end = BASE_VIRTUAL_ADDRESS + TBU_NUM * VA_STEP_PER_TBU;
+       jdev->time_out_cycs = DEFAULT_TIMEOUT_CYCS;
+
+       spin_lock_init(&jdev->hw_access_lock);
+       err = devm_request_irq(&pdev->dev, jdev->s_jpu_irq, jpu_irq_handler, 0, "JPU", jdev);
+       if (err) {
+               dev_err(jdev->jdev, "irq not be registered\n");
+               return err;
+       }
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+
+       jdev->aclk = devm_clk_get(&pdev->dev, "aclk");
+       if (IS_ERR_OR_NULL(jdev->aclk)) {
+               dev_err(jdev->jdev, "not found axi clk\n");
+               return PTR_ERR(jdev->aclk);
+       }
+       atomic_set(&jdev->aclk_enable_count, 0);
+       jdev->cclk = devm_clk_get(&pdev->dev, "cclk");
+       if (IS_ERR_OR_NULL(jdev->cclk)) {
+               dev_err(jdev->jdev, "not found core clock\n");
+               return PTR_ERR(jdev->cclk);
+       }
+       atomic_set(&jdev->cclk_enable_count, 0);
+       if (of_property_read_u32(pdev->dev.of_node, "jpu,cclk-max-frequency", &jdev->cclk_max_frequency)) {
+               dev_err(jdev->jdev, "not read cclk max frequency.\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(pdev->dev.of_node, "jpu,cclk-min-frequency", &jdev->cclk_min_frequency)) {
+               dev_err(jdev->jdev, "not read cclk min frequency.\n");
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(pdev->dev.of_node, "jpu,cclk-default-frequency", &jdev->cclk_default_frequency)) {
+               dev_err(jdev->jdev, "not read cclk default frequency.\n");
+               return -ENXIO;
+       } else {
+               jdev->cclk_cur_frequency = jdev->cclk_default_frequency;
+       }
+
+       jdev->iclk = devm_clk_get(&pdev->dev, "iclk");
+       if (IS_ERR_OR_NULL(jdev->iclk)) {
+               dev_err(jdev->jdev, "not found core clock\n");
+               return PTR_ERR(jdev->iclk);
+       }
+       atomic_set(&jdev->iclk_enable_count, 0);
+#endif
+       jdev->jpg_reset = devm_reset_control_get_optional_shared(&pdev->dev, "jpg_reset");
+       if (IS_ERR_OR_NULL(jdev->jpg_reset)) {
+               dev_err(jdev->jdev, "not found core jpg_reset\n");
+               return PTR_ERR(jdev->jpg_reset);
+       }
+       atomic_set(&jdev->jpg_reset_enable_count, 0);
+       jdev->lcd_mclk_reset = devm_reset_control_get_optional_shared(&pdev->dev, "lcd_mclk_reset");
+       if (IS_ERR_OR_NULL(jdev->lcd_mclk_reset)) {
+               dev_err(jdev->jdev, "not found core lcd_mclk_reset\n");
+               return PTR_ERR(jdev->lcd_mclk_reset);
+       }
+       atomic_set(&jdev->lcd_mclk_reset_enable_count, 0);
+       jdev->isp_ci_reset = devm_reset_control_get_optional_shared(&pdev->dev, "isp_ci_reset");
+       if (IS_ERR_OR_NULL(jdev->isp_ci_reset)) {
+               dev_err(jdev->jdev, "not found core isp_ci_reset\n");
+               return PTR_ERR(jdev->isp_ci_reset);
+       }
+       atomic_set(&jdev->isp_ci_reset_enable_count, 0);
+       jdev->freset = devm_reset_control_get_optional_shared(&pdev->dev, "freset");
+       if (IS_ERR_OR_NULL(jdev->freset)) {
+               dev_err(jdev->jdev, "not found core freset\n");
+               return PTR_ERR(jdev->freset);
+       }
+       atomic_set(&jdev->freset_enable_count, 0);
+       jdev->sreset = devm_reset_control_get_optional_shared(&pdev->dev, "sreset");
+       if (IS_ERR_OR_NULL(jdev->sreset)) {
+               dev_err(jdev->jdev, "not found core sreset\n");
+               return PTR_ERR(jdev->sreset);
+       }
+       atomic_set(&jdev->sreset_enable_count, 0);
+
+       of_id = of_match_device(jpu_dt_match, &pdev->dev);
+       if (!of_id) {
+               dev_err(jdev->jdev, "No matching device to of_node: %p.\n", pdev->dev.of_node);
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32(pdev->dev.of_node, "jpu,chip-id", &device_id)) {
+               dev_err(jdev->jdev, "not found device id defined.\n");
+               return -ENXIO;
+       }
+
+       snprintf(cdev_name, sizeof(cdev_name), "%s%d", "jpu", device_id);
+       JLOG(jdev->jdev, "cdev name %s\n", cdev_name);
+       if ((alloc_chrdev_region(&jdev->s_jpu_major, 0, 1, cdev_name)) < 0) {
+               dev_err(jdev->jdev, "could not allocate major number\n");
+               return -EBUSY;
+       }
+
+       /* initialize the device structure and register the device with the kernel */
+       cdev_init(&jdev->s_jpu_cdev, &jpu_fops);
+       jdev->s_jpu_cdev.owner = THIS_MODULE;
+
+       /* Register char dev with the kernel */
+       if ((cdev_add(&jdev->s_jpu_cdev, jdev->s_jpu_major, 1)) < 0) {
+               dev_err(jdev->jdev, "could not allocate chrdev\n");
+               return -EBUSY;
+       }
+
+       JLOG(jdev->jdev, "cdev major %d, minor %d\n", MAJOR(jdev->s_jpu_major),
+            MINOR(jdev->s_jpu_major));
+
+       /* Create class for device driver. */
+       jdev->jpu_class = class_create(THIS_MODULE, cdev_name);
+
+       /* Create a device node. */
+       jdev->jpu_device = device_create(jdev->jpu_class, NULL, jdev->s_jpu_major, NULL, cdev_name);
+
+       err = sysfs_create_groups(&pdev->dev.kobj, jpu_frequency_group);
+       if (err < 0) {
+               return err;
+       }
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+#if defined (CONFIG_PM) && defined (DDR_QOS_ENABLE)
+#if 0
+       jdev->ddr_qos_cons = ddr_get_freq_constraints();
+       jdev->ddr_qos_rreq = &jpu_ddrfreq_qos_rreq_sum;
+       jdev->ddr_qos_wreq = &jpu_ddrfreq_qos_wreq_sum;
+       freq_qos_add_request(jdev->ddr_qos_cons, jdev->ddr_qos_rreq, FREQ_QOS_RSUM,
+                            PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+       freq_qos_add_request(jdev->ddr_qos_cons, jdev->ddr_qos_wreq, FREQ_QOS_WSUM,
+                            PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
+#endif
+#endif
+       cpuid = 0;
+       cpumask_set_cpu(cpuid, &mask);
+       irq_set_affinity(jdev->s_jpu_irq, &mask);
+       dev_notice(jdev->jdev, "driver probe successfully\n");
+       return 0;
+err0:
+       return -1;
+}
+
+/*
+ * jpu_remove() - unbind the JPU platform device, undoing jpu_probe().
+ *
+ * NOTE(review): the coherent TTB buffer allocated in probe is not freed
+ * here, and jpu_clk_disable()/pm_runtime_put_sync() run unconditionally
+ * even when no user ever opened the device — confirm both are intended.
+ */
+static int jpu_remove(struct platform_device *pdev)
+{
+	struct jpu_device *jdev = platform_get_drvdata(pdev);
+
+	/* Release the shared instance pool if user space left it allocated. */
+	if (jdev->s_instance_pool.base) {
+		vfree((const void *)jdev->s_instance_pool.base);
+		jdev->s_instance_pool.base = 0;
+	}
+
+	if (jdev->jpu_device) {
+		device_destroy(jdev->jpu_class, jdev->s_jpu_major);
+	}
+
+	if (jdev->jpu_class) {
+		class_destroy(jdev->jpu_class);
+	}
+
+	if (jdev->s_jpu_major) {
+		cdev_del(&jdev->s_jpu_cdev);
+		unregister_chrdev_region(jdev->s_jpu_major, 1);
+		jdev->s_jpu_major = 0;
+	}
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+	jpu_clk_disable(jdev);
+	pm_runtime_put_sync(jdev->jdev);
+	pm_runtime_disable(&pdev->dev);
+#endif
+	sysfs_remove_groups(&pdev->dev.kobj, jpu_frequency_group);
+
+	dev_notice(jdev->jdev, "driver removed\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Legacy platform-bus suspend hook: gate the JPU clocks on suspend. */
+static int jpu_suspend(struct platform_device *pdev, pm_message_t state)
+{
+#ifndef CONFIG_SOC_SPACEMIT_K1_FPGA
+	struct jpu_device *jdev = platform_get_drvdata(pdev);
+	jpu_clk_disable(jdev);
+#endif
+
+	return 0;
+}
+
+/* Resume is a no-op: clocks are presumably re-enabled on demand via the
+ * ioctl/open paths — confirm against jpu_clk_enable callers. */
+static int jpu_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* Runtime-PM callbacks are placeholders; runtime PM is only used for the
+ * device usage count (pm_runtime_get/put in the ioctl and release paths). */
+static int jpu_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int jpu_runtime_resume(struct device *dev)
+{
+    return 0;
+}
+
+static const struct dev_pm_ops jpu_pm_ops = {
+	.runtime_suspend = jpu_runtime_suspend,
+	.runtime_resume  = jpu_runtime_resume,
+};
+#else
+/* Without CONFIG_PM the driver registers no PM hooks at all. */
+#define jpu_suspend NULL
+#define jpu_resume NULL
+#define jpu_runtime_suspend NULL
+#define jpu_runtime_resume NULL
+#endif
+
+/* Platform driver glue; matched against the device tree via jpu_dt_match. */
+static struct platform_driver jpu_driver = {
+	.driver = {
+			.name = "jpu",
+			.of_match_table = jpu_dt_match,
+#ifdef CONFIG_PM
+			.pm		= &jpu_pm_ops
+#endif /* CONFIG_PM */
+		    },
+	.probe = jpu_probe,
+	.remove = jpu_remove,
+	.suspend = jpu_suspend,
+	.resume = jpu_resume,
+};
+
+/* Module entry: bring up the dma-buf exporter, then the platform driver. */
+static int __init jpu_init(void)
+{
+	int ret;
+
+	jpu_exp_init();
+	ret = platform_driver_register(&jpu_driver);
+	if (ret)
+		/* Undo the exporter setup if driver registration fails, so a
+		 * failed modprobe does not leave the exporter device behind. */
+		jpu_exp_exit();
+	return ret;
+}
+
+/* Module exit: tear down in the reverse order of jpu_init(). */
+static void __exit jpu_exit(void)
+{
+	platform_driver_unregister(&jpu_driver);
+	jpu_exp_exit();
+}
+
+MODULE_AUTHOR("SPACEMIT Limited, Inc.");
+MODULE_DESCRIPTION("JPU linux driver");
+/* NOTE(review): "Proprietary" taints the kernel and conflicts with the
+ * GPL-2.0 SPDX tag carried by jpu.h in this series — confirm intended. */
+MODULE_LICENSE("Proprietary");
+
+module_init(jpu_init);
+module_exit(jpu_exit);
diff --git a/drivers/soc/spacemit/jpu/jpu.h b/drivers/soc/spacemit/jpu/jpu.h
new file mode 100644 (file)
index 0000000..09faa5c
--- /dev/null
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ioctl numbers and user/kernel shared data structures for the SpacemiT
+ * JPU (JPEG codec) character-device driver.
+ */
+#ifndef __JPU_DRV_H__
+#define __JPU_DRV_H__
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+
+#define JDI_IOCTL_MAGIC  'J'
+
+#define JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY          _IO(JDI_IOCTL_MAGIC, 0)
+#define JDI_IOCTL_FREE_PHYSICALMEMORY               _IO(JDI_IOCTL_MAGIC, 1)
+#define JDI_IOCTL_WAIT_INTERRUPT                    _IO(JDI_IOCTL_MAGIC, 2)
+#define JDI_IOCTL_SET_CLOCK_GATE                    _IO(JDI_IOCTL_MAGIC, 3)
+#define JDI_IOCTL_RESET                             _IO(JDI_IOCTL_MAGIC, 4)
+#define JDI_IOCTL_GET_INSTANCE_POOL                 _IO(JDI_IOCTL_MAGIC, 5)
+#define JDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO    _IO(JDI_IOCTL_MAGIC, 6)
+#define JDI_IOCTL_GET_REGISTER_INFO                 _IO(JDI_IOCTL_MAGIC, 7)
+#define JDI_IOCTL_OPEN_INSTANCE                     _IO(JDI_IOCTL_MAGIC, 8)
+#define JDI_IOCTL_CLOSE_INSTANCE                    _IO(JDI_IOCTL_MAGIC, 9)
+#define JDI_IOCTL_GET_INSTANCE_NUM                  _IO(JDI_IOCTL_MAGIC, 10)
+#define JDI_IOCTL_CFG_MMU                              _IO(JDI_IOCTL_MAGIC, 11)
+#define JDI_IOCTL_RELEASE_MMU                          _IO(JDI_IOCTL_MAGIC, 12)
+
+/* Interrupt reasons reported to user space via JDI_IOCTL_WAIT_INTERRUPT. */
+enum {
+	INT_JPU_DONE = 0,
+	INT_JPU_ERROR = 1,
+	INT_JPU_BIT_BUF_EMPTY = 2,
+	INT_JPU_BIT_BUF_FULL = 2,	/* shares value 2 with EMPTY — confirm intended */
+	INT_JPU_OVERFLOW,
+	INT_JPU_PARTIAL_BUFFER_0,
+	INT_JPU_PARTIAL_BUFFER_1,
+	INT_JPU_PARTIAL_BUFFER_2,
+	INT_JPU_PARTIAL_BUFFER_3,
+	INT_JPU_STOP,
+	INT_JPU_CFG_DONE,
+	INT_JPU_SOF,
+};
+
+/* Descriptor of a driver-managed buffer, exchanged with user space. */
+typedef struct jpudrv_buffer_t {
+	unsigned int size;	/* buffer size in bytes */
+	unsigned long phys_addr;	/* physical (or bus) address */
+	unsigned long base;     /* kernel logical address in use kernel */
+	unsigned long virt_addr;        /* virtual user space address */
+} jpudrv_buffer_t;
+
+/* Instance open/close bookkeeping exchanged with user space. */
+typedef struct jpudrv_inst_info_t {
+	unsigned int inst_idx;
+	int inst_open_count;    /* for output only */
+} jpudrv_inst_info_t;
+
+/* Argument of JDI_IOCTL_WAIT_INTERRUPT. */
+typedef struct jpudrv_intr_info_t {
+	unsigned int timeout;	/* wait timeout (units defined by caller) */
+	int intr_reason;	/* one of the INT_JPU_* reasons above */
+	unsigned int inst_idx;
+} jpudrv_intr_info_t;
+
+/* Kernel-side record of an imported dma-buf attached to a TBU. */
+typedef struct jpu_dma_buf_info {
+	struct dma_buf *dmabuf;
+	int buf_fd;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgtable;
+	int tbu_id;
+	u32 append_buf_size;	/* appended buffer to work around picture sizes that are not 16-aligned */
+} jpu_dma_buf_info;
+
+/* Argument of JDI_IOCTL_CFG_MMU: dma-buf fds in, JPU virtual addresses out. */
+typedef struct jpu_dma_cfg {
+	int intput_buf_fd;
+	int output_buf_fd;
+	unsigned int intput_virt_addr;
+	unsigned int output_virt_addr;
+	unsigned int data_size;
+	unsigned int append_buf_size;
+} JPU_DMA_CFG;
+
+/* Per-TBU translation-table state (one entry of jpu_device::tbu_ins). */
+struct tbu_instance {
+	int ins_id;
+	u32 *ttb_va;	/* kernel virtual address of the translation table */
+	dma_addr_t ttb_pa;	/* DMA address of the translation table */
+	u64 ttb_size;
+	u64 va_base;
+	u64 va_end;
+	bool always_preload;
+	bool enable_preload;
+	u32 nsaid;
+	u32 qos;
+	bool secure_enable;
+};
+#endif
diff --git a/drivers/soc/spacemit/jpu/jpu_export.c b/drivers/soc/spacemit/jpu/jpu_export.c
new file mode 100644 (file)
index 0000000..641ee2a
--- /dev/null
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/miscdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/genalloc.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/types.h>
+#include <linux/types.h>
+#include "jpu_export.h"
+/* ioctl argument for jpu_IOCTL_ALLOC_DMA_BUF (user ABI; fixed-width types). */
+struct jpu_alloc_dma_buf {
+	__s32 fd;		/* out: dma-buf fd on success */
+	__u32 flags;		/* flags to map with */
+	__u64 size;		/* in: requested allocation size in bytes */
+};
+/* Exporter-private state attached to each dma-buf created by this driver. */
+struct jpu_data {
+	bool is_continue;	/* true: pages[0] is one contiguous high-order block */
+	int npages;		/* number of entries in pages[] */
+	unsigned int size;	/* bytes covered by each pages[] entry */
+	struct page *pages[];	/* flexible array of backing pages */
+};
+
+#define jpu_IOCTL_ALLOC_DMA_BUF \
+       _IOWR('V', 10, struct jpu_alloc_dma_buf)
+
+static struct device *pdev;
+/* dma-buf attach hook: stateless, nothing to prepare per attachment. */
+static int jpu_exporter_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+	return 0;
+}
+
+/* dma-buf detach hook: counterpart of attach; no per-attachment state. */
+static void jpu_exporter_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
+{
+}
+
+/*
+ * Build and DMA-map a scatter-gather table for an attachment.
+ *
+ * One sg entry is created per pages[] slot; data->size is PAGE_SIZE for the
+ * page-list allocator and the whole block size for the contiguous one, so
+ * the table always covers the full buffer.  Returns the mapped table or an
+ * ERR_PTR on allocation/mapping failure.  The table is freed in
+ * jpu_exporter_unmap_dma_buf().
+ *
+ * NOTE(review): the map is done against the misc device (pdev), not
+ * attachment->dev -- confirm all importers share the same DMA constraints.
+ */
+static struct sg_table *jpu_exporter_map_dma_buf(struct dma_buf_attachment *attachment,
+						 enum dma_data_direction dir)
+{
+	struct jpu_data *data;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i;
+
+	data = attachment->dmabuf->priv;
+	table = kmalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	if (sg_alloc_table(table, data->npages, GFP_KERNEL)) {
+		kfree(table);
+		return ERR_PTR(-ENOMEM);
+	}
+	sg = table->sgl;
+	for (i = 0; i < data->npages; i++) {
+		sg_set_page(sg, data->pages[i], data->size, 0);
+		sg = sg_next(sg);
+	}
+
+	/* dma_map_sg() returns 0 on failure. */
+	if (!dma_map_sg(pdev, table->sgl, table->nents, dir)) {
+		sg_free_table(table);
+		kfree(table);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return table;
+}
+
+/*
+ * Undo jpu_exporter_map_dma_buf(): unmap first, then free the sg table and
+ * the table structure we allocated.  Order matters -- unmapping needs a
+ * valid sg list.
+ */
+static void jpu_exporter_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				       struct sg_table *table, enum dma_data_direction dir)
+{
+	dma_unmap_sg(pdev, table->sgl, table->nents, dir);
+	sg_free_table(table);
+	kfree(table);
+}
+
+/*
+ * dma-buf release: drop one reference on every recorded page and free the
+ * bookkeeping structure.
+ *
+ * NOTE(review): the contiguous (is_continue) allocator records only the
+ * head page of a high-order, non-compound block, so put_page() here would
+ * not free the whole block -- confirm before enabling ALLOC_PAGES.
+ */
+static void jpu_exporter_release(struct dma_buf *dma_buf)
+{
+	struct jpu_data *data = dma_buf->priv;
+	int i;
+
+	pr_info("dmabuf release data:%px\n", data);
+
+	for (i = 0; i < data->npages; i++)
+		put_page(data->pages[i]);
+
+	kfree(data);
+}
+
+/*
+ * Map the exported buffer into user space.
+ *
+ * A contiguous allocation is mapped with a single remap_pfn_range() call;
+ * the page-list variant maps one page at a time.  Returns 0 on success or
+ * the negative error from remap_pfn_range().
+ */
+static int jpu_exporter_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	struct jpu_data *data = dma_buf->priv;
+	unsigned long vm_start = vma->vm_start;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	int i, ret;
+
+	if (data->is_continue)
+		return remap_pfn_range(vma, vm_start, page_to_pfn(data->pages[0]),
+				       size, vma->vm_page_prot);
+
+	/* Fix: the per-page return value used to be ignored and 0 was always
+	 * returned, so a partially mapped VMA looked like success. */
+	for (i = 0; i < data->npages && size >= PAGE_SIZE; i++) {
+		ret = remap_pfn_range(vma, vm_start, page_to_pfn(data->pages[i]),
+				      PAGE_SIZE, vma->vm_page_prot);
+		if (ret)
+			return ret;
+		vm_start += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+	return 0;
+}
+
+/*
+ * CPU is about to access the buffer: sync the first attachment's mapping
+ * for the CPU.
+ *
+ * NOTE(review): attachment->priv is never set by this exporter's attach
+ * callback -- confirm the importer stores its sg table there, otherwise
+ * this syncs a NULL table.
+ */
+static int jpu_exporter_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
+{
+	struct dma_buf_attachment *attachment;
+	struct sg_table *table;
+
+	if (list_empty(&dmabuf->attachments))
+		return 0;
+
+	attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
+	table = attachment->priv;
+	/* Fix: dma_sync_sg_for_cpu() requires the device the list was mapped
+	 * for; passing NULL dereferences a NULL dma_map_ops. */
+	dma_sync_sg_for_cpu(pdev, table->sgl, table->nents, dir);
+
+	return 0;
+}
+
+/*
+ * CPU access finished: hand the first attachment's mapping back to the
+ * device.  Mirrors jpu_exporter_begin_cpu_access().
+ */
+static int jpu_exporter_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
+{
+	struct dma_buf_attachment *attachment;
+	struct sg_table *table;
+
+	if (list_empty(&dmabuf->attachments))
+		return 0;
+
+	attachment = list_first_entry(&dmabuf->attachments, struct dma_buf_attachment, node);
+	table = attachment->priv;
+	/* Fix: pass the mapping device instead of NULL (see begin_cpu_access). */
+	dma_sync_sg_for_device(pdev, table->sgl, table->nents, dir);
+
+	return 0;
+}
+
+/* Exporter callbacks wired into every dma-buf created by jpu_exp_alloc(). */
+static const struct dma_buf_ops jpu_dmabuf_ops = {
+	.attach = jpu_exporter_attach,
+	.detach = jpu_exporter_detach,
+	.map_dma_buf = jpu_exporter_map_dma_buf,
+	.unmap_dma_buf = jpu_exporter_unmap_dma_buf,
+	.release = jpu_exporter_release,
+	.mmap = jpu_exporter_mmap,
+	.begin_cpu_access = jpu_exporter_begin_cpu_access,
+	.end_cpu_access = jpu_exporter_end_cpu_access,
+};
+
+//#define ALLOC_PAGES
+#ifndef ALLOC_PAGES
+/*
+ * Allocate alloc_data->size bytes as individual (non-contiguous) pages and
+ * export them as a new dma-buf.  Returns the dma-buf or an ERR_PTR().
+ */
+static struct dma_buf *jpu_exp_alloc(struct jpu_alloc_dma_buf *alloc_data)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dmabuf;
+	struct jpu_data *data;
+	u64 npages;
+	int i;
+
+	/* Fix: the __u64 size was truncated to int before alignment, so a
+	 * huge user-supplied size could wrap to a small/negative count. */
+	npages = PAGE_ALIGN(alloc_data->size) / PAGE_SIZE;
+	if (!npages || npages > INT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	/* npages <= INT_MAX, so this multiplication cannot overflow. */
+	data = kmalloc(sizeof(*data) + npages * sizeof(struct page *), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	data->is_continue = 0;
+	for (i = 0; i < npages; i++) {
+		data->pages[i] = alloc_page(GFP_KERNEL);
+		if (!data->pages[i])
+			goto err;
+	}
+	data->npages = npages;
+	data->size = PAGE_SIZE;	/* per-entry size consumed by map_dma_buf */
+	pr_info("dmabuf alloc data:%px, npages:%d, size:0x%llx\n", data,
+		data->npages, alloc_data->size);
+
+	exp_info.ops = &jpu_dmabuf_ops;
+	exp_info.size = npages * PAGE_SIZE;
+	exp_info.flags = O_CLOEXEC | O_RDWR;
+	exp_info.priv = data;
+
+	dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dmabuf))
+		goto err;
+
+	return dmabuf;
+
+err:
+	/* i counts pages successfully allocated so far. */
+	while (i--)
+		put_page(data->pages[i]);
+	kfree(data);
+	return ERR_PTR(-ENOMEM);
+}
+#else
+/*
+ * Allocate one physically contiguous block covering alloc_data->size bytes
+ * (rounded up to a power-of-two page count) and export it as a dma-buf.
+ * Returns the dma-buf or an ERR_PTR().
+ */
+static struct dma_buf *jpu_exp_alloc(struct jpu_alloc_dma_buf *alloc_data)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dmabuf;
+	struct jpu_data *data;
+	int npages, size, order;
+
+	size = alloc_data->size;
+	npages = PAGE_ALIGN(size) / PAGE_SIZE;
+	order = get_order(size);
+	if (!npages)
+		return ERR_PTR(-EINVAL);
+
+	npages = 1 << order;
+	data = kmalloc(sizeof(*data) + sizeof(struct page *), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+	data->is_continue = 1;
+	data->pages[0] = alloc_pages(GFP_KERNEL, order);
+	/* Fix: a failed high-order allocation was previously neither checked
+	 * nor handled, leading to a NULL put_page()/export. */
+	if (!data->pages[0]) {
+		kfree(data);
+		return ERR_PTR(-ENOMEM);
+	}
+	data->npages = 1;
+	data->size = npages * PAGE_SIZE;
+	pr_info("dmabuf alloc data:%px, real num:%d, order:%d, size:0x%x\n", data,
+		npages, order, size);
+
+	exp_info.ops = &jpu_dmabuf_ops;
+	exp_info.size = npages * PAGE_SIZE;
+	exp_info.flags = O_CLOEXEC | O_RDWR;
+	exp_info.priv = data;
+
+	dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dmabuf))
+		goto err;
+
+	return dmabuf;
+
+err:
+	/* Fix: the block is non-compound, so free all 2^order pages rather
+	 * than put_page() on the head page only. */
+	__free_pages(data->pages[0], order);
+	kfree(data);
+	return ERR_PTR(-ENOMEM);
+}
+#endif
+
+/*
+ * ioctl entry point for /dev/jpu_exp.  Only jpu_IOCTL_ALLOC_DMA_BUF is
+ * supported: allocate a buffer, install it as a dma-buf fd and copy the fd
+ * back to user space.
+ */
+static long jpu_exp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct dma_buf *dmabuf;
+	struct jpu_alloc_dma_buf alloc_data;
+	int fd;
+
+	switch (cmd) {
+	case jpu_IOCTL_ALLOC_DMA_BUF:
+		if (copy_from_user(&alloc_data, (void __user *)arg, sizeof(alloc_data)))
+			return -EFAULT;
+
+		dmabuf = jpu_exp_alloc(&alloc_data);
+		/* Fix: jpu_exp_alloc() returns ERR_PTR(), never NULL, so the
+		 * old "!dmabuf" check let error pointers through. */
+		if (IS_ERR(dmabuf)) {
+			pr_err("error: exporter alloc page failed\n");
+			return PTR_ERR(dmabuf);
+		}
+		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+		/* Fix: a negative fd was previously stored and returned as
+		 * success. */
+		if (fd < 0) {
+			dma_buf_put(dmabuf);
+			return fd;
+		}
+		pr_info("dmabuf fd:%d\n", fd);
+		alloc_data.fd = fd;
+		if (copy_to_user((void __user *)arg, &alloc_data, sizeof(alloc_data)))
+			return -EFAULT;
+		break;
+	default:
+		/* Fix: unknown commands used to return 0 (success). */
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+/* Character-device ops: only the allocation ioctl is supported.
+ * Fix: made const -- the table is never modified and miscdevice takes a
+ * const pointer; read-only ops tables are standard kernel hardening. */
+static const struct file_operations jpu_exp_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = jpu_exp_ioctl,
+};
+
+/* Misc device exposing the exporter as /dev/jpu_exp. */
+static struct miscdevice jpu_exp = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "jpu_exp",
+	.fops = &jpu_exp_fops,
+};
+
+/* 40-bit DMA mask storage for the misc device. */
+static u64 jpu_exp_dmamask = 0xffffffffffUL;
+
+/*
+ * Register the "jpu_exp" misc device and configure its DMA masks, which
+ * the exporter later uses for dma_map_sg()/dma_sync_sg_*().
+ * Returns 0 on success or the misc_register() error code.
+ */
+int jpu_exp_init(void)
+{
+	int ret;
+
+	ret = misc_register(&jpu_exp);
+	/* Fix: this_device is not valid when registration fails; the old
+	 * code dereferenced it unconditionally. */
+	if (ret)
+		return ret;
+
+	pdev = jpu_exp.this_device;
+	pdev->dma_mask = &jpu_exp_dmamask;
+	pdev->coherent_dma_mask = 0xffffffffffull;
+	return 0;
+}
+
+/* Unregister the "jpu_exp" misc device; counterpart of jpu_exp_init(). */
+void jpu_exp_exit(void)
+{
+	misc_deregister(&jpu_exp);
+}
diff --git a/drivers/soc/spacemit/jpu/jpu_export.h b/drivers/soc/spacemit/jpu/jpu_export.h
new file mode 100644 (file)
index 0000000..837d45a
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __CNM_JPU_EXP_H__
+#define __CNM_JPU_EXP_H__
+int jpu_exp_init(void);
+void jpu_exp_exit(void);
+#endif /* __CNM_JPU_EXP_H__ */
diff --git a/drivers/soc/spacemit/jpu/jpuconfig.h b/drivers/soc/spacemit/jpu/jpuconfig.h
new file mode 100644 (file)
index 0000000..f81b8dd
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _JPU_CONFIG_H_
+#define _JPU_CONFIG_H_
+
+#include "config.h"
+
+#define MAX_NUM_JPU_CORE                1
+#define MAX_NUM_INSTANCE                1
+#define MAX_INST_HANDLE_SIZE            48
+
+#define JPU_FRAME_ENDIAN                JDI_LITTLE_ENDIAN
+#define JPU_STREAM_ENDIAN               JDI_LITTLE_ENDIAN
+#define JPU_CHROMA_INTERLEAVE           1      // 0 (chroma separate mode), 1 (cbcr interleave mode), 2 (crcb interleave mode)
+
+#define JPU_STUFFING_BYTE_FF            0      // 0 : ON ("0xFF"), 1 : OFF ("0x00") for stuffing
+
+#define MAX_MJPG_PIC_WIDTH              32768
+#define MAX_MJPG_PIC_HEIGHT             32768
+
+#define MAX_FRAME                       (19*MAX_NUM_INSTANCE)
+
+#define STREAM_FILL_SIZE                0x10000
+#define STREAM_END_SIZE                 0
+
+#define JPU_GBU_SIZE                    256
+
+#define STREAM_BUF_SIZE                 0x200000
+
+#define JPU_CHECK_WRITE_RESPONSE_BVALID_SIGNAL 0
+
+#define JPU_INTERRUPT_TIMEOUT_MS        (5000*4)
+#ifdef CNM_SIM_PLATFORM
+#undef JPU_INTERRUPT_TIMEOUT_MS
+#define JPU_INTERRUPT_TIMEOUT_MS        3600000        // 1 hour for simultation environment
+#endif
+
+#define JPU_INST_CTRL_TIMEOUT_MS        (5000*4)
+#ifdef CNM_SIM_PLATFORM
+#undef JPU_INST_CTRL_TIMEOUT_MS
+#define JPU_INST_CTRL_TIMEOUT_MS        3600000        // 1 hour for simulation environment
+#endif
+#endif /* _JPU_CONFIG_H_ */
diff --git a/drivers/soc/spacemit/jpu/regdefine.h b/drivers/soc/spacemit/jpu/regdefine.h
new file mode 100644 (file)
index 0000000..b36ef2e
--- /dev/null
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef NPT_REGDEFINE_H_INCLUDED
+#define NPT_REGDEFINE_H_INCLUDED
+//------------------------------------------------------------------------------
+// REGISTER BASE
+//------------------------------------------------------------------------------
+#ifndef NIEUPORT_BASE
+//#define NIEUPORT_BASE (0xB00)
+#define NIEUPORT_BASE (0x0)
+
+#endif
+
+#ifndef MMU_BASE
+#define MMU_BASE (0x500)
+#endif
+
+//===================================================================================
+// NIEUPORT REGISTERS
+//===================================================================================
+#define MJPEG_PIC_START_REG         (NIEUPORT_BASE + 0x0000)
+#define MJPEG_PIC_STATUS_REG        (NIEUPORT_BASE + 0x0004)
+#define MJPEG_PIC_ERRMB_REG         (NIEUPORT_BASE + 0x0008)
+#define MJPEG_PIC_SETMB_REG         (NIEUPORT_BASE + 0x000C)
+
+#define MJPEG_PIC_CTRL_REG          (NIEUPORT_BASE + 0x0010)
+#define MJPEG_PIC_SIZE_REG          (NIEUPORT_BASE + 0x0014)
+#define MJPEG_MCU_INFO_REG          (NIEUPORT_BASE + 0x0018)
+#define MJPEG_ROT_INFO_REG          (NIEUPORT_BASE + 0x001C)
+
+#define MJPEG_SCL_INFO_REG          (NIEUPORT_BASE + 0x0020)
+#define MJPEG_IF_INFO_REG           (NIEUPORT_BASE + 0x0024)
+#define MJPEG_CLP_INFO_REG          (NIEUPORT_BASE + 0x0028)
+#define MJPEG_OP_INFO_REG           (NIEUPORT_BASE + 0x002C)
+
+#define MJPEG_DPB_CONFIG_REG        (NIEUPORT_BASE + 0x0030)
+#define MJPEG_DPB_BASE00_REG        (NIEUPORT_BASE + 0x0034)
+#define MJPEG_DPB_BASE01_REG        (NIEUPORT_BASE + 0x0038)
+#define MJPEG_DPB_BASE02_REG        (NIEUPORT_BASE + 0x003C)
+#define MJPEG_DPB_BASE10_REG        (NIEUPORT_BASE + 0x0040)
+#define MJPEG_DPB_BASE11_REG        (NIEUPORT_BASE + 0x0044)
+#define MJPEG_DPB_BASE12_REG        (NIEUPORT_BASE + 0x0048)
+#define MJPEG_DPB_BASE20_REG        (NIEUPORT_BASE + 0x004C)
+#define MJPEG_DPB_BASE21_REG        (NIEUPORT_BASE + 0x0050)
+#define MJPEG_DPB_BASE22_REG        (NIEUPORT_BASE + 0x0054)
+#define MJPEG_DPB_BASE30_REG        (NIEUPORT_BASE + 0x0058)
+#define MJPEG_DPB_BASE31_REG        (NIEUPORT_BASE + 0x005C)
+#define MJPEG_DPB_BASE32_REG        (NIEUPORT_BASE + 0x0060)
+#define MJPEG_DPB_YSTRIDE_REG       (NIEUPORT_BASE + 0x0064)
+#define MJPEG_DPB_CSTRIDE_REG       (NIEUPORT_BASE + 0x0068)
+#define MJPEG_WRESP_CHECK_REG       (NIEUPORT_BASE + 0x006C)
+
+#define MJPEG_CLP_BASE_REG          (NIEUPORT_BASE + 0x0070)
+#define MJPEG_CLP_SIZE_REG          (NIEUPORT_BASE + 0x0074)
+#define MJPEG_HUFF_CTRL_REG         (NIEUPORT_BASE + 0x0080)
+#define MJPEG_HUFF_ADDR_REG         (NIEUPORT_BASE + 0x0084)
+#define MJPEG_HUFF_DATA_REG         (NIEUPORT_BASE + 0x0088)
+#define MJPEG_QMAT_CTRL_REG         (NIEUPORT_BASE + 0x0090)
+#define MJPEG_QMAT_ADDR_REG         (NIEUPORT_BASE + 0x0094)
+#define MJPEG_QMAT_DATA_REG         (NIEUPORT_BASE + 0x0098)
+#define MJPEG_COEF_CTRL_REG         (NIEUPORT_BASE + 0x00A0)
+#define MJPEG_COEF_ADDR_REG         (NIEUPORT_BASE + 0x00A4)
+#define MJPEG_COEF_DATA_REG         (NIEUPORT_BASE + 0x00A8)
+#define MJPEG_RST_INTVAL_REG        (NIEUPORT_BASE + 0x00B0)
+#define MJPEG_RST_INDEX_REG         (NIEUPORT_BASE + 0x00B4)
+#define MJPEG_RST_COUNT_REG         (NIEUPORT_BASE + 0x00B8)
+#define MJPEG_INTR_MASK_REG         (NIEUPORT_BASE + 0x00C0)
+#define MJPEG_CYCLE_INFO_REG        (NIEUPORT_BASE + 0x00C8)
+#define MJPEG_DPCM_DIFF_Y_REG       (NIEUPORT_BASE + 0x00F0)
+#define MJPEG_DPCM_DIFF_CB_REG      (NIEUPORT_BASE + 0x00F4)
+#define MJPEG_DPCM_DIFF_CR_REG      (NIEUPORT_BASE + 0x00F8)
+
+// GBU
+#define MJPEG_GBU_CTRL_REG          (NIEUPORT_BASE + 0x0100)
+#define MJPEG_GBU_PBIT_BUSY_REG     (NIEUPORT_BASE + 0x0104)
+#define MJPEG_GBU_BT_PTR_REG        (NIEUPORT_BASE + 0x0110)
+#define MJPEG_GBU_WD_PTR_REG        (NIEUPORT_BASE + 0x0114)
+#define MJPEG_GBU_TT_CNT_REG        (NIEUPORT_BASE + 0x0118)
+#define MJPEG_GBU_PBIT_08_REG       (NIEUPORT_BASE + 0x0120)
+#define MJPEG_GBU_PBIT_16_REG       (NIEUPORT_BASE + 0x0124)
+#define MJPEG_GBU_PBIT_24_REG       (NIEUPORT_BASE + 0x0128)
+#define MJPEG_GBU_PBIT_32_REG       (NIEUPORT_BASE + 0x012C)
+#define MJPEG_GBU_BBSR_REG          (NIEUPORT_BASE + 0x0140)
+#define MJPEG_GBU_BBER_REG          (NIEUPORT_BASE + 0x0144)
+#define MJPEG_GBU_BBIR_REG          (NIEUPORT_BASE + 0x0148)
+#define MJPEG_GBU_BBHR_REG          (NIEUPORT_BASE + 0x014C)
+#define MJPEG_GBU_BCNT_REG          (NIEUPORT_BASE + 0x0158)
+#define MJPEG_GBU_FF_RPTR_REG       (NIEUPORT_BASE + 0x0160)
+#define MJPEG_GBU_FF_WPTR_REG       (NIEUPORT_BASE + 0x0164)
+
+// BBC
+#define MJPEG_BBC_END_ADDR_REG      (NIEUPORT_BASE + 0x0208)
+#define MJPEG_BBC_WR_PTR_REG        (NIEUPORT_BASE + 0x020C)
+#define MJPEG_BBC_RD_PTR_REG        (NIEUPORT_BASE + 0x0210)
+#define MJPEG_BBC_EXT_ADDR_REG      (NIEUPORT_BASE + 0x0214)
+#define MJPEG_BBC_INT_ADDR_REG      (NIEUPORT_BASE + 0x0218)
+#define MJPEG_BBC_DATA_CNT_REG      (NIEUPORT_BASE + 0x021C)
+#define MJPEG_BBC_COMMAND_REG       (NIEUPORT_BASE + 0x0220)
+#define MJPEG_BBC_BUSY_REG          (NIEUPORT_BASE + 0x0224)
+#define MJPEG_BBC_CTRL_REG          (NIEUPORT_BASE + 0x0228)
+#define MJPEG_BBC_CUR_POS_REG       (NIEUPORT_BASE + 0x022C)
+#define MJPEG_BBC_BAS_ADDR_REG      (NIEUPORT_BASE + 0x0230)
+#define MJPEG_BBC_STRM_CTRL_REG     (NIEUPORT_BASE + 0x0234)
+#define MJPEG_BBC_FLUSH_CMD_REG     (NIEUPORT_BASE + 0x0238)
+
+#define JPU_MMU_TRI (NIEUPORT_BASE +0x400)
+
+#define TBU_NUM 32
+#define MJPEG_MMU_TTBLR_BASE         (MMU_BASE + 0x40)
+#define MJPEG_MMU_TTBHR_BASE         (MMU_BASE + 0x44)
+#define MJPEG_MMU_TCR0_BASE          (MMU_BASE + 0x48)
+#define MJPEG_MMU_TCR1_BASE          (MMU_BASE + 0x4c)
+#define MJPEG_MMU_TBU_STATUS_BASE    (MMU_BASE + 0x50)
+#define MJPEG_MMU_TBUx_STEP          0x20
+#define MJPEG_MMU_BVA_LO             (MMU_BASE + 0x00)
+#define MJPEG_MMU_BVA_HI             (MMU_BASE + 0x04)
+#define MJPEG_MMU_TIMEOUT_VA_ADDR_LO (MMU_BASE + 0x08)
+#define MJPEG_MMU_TIMEOUT_VA_ADDR_HI (MMU_BASE + 0x0C)
+#define MJPEG_MMU_IRQ_STATUS         (MMU_BASE + 0x10)
+#define MJPEG_MMU_IRQ_ENABLE         (MMU_BASE + 0x14)
+#define MJPEG_MMU_TIMEOUT_VALUE      (MMU_BASE + 0x18)
+#define MJPEG_MMU_ERROR_CLEAR        (MMU_BASE + 0x1C)
+#define MJPEG_MMU_LAST_VA_ADDR_LO    (MMU_BASE + 0x20)
+#define MJPEG_MMU_LAST_VA_ADDR_HI    (MMU_BASE + 0x24)
+#define MJPEG_MMU_LAST_PA_ADDR_LO    (MMU_BASE + 0x28)
+#define MJPEG_MMU_LAST_PA_ADDR_HI    (MMU_BASE + 0x2C)
+#define MJPEG_MMU_VERSION            (MMU_BASE + 0x3C)
+
+#define BASE_VIRTUAL_ADDRESS   0x80000000
+#define VA_STEP_PER_TBU                0x2000000
+#define MAX_ENTRIES_PER_TTB    8096
+#define ENTRY_SIZE             4
+#define MAX_SIZE_PER_TTB       (MAX_ENTRIES_PER_TTB*ENTRY_SIZE)
+#define DEFAULT_TIMEOUT_CYCS   0x80000
+#define SPACEMIT_MJPEG_MMU_PGSIZE_BITMAP 0x02FFF000    /* 4K~32M */
+
+#define TBU_INSTANCES_NUM      2
+
+#define TTB_ENTRY_SHIFT                12
+#define AQUIRE_TIMEOUT_MS      100
+#endif
diff --git a/drivers/soc/spacemit/k1x-dma-range.c b/drivers/soc/spacemit/k1x-dma-range.c
new file mode 100644 (file)
index 0000000..7896074
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_clk.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+
+/* Device-tree match table for the DRAM bus node. */
+static const struct of_device_id spacemit_dma_range_dt_match[] = {
+	{ .compatible = "spacemit-dram-bus", },
+	{ },	/* sentinel */
+};
+
+/*
+ * Intentionally empty probe.  NOTE(review): presumably binding the driver
+ * is enough to have the OF core apply the node's dma-ranges to child
+ * devices -- confirm against the k1-x device tree.
+ */
+static int spacemit_dma_range_probe(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* Platform driver binding the "spacemit-dram-bus" DT node. */
+static struct platform_driver spacemit_dma_range_driver = {
+	.probe = spacemit_dma_range_probe,
+	.driver = {
+		.name	= "spacemit-dma-range",
+		.of_match_table = spacemit_dma_range_dt_match,
+	},
+};
+
+/* Registered at core_initcall so dependent devices probe after this bus. */
+static int __init spacemit_dma_range_drv_register(void)
+{
+	return platform_driver_register(&spacemit_dma_range_driver);
+}
+
+core_initcall(spacemit_dma_range_drv_register);
diff --git a/drivers/soc/spacemit/pm_domain/Makefile b/drivers/soc/spacemit/pm_domain/Makefile
new file mode 100644 (file)
index 0000000..579bcd2
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ARCH_SPACEMIT_K1X) += atomic_qos.o
+obj-$(CONFIG_ARCH_SPACEMIT_K1X) += k1x-pm_domain.o
diff --git a/drivers/soc/spacemit/pm_domain/atomic_qos.c b/drivers/soc/spacemit/pm_domain/atomic_qos.c
new file mode 100644 (file)
index 0000000..68f987e
--- /dev/null
@@ -0,0 +1,276 @@
+#include <linux/plist.h>
+#include <linux/pm_qos.h>
+#include <linux/notifier.h>
+#include <linux/err.h>
+#include <linux/spinlock_types.h>
+#include "atomic_qos.h"
+
+/*
+ * locking rule: all changes to constraints or notifiers lists
+ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+ * held, taken with _irqsave.  One lock to rule them all
+ */
+static DEFINE_SPINLOCK(atomic_pm_qos_lock);
+
+/* Set up one constraint list with its default value, aggregation type and
+ * notifier head. */
+static void atomic_freq_constraint_setup(struct atomic_pm_qos_constraints *c,
+					 s32 value, enum pm_qos_type type,
+					 struct atomic_notifier_head *nh)
+{
+	plist_head_init(&c->list);
+	c->target_value = value;
+	c->default_value = value;
+	c->no_constraint_value = value;
+	c->type = type;
+	c->notifiers = nh;
+	ATOMIC_INIT_NOTIFIER_HEAD(nh);
+}
+
+/**
+ * atomic_freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ *
+ * The min-frequency side aggregates with PM_QOS_MAX (largest request wins),
+ * the max-frequency side with PM_QOS_MIN (smallest request wins).
+ */
+void atomic_freq_constraints_init(struct atomic_freq_constraints *qos)
+{
+	atomic_freq_constraint_setup(&qos->min_freq, FREQ_QOS_MIN_DEFAULT_VALUE,
+				     PM_QOS_MAX, &qos->min_freq_notifiers);
+	atomic_freq_constraint_setup(&qos->max_freq, FREQ_QOS_MAX_DEFAULT_VALUE,
+				     PM_QOS_MIN, &qos->max_freq_notifiers);
+}
+
+
+/**
+ * atomic_freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type (FREQ_QOS_MIN or FREQ_QOS_MAX).
+ * @notifier: Notifier block to add.
+ *
+ * Returns 0 on success or a negative error code; warns on an unknown @type.
+ */
+int atomic_freq_qos_add_notifier(struct atomic_freq_constraints *qos,
+			  enum freq_qos_req_type type,
+			  struct notifier_block *notifier)
+{
+	struct atomic_pm_qos_constraints *c;
+
+	if (IS_ERR_OR_NULL(qos) || !notifier)
+		return -EINVAL;
+
+	if (type == FREQ_QOS_MIN) {
+		c = &qos->min_freq;
+	} else if (type == FREQ_QOS_MAX) {
+		c = &qos->max_freq;
+	} else {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return atomic_notifier_chain_register(c->notifiers, notifier);
+}
+
+/**
+ * atomic_freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type (FREQ_QOS_MIN or FREQ_QOS_MAX).
+ * @notifier: Notifier block to remove.
+ *
+ * Returns 0 on success or a negative error code; warns on an unknown @type.
+ */
+int atomic_freq_qos_remove_notifier(struct atomic_freq_constraints *qos,
+			     enum freq_qos_req_type type,
+			     struct notifier_block *notifier)
+{
+	struct atomic_pm_qos_constraints *c;
+
+	if (IS_ERR_OR_NULL(qos) || !notifier)
+		return -EINVAL;
+
+	if (type == FREQ_QOS_MIN) {
+		c = &qos->min_freq;
+	} else if (type == FREQ_QOS_MAX) {
+		c = &qos->max_freq;
+	} else {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return atomic_notifier_chain_unregister(c->notifiers, notifier);
+}
+
+/*
+ * Compute the currently effective constraint for @c.  The plist is kept
+ * sorted, so the aggregate is simply its first (PM_QOS_MIN) or last
+ * (PM_QOS_MAX) entry.  Caller must hold atomic_pm_qos_lock.
+ */
+static int pm_qos_get_value(struct atomic_pm_qos_constraints *c)
+{
+	if (plist_head_empty(&c->list))
+		return c->no_constraint_value;
+
+	if (c->type == PM_QOS_MIN)
+		return plist_first(&c->list)->prio;
+	if (c->type == PM_QOS_MAX)
+		return plist_last(&c->list)->prio;
+
+	WARN(1, "Unknown PM QoS type in %s\n", __func__);
+	return PM_QOS_DEFAULT_VALUE;
+}
+
+/* Publish the effective value; WRITE_ONCE pairs with lockless readers of
+ * target_value (see the header comment about 32-bit atomicity). */
+static void pm_qos_set_value(struct atomic_pm_qos_constraints *c, s32 value)
+{
+	WRITE_ONCE(c->target_value, value);
+}
+
+/**
+ * pm_qos_update_target - Update a list of PM QoS constraint requests.
+ * @c: List of PM QoS requests.
+ * @node: Target list entry.
+ * @action: Action to carry out (add, update or remove).
+ * @value: New request value for the target list entry.
+ *
+ * Update the given list of PM QoS constraint requests, @c, by carrying an
+ * @action involving the @node list entry and @value on it.
+ *
+ * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
+ * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store
+ * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove
+ * @node from the list, ignore @value).
+ *
+ * Return: 1 if the aggregate constraint value has changed, 0  otherwise.
+ */
+static int atomic_pm_qos_update_target(struct atomic_pm_qos_constraints *c, struct plist_node *node,
+			 enum pm_qos_req_action action, int value)
+{
+	int prev_value, curr_value, new_value;
+	unsigned long flags;
+
+	/* One global lock serializes all constraint-list mutation (see the
+	 * comment at atomic_pm_qos_lock). */
+	spin_lock_irqsave(&atomic_pm_qos_lock, flags);
+
+	prev_value = pm_qos_get_value(c);
+	if (value == PM_QOS_DEFAULT_VALUE)
+		new_value = c->default_value;
+	else
+		new_value = value;
+
+	switch (action) {
+	case PM_QOS_REMOVE_REQ:
+		plist_del(node, &c->list);
+		break;
+	case PM_QOS_UPDATE_REQ:
+		/*
+		 * To change the list, atomically remove, reinit with new value
+		 * and add, then see if the aggregate has changed.
+		 */
+		plist_del(node, &c->list);
+		fallthrough;
+	case PM_QOS_ADD_REQ:
+		plist_node_init(node, new_value);
+		plist_add(node, &c->list);
+		break;
+	default:
+		/* no action */
+		break;
+	}
+
+	curr_value = pm_qos_get_value(c);
+	pm_qos_set_value(c, curr_value);
+
+	spin_unlock_irqrestore(&atomic_pm_qos_lock, flags);
+
+	if (prev_value == curr_value)
+		return 0;
+
+	/* Notifiers run outside the spinlock; the chain is atomic, so this
+	 * is safe from any context. */
+	if (c->notifiers)
+		atomic_notifier_call_chain(c->notifiers, curr_value, NULL);
+
+	return 1;
+}
+
+/**
+ * atomic_freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ *
+ * Internal helper: routes the request to the min- or max-frequency
+ * constraint list and updates the aggregate.  Not for use by drivers.
+ */
+static int atomic_freq_qos_apply(struct atomic_freq_qos_request *req,
+			  enum pm_qos_req_action action, s32 value)
+{
+	struct atomic_pm_qos_constraints *c;
+
+	if (req->type == FREQ_QOS_MIN)
+		c = &req->qos->min_freq;
+	else if (req->type == FREQ_QOS_MAX)
+		c = &req->qos->max_freq;
+	else
+		return -EINVAL;
+
+	return atomic_pm_qos_update_target(c, &req->pnode, action, value);
+}
+
+/**
+ * atomic_freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object.  The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int atomic_freq_qos_add_request(struct atomic_freq_constraints *qos,
+			 struct atomic_freq_qos_request *req,
+			 enum freq_qos_req_type type, s32 value)
+{
+	int err;
+
+	if (IS_ERR_OR_NULL(qos) || !req || value < 0)
+		return -EINVAL;
+
+	req->qos = qos;
+	req->type = type;
+	err = atomic_freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+	if (err >= 0)
+		return err;
+
+	/* Failed to insert: reset the request so it is recognizably inactive. */
+	req->qos = NULL;
+	req->type = 0;
+	return err;
+}
+
+/**
+ * atomic_freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int atomic_freq_qos_update_request(struct atomic_freq_qos_request *req, s32 new_value)
+{
+	if (!req || new_value < 0)
+		return -EINVAL;
+
+	/* Unlocked fast path: pnode.prio is only written under the global
+	 * lock, so a stale read at worst causes one redundant update. */
+	if (req->pnode.prio == new_value)
+		return 0;
+
+	return atomic_freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
diff --git a/drivers/soc/spacemit/pm_domain/atomic_qos.h b/drivers/soc/spacemit/pm_domain/atomic_qos.h
new file mode 100644 (file)
index 0000000..61d0837
--- /dev/null
@@ -0,0 +1,93 @@
+#ifndef __ATOMIC_QOS_H__
+#define __ATOMIC_QOS_H__
+
+#include <linux/plist.h>
+#include <linux/pm_qos.h>
+#include <linux/notifier.h>
+#include <linux/err.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Note: The lockless read path depends on the CPU accessing target_value
+ * or effective_flags atomically.  Atomic access is only guaranteed on all CPU
+ * types linux supports for 32 bit quantites
+ */
+/* One aggregated constraint list; mirrors struct pm_qos_constraints but
+ * notifies through an atomic (IRQ-safe) notifier chain. */
+struct atomic_pm_qos_constraints {
+	struct plist_head list;	/* all requests, kept sorted by value */
+	s32 target_value;	/* Do not change to 64 bit */
+	s32 default_value;	/* substituted for PM_QOS_DEFAULT_VALUE */
+	s32 no_constraint_value;	/* effective value when list is empty */
+	enum pm_qos_type type;	/* PM_QOS_MIN or PM_QOS_MAX aggregation */
+	struct atomic_notifier_head *notifiers;	/* fired when target_value changes */
+};
+
+/* Paired min/max frequency constraint lists with their notifier heads. */
+struct atomic_freq_constraints {
+	struct atomic_pm_qos_constraints min_freq;	/* aggregated as PM_QOS_MAX */
+	struct atomic_notifier_head min_freq_notifiers;
+	struct atomic_pm_qos_constraints max_freq;	/* aggregated as PM_QOS_MIN */
+	struct atomic_notifier_head max_freq_notifiers;
+};
+
+/* A single caller-owned request; qos is NULL while the request is inactive. */
+struct atomic_freq_qos_request {
+	enum freq_qos_req_type type;	/* FREQ_QOS_MIN or FREQ_QOS_MAX */
+	struct plist_node pnode;	/* node on the constraint plist */
+	struct atomic_freq_constraints *qos;	/* list this request belongs to */
+};
+
+/**
+ * atomic_freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void atomic_freq_constraints_init(struct atomic_freq_constraints *qos);
+
+/**
+ * atomic_freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int atomic_freq_qos_add_notifier(struct atomic_freq_constraints *qos,
+                         enum freq_qos_req_type type,
+                         struct notifier_block *notifier);
+
+/**
+ * atomic_freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int atomic_freq_qos_remove_notifier(struct atomic_freq_constraints *qos,
+                            enum freq_qos_req_type type,
+                            struct notifier_block *notifier);
+/**
+ * atomic_freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object.  The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int atomic_freq_qos_add_request(struct atomic_freq_constraints *qos,
+                        struct atomic_freq_qos_request *req,
+                        enum freq_qos_req_type type, s32 value);
+/**
+ * atomic_freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int atomic_freq_qos_update_request(struct atomic_freq_qos_request *req, s32 new_value);
+
+#endif
+
diff --git a/drivers/soc/spacemit/pm_domain/k1x-pm_domain.c b/drivers/soc/spacemit/pm_domain/k1x-pm_domain.c
new file mode 100644 (file)
index 0000000..3891cb4
--- /dev/null
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spacemit Generic power domain support.
+ *
+ * Copyright (c) 2023 SPACEMIT, Co. Ltd.
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/of_address.h>
+#include <linux/of_clk.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/pm_qos.h>
+#include <linux/mfd/syscon.h>
+#include <linux/spinlock_types.h>
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/pmu/k1x_pmu.h>
+#include <linux/syscore_ops.h>
+#include "atomic_qos.h"
+
+#define MAX_REGMAP             5
+#define MAX_REGULATOR_PER_DOMAIN       5
+
+#define MPMU_REGMAP_INDEX      0
+#define APMU_REGMAP_INDEX      1
+
+#define APMU_POWER_STATUS_REG  0xf0
+#define MPMU_APCR_PER_REG      0x1098
+#define MPMU_AWUCRM_REG                0x104c
+
+#define APMU_AUDIO_CLK_RES_CTRL        0x14c
+#define AP_POWER_CTRL_AUDIO_AUTH_OFFSET        28
+#define FORCE_AUDIO_POWER_ON_OFFSET    13
+
+/* wakeup set */
+/* pmic */
+#define WAKEUP_SOURCE_WAKEUP_7 7
+
+
+#define PM_QOS_BLOCK_C1                0x0 /* core wfi */
+#define PM_QOS_BLOCK_C2                0x2 /* core power off */
+#define PM_QOS_BLOCK_M2                0x6 /* core l2 off */
+#define PM_QOS_BLOCK_AXI        0x7 /* d1p */
+#define PM_QOS_BLOCK_DDR        12 /* d1 */
+#define PM_QOS_BLOCK_UDR_VCTCXO 13 /* d2 */
+#define PM_QOS_BLOCK_UDR        14 /* d2pp */
+#define PM_QOS_BLOCK_DEFAULT_VALUE     15
+
+#define PM_QOS_AXISDD_OFFSET   31
+#define PM_QOS_DDRCORSD_OFFSET 27
+#define PM_QOS_APBSD_OFFSET    26
+#define PM_QOS_VCTCXOSD_OFFSET 19
+#define PM_QOS_STBYEN_OFFSET   13
+#define PM_QOS_PE_VOTE_AP_SLPEN_OFFSET 3
+
+#define DEV_PM_QOS_CLK_GATE            1
+#define DEV_PM_QOS_REGULATOR_GATE      2
+#define DEV_PM_QOS_PM_DOMAIN_GATE      4
+#define DEV_PM_QOS_DEFAULT             7
+
+/*
+ * Per-domain control layout and policy, populated from same-named DT
+ * properties (see spacemit_get_pm_domain_parameters()).
+ */
+struct spacemit_pm_domain_param {
+       int reg_pwr_ctrl;       /* APMU power-control register offset; 0 = no SW control */
+       int pm_qos;             /* cpuidle QoS level requested while the domain is on */
+       int bit_hw_mode;        /* HW-managed (auto) power mode enable bit */
+       int bit_sleep2;         /* MCU sleep control bit 2 (SW mode) */
+       int bit_sleep1;         /* MCU sleep control bit 1 (SW mode) */
+       int bit_isolation;      /* isolation enable/disable bit */
+       int bit_auto_pwr_on;    /* auto power-on bit (HW mode) */
+       int bit_hw_pwr_stat;    /* status bit in APMU_POWER_STATUS_REG (HW mode) */
+       int bit_pwr_stat;       /* status bit in APMU_POWER_STATUS_REG (SW mode) */
+       int use_hw;             /* non-zero: domain is HW-managed (e.g. LCD) */
+};
+
+/* Per-attached-device QoS bookkeeping, linked into spacemit_pm_domain::qos_head. */
+struct per_device_qos {
+       struct notifier_block notifier; /* receives DEV_PM_QOS_MAX_FREQUENCY updates */
+       struct list_head qos_node;      /* entry in the owning domain's qos_head */
+       struct dev_pm_qos_request req;
+       int level;                      /* DEV_PM_QOS_*_GATE bitmask, updated via notifier */
+       struct device *dev;
+       struct regulator *rgr[MAX_REGULATOR_PER_DOMAIN];
+       int rgr_count;
+       /**
+        * managing the cpuidle-qos, should be per-device
+        */
+       struct atomic_freq_qos_request qos;
+
+       bool handle_clk;        /* PM domain driver gates this device's clocks */
+       bool handle_regulator;  /* PM domain driver gates this device's supplies */
+       bool handle_pm_domain;  /* device lets the driver power the domain on/off */
+       bool handle_cpuidle_qos;
+};
+
+/* One generic PM domain plus its register description and attached devices. */
+struct spacemit_pm_domain {
+       struct generic_pm_domain genpd;
+       int pm_index;           /* domain id from the DT "reg" property */
+       struct device *gdev;    /* PMU platform device (notifier anchor) */
+       int rgr_count;
+       struct regulator *rgr[MAX_REGULATOR_PER_DOMAIN];
+       /**
+        * managing the cpuidle-qos
+        */
+       struct spacemit_pm_domain_param param;
+
+       /**
+        * managing the device-drivers power qos
+        */
+       struct list_head qos_head;
+};
+
+/* Top-level PMU state: regmaps, the domain array and the cpuidle notifier. */
+struct spacemit_pmu {
+       struct device *dev;
+       int number_domains;     /* from the DT "domains" property */
+       struct genpd_onecell_data genpd_data;
+       struct regmap *regmap[MAX_REGMAP];      /* [0]=MPMU, [1]=APMU syscon regmaps */
+       struct spacemit_pm_domain **domains;    /* indexed by domain id */
+       /**
+        * managing the cpuidle-qos
+        */
+       struct notifier_block notifier;
+};
+
+static DEFINE_SPINLOCK(spacemit_apcr_qos_lock);
+
+static unsigned int g_acpr_per;
+static struct spacemit_pmu *gpmu;
+
+static struct atomic_freq_constraints afreq_constraints;
+
+/*
+ * Syscon compatibles looked up in probe(); iterated by array size rather than
+ * used for OF matching, hence no sentinel entry. Order must match
+ * MPMU_REGMAP_INDEX / APMU_REGMAP_INDEX.
+ */
+static const struct of_device_id spacemit_regmap_dt_match[] = {
+       { .compatible = "spacemit,spacemit-mpmu", },
+       { .compatible = "spacemit,spacemit-apmu", },
+};
+
+/*
+ * genpd .power_off callback: enable isolation, drop the MCU sleep bits (SW
+ * mode) or clear auto-power-on/HW mode (HW-managed domains such as LCD),
+ * poll APMU_POWER_STATUS_REG until the domain's status bit clears, then
+ * disable the domain's supplies. Domains with reg_pwr_ctrl == 0 are not
+ * software-controllable and are left untouched.
+ */
+static int spacemit_pd_power_off(struct generic_pm_domain *domain)
+{
+       unsigned int val;
+       int loop, ret;
+       struct per_device_qos *pos;
+       struct spacemit_pm_domain *spd = container_of(domain, struct spacemit_pm_domain, genpd);
+
+       if (spd->param.reg_pwr_ctrl == 0)
+               return 0;
+
+       /**
+        * if all the devices in this power domain don't want the pm-domain driver taker over
+        * the power-domian' on/off, return directly.
+        */
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if (!pos->handle_pm_domain)
+                       return 0;
+       }
+
+       /**
+        * as long as there is one device don't want to on/off this power-domain, just return
+        */
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if ((pos->level & DEV_PM_QOS_PM_DOMAIN_GATE) == 0)
+                       return 0;
+       }
+
+       if (!spd->param.use_hw) {
+               /* this is the sw type */
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+               val &= ~(1 << spd->param.bit_isolation);
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+               usleep_range(10, 15);
+
+               /* mcu power off */
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+               val &= ~((1 << spd->param.bit_sleep1) | (1 << spd->param.bit_sleep2));
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+               usleep_range(10, 15);
+
+               /* wait for the power-status bit to clear (~40-60 ms worst case) */
+               for (loop = 10000; loop >= 0; --loop) {
+                       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_POWER_STATUS_REG, &val);
+                       if ((val & (1 << spd->param.bit_pwr_stat)) == 0)
+                               break;
+                       usleep_range(4, 6);
+               }
+       } else {
+               /* LCD */
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+               val &= ~(1 << spd->param.bit_auto_pwr_on);
+               val &= ~(1 << spd->param.bit_hw_mode);
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+               usleep_range(10, 30);
+
+               for (loop = 10000; loop >= 0; --loop) {
+                       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_POWER_STATUS_REG, &val);
+                       if ((val & (1 << spd->param.bit_hw_pwr_stat)) == 0)
+                               break;
+                       usleep_range(4, 6);
+               }
+       }
+
+       if (loop < 0) {
+               pr_err("power-off domain: %d, error\n", spd->pm_index);
+               return -EBUSY;
+       }
+
+       /* disable the supply */
+       for (loop = 0; loop < spd->rgr_count; ++loop) {
+               ret = regulator_disable(spd->rgr[loop]);
+               if (ret < 0) {
+                       pr_err("%s: regulator disable failed\n", __func__);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * genpd .power_on callback: enable the domain's supplies, force the domain
+ * off first if the status register still reports it on (so the power-up
+ * sequence always starts from a known state), then run the SW or HW power-up
+ * sequence and poll APMU_POWER_STATUS_REG for completion. The audio domain
+ * additionally takes/releases the AP authority bit around the sequence.
+ */
+static int spacemit_pd_power_on(struct generic_pm_domain *domain)
+{
+       int loop, ret;
+       unsigned int val;
+       struct per_device_qos *pos;
+       struct spacemit_pm_domain *spd = container_of(domain, struct spacemit_pm_domain, genpd);
+
+       if (spd->param.reg_pwr_ctrl == 0)
+               return 0;
+
+       /**
+        * if all the devices in this power domain don't want the pm-domain driver taker over
+        * the power-domian' on/off, return directly.
+        * */
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if (!pos->handle_pm_domain)
+                       return 0;
+       }
+
+       /**
+        * as long as there is one device don't want to on/off this power-domain, just return
+        */
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if ((pos->level & DEV_PM_QOS_PM_DOMAIN_GATE) == 0)
+                       return 0;
+       }
+
+       /* enable the supply */
+       for (loop = 0; loop < spd->rgr_count; ++loop) {
+               ret = regulator_enable(spd->rgr[loop]);
+               if (ret < 0) {
+                       pr_err("%s: regulator enable failed\n", __func__);
+                       return ret;
+               }
+       }
+
+       /* take AP authority over the audio island before touching it */
+       if (spd->pm_index == K1X_PMU_AUD_PWR_DOMAIN) {
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_AUDIO_CLK_RES_CTRL, &val);
+               val |= (1 << AP_POWER_CTRL_AUDIO_AUTH_OFFSET);
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], APMU_AUDIO_CLK_RES_CTRL, val);
+       }
+
+       /* if the domain is still reported on, power it off first */
+       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_POWER_STATUS_REG, &val);
+       if (val & (1 << spd->param.bit_pwr_stat)) {
+               if (!spd->param.use_hw) {
+                       /* this is the sw type */
+                       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+                       val &= ~(1 << spd->param.bit_isolation);
+                       regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+                       usleep_range(10, 15);
+
+                       /* mcu power off */
+                       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+                       val &= ~((1 << spd->param.bit_sleep1) | (1 << spd->param.bit_sleep2));
+                       regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+                       usleep_range(10, 15);
+
+                       for (loop = 10000; loop >= 0; --loop) {
+                               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_POWER_STATUS_REG, &val);
+                               if ((val & (1 << spd->param.bit_pwr_stat)) == 0)
+                                       break;
+                               usleep_range(4, 6);
+                       }
+               } else {
+                       /* LCD */
+                       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+                       val &= ~(1 << spd->param.bit_auto_pwr_on);
+                       val &= ~(1 << spd->param.bit_hw_mode);
+                       regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+                       usleep_range(10, 30);
+
+                       for (loop = 10000; loop >= 0; --loop) {
+                               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_POWER_STATUS_REG, &val);
+                               if ((val & (1 << spd->param.bit_hw_pwr_stat)) == 0)
+                                       break;
+                               usleep_range(4, 6);
+                       }
+               }
+
+               if (loop < 0) {
+                       pr_err("power-off domain: %d, error\n", spd->pm_index);
+                       return -EBUSY;
+               }
+       }
+
+       if (!spd->param.use_hw) {
+               /* mcu power on: raise sleep1, then sleep2, then isolation */
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+               val |= (1 << spd->param.bit_sleep1);
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+               usleep_range(20, 25);
+
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+               val |= (1 << spd->param.bit_sleep2) | (1 << spd->param.bit_sleep1);
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+               usleep_range(20, 25);
+
+               /* disable isolation */
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+               val |= (1 << spd->param.bit_isolation);
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+               usleep_range(10, 15);
+
+               for (loop = 10000; loop >= 0; --loop) {
+                       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_POWER_STATUS_REG, &val);
+                       if (val & (1 << spd->param.bit_pwr_stat))
+                               break;
+                       usleep_range(4, 6);
+               }
+       } else {
+               /* LCD */
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, &val);
+               val |= (1 << spd->param.bit_auto_pwr_on);
+               val |= (1 << spd->param.bit_hw_mode);
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], spd->param.reg_pwr_ctrl, val);
+
+               usleep_range(290, 310);
+
+               for (loop = 10000; loop >= 0; --loop) {
+                       regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_POWER_STATUS_REG, &val);
+                       if (val & (1 << spd->param.bit_hw_pwr_stat))
+                               break;
+                       usleep_range(4, 6);
+               }
+       }
+
+       if (loop < 0) {
+               pr_err("power-on domain: %d, error\n", spd->pm_index);
+               return -EBUSY;
+       }
+
+       /* for audio power domain, we should let the rcpu handle it, and disable force power on */
+       if (spd->pm_index == K1X_PMU_AUD_PWR_DOMAIN) {
+               regmap_read(gpmu->regmap[APMU_REGMAP_INDEX], APMU_AUDIO_CLK_RES_CTRL, &val);
+               val &= ~((1 << AP_POWER_CTRL_AUDIO_AUTH_OFFSET) | (1 << FORCE_AUDIO_POWER_ON_OFFSET));
+               regmap_write(gpmu->regmap[APMU_REGMAP_INDEX], APMU_AUDIO_CLK_RES_CTRL, val);
+       }
+
+       return 0;
+}
+
+/*
+ * dev_pm_qos notifier: the aggregated DEV_PM_QOS_MAX_FREQUENCY value is
+ * (ab)used as a DEV_PM_QOS_*_GATE bitmask; cache it for genpd start/stop.
+ */
+static int spacemit_handle_level_notfier_call(struct notifier_block *nb, unsigned long action, void *data)
+{
+       struct per_device_qos *per_qos = container_of(nb, struct per_device_qos, notifier);
+
+       per_qos->level = action;
+
+       return 0;
+}
+
+/*
+ * genpd .attach_dev callback: allocate the per-device QoS record, register
+ * the DEV_PM_QOS_MAX_FREQUENCY notifier/request used as a gating bitmask,
+ * and parse the device's opt-out properties (clocks, regulators, cpuidle
+ * QoS, power-domain handling). All devices of one domain must agree on
+ * "pwr-domain,pm-runtime,no-sleep".
+ *
+ * NOTE(review): on the error paths below the qos_node stays linked and the
+ * notifier stays registered; genpd only calls detach_dev after a successful
+ * attach — confirm this cannot leak on attach failure.
+ */
+static int spacemit_pd_attach_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+       int err, i = 0, count;
+       struct clk *clk;
+       struct per_device_qos *per_qos, *pos;
+       const char *strings[MAX_REGULATOR_PER_DOMAIN];
+       struct spacemit_pm_domain *spd = container_of(genpd, struct spacemit_pm_domain, genpd);
+
+       /**
+        * per-device qos set
+        * this feature enable the device drivers to dynamically modify the power
+        * module taken over by PM domain driver
+        */
+       per_qos = devm_kzalloc(dev, sizeof(*per_qos), GFP_KERNEL);
+       if (!per_qos) {
+               pr_err("allocate per device qos error\n");
+               return -ENOMEM;
+       }
+
+       per_qos->dev = dev;
+       INIT_LIST_HEAD(&per_qos->qos_node);
+       list_add(&per_qos->qos_node, &spd->qos_head);
+       per_qos->notifier.notifier_call = spacemit_handle_level_notfier_call;
+
+       dev_pm_qos_add_notifier(dev, &per_qos->notifier, DEV_PM_QOS_MAX_FREQUENCY);
+
+       dev_pm_qos_add_request(dev, &per_qos->req, DEV_PM_QOS_MAX_FREQUENCY, DEV_PM_QOS_DEFAULT);
+
+       if (!of_property_read_bool(dev->of_node, "clk,pm-runtime,no-sleep")) {
+               err = pm_clk_create(dev);
+               if (err) {
+                       dev_err(dev, "pm_clk_create failed %d\n", err);
+                       return err;
+               }
+
+               while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
+                       err = pm_clk_add_clk(dev, clk);
+                       if (err) {
+                               dev_err(dev, "pm_clk_add_clk failed %d\n", err);
+                               clk_put(clk);
+                               pm_clk_destroy(dev);
+                               return err;
+                       }
+               }
+
+               per_qos->handle_clk = true;
+       }
+
+       /* parse the regulator */
+       if (!of_property_read_bool(dev->of_node, "regulator,pm-runtime,no-sleep")) {
+               count = of_property_count_strings(dev->of_node, "vin-supply-names");
+               if (count < 0)
+                       pr_debug("no vin-supply-names found\n");
+               else if (count > MAX_REGULATOR_PER_DOMAIN) {
+                       /* bound the DT-provided count so strings[] cannot overflow */
+                       pr_err("too many vin-supply-names (%d), max %d\n",
+                               count, MAX_REGULATOR_PER_DOMAIN);
+                       return -EINVAL;
+               } else {
+                       err = of_property_read_string_array(dev->of_node, "vin-supply-names",
+                               strings, count);
+                       if (err < 0) {
+                               pr_info("read string array vin-supply-names error\n");
+                               return err;
+                       }
+
+                       for (i = 0; i < count; ++i) {
+                               per_qos->rgr[i] = devm_regulator_get(dev, strings[i]);
+                               if (IS_ERR(per_qos->rgr[i])) {
+                                       pr_err("regulator supply %s, get failed\n", strings[i]);
+                                       return PTR_ERR(per_qos->rgr[i]);
+                               }
+                       }
+
+                       per_qos->rgr_count = count;
+               }
+
+               per_qos->handle_regulator = true;
+       }
+
+       /* dealing with the cpuidle-qos */
+       if (of_property_read_bool(dev->of_node, "cpuidle,pm-runtime,sleep")) {
+               atomic_freq_qos_add_request(&afreq_constraints, &per_qos->qos, FREQ_QOS_MAX, PM_QOS_BLOCK_DEFAULT_VALUE);
+               per_qos->handle_cpuidle_qos = true;
+       }
+
+       if (!of_property_read_bool(dev->of_node, "pwr-domain,pm-runtime,no-sleep"))
+               per_qos->handle_pm_domain = true;
+
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if (per_qos->handle_pm_domain != pos->handle_pm_domain) {
+                       pr_err("all the devices in this power domain must have the same 'pwr-domain,pm-runtime,no-sleep' property\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * genpd .detach_dev callback: undo spacemit_pd_attach_dev() for @dev.
+ * The notifier registration and list insertion are unconditional in
+ * attach_dev, so they must be undone unconditionally here (the original
+ * code only did so when regulators were handled, leaking the notifier and
+ * leaving a dangling list node otherwise).
+ */
+static void spacemit_pd_detach_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+       struct per_device_qos *pos, *found = NULL;
+       struct spacemit_pm_domain *spd = container_of(genpd, struct spacemit_pm_domain, genpd);
+
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if (pos->dev == dev) {
+                       found = pos;
+                       break;
+               }
+       }
+
+       /* never attached (or already detached): nothing to undo */
+       if (!found)
+               return;
+
+       dev_pm_qos_remove_notifier(dev, &found->notifier, DEV_PM_QOS_MAX_FREQUENCY);
+
+       if (found->handle_clk)
+               pm_clk_destroy(dev);
+
+       if (found->handle_regulator) {
+               while (--found->rgr_count >= 0)
+                       devm_regulator_put(found->rgr[found->rgr_count]);
+       }
+
+       list_del(&found->qos_node);
+}
+
+/*
+ * Aggregated cpuidle QoS notifier: translate the deepest allowed PM level
+ * into MPMU_APCR_PER_REG sleep-vote bits. The original default case
+ * unlocked the spinlock and then FELL THROUGH to the register update,
+ * unlocking it a second time — now it returns early instead.
+ *
+ * NOTE(review): PM_QOS_BLOCK_UDR_VCTCXO (13) has no case and hits the
+ * default path — confirm that is intended.
+ */
+static int spacemit_cpuidle_qos_notfier_call(struct notifier_block *nb, unsigned long action, void *data)
+{
+       unsigned int apcr_per;
+       unsigned int apcr_clear = 0, apcr_set = (1 << PM_QOS_PE_VOTE_AP_SLPEN_OFFSET);
+
+       spin_lock(&spacemit_apcr_qos_lock);
+
+       switch (action) {
+               case PM_QOS_BLOCK_C1:
+               case PM_QOS_BLOCK_C2:
+               case PM_QOS_BLOCK_M2:
+               case PM_QOS_BLOCK_AXI:
+                       apcr_clear |= (1 << PM_QOS_AXISDD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_DDRCORSD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_APBSD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_VCTCXOSD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_STBYEN_OFFSET);
+                       break;
+               case PM_QOS_BLOCK_DDR:
+                       apcr_set |= (1 << PM_QOS_AXISDD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_DDRCORSD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_APBSD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_VCTCXOSD_OFFSET);
+                       apcr_clear |= (1 << PM_QOS_STBYEN_OFFSET);
+                       break;
+               case PM_QOS_BLOCK_UDR:
+                       apcr_clear |= (1 << PM_QOS_STBYEN_OFFSET);
+                       apcr_set |= (1 << PM_QOS_AXISDD_OFFSET);
+                       apcr_set |= (1 << PM_QOS_DDRCORSD_OFFSET);
+                       apcr_set |= (1 << PM_QOS_APBSD_OFFSET);
+                       apcr_set |= (1 << PM_QOS_VCTCXOSD_OFFSET);
+                       break;
+               case PM_QOS_BLOCK_DEFAULT_VALUE:
+                       apcr_set |= (1 << PM_QOS_AXISDD_OFFSET);
+                       apcr_set |= (1 << PM_QOS_DDRCORSD_OFFSET);
+                       apcr_set |= (1 << PM_QOS_APBSD_OFFSET);
+                       apcr_set |= (1 << PM_QOS_VCTCXOSD_OFFSET);
+                       apcr_set |= (1 << PM_QOS_STBYEN_OFFSET);
+                       break;
+               default:
+                       pr_warn("Invalid pm qos value\n");
+                       spin_unlock(&spacemit_apcr_qos_lock);
+                       /* fix: bail out — falling through double-unlocked the lock */
+                       return 0;
+       }
+
+       regmap_read(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_APCR_PER_REG, &apcr_per);
+       apcr_per &= ~(apcr_clear);
+       apcr_per |= apcr_set;
+       regmap_write(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_APCR_PER_REG, apcr_per);
+
+       /* read back to post the write before releasing the lock */
+       regmap_read(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_APCR_PER_REG, &apcr_per);
+
+       spin_unlock(&spacemit_apcr_qos_lock);
+
+       return 0;
+}
+
+/*
+ * genpd dev_ops .stop: per-device runtime suspend. Gates clocks, relaxes
+ * the cpuidle QoS vote back to the default, and disables the device's
+ * supplies — each only if the device opted in and its QoS gate bit allows.
+ */
+static int spacemit_genpd_stop(struct device *dev)
+{
+       int loop, ret;
+       struct per_device_qos *pos, *found = NULL;
+       struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain);
+       struct spacemit_pm_domain *spd = container_of(pd, struct spacemit_pm_domain, genpd);
+
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if (pos->dev == dev) {
+                       found = pos;
+                       break;
+               }
+       }
+
+       /* guard: dereferencing the list head container would be UB */
+       if (!found)
+               return 0;
+
+       /* disable the clk */
+       if ((found->level & DEV_PM_QOS_CLK_GATE) && found->handle_clk)
+               pm_clk_suspend(dev);
+
+       /* dealing with the pm_qos */
+       if (found->handle_cpuidle_qos)
+               atomic_freq_qos_update_request(&found->qos, PM_QOS_BLOCK_DEFAULT_VALUE);
+
+       if (found->handle_regulator && (found->level & DEV_PM_QOS_REGULATOR_GATE)) {
+               for (loop = 0; loop < found->rgr_count; ++loop) {
+                       ret = regulator_disable(found->rgr[loop]);
+                       if (ret < 0) {
+                               pr_err("%s: regulator disable failed\n", __func__);
+                               return ret;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * genpd dev_ops .start: per-device runtime resume — mirror of
+ * spacemit_genpd_stop(): supplies on, cpuidle QoS tightened to the
+ * domain's level, clocks resumed.
+ */
+static int spacemit_genpd_start(struct device *dev)
+{
+       int loop, ret;
+       struct per_device_qos *pos, *found = NULL;
+       struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain);
+       struct spacemit_pm_domain *spd = container_of(pd, struct spacemit_pm_domain, genpd);
+
+       list_for_each_entry(pos, &spd->qos_head, qos_node) {
+               if (pos->dev == dev) {
+                       found = pos;
+                       break;
+               }
+       }
+
+       /* guard: dereferencing the list head container would be UB */
+       if (!found)
+               return 0;
+
+       if (found->handle_regulator && (found->level & DEV_PM_QOS_REGULATOR_GATE)) {
+               for (loop = 0; loop < found->rgr_count; ++loop) {
+                       ret = regulator_enable(found->rgr[loop]);
+                       if (ret < 0) {
+                               /* fix: this is the enable path, report it as such */
+                               pr_err("%s: regulator enable failed\n", __func__);
+                               return ret;
+                       }
+               }
+       }
+
+       /* dealing with the pm_qos */
+       if (found->handle_cpuidle_qos)
+               atomic_freq_qos_update_request(&found->qos, spd->param.pm_qos);
+
+       if ((found->level & DEV_PM_QOS_CLK_GATE) && found->handle_clk)
+               pm_clk_resume(dev);
+
+       return 0;
+}
+
+/*
+ * Parse the per-domain register layout from DT into @pd->param.
+ * "pm_qos" is mandatory; the remaining properties are optional as a set
+ * (domains without SW power control omit them), so their combined failure
+ * is only logged at debug level and does not fail the parse.
+ */
+static int spacemit_get_pm_domain_parameters(struct device_node *node, struct spacemit_pm_domain *pd)
+{
+       int err;
+
+       err = of_property_read_u32(node, "pm_qos", &pd->param.pm_qos);
+       if (err) {
+               pr_err("%s:%d, failed to retrieve the domain pm_qos\n",
+                               __func__, __LINE__);
+               return -EINVAL;
+       }
+
+       err = of_property_read_u32(node, "reg_pwr_ctrl", &pd->param.reg_pwr_ctrl);
+       err |= of_property_read_u32(node, "bit_hw_mode", &pd->param.bit_hw_mode);
+       err |= of_property_read_u32(node, "bit_sleep2", &pd->param.bit_sleep2);
+       err |= of_property_read_u32(node, "bit_sleep1", &pd->param.bit_sleep1);
+       err |= of_property_read_u32(node, "bit_isolation", &pd->param.bit_isolation);
+       err |= of_property_read_u32(node, "bit_auto_pwr_on", &pd->param.bit_auto_pwr_on);
+       err |= of_property_read_u32(node, "bit_hw_pwr_stat", &pd->param.bit_hw_pwr_stat);
+       err |= of_property_read_u32(node, "bit_pwr_stat", &pd->param.bit_pwr_stat);
+       err |= of_property_read_u32(node, "use_hw", &pd->param.use_hw);
+
+       if (err)
+               pr_debug("get pm domain parameter failed\n");
+
+       return 0;
+}
+
+/*
+ * Create one spacemit_pm_domain from a DT node: read its id ("reg"),
+ * register layout, optional supplies, then register it as a genpd.
+ * The audio domain is initialized powered-on (the RCPU owns it at boot).
+ */
+static int spacemit_pm_add_one_domain(struct spacemit_pmu *pmu, struct device_node *node)
+{
+       int err;
+       int id, count, i;
+       struct spacemit_pm_domain *pd;
+       const char *strings[MAX_REGULATOR_PER_DOMAIN];
+
+       err = of_property_read_u32(node, "reg", &id);
+       if (err) {
+               pr_err("%s:%d, failed to retrieve the domain id\n",
+                               __func__, __LINE__);
+               return -EINVAL;
+       }
+
+       if (id >= pmu->number_domains) {
+               pr_err("%pOFn: invalid domain id %d\n", node, id);
+               return -EINVAL;
+       }
+
+       pd = devm_kzalloc(pmu->dev, sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return -ENOMEM;
+
+       pd->pm_index = id;
+
+       /* we will add all the notifiers to this device */
+       pd->gdev = pmu->dev;
+
+       err = spacemit_get_pm_domain_parameters(node, pd);
+       if (err)
+               return -EINVAL;
+
+       /* get the power supply of the power-domain */
+       count = of_property_count_strings(node, "vin-supply-names");
+       if (count < 0)
+               pr_debug("no vin-supply-names found\n");
+       else if (count > MAX_REGULATOR_PER_DOMAIN) {
+               /* bound the DT-provided count so strings[]/pd->rgr[] cannot overflow */
+               pr_err("%pOFn: too many vin-supply-names (%d), max %d\n",
+                               node, count, MAX_REGULATOR_PER_DOMAIN);
+               return -EINVAL;
+       } else {
+               err = of_property_read_string_array(node, "vin-supply-names",
+                       strings, count);
+               if (err < 0) {
+                       pr_info("read string array vin-supply-names error\n");
+                       return err;
+               }
+
+               for (i = 0; i < count; ++i) {
+                       pd->rgr[i] = regulator_get(NULL, strings[i]);
+                       if (IS_ERR(pd->rgr[i])) {
+                               err = PTR_ERR(pd->rgr[i]);
+                               pr_err("regulator supply %s, get failed\n", strings[i]);
+                               /* release the regulators already acquired */
+                               while (--i >= 0)
+                                       regulator_put(pd->rgr[i]);
+                               return err;
+                       }
+               }
+
+               pd->rgr_count = count;
+       }
+
+       INIT_LIST_HEAD(&pd->qos_head);
+
+       pd->genpd.name = kbasename(node->full_name);
+       pd->genpd.power_off = spacemit_pd_power_off;
+       pd->genpd.power_on = spacemit_pd_power_on;
+       pd->genpd.attach_dev = spacemit_pd_attach_dev;
+       pd->genpd.detach_dev = spacemit_pd_detach_dev;
+
+       pd->genpd.dev_ops.stop = spacemit_genpd_stop;
+       pd->genpd.dev_ops.start = spacemit_genpd_start;
+
+       /* audio power-domain is power-on by default */
+       if (id == K1X_PMU_AUD_PWR_DOMAIN)
+               pm_genpd_init(&pd->genpd, NULL, false);
+       else
+               pm_genpd_init(&pd->genpd, NULL, true);
+
+       pmu->domains[id] = pd;
+
+       return 0;
+}
+
+/*
+ * Unregister one domain and release its supplies. The regulators were
+ * obtained with regulator_get() (not devm), so they must be put here or
+ * they leak; the domain struct itself is devm-managed.
+ */
+static void spacemit_pm_remove_one_domain(struct spacemit_pm_domain *pd)
+{
+       int ret;
+
+       ret = pm_genpd_remove(&pd->genpd);
+       if (ret < 0) {
+               pr_err("failed to remove domain '%s' : %d\n", pd->genpd.name, ret);
+       }
+
+       /* release non-devm regulators acquired in spacemit_pm_add_one_domain() */
+       while (--pd->rgr_count >= 0)
+               regulator_put(pd->rgr[pd->rgr_count]);
+
+       /* devm will free our memory */
+}
+
+/*
+ * Recursively register every child of @parent as a genpd subdomain of it.
+ * The parent's "reg" id is read once up front (the original re-read it on
+ * every loop iteration), and the recursive descent's result is now
+ * propagated instead of being silently ignored.
+ */
+static int spacemit_pm_add_subdomain(struct spacemit_pmu *pmu, struct device_node *parent)
+{
+       struct device_node *np;
+       struct generic_pm_domain *child_domain, *parent_domain;
+       int err, idx;
+
+       err = of_property_read_u32(parent, "reg", &idx);
+       if (err) {
+               pr_err("%pOFn: failed to retrieve domain id (reg): %d\n",
+                               parent, err);
+               return err;
+       }
+
+       parent_domain = &pmu->domains[idx]->genpd;
+
+       for_each_child_of_node(parent, np) {
+               err = spacemit_pm_add_one_domain(pmu, np);
+               if (err) {
+                       pr_err("failed to handle node %pOFn: %d\n", np, err);
+                       goto err_out;
+               }
+
+               err = of_property_read_u32(np, "reg", &idx);
+               if (err) {
+                       pr_err("%pOFn: failed to retrieve domain id (reg): %d\n",
+                                       np, err);
+                       goto err_out;
+               }
+
+               child_domain = &pmu->domains[idx]->genpd;
+
+               err = pm_genpd_add_subdomain(parent_domain, child_domain);
+               if (err) {
+                       pr_err("%s failed to add subdomain %s: %d\n",
+                                       parent_domain->name, child_domain->name, err);
+                       goto err_out;
+               } else {
+                       pr_info("%s add subdomain: %s\n",
+                                       parent_domain->name, child_domain->name);
+               }
+
+               /* fix: propagate failures from the recursive descent */
+               err = spacemit_pm_add_subdomain(pmu, np);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       of_node_put(np);
+       return err;
+}
+
+/* Tear down every registered domain; the backing memory is devm-managed. */
+static void spacemit_pm_domain_cleanup(struct spacemit_pmu *pmu)
+{
+       int idx;
+
+       for (idx = 0; idx < pmu->number_domains; idx++) {
+               struct spacemit_pm_domain *domain = pmu->domains[idx];
+
+               if (!domain)
+                       continue;
+
+               spacemit_pm_remove_one_domain(domain);
+       }
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * Syscore suspend hook: save MPMU_APCR_PER_REG, set every sleep-vote bit
+ * (deepest state) for system suspend, and arm the PMIC wakeup source.
+ * Runs with interrupts disabled, single CPU — the spinlock guards against
+ * nothing here but keeps the register access pattern uniform.
+ */
+static int acpr_per_suspend(void)
+{
+       unsigned int apcr_per;
+       unsigned int apcr_clear = 0, apcr_set = (1 << PM_QOS_PE_VOTE_AP_SLPEN_OFFSET);
+
+       spin_lock(&spacemit_apcr_qos_lock);
+
+       apcr_set |= (1 << PM_QOS_AXISDD_OFFSET);
+       apcr_set |= (1 << PM_QOS_DDRCORSD_OFFSET);
+       apcr_set |= (1 << PM_QOS_APBSD_OFFSET);
+       apcr_set |= (1 << PM_QOS_VCTCXOSD_OFFSET);
+       apcr_set |= (1 << PM_QOS_STBYEN_OFFSET);
+
+       regmap_read(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_APCR_PER_REG, &apcr_per);
+       g_acpr_per = apcr_per;  /* saved for acpr_per_resume() */
+       apcr_per &= ~(apcr_clear);      /* apcr_clear is 0 here; kept for symmetry */
+       apcr_per |= apcr_set;
+       regmap_write(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_APCR_PER_REG, apcr_per);
+
+       spin_unlock(&spacemit_apcr_qos_lock);
+
+       /* enable pmic wakeup */
+       regmap_read(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_AWUCRM_REG, &apcr_per);
+       apcr_per |= (1 << WAKEUP_SOURCE_WAKEUP_7);
+       regmap_write(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_AWUCRM_REG, apcr_per);
+
+       return 0;
+}
+
+/*
+ * Syscore resume hook: restore the APCR value saved by acpr_per_suspend()
+ * and disarm the PMIC wakeup source.
+ */
+static void acpr_per_resume(void)
+{
+       unsigned int apcr_per;
+
+       spin_lock(&spacemit_apcr_qos_lock);
+
+       regmap_write(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_APCR_PER_REG, g_acpr_per);
+
+       spin_unlock(&spacemit_apcr_qos_lock);
+
+       /* disable pmic wakeup */
+       regmap_read(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_AWUCRM_REG, &apcr_per);
+       apcr_per &= ~(1 << WAKEUP_SOURCE_WAKEUP_7);
+       regmap_write(gpmu->regmap[MPMU_REGMAP_INDEX], MPMU_AWUCRM_REG, apcr_per);
+}
+
+/* Registered in probe() so APCR state survives system suspend/resume. */
+static struct syscore_ops acpr_per_syscore_ops = {
+       .suspend = acpr_per_suspend,
+       .resume = acpr_per_resume,
+};
+#endif
+
+static int spacemit_pm_domain_probe(struct platform_device *pdev)
+{
+       int err = 0, i;
+       struct device *dev = &pdev->dev;
+       struct device_node *node;
+       struct device_node *np = dev->of_node;
+       struct spacemit_pmu *pmu = NULL;
+
+       pmu = (struct spacemit_pmu *)devm_kzalloc(dev, sizeof(struct spacemit_pmu), GFP_KERNEL);
+       if (pmu == NULL) {
+               pr_err("%s:%d, err\n", __func__, __LINE__);
+               return -ENOMEM;
+       }
+
+       pmu->dev = dev;
+
+       for (i = 0; i < sizeof(spacemit_regmap_dt_match) / sizeof(spacemit_regmap_dt_match[0]); ++i) {
+               pmu->regmap[i] = syscon_regmap_lookup_by_compatible(spacemit_regmap_dt_match[i].compatible);
+               if (IS_ERR(pmu->regmap[i])) {
+                       pr_err("%s:%d err\n", __func__, __LINE__);
+                       return PTR_ERR(pmu->regmap[i]);
+               }
+       }
+
+       /* get number power domains */
+       err = of_property_read_u32(np, "domains", &pmu->number_domains);
+       if (err) {
+               pr_err("%s:%d, failed to retrive the number of domains\n",
+                               __func__, __LINE__);
+               return -EINVAL;
+       }
+
+       pmu->domains = devm_kzalloc(dev, sizeof(struct spacemit_pm_domain *) * pmu->number_domains,
+                       GFP_KERNEL);
+       if (!pmu->domains) {
+               pr_err("%s:%d, err\n", __func__, __LINE__);
+               return -ENOMEM;
+       }
+
+       err = -ENODEV;
+
+       for_each_available_child_of_node(np, node) {
+               err = spacemit_pm_add_one_domain(pmu, node);
+               if (err) {
+                       pr_err("%s:%d, failed to handle node %pOFn: %d\n", __func__, __LINE__,
+                                       node, err);
+                       of_node_put(node);
+                       goto err_out;
+               }
+
+               err = spacemit_pm_add_subdomain(pmu, node);
+               if (err) {
+                       pr_err("%s:%d, failed to handle subdomain node %pOFn: %d\n",
+                                       __func__, __LINE__, node, err);
+                       of_node_put(node);
+                       goto err_out;
+               }
+       }
+
+       if(err) {
+               pr_err("no power domains defined\n");
+               goto err_out;
+       }
+
+       pmu->genpd_data.domains = (struct generic_pm_domain **)pmu->domains;
+       pmu->genpd_data.num_domains = pmu->number_domains;
+
+       err = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+       if (err) {
+               pr_err("failed to add provider: %d\n", err);
+               goto err_out;
+       }
+
+       /**
+        * dealing with the cpuidle qos
+        */
+       pmu->notifier.notifier_call = spacemit_cpuidle_qos_notfier_call;
+       atomic_freq_constraints_init(&afreq_constraints);
+       atomic_freq_qos_add_notifier(&afreq_constraints, FREQ_QOS_MAX, &pmu->notifier);
+
+       gpmu = pmu;
+
+#ifdef CONFIG_PM_SLEEP
+       register_syscore_ops(&acpr_per_syscore_ops);
+#endif
+       return 0;
+
+err_out:
+       spacemit_pm_domain_cleanup(pmu);
+       return err;
+}
+
+/* OF match table for the power controller node (sentinel-terminated). */
+static const struct of_device_id spacemit_pm_domain_dt_match[] = {
+       { .compatible = "spacemit,power-controller", },
+       { },
+};
+
+static struct platform_driver spacemit_pm_domain_driver = {
+       .probe = spacemit_pm_domain_probe,
+       .driver = {
+               .name   = "spacemit-pm-domain",
+               .of_match_table = spacemit_pm_domain_dt_match,
+       },
+};
+
+/*
+ * Registered at core_initcall so the domains exist before the drivers
+ * that depend on them probe. Built-in only; no module exit path.
+ */
+static int __init spacemit_pm_domain_drv_register(void)
+{
+       return platform_driver_register(&spacemit_pm_domain_driver);
+}
+core_initcall(spacemit_pm_domain_drv_register);
diff --git a/drivers/soc/spacemit/spacemit-rf/Kconfig b/drivers/soc/spacemit/spacemit-rf/Kconfig
new file mode 100755 (executable)
index 0000000..8410233
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Spacemit rfkill driver.
+#
+config SPACEMIT_RFKILL
+  tristate "Spacemit rfkill driver"
+  depends on WIRELESS || RFKILL
+  help
+    Power sequencing (rfkill) support for the wireless module on
+    Spacemit boards, covering Wi-Fi and Bluetooth power control.
+
diff --git a/drivers/soc/spacemit/spacemit-rf/Makefile b/drivers/soc/spacemit/spacemit-rf/Makefile
new file mode 100755 (executable)
index 0000000..c2cb0d1
--- /dev/null
@@ -0,0 +1,6 @@
+
+#
+# Makefile for wifi bluetooth power controller drivers
+#
+
+obj-$(CONFIG_SPACEMIT_RFKILL)   += spacemit-pwrseq.o spacemit-wlan.o spacemit-bt.o
diff --git a/drivers/soc/spacemit/spacemit-rf/spacemit-bt.c b/drivers/soc/spacemit/spacemit-rf/spacemit-bt.c
new file mode 100755 (executable)
index 0000000..34e89a7
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * spacemit-bt.c -- power on/off bt part of SoC
+ *
+ * Copyright 2023, Spacemit Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/rfkill.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include "spacemit-pwrseq.h"
+
+/* Runtime state for one Bluetooth power-sequencing device. */
+struct bt_pwrseq {
+       struct device           *dev;
+       bool power_state;       /* true while BT is powered on */
+       u32 power_on_delay_ms;  /* settle delay after releasing reset */
+
+       struct gpio_desc *reset_n;      /* optional BT reset/enable GPIO */
+       struct clk *ext_clk;            /* optional external reference clock */
+       struct rfkill *rfkill;          /* rfkill switch driving this sequence */
+};
+
+/*
+ * spacemit_bt_on - run the BT power/reset sequence.
+ * Powers the shared RF block up or down through the parent pwrseq and
+ * toggles the BT reset line accordingly.  Always returns 0; a missing
+ * reset GPIO turns the whole call into a no-op.
+ */
+static int spacemit_bt_on(struct bt_pwrseq *pwrseq, bool enable)
+{
+       struct spacemit_pwrseq *parent = spacemit_get_pwrseq();
+
+       if (!pwrseq || IS_ERR(pwrseq->reset_n))
+               return 0;
+
+       if (!enable) {
+               /* Assert reset first, then drop the shared supply. */
+               gpiod_set_value(pwrseq->reset_n, 0);
+               if (parent)
+                       spacemit_power_on(parent, 0);
+       } else {
+               /* Bring the shared supply up before releasing reset. */
+               if (parent)
+                       spacemit_power_on(parent, 1);
+               gpiod_set_value(pwrseq->reset_n, 1);
+               if (pwrseq->power_on_delay_ms)
+                       msleep(pwrseq->power_on_delay_ms);
+       }
+
+       pwrseq->power_state = enable;
+       return 0;
+}
+
+/*
+ * rfkill set_block callback: blocked == true requests power off.
+ * power_state tracks "powered on", so blocked != power_state means
+ * the device is already in the requested state and nothing is done.
+ */
+static int spacemit_bt_set_block(void *data, bool blocked)
+{
+       struct bt_pwrseq *pwrseq = data;
+       int ret;
+
+       if (blocked != pwrseq->power_state) {
+               dev_warn(pwrseq->dev, "block state already is %d\n", blocked);
+               return 0;
+       }
+
+       dev_info(pwrseq->dev, "set block: %d\n", blocked);
+       ret = spacemit_bt_on(pwrseq, !blocked);
+       if (ret) {
+               dev_err(pwrseq->dev, "set block failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct rfkill_ops spacemit_bt_rfkill_ops = {
+       .set_block = spacemit_bt_set_block,
+};
+
+/*
+ * spacemit_bt_probe - set up the optional reset GPIO and external clock,
+ * then register an rfkill switch that drives the BT power sequence.
+ * Returns 0 on success or a negative errno.
+ */
+static int spacemit_bt_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct bt_pwrseq *pwrseq;
+       int ret;
+
+       pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+       if (!pwrseq)
+               return -ENOMEM;
+
+       pwrseq->dev = dev;
+       platform_set_drvdata(pdev, pwrseq);
+
+       /* Reset GPIO is optional: -ENOENT/-ENOSYS mean "not described". */
+       pwrseq->reset_n = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(pwrseq->reset_n) &&
+               PTR_ERR(pwrseq->reset_n) != -ENOENT &&
+               PTR_ERR(pwrseq->reset_n) != -ENOSYS) {
+               return PTR_ERR(pwrseq->reset_n);
+       }
+
+       /* External clock is optional; failure to enable it is non-fatal. */
+       pwrseq->ext_clk = devm_clk_get(dev, "clock");
+       if (IS_ERR_OR_NULL(pwrseq->ext_clk)) {
+               dev_dbg(dev, "failed get ext clock\n");
+       } else {
+               ret = clk_prepare_enable(pwrseq->ext_clk);
+               if (ret < 0)
+                       dev_warn(dev, "can't enable clk\n");
+       }
+
+       if (device_property_read_u32(dev, "power-on-delay-ms",
+                                &pwrseq->power_on_delay_ms))
+               pwrseq->power_on_delay_ms = 10;
+
+       pwrseq->rfkill = rfkill_alloc("spacemit-bt", dev, RFKILL_TYPE_BLUETOOTH,
+                                   &spacemit_bt_rfkill_ops, pwrseq);
+       if (!pwrseq->rfkill) {
+               dev_err(dev, "failed alloc bt rfkill\n");
+               ret = -ENOMEM;
+               goto alloc_err;
+       }
+
+       /* Start soft-blocked (powered off), matching the reset line default. */
+       rfkill_set_states(pwrseq->rfkill, true, false);
+
+       ret = rfkill_register(pwrseq->rfkill);
+       if (ret) {
+               dev_err(dev, "failed register bt rfkill\n");
+               goto register_err;
+       }
+
+       return 0;
+
+register_err:
+       /* rfkill is known non-NULL here; the previous NULL check was redundant. */
+       rfkill_destroy(pwrseq->rfkill);
+alloc_err:
+       if (!IS_ERR_OR_NULL(pwrseq->ext_clk))
+               clk_disable_unprepare(pwrseq->ext_clk);
+       return ret;
+}
+
+/* Tear down the rfkill switch and stop the external clock, if any. */
+static int spacemit_bt_remove(struct platform_device *pdev)
+{
+       struct bt_pwrseq *pwrseq = platform_get_drvdata(pdev);
+
+       if (pwrseq->rfkill) {
+               rfkill_unregister(pwrseq->rfkill);
+               rfkill_destroy(pwrseq->rfkill);
+       }
+
+       if (!IS_ERR_OR_NULL(pwrseq->ext_clk))
+               clk_disable_unprepare(pwrseq->ext_clk);
+
+       return 0;
+}
+
+static const struct of_device_id spacemit_bt_ids[] = {
+       { .compatible = "spacemit,bt-pwrseq" },
+       { /* Sentinel */ }
+};
+/* Export the match table so the module autoloads on a DT match. */
+MODULE_DEVICE_TABLE(of, spacemit_bt_ids);
+
+#ifdef CONFIG_PM_SLEEP
+/* No BT-specific state to save/restore yet; hooks kept as placeholders. */
+static int spacemit_bt_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int spacemit_bt_resume(struct device *dev)
+{
+       return 0;
+}
+
+static const struct dev_pm_ops spacemit_bt_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(spacemit_bt_suspend, spacemit_bt_resume)
+};
+
+/* Lets the driver struct reference the PM ops unconditionally. */
+#define DEV_PM_OPS     (&spacemit_bt_dev_pm_ops)
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver spacemit_bt_driver = {
+       .probe          = spacemit_bt_probe,
+       .remove = spacemit_bt_remove,
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "spacemit-bt",
+               .of_match_table = spacemit_bt_ids,
+               /* Wire up the sleep hooks; DEV_PM_OPS was previously unused. */
+               .pm     = DEV_PM_OPS,
+       },
+};
+
+module_platform_driver(spacemit_bt_driver);
+
+MODULE_DESCRIPTION("spacemit bt pwrseq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/spacemit/spacemit-rf/spacemit-pwrseq.c b/drivers/soc/spacemit/spacemit-rf/spacemit-pwrseq.c
new file mode 100755 (executable)
index 0000000..4e1b512
--- /dev/null
@@ -0,0 +1,317 @@
+/*
+ * spacemit-pwrseq.c -- power on/off pwrseq part of SoC
+ *
+ * Copyright 2023, Spacemit Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include "spacemit-pwrseq.h"
+
+/* Singleton parent pwrseq; only set/cleared by probe/remove in this file,
+ * so keep it file-local. */
+static struct spacemit_pwrseq *pwrseq_data;
+
+/*
+ * spacemit_get_pwrseq - return the registered parent power sequencer,
+ * or NULL if the pwrseq device has not probed (yet).
+ */
+struct spacemit_pwrseq *spacemit_get_pwrseq(void)
+{
+       return pwrseq_data;
+}
+
+/*
+ * Drive every "pwr" GPIO in the array to @value.  Best effort: silently
+ * does nothing when the GPIO array is absent or the scratch bitmap
+ * cannot be allocated.
+ */
+static void spacemit_set_gpios_value(struct spacemit_pwrseq *pwrseq,
+                                               int value)
+{
+       struct gpio_descs *pwr_gpios = pwrseq->pwr_gpios;
+
+       if (!IS_ERR(pwr_gpios)) {
+               unsigned long *values;
+               int nvalues = pwr_gpios->ndescs;
+
+               values = bitmap_alloc(nvalues, GFP_KERNEL);
+               if (!values)
+                       return;
+
+               /* All GPIOs are driven to the same level. */
+               if (value)
+                       bitmap_fill(values, nvalues);
+               else
+                       bitmap_zero(values, nvalues);
+
+               gpiod_set_array_value_cansleep(nvalues, pwr_gpios->desc,
+                                              pwr_gpios->info, values);
+
+               bitmap_free(values);
+       }
+}
+
+/*
+ * Set @regulator to @target_uV within [@min_uV, @max_uV] if that range is
+ * supported.  Returns a negative errno on failure, 1 when the voltage was
+ * already at the target (no switch), 0 on a successful change.
+ */
+static int spacemit_regulator_set_voltage_if_supported(struct regulator *regulator,
+                                                 int min_uV, int target_uV,
+                                                 int max_uV)
+{
+       int current_uV;
+
+       /*
+        * Check if supported first to avoid errors since we may try several
+        * signal levels during power up and don't want to show errors.
+        */
+       if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
+               return -EINVAL;
+
+       /*
+        * The voltage is already set, no need to switch.
+        * Return 1 to indicate that no switch happened.
+        */
+       current_uV = regulator_get_voltage(regulator);
+       if (current_uV == target_uV)
+               return 1;
+
+       return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
+                                            max_uV);
+}
+
+/*
+ * Enable (or disable) @regulator.  On enable the voltage is first clamped
+ * into a window around @volt that matches typical vdd/io supply ranges.
+ * Errors are logged but not propagated (best-effort power sequencing).
+ */
+static void spacemit_regulator_on(struct spacemit_pwrseq *pwrseq,
+                                               struct regulator *regulator, int volt, bool on_off)
+{
+       struct device *dev = pwrseq->dev;
+       int ret, min_uV, max_uV;
+
+       if(on_off){
+               /*
+                * mostly, vdd voltage is 3.3V, io voltage is 1.8V or 3.3V.
+                * maybe need support 1.2V io signaling later.
+                */
+               if(regulator == pwrseq->io_supply){
+                       min_uV = max(volt - 100000, 1700000);
+                       max_uV = min(volt + 150000, 3300000);
+               }else{
+                       min_uV = max(volt - 300000, 2700000);
+                       max_uV = min(volt + 200000, 3600000);
+               }
+
+               ret = spacemit_regulator_set_voltage_if_supported(regulator,
+                                       min_uV, volt, max_uV);
+               if (ret < 0) {
+                       dev_err(dev, "set voltage failed!\n");
+                       return;
+               }
+
+               ret = regulator_enable(regulator);
+               if (ret < 0) {
+                       dev_err(dev, "enable failed\n");
+                       return;
+               }
+
+               /* Read back the actual voltage for diagnostics. */
+               ret = regulator_get_voltage(regulator);
+               if (ret < 0) {
+                       dev_err(dev, "get voltage failed\n");
+                       return;
+               }
+
+               dev_info(dev, "check voltage: %d\n", ret);
+       }else{
+               /* NOTE(review): disable return value ignored — presumably
+                * intentional best-effort teardown; confirm. */
+               regulator_disable(regulator);
+       }
+}
+
+/* Enable/disable the vdd and io supplies (each optional). */
+static void spacemit_pre_power_on(struct spacemit_pwrseq *pwrseq,
+                                               bool on_off)
+{
+       if(!IS_ERR(pwrseq->vdd_supply))
+               spacemit_regulator_on(pwrseq, pwrseq->vdd_supply, pwrseq->vdd_voltage, on_off);
+
+       if(!IS_ERR(pwrseq->io_supply))
+               spacemit_regulator_on(pwrseq, pwrseq->io_supply, pwrseq->io_voltage, on_off);
+}
+
+/* Gate the optional external clock; clk_enabled prevents unbalanced
+ * prepare_enable/disable_unprepare calls. */
+static void spacemit_post_power_on(struct spacemit_pwrseq *pwrseq,
+                                               bool on_off)
+{
+       if (!IS_ERR(pwrseq->ext_clk)) {
+               if(on_off && !pwrseq->clk_enabled){
+                       clk_prepare_enable(pwrseq->ext_clk);
+                       pwrseq->clk_enabled = true;
+               }
+
+               if(!on_off && pwrseq->clk_enabled){
+                       clk_disable_unprepare(pwrseq->ext_clk);
+                       pwrseq->clk_enabled = false;
+               }
+       }
+}
+
+/*
+ * spacemit_power_on - refcounted power control for the shared RF block.
+ * The first user powers the block up (supplies -> GPIOs -> delay -> clock);
+ * the last user powers it down in reverse order.  Serialized by
+ * pwrseq_mutex so concurrent wlan/bt callers are safe.
+ */
+void spacemit_power_on(struct spacemit_pwrseq *pwrseq,
+                                               bool on_off)
+{
+       mutex_lock(&pwrseq->pwrseq_mutex);
+       if(on_off){
+               if (!atomic_read(&pwrseq->pwrseq_count)){
+                       dev_info(pwrseq->dev, "turn power on\n");
+                       spacemit_pre_power_on(pwrseq, on_off);
+                       spacemit_set_gpios_value(pwrseq, on_off);
+                       if (pwrseq->power_on_delay_ms)
+                               msleep(pwrseq->power_on_delay_ms);
+                       spacemit_post_power_on(pwrseq, on_off);
+               }
+               atomic_inc(&pwrseq->pwrseq_count);
+       }else{
+               if (atomic_read(&pwrseq->pwrseq_count)){
+                       /* Only the last user actually powers the block down. */
+                       if (!atomic_dec_return(&pwrseq->pwrseq_count)){
+                               dev_info(pwrseq->dev, "turn power off\n");
+                               spacemit_post_power_on(pwrseq, on_off);
+                               spacemit_set_gpios_value(pwrseq, on_off);
+                               spacemit_pre_power_on(pwrseq, on_off);
+                       }
+               }else{
+                       dev_err(pwrseq->dev, "already power off, please check\n");
+               }
+       }
+       mutex_unlock(&pwrseq->pwrseq_mutex);
+}
+
+/*
+ * spacemit_pwrseq_probe - parse the optional supplies, power GPIOs,
+ * external clock and delays from DT, populate the child (wlan/bt)
+ * pwrseq devices, and power the block immediately if "power-always-on"
+ * is set.  Returns 0 on success or a negative errno.
+ */
+static int spacemit_pwrseq_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct spacemit_pwrseq *pwrseq;
+       int ret;
+
+       pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+       if (!pwrseq)
+               return -ENOMEM;
+
+       pwrseq->dev = dev;
+       platform_set_drvdata(pdev, pwrseq);
+
+       /* Supplies are optional; only -EPROBE_DEFER is propagated. */
+       pwrseq->vdd_supply = devm_regulator_get_optional(dev, "vdd");
+       if (IS_ERR(pwrseq->vdd_supply)) {
+               if (PTR_ERR(pwrseq->vdd_supply) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_dbg(dev, "No vdd regulator found\n");
+       }else{
+               if (device_property_read_u32(dev, "vdd_voltage", &pwrseq->vdd_voltage)) {
+                       pwrseq->vdd_voltage = 3300000;
+                       dev_dbg(dev, "failed get vdd voltage,use default value (%u)\n", pwrseq->vdd_voltage);
+               }
+       }
+
+       pwrseq->io_supply = devm_regulator_get_optional(dev, "io");
+       if (IS_ERR(pwrseq->io_supply)) {
+               if (PTR_ERR(pwrseq->io_supply) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_dbg(dev, "No io regulator found\n");
+       }else{
+               if (device_property_read_u32(dev, "io_voltage", &pwrseq->io_voltage)) {
+                       pwrseq->io_voltage = 1800000;
+                       dev_dbg(dev, "failed get io voltage,use default value (%u)\n", pwrseq->io_voltage);
+               }
+       }
+
+       /* Power GPIOs are optional: -ENOENT/-ENOSYS mean "not described". */
+       pwrseq->pwr_gpios = devm_gpiod_get_array(dev, "pwr",
+                                                       GPIOD_OUT_LOW);
+       if (IS_ERR(pwrseq->pwr_gpios) &&
+               PTR_ERR(pwrseq->pwr_gpios) != -ENOENT &&
+               PTR_ERR(pwrseq->pwr_gpios) != -ENOSYS) {
+               return PTR_ERR(pwrseq->pwr_gpios);
+       }
+
+       pwrseq->ext_clk = devm_clk_get(dev, "clock");
+       if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT){
+               dev_dbg(dev, "failed get ext clock\n");
+               return PTR_ERR(pwrseq->ext_clk);
+       }
+
+       if(device_property_read_u32(dev, "power-on-delay-ms",
+                                &pwrseq->power_on_delay_ms))
+               pwrseq->power_on_delay_ms = 10;
+
+       if(device_property_read_bool(dev, "power-always-on"))
+               pwrseq->always_on = true;
+
+       /* Create the wlan/bt child platform devices under this node. */
+       if (node) {
+               ret = of_platform_populate(node, NULL, NULL, dev);
+               if (ret) {
+                       dev_err(dev, "failed to add sub pwrseq\n");
+                       return ret;
+               }
+       } else {
+               dev_err(dev, "no device node, failed to add sub pwrseq\n");
+               ret = -ENODEV;
+               return ret;
+       }
+
+       /* NOTE(review): the singleton is published before mutex_init below;
+        * confirm no child can call spacemit_power_on() in that window. */
+       pwrseq_data = pwrseq;
+
+       mutex_init(&pwrseq->pwrseq_mutex);
+       atomic_set(&pwrseq->pwrseq_count, 0);
+
+       if(pwrseq->always_on)
+               spacemit_power_on(pwrseq, 1);
+
+       return 0;
+}
+
+/*
+ * spacemit_pwrseq_remove - tear down in an order that is safe for the
+ * wlan/bt children, which call spacemit_power_on() via spacemit_get_pwrseq().
+ */
+static int spacemit_pwrseq_remove(struct platform_device *pdev)
+{
+       struct spacemit_pwrseq *pwrseq = platform_get_drvdata(pdev);
+
+       /*
+        * Remove the child devices first: their remove paths may still use
+        * the pwrseq mutex and global pointer, so those must stay valid
+        * until the children are gone.
+        */
+       of_platform_depopulate(&pdev->dev);
+
+       if (pwrseq->always_on)
+               spacemit_power_on(pwrseq, 0);
+
+       pwrseq_data = NULL;
+       mutex_destroy(&pwrseq->pwrseq_mutex);
+
+       return 0;
+}
+
+static const struct of_device_id spacemit_pwrseq_ids[] = {
+       { .compatible = "spacemit,rf-pwrseq" },
+       { /* Sentinel */ }
+};
+/* Export the match table so the module autoloads on a DT match. */
+MODULE_DEVICE_TABLE(of, spacemit_pwrseq_ids);
+
+#ifdef CONFIG_PM_SLEEP
+/* No pwrseq-specific state to save/restore yet; hooks kept as placeholders. */
+static int spacemit_pwrseq_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int spacemit_pwrseq_resume(struct device *dev)
+{
+       return 0;
+}
+
+static const struct dev_pm_ops spacemit_pwrseq_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(spacemit_pwrseq_suspend, spacemit_pwrseq_resume)
+};
+
+/* Lets the driver struct reference the PM ops unconditionally. */
+#define DEV_PM_OPS     (&spacemit_pwrseq_dev_pm_ops)
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver spacemit_pwrseq_driver = {
+       .probe          = spacemit_pwrseq_probe,
+       .remove = spacemit_pwrseq_remove,
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "spacemit-rf-pwrseq",
+               .of_match_table = spacemit_pwrseq_ids,
+               /* Wire up the sleep hooks; DEV_PM_OPS was previously unused. */
+               .pm     = DEV_PM_OPS,
+       },
+};
+
+module_platform_driver(spacemit_pwrseq_driver);
+
+MODULE_DESCRIPTION("spacemit rf pwrseq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/spacemit/spacemit-rf/spacemit-pwrseq.h b/drivers/soc/spacemit/spacemit-rf/spacemit-pwrseq.h
new file mode 100755 (executable)
index 0000000..75cd875
--- /dev/null
@@ -0,0 +1,24 @@
+#ifndef __SPACEMIT_PWRSEQ_H
+#define __SPACEMIT_PWRSEQ_H
+
+/*
+ * Shared power-sequencing state for the Spacemit RF (Wi-Fi/BT) block.
+ * A single instance is registered by spacemit-pwrseq.c and used,
+ * refcounted, by the wlan and bt sub-drivers.
+ */
+struct spacemit_pwrseq {
+       struct device           *dev;
+       bool clk_enabled;       /* external clock currently enabled */
+       u32 power_on_delay_ms;  /* settle delay after asserting power GPIOs */
+
+       struct clk *ext_clk;            /* optional external reference clock */
+       struct gpio_descs *pwr_gpios;   /* optional power-enable GPIO array */
+       struct regulator *vdd_supply;   /* optional core supply */
+       struct regulator *io_supply;    /* optional I/O supply */
+       int     vdd_voltage;    /* target vdd voltage in uV */
+       int io_voltage;         /* target io voltage in uV */
+
+       bool always_on;         /* keep powered from probe to remove */
+
+       struct mutex pwrseq_mutex;      /* serializes power on/off */
+       atomic_t pwrseq_count;          /* number of active users */
+};
+
+/* Refcounted power control for the shared RF block. */
+void spacemit_power_on(struct spacemit_pwrseq *pwrseq, bool on_off);
+/* Returns the registered pwrseq instance, or NULL before it probes. */
+struct spacemit_pwrseq *spacemit_get_pwrseq(void);
+#endif
diff --git a/drivers/soc/spacemit/spacemit-rf/spacemit-wlan.c b/drivers/soc/spacemit/spacemit-rf/spacemit-wlan.c
new file mode 100755 (executable)
index 0000000..2c6a5fa
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+ * spacemit-wlan.c -- power on/off wlan part of SoC
+ *
+ * Copyright 2023, Spacemit Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include "spacemit-pwrseq.h"
+
+/* Runtime state for the WLAN power-sequencing device. */
+struct wlan_pwrseq {
+       struct device           *dev;
+       bool power_state;       /* true while WLAN is powered on */
+       u32 power_on_delay_ms;  /* settle delay after asserting REG_ON */
+
+       struct gpio_desc *regon;        /* optional WLAN REG_ON GPIO */
+       struct gpio_desc *hostwake;     /* optional out-of-band wake GPIO */
+
+       struct mutex wlan_mutex;        /* serializes set_power callers */
+};
+
+/* Singleton, set at probe time; statics need no explicit NULL initializer. */
+static struct wlan_pwrseq *pdata;
+static int spacemit_wlan_on(struct wlan_pwrseq *pwrseq, bool on_off);
+
+/*
+ * spacemit_wlan_set_power - exported helper for the (out-of-tree) WLAN
+ * driver to power its module on/off.  No-op until this device probes or
+ * when the requested state already matches.
+ */
+void spacemit_wlan_set_power(bool on_off)
+{
+       struct wlan_pwrseq *pwrseq = pdata;
+       int ret = 0;
+
+       if (!pwrseq)
+               return;
+
+       mutex_lock(&pwrseq->wlan_mutex);
+       if (on_off != pwrseq->power_state) {
+               ret = spacemit_wlan_on(pwrseq, on_off);
+               if (ret)
+                       dev_err(pwrseq->dev, "set power failed\n");
+       }
+       mutex_unlock(&pwrseq->wlan_mutex);
+}
+EXPORT_SYMBOL_GPL(spacemit_wlan_set_power);
+
+/*
+ * spacemit_wlan_get_oob_irq - map the hostwake GPIO to a Linux IRQ number
+ * for out-of-band wake.  Returns 0 when unavailable, a negative errno on
+ * mapping failure (also logged).
+ */
+int spacemit_wlan_get_oob_irq(void)
+{
+       struct wlan_pwrseq *pwrseq = pdata;
+       int host_oob_irq = 0;
+
+       if (!pwrseq || IS_ERR(pwrseq->hostwake))
+               return 0;
+
+       host_oob_irq = gpiod_to_irq(pwrseq->hostwake);
+       if (host_oob_irq < 0)
+               dev_err(pwrseq->dev, "map hostwake gpio to virq failed\n");
+
+       return host_oob_irq;
+}
+EXPORT_SYMBOL_GPL(spacemit_wlan_get_oob_irq);
+
+/*
+ * spacemit_wlan_get_oob_irq_flags - IRQ flags the WLAN driver should use
+ * when requesting the out-of-band wake interrupt.
+ * (IRQF_* come from <linux/interrupt.h> — only included transitively here;
+ * TODO confirm the explicit include.)
+ */
+int spacemit_wlan_get_oob_irq_flags(void)
+{
+       struct wlan_pwrseq *pwrseq = pdata;
+       int oob_irq_flags;
+
+       if (!pwrseq)
+               return 0;
+
+       oob_irq_flags = (IRQF_TRIGGER_HIGH | IRQF_SHARED | IRQF_NO_SUSPEND);
+
+       return oob_irq_flags;
+}
+EXPORT_SYMBOL_GPL(spacemit_wlan_get_oob_irq_flags);
+
+/*
+ * spacemit_wlan_on - run the WLAN REG_ON sequence.
+ * Powers the shared RF block through the parent pwrseq and toggles the
+ * regon line.  Always returns 0; a missing regon GPIO makes it a no-op.
+ */
+static int spacemit_wlan_on(struct wlan_pwrseq *pwrseq, bool enable)
+{
+       struct spacemit_pwrseq *parent = spacemit_get_pwrseq();
+
+       if (!pwrseq || IS_ERR(pwrseq->regon))
+               return 0;
+
+       if (!enable) {
+               /* Drop REG_ON first, then release the shared supply. */
+               gpiod_set_value(pwrseq->regon, 0);
+               if (parent)
+                       spacemit_power_on(parent, 0);
+       } else {
+               /* Bring the shared supply up before asserting REG_ON. */
+               if (parent)
+                       spacemit_power_on(parent, 1);
+               gpiod_set_value(pwrseq->regon, 1);
+               if (pwrseq->power_on_delay_ms)
+                       msleep(pwrseq->power_on_delay_ms);
+       }
+
+       pwrseq->power_state = enable;
+       return 0;
+}
+
+/*
+ * spacemit_wlan_probe - parse the optional regon/hostwake GPIOs and the
+ * power-on delay, then publish the singleton used by the exported helpers.
+ */
+static int spacemit_wlan_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct wlan_pwrseq *pwrseq;
+
+       pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+       if (!pwrseq)
+               return -ENOMEM;
+
+       pwrseq->dev = dev;
+       platform_set_drvdata(pdev, pwrseq);
+
+       /* Both GPIOs are optional: -ENOENT/-ENOSYS mean "not described". */
+       pwrseq->regon = devm_gpiod_get(dev, "regon", GPIOD_OUT_LOW);
+       if (IS_ERR(pwrseq->regon) &&
+               PTR_ERR(pwrseq->regon) != -ENOENT &&
+               PTR_ERR(pwrseq->regon) != -ENOSYS) {
+               return PTR_ERR(pwrseq->regon);
+       }
+
+       pwrseq->hostwake = devm_gpiod_get(dev, "hostwake", GPIOD_IN);
+       if (IS_ERR(pwrseq->hostwake) &&
+               PTR_ERR(pwrseq->hostwake) != -ENOENT &&
+               PTR_ERR(pwrseq->hostwake) != -ENOSYS) {
+               return PTR_ERR(pwrseq->hostwake);
+       }
+
+       if(device_property_read_u32(dev, "power-on-delay-ms",
+                                &pwrseq->power_on_delay_ms))
+               pwrseq->power_on_delay_ms = 10;
+
+       mutex_init(&pwrseq->wlan_mutex);
+       pdata = pwrseq;
+
+       return 0;
+}
+
+/*
+ * Unpublish the singleton and destroy the lock.
+ * NOTE(review): does not power the module off if it was left on —
+ * confirm whether remove should call spacemit_wlan_on(pwrseq, false).
+ */
+static int spacemit_wlan_remove(struct platform_device *pdev)
+{
+       struct wlan_pwrseq *pwrseq = platform_get_drvdata(pdev);
+
+       mutex_destroy(&pwrseq->wlan_mutex);
+       pdata = NULL;
+
+       return 0;
+}
+
+static const struct of_device_id spacemit_wlan_ids[] = {
+       { .compatible = "spacemit,wlan-pwrseq" },
+       { /* Sentinel */ }
+};
+/* Export the match table so the module autoloads on a DT match. */
+MODULE_DEVICE_TABLE(of, spacemit_wlan_ids);
+
+#ifdef CONFIG_PM_SLEEP
+/* No wlan-specific state to save/restore yet; hooks kept as placeholders. */
+static int spacemit_wlan_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int spacemit_wlan_resume(struct device *dev)
+{
+       return 0;
+}
+
+static const struct dev_pm_ops spacemit_wlan_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(spacemit_wlan_suspend, spacemit_wlan_resume)
+};
+
+/* Lets the driver struct reference the PM ops unconditionally. */
+#define DEV_PM_OPS     (&spacemit_wlan_dev_pm_ops)
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver spacemit_wlan_driver = {
+       .probe          = spacemit_wlan_probe,
+       .remove = spacemit_wlan_remove,
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "spacemit-wlan",
+               .of_match_table = spacemit_wlan_ids,
+               /* Wire up the sleep hooks; DEV_PM_OPS was previously unused. */
+               .pm     = DEV_PM_OPS,
+       },
+};
+
+module_platform_driver(spacemit_wlan_driver);
+
+MODULE_DESCRIPTION("spacemit wlan pwrseq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/spacemit/spacemit_reboot.c b/drivers/soc/spacemit/spacemit_reboot.c
new file mode 100755 (executable)
index 0000000..7375ec3
--- /dev/null
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Spacemit k1x soc fastboot mode reboot
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/io.h>
+
+/* Magic values latched into the reboot register for the bootloader. */
+#define RESET_REG_VALUE 0x55a
+#define RESET_REG_VALUE1 0x55f
+/* Reboot arguments selecting fastboot ("bootloader") or the u-boot shell;
+ * never written, so keep the strings const. */
+static const char *rebootcmd = "bootloader";
+static const char *shellcmd = "uboot";
+
+/* Per-device state: mapped reboot register plus the restart notifier. */
+struct spacemit_reboot_ctrl {
+       void __iomem *base;
+       struct notifier_block reset_handler;
+};
+
+/*
+ * Restart notifier: when the reboot command names a special target,
+ * latch the matching magic value so the bootloader enters that mode.
+ * Always returns NOTIFY_DONE so the chain continues to the real reset.
+ */
+static int k1x_reset_handler(struct notifier_block *this, unsigned long mode,
+               void *cmd)
+{
+       struct spacemit_reboot_ctrl *info =
+               container_of(this, struct spacemit_reboot_ctrl, reset_handler);
+       const char *arg = cmd;
+
+       if (arg) {
+               if (!strcmp(arg, rebootcmd))
+                       writel(RESET_REG_VALUE, info->base);
+               else if (!strcmp(arg, shellcmd))
+                       writel(RESET_REG_VALUE1, info->base);
+       }
+
+       return NOTIFY_DONE;
+}
+
+/* DT match table; exported for module autoloading. */
+static const struct of_device_id spacemit_reboot_of_match[] = {
+       {.compatible = "spacemit,k1x-reboot"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, spacemit_reboot_of_match);
+
+/*
+ * spacemit_reboot_probe - map the reboot register and hook the restart
+ * chain.  A failed notifier registration is only warned about, since the
+ * device is still otherwise functional.
+ */
+static int spacemit_reboot_probe(struct platform_device *pdev)
+{
+       struct spacemit_reboot_ctrl *info;
+       int ret;
+
+       /* sizeof(*info) keeps the allocation tied to the variable's type. */
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(info->base))
+               return PTR_ERR(info->base);
+
+       platform_set_drvdata(pdev, info);
+
+       info->reset_handler.notifier_call = k1x_reset_handler;
+       info->reset_handler.priority = 128;
+       ret = register_restart_handler(&info->reset_handler);
+       if (ret) {
+               dev_warn(&pdev->dev, "cannot register restart handler: %d\n",
+                        ret);
+       }
+
+       return 0;
+}
+
+/* Unhook from the restart chain; the mapping is devm-managed. */
+static int spacemit_reboot_remove(struct platform_device *pdev)
+{
+       struct spacemit_reboot_ctrl *info = platform_get_drvdata(pdev);
+
+       unregister_restart_handler(&info->reset_handler);
+       return 0;
+}
+
+static struct platform_driver spacemit_reboot_driver = {
+       .driver = {
+               .name = "spacemit-reboot",
+               .of_match_table = of_match_ptr(spacemit_reboot_of_match),
+       },
+       .probe = spacemit_reboot_probe,
+       .remove = spacemit_reboot_remove,
+};
+
+module_platform_driver(spacemit_reboot_driver);
+MODULE_DESCRIPTION("K1x fastboot mode reboot");
+MODULE_AUTHOR("Spacemit");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/spacemit/v2d/Kconfig b/drivers/soc/spacemit/v2d/Kconfig
new file mode 100644 (file)
index 0000000..97de4ed
--- /dev/null
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+config SPACEMIT_V2D
+        tristate "Spacemit V2D Engine Driver"
+        depends on SYNC_FILE
+        default m
+        help
+          This enables Spacemit V2D Engine driver
diff --git a/drivers/soc/spacemit/v2d/Makefile b/drivers/soc/spacemit/v2d/Makefile
new file mode 100644 (file)
index 0000000..d99658d
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_SPACEMIT_V2D) += v2d.o
+v2d-y := v2d_drv.o v2d_hw.o v2d_iommu.o
+
+
diff --git a/drivers/soc/spacemit/v2d/csc_matrix.h b/drivers/soc/spacemit/v2d/csc_matrix.h
new file mode 100644 (file)
index 0000000..8bbcab6
--- /dev/null
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __CSC_MATRIX_h__
+#define __CSC_MATRIX_h__
+#include "v2d_drv.h"
+/*
+ * 3x4 color-space-conversion matrices in Q10 fixed point (1024 == 1.0);
+ * the fourth column of each row is the post-multiply offset.  Indexed by
+ * the V2D_CSC_MODE_* enum from v2d_drv.h, in the order annotated below.
+ *
+ * NOTE(review): this is a non-static array *definition* in a header —
+ * including it from more than one translation unit will cause duplicate
+ * symbol link errors; consider 'static const' (confirm no TU expects
+ * an extern, writable table).
+ */
+int cscmatrix[V2D_CSC_MODE_BUTT][3][4] = {
+//RGB2BT601Wide
+    {{  306,  601, 117,   0 },
+     { -173, -339, 512, 128 },
+     {  512, -429, -83, 128 }},
+
+//BT601Wide2RGB
+    {{ 1024,    0, 1436, -179 },
+     { 1024, -352, -731,  135 },
+     { 1024, 1815,    0, -227 }},
+
+//RGB2BT601Narrow
+    {{  263,  516, 100,  16 },
+     { -152, -298, 450, 128 },
+     {  450, -377, -73, 128 }},
+
+//BT601Narrow2RGB
+    {{ 1192,    0, 1634, -223 },
+     { 1192, -401, -832,  136 },
+     { 1192, 2066,    0, -277 }},
+
+//RGB2BT709Wide
+    {{  218,  732,   74,   0 },
+     { -117, -395,  512, 128 },
+     {  512, -465,  -47, 128 }},
+
+//BT709Wide2RGB
+    {{ 1024,    0, 1613, -202 },
+     { 1024, -192, -479,   84 },
+     { 1024, 1900,    0, -238 }},
+
+//RGB2BT709Narrow
+    {{  187,  629,  63, 16 },
+     { -103, -347, 450, 128},
+     {  450, -409, -41, 128}},
+
+//BT709Narrow2RGB
+    {{ 1192,    0, 1836, -248 },
+     { 1192, -218, -546,   77 },
+     { 1192, 2163,    0, -289 }},
+
+//BT601Wide2BT709Wide
+    {{ 1024, -121, -218,  42 },
+     {    0, 1043,  117, -17 },
+     {    0,   77, 1050, -13 }},
+
+//BT601Wide2BT709Narrow
+    {{ 879, -104, -187, 52 },
+     {   0,  916,  103,  1 },
+     {   0,   68,  922,  4 }},
+
+//BT601Wide2BT601Narrow
+    {{ 879,   0,   0,  16 },
+     {   0, 900,   0,  16 },
+     {   0,   0, 900,  16 }},
+
+//BT601Narrow2BT709Wide
+    {{ 1192, -138, -248,   30 },
+     {   0,  1187,  134,  -37 },
+     {   0,    88, 1195,  -32 }},
+
+//BT601Narrow2BT709Narrow
+    {{ 1024, -118, -213,  41 },
+     {    0, 1043,  117, -17 },
+     {    0,   77, 1050, -13 }},
+
+//BT601Narrow2BT601Wide
+    {{ 1192,    0,    0, -19 },
+     {    0, 1166,    0, -18 },
+     {    0,    0, 1166, -18 }},
+
+//BT709Wide2BT601Wide
+    { { 1024,  104,  201,  -38 },
+        {   0, 1014, -113,   15 },
+        {   0,  -74, 1007,   11 } },
+
+//BT709Wide2BT601Narrow
+    {{ 879,  89,  172, -17 },
+     {   0, 890, -100,  29 },
+     {   0, -65,  885,  26 }},
+
+//BT709Wide2BT709Narrow
+    {{ 879,   0,   0,  16 },
+     {   0, 900,   0,  16 },
+     {   0,   0, 900,  16 }},
+
+//BT709Narrow2BT601Wide
+    {{ 1192,  118,  229,  -62 },
+     {    0, 1154, -129,    0 },
+     {    0,  -85, 1146,   -5 }},
+
+//BT709Narrow2BT601Narrow
+    {{ 1024,  102,  196,  -37 },
+     {    0, 1014, -113,   15 },
+     {    0,  -74, 1007,   11 }},
+
+//BT709Narrow2BT709Wide
+    {{ 1192,    0,    0, -19 },
+     {    0, 1166,    0, -18 },
+     {    0,    0, 1166, -18 }},
+
+    //RGB2Grey
+    {{  218,  732,  74,   0  },
+     { -117, -395, 512,  128 },
+     {  512, -465, -47,  128 }},
+
+    //RGB2RGB
+    {{ 1024,    0,    0, 0 },
+     {    0, 1024,    0, 0 },
+     {    0,    0, 1024, 0 }}
+};
+#endif
diff --git a/drivers/soc/spacemit/v2d/v2d_drv.c b/drivers/soc/spacemit/v2d/v2d_drv.c
new file mode 100644 (file)
index 0000000..ef83645
--- /dev/null
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+* V2D driver for Spacemit
+* Copyright (C) 2023 Spacemit Co., Ltd.
+*
+*/
+
+#include "v2d_priv.h"
+#include "v2d_drv.h"
+#include "v2d_reg.h"
+#include <linux/clk-provider.h>
+#include <linux/dma-fence.h>
+#include <linux/sync_file.h>
+#include <linux/syscalls.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+
+#define  V2D_DRV_NAME          "spacemit_v2d"
+struct v2d_info *v2dInfo;
+
+#ifdef CONFIG_SPACEMIT_DEBUG
+/* Report whether the V2D engine currently has a job in flight (clk-off guard). */
+static bool check_v2d_running_status(struct v2d_info *pV2dInfo)
+{
+       bool busy = pV2dInfo->b_v2d_running;
+
+       return busy;
+}
+/* Resolve the v2d_info that embeds a given clk notifier_block. */
+#define to_devinfo(_nb) container_of(_nb, struct v2d_info, nb)
+/*
+ * Clock notifier: veto a PRE_RATE_CHANGE that would gate the core clock
+ * (new_rate == 0) while the engine still reports a running job.
+ */
+static int v2d_clkoffdet_notifier_handler(struct notifier_block *nb,
+                                         unsigned long msg, void *data)
+{
+       struct clk_notifier_data *cnd = data;
+       struct v2d_info *pV2dInfo = to_devinfo(nb);
+       /* Only an enabled clock transitioning from nonzero to 0 Hz matters. */
+       if ((__clk_is_enabled(cnd->clk)) && (msg & PRE_RATE_CHANGE) &&
+           (cnd->new_rate == 0) && (cnd->old_rate != 0)) {
+               if (pV2dInfo->is_v2d_running(pV2dInfo))
+                       return NOTIFY_BAD;
+       }
+       return NOTIFY_OK;
+}
+#endif
+
+/* Power up the V2D engine: enable the core clock, then the IO clock. */
+static void v2d_clk_on(struct v2d_info *v2d)
+{
+       clk_prepare_enable(v2d->clkcore);
+       clk_prepare_enable(v2d->clkio);
+#ifdef CONFIG_SPACEMIT_DEBUG
+       /* Bookkeeping consumed by the clk-off-detect notifier. */
+       v2d->b_v2d_running = true;
+#endif
+}
+
+/* Power down the V2D engine: gate the IO clock, then the core clock. */
+static void v2d_clk_off(struct v2d_info *v2d)
+{
+#ifdef CONFIG_SPACEMIT_DEBUG
+       v2d->b_v2d_running = false;
+#endif
+       if (__clk_is_enabled(v2d->clkio))
+               clk_disable_unprepare(v2d->clkio);
+       if (__clk_is_enabled(v2d->clkcore))
+               clk_disable_unprepare(v2d->clkcore);
+}
+
+/*
+ * sysfs "clkrate" show: print the current V2D core clock rate in Hz.
+ * Fix: clk_get_rate() returns unsigned long -- print it directly instead
+ * of truncating through a 32-bit int cast.
+ */
+static ssize_t v2d_sysfs_clkrate_get(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct v2d_info *info  = dev_get_drvdata(dev);
+       unsigned long rate = clk_get_rate(info->clkcore);
+
+       return scnprintf(buf, PAGE_SIZE, "%lu\n", rate);
+}
+
+/*
+ * sysfs "clkrate" store: set the V2D core clock rate in Hz.
+ * Fix: use the checked kstrtoul() parser instead of the deprecated
+ * simple_strtol(); malformed input now returns -EINVAL instead of being
+ * silently ignored.
+ */
+static ssize_t v2d_sysfs_clkrate_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct v2d_info *info  = dev_get_drvdata(dev);
+       unsigned long rate;
+
+       if (kstrtoul(buf, 10, &rate))
+               return -EINVAL;
+       if (rate != 0)
+               clk_set_rate(info->clkcore, rate);
+       return count;
+}
+
+/* sysfs attributes: "clkrate" (0644) shows/sets the core clock rate. */
+static struct device_attribute v2d_sysfs_files[] = {
+       __ATTR(clkrate, S_IRUGO | S_IWUSR, v2d_sysfs_clkrate_get, v2d_sysfs_clkrate_set),
+};
+
+/* System-suspend hook: intentionally a no-op. */
+static int v2d_suspend(struct device *dev)
+{
+       return 0;
+}
+
+/* System-resume hook: intentionally a no-op. */
+static int v2d_resume(struct device *dev)
+{
+       return 0;
+}
+
+/* Runtime-PM suspend: gate the core clock if it was successfully acquired. */
+static int v2d_runtime_suspend(struct device *dev)
+{
+       struct v2d_info *v2d = dev_get_drvdata(dev);
+
+       if (IS_ERR_OR_NULL(v2d->clkcore))
+               return 0;
+
+       clk_disable_unprepare(v2d->clkcore);
+       V2DLOGI("v2d: clock off.\n");
+       return 0;
+}
+
+/* Runtime-PM resume: ungate the core clock and log the rate it runs at. */
+static int v2d_runtime_resume(struct device *dev)
+{
+       struct v2d_info *v2d = dev_get_drvdata(dev);
+
+       if (!IS_ERR_OR_NULL(v2d->clkcore)) {
+               long clk_rate;
+
+               clk_prepare_enable(v2d->clkcore);
+               clk_rate = clk_get_rate(v2d->clkcore);
+               V2DLOGI("v2d: clock on, rate: %ld\n", clk_rate);
+       }
+       return 0;
+}
+
+/* PM callbacks: system hooks are no-ops; runtime hooks gate the core clock. */
+static const struct dev_pm_ops v2d_pm_ops = {
+       .suspend = v2d_suspend,
+       .resume = v2d_resume,
+       .runtime_suspend = v2d_runtime_suspend,
+       .runtime_resume = v2d_runtime_resume,
+};
+
+/*
+ * V2D interrupt handler.
+ *
+ * Runs under power_spinlock so the power-off path cannot race register
+ * access.  If no job holds a power reference the interrupt cannot be ours
+ * (clocks may already be gated), so bail out without touching registers.
+ * An error status requests a reset and schedules the done-work; a normal
+ * EOF (optionally combined with the FBC-encode bit) just schedules it.
+ */
+static irqreturn_t v2d_irq_handler(int32_t irq, void *dev_id)
+{
+       unsigned long flags =  0;
+       uint32_t irqstatus = 0;
+       uint32_t irqerr = 0;
+
+       struct v2d_info *info = (struct v2d_info *)dev_id;
+
+       if (!info) {
+               V2DLOGE("v2d info is NULL!\n");
+               return IRQ_NONE;
+       }
+
+       spin_lock_irqsave(&info->power_spinlock, flags);
+       if (!info->refcount)
+       {
+               spin_unlock_irqrestore(&info->power_spinlock, flags);
+               V2DLOGE("v2d power is off !\n");
+               return IRQ_NONE;
+       }
+       /* Reset the IOMMU interrupt, then latch and clear the V2D status. */
+       iommu_irq_reset();
+       irqstatus = v2d_irq_status();
+       irqerr = v2d_irqerr_status();
+       v2d_irqerr_clear(irqerr);
+       v2d_irq_clear(irqstatus);
+       if (irqerr){
+               V2DLOGE("%s irq %d irq_status 0x%x,irqerr 0x%x\n", __func__, irq, irqstatus, irqerr);
+               /* Error: ask the done-work to soft-reset the engine. */
+               info->do_reset = 1;
+               queue_work(info->v2d_job_done_wq, &info->work);
+       } else if ((irqstatus == V2D_EOF_IRQ_STATUS) || (irqstatus == (V2D_EOF_IRQ_STATUS|V2D_FBCENC_IRQ_STATUS))) {
+               queue_work(info->v2d_job_done_wq, &info->work);
+       }
+       spin_unlock_irqrestore(&info->power_spinlock, flags);
+       return IRQ_HANDLED;
+}
+
+
+/* Spinlock shared by all V2D fences (required by dma_fence_init()). */
+static DEFINE_SPINLOCK(v2d_fence_lock);
+/* dma_fence_ops callback: driver name shown in fence debug output. */
+static const char *v2d_fence_get_driver_name(struct dma_fence *fence)
+{
+       return "v2d";
+}
+
+/* dma_fence_ops callback: timeline name shown in fence debug output. */
+static const char *v2d_fence_get_timeline_name(struct dma_fence *fence)
+{
+       return "v2d.timeline";
+}
+
+/* No extra work is needed to enable signalling; always report success. */
+static bool v2d_fence_enable_signaling(struct dma_fence *fence)
+{
+       return true;
+}
+
+/* Render the fence's sequence number for debugfs/trace output. */
+static void v2d_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+       snprintf(str, size, "%llu", fence->seqno);
+}
+
+/* Ops for V2D completion fences; waiting uses the default implementation. */
+const struct dma_fence_ops v2d_fence_ops = {
+       .wait = dma_fence_default_wait,
+       .get_driver_name = v2d_fence_get_driver_name,
+       .get_timeline_name = v2d_fence_get_timeline_name,
+       .enable_signaling = v2d_fence_enable_signaling,
+       .fence_value_str = v2d_fence_fence_value_str
+};
+
+int v2d_fence_generate(struct v2d_info *info, struct dma_fence **fence, int *fence_fd)
+{
+       struct sync_file *sync_file = NULL;
+       int fd;
+
+       struct dma_fence *dmaFence;
+       dmaFence = kzalloc(sizeof(*dmaFence), GFP_KERNEL);
+       if (!dmaFence)
+               return -ENOMEM;
+
+       dma_fence_init(dmaFence, &v2d_fence_ops, &v2d_fence_lock, info->context, atomic_inc_return(&info->seqno));
+       *fence = dmaFence;
+       /* create a sync_file fd representing the fence */
+       #ifdef CONFIG_SYNC_FILE
+       sync_file = sync_file_create(*fence);
+       #endif
+       if (!sync_file) {
+               dma_fence_put(*fence);
+               return -ENOMEM;
+       }
+       fd = get_unused_fd_flags(O_CLOEXEC);
+       *fence_fd = fd;
+       if(fd<0)
+       {
+               dma_fence_put(*fence);
+               fput(sync_file->file);
+               return -ENOMEM;
+       }
+       fd_install(fd, sync_file->file);
+       return 0;
+}
+
+/*
+ * Wait on an acquire fence: try a short timeout first, retry once with the
+ * long timeout, and warn if the fence still has not signalled (or the wait
+ * returned an error).
+ */
+void v2d_fence_wait(struct v2d_info *info, struct dma_fence *fence)
+{
+       int ret;
+
+       ret = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(V2D_SHORT_FENCE_TIMEOUT));
+       if (ret > 0)
+               return;
+
+       if (ret == 0)
+               ret = dma_fence_wait_timeout(fence, false, msecs_to_jiffies(V2D_LONG_FENCE_TIMEOUT));
+
+       if (ret <= 0)
+               dev_warn(&info->pdev->dev, "error waiting on fence: %d\n", ret);
+}
+
+/*
+ * Free a pending-post element and its embedded task descriptor.
+ * kfree(NULL) is a no-op, so no separate NULL check on pTask is needed.
+ */
+void kfree_v2d_post_task(struct v2d_pending_post_task *element)
+{
+       if (!element)
+               return;
+       kfree(element->pTask);
+       kfree(element);
+}
+
+#define V2D_TBU_BASE_VA (0x80000000)
+#define V2D_TBU_VA_STEP (0x2000000)
+/*
+ * Resolve the dma-bufs referenced by a submitted task (layer0, layer1,
+ * destination, mask) and stash the handles in cfg->info[].  tbu_id selects
+ * the IOMMU TBU window used later for non-contiguous buffers (-1 means no
+ * fixed assignment).
+ *
+ * Returns 0 on success, -1 if any dma_buf_get() fails.
+ *
+ * Fixes: the destination error message wrongly claimed "layer1", and the
+ * mask error message printed a stale fd from an earlier branch.
+ *
+ * NOTE(review): on failure, dma-bufs already stored in cfg->info[] are not
+ * released here -- confirm the caller's error path drops them.
+ */
+static int v2d_get_dmabuf(struct v2d_info *v2dinfo, struct v2d_pending_post_task *cfg)
+{
+       V2D_SUBMIT_TASK_S *pTask = cfg->pTask;
+       V2D_SURFACE_S *pLayer0, *pLayer1, *pDst, *pMask;
+       struct dma_buf *dmabuf = NULL;
+       int fd;
+
+       pLayer0 = &pTask->param.layer0;
+       pLayer1 = &pTask->param.layer1;
+       pDst    = &pTask->param.dst;
+       pMask   = &pTask->param.mask;
+
+       if (pLayer0->fbc_enable || pLayer0->fd) {
+               cfg->info[0].valid = 1;
+               cfg->info[0].tbu_id = 0;
+               fd = pLayer0->fbc_enable ? pLayer0->fbcDecInfo.fd : pLayer0->fd;
+               dmabuf = dma_buf_get(fd);
+               if (IS_ERR(dmabuf)) {
+                       pr_err("v2d layer0 get dmabuf fail fd:%d\n", fd);
+                       return -1;
+               }
+               cfg->info[0].dmabuf = dmabuf;
+       }
+
+       if (pLayer1->fbc_enable || pLayer1->fd) {
+               cfg->info[1].valid = 1;
+               cfg->info[1].tbu_id = -1;
+               fd = pLayer1->fbc_enable ? pLayer1->fbcDecInfo.fd : pLayer1->fd;
+               dmabuf = dma_buf_get(fd);
+               if (IS_ERR(dmabuf)) {
+                       pr_err("v2d layer1 get dmabuf fail fd:%d\n", fd);
+                       return -1;
+               }
+               cfg->info[1].dmabuf = dmabuf;
+       }
+
+       /* Destination is always present. */
+       cfg->info[2].valid = 1;
+       cfg->info[2].tbu_id = 1;
+       fd = pDst->fbc_enable ? pDst->fbcEncInfo.fd : pDst->fd;
+       dmabuf = dma_buf_get(fd);
+       if (IS_ERR(dmabuf)) {
+               pr_err("v2d dst get dmabuf fail fd:%d\n", fd);
+               return -1;
+       }
+       cfg->info[2].dmabuf = dmabuf;
+
+       if (pMask->fd) {
+               cfg->info[3].valid = 1;
+               cfg->info[3].tbu_id = -1;
+               dmabuf = dma_buf_get(pMask->fd);
+               if (IS_ERR(dmabuf)) {
+                       pr_err("v2d mask get dmabuf fail fd:%d\n", pMask->fd);
+                       return -1;
+               }
+               cfg->info[3].dmabuf = dmabuf;
+       }
+       return 0;
+}
+
+/*
+ * Attach and map a dma-buf and compute the device address the engine will
+ * use.  A single-entry sg table is used directly (physically contiguous
+ * buffer); otherwise the pages are mapped through the V2D IOMMU at the
+ * fixed virtual window of the entry's TBU.
+ *
+ * Returns 0 on success and stores the address in *paddr.  On failure the
+ * partial attach/map is undone, the dma-buf reference is dropped and -1 is
+ * returned.
+ *
+ * Fix: error paths previously left stale/IS_ERR pointers in *pInfo, which
+ * the later v2d_put_dmabuf() pass would have dereferenced and released a
+ * second time; they are now cleared.
+ */
+static int get_addr_from_dmabuf(struct v2d_info *v2dinfo, struct v2d_dma_buf_info *pInfo, dma_addr_t *paddr)
+{
+       struct device *dev = &v2dinfo->pdev->dev;
+       struct sg_table *sgt;
+       dma_addr_t addr;
+       int ret, flags;
+       size_t size = 0;
+       ret = 0;
+
+       pInfo->attach  = dma_buf_attach(pInfo->dmabuf, dev);
+       if (IS_ERR(pInfo->attach)) {
+               pr_err("v2d get dma buf attach fail\n");
+               goto err_dmabuf_put;
+       }
+       pInfo->sgtable = dma_buf_map_attachment(pInfo->attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(pInfo->sgtable)) {
+               pr_err("v2d get dma buf map attachment fail\n");
+               goto err_dmabuf_detach;
+       }
+       sgt = pInfo->sgtable;
+       flags = IOMMU_READ | IOMMU_CACHE | IOMMU_WRITE;
+
+       if (sgt->nents == 1) {
+               /* Contiguous: use the bus address directly. */
+               addr = sg_dma_address(sgt->sgl);
+       } else {
+               /* Scattered: map through the TBU's fixed virtual window. */
+               addr = V2D_TBU_BASE_VA + (dma_addr_t)(pInfo->tbu_id)*V2D_TBU_VA_STEP;
+               size = v2d_iommu_map_sg(addr, sgt->sgl, sgt->orig_nents, flags);
+               if (!size) {
+                       pr_err("v2d iommu map sgtable fail\n");
+                       goto err_dmabuf_unmap;
+               }
+       }
+       *paddr = addr;
+       return ret;
+
+err_dmabuf_unmap:
+       dma_buf_unmap_attachment(pInfo->attach, pInfo->sgtable, DMA_BIDIRECTIONAL);
+err_dmabuf_detach:
+       dma_buf_detach(pInfo->dmabuf, pInfo->attach);
+err_dmabuf_put:
+       dma_buf_put(pInfo->dmabuf);
+       /* Clear handles so cleanup passes do not release them again. */
+       pInfo->sgtable = NULL;
+       pInfo->attach = NULL;
+       pInfo->dmabuf = NULL;
+       return -1;
+
+}
+
+/*
+ * Resolve the device addresses for every valid dma-buf of a job and write
+ * them into the task's surface descriptors (split into low/high 32-bit
+ * halves).  The UV plane address is derived from the Y address plus the
+ * surface's offset when one is set.
+ *
+ * Returns 0 on success, or the first failing get_addr_from_dmabuf() code.
+ *
+ * Fix: the layer1 UV address was computed from pLayer0's offset and Y
+ * address (copy-paste bug); it now uses pLayer1's own fields.
+ */
+static int v2d_get_dma_addr(struct v2d_info *v2dinfo, struct v2d_pending_post_task *cfg)
+{
+       V2D_SUBMIT_TASK_S *pTask = cfg->pTask;
+       V2D_SURFACE_S *pLayer0, *pLayer1, *pDst, *pMask;
+       dma_addr_t addr;
+       struct v2d_dma_buf_info *pInfo;
+       int ret = 0;
+       pLayer0 = &pTask->param.layer0;
+       pLayer1 = &pTask->param.layer1;
+       pDst    = &pTask->param.dst;
+       pMask   = &pTask->param.mask;
+
+       pInfo = &cfg->info[0];
+       if (pInfo->valid) {
+               ret = get_addr_from_dmabuf(v2dinfo, pInfo, &addr);
+               if (ret) return ret;
+               if (pLayer0->fbc_enable) {
+                       pLayer0->fbcDecInfo.headerAddr_l = addr & 0xFFFFFFFF;
+                       pLayer0->fbcDecInfo.headerAddr_h = (addr >> 32) & 0xFFFFFFFF;
+               } else {
+                       pLayer0->phyaddr_y_l  = addr & 0xFFFFFFFF;
+                       pLayer0->phyaddr_y_h  = (addr >> 32) & 0xFFFFFFFF;
+                       pLayer0->phyaddr_uv_l = pLayer0->offset ? (pLayer0->phyaddr_y_l+pLayer0->offset) : 0;
+                       pLayer0->phyaddr_uv_h = pLayer0->offset ? pLayer0->phyaddr_y_h : 0;
+               }
+       }
+
+       pInfo = &cfg->info[1];
+       if (pInfo->valid) {
+               ret = get_addr_from_dmabuf(v2dinfo, pInfo, &addr);
+               if (ret) return ret;
+               if (pLayer1->fbc_enable) {
+                       pLayer1->fbcDecInfo.headerAddr_l = addr & 0xFFFFFFFF;
+                       pLayer1->fbcDecInfo.headerAddr_h = (addr >> 32) & 0xFFFFFFFF;
+               } else {
+                       pLayer1->phyaddr_y_l  = addr & 0xFFFFFFFF;
+                       pLayer1->phyaddr_y_h  = (addr >> 32) & 0xFFFFFFFF;
+                       pLayer1->phyaddr_uv_l = pLayer1->offset ? (pLayer1->phyaddr_y_l+pLayer1->offset) : 0;
+                       pLayer1->phyaddr_uv_h = pLayer1->offset ? pLayer1->phyaddr_y_h : 0;
+               }
+       }
+
+       pInfo = &cfg->info[2];
+       if (pInfo->valid) {
+               ret = get_addr_from_dmabuf(v2dinfo, pInfo, &addr);
+               if (ret) return ret;
+               if (pDst->fbc_enable) {
+                       pDst->fbcEncInfo.headerAddr_l  = addr & 0xFFFFFFFF;
+                       pDst->fbcEncInfo.headerAddr_h  = (addr >> 32) & 0xFFFFFFFF;
+                       pDst->fbcEncInfo.payloadAddr_l = pDst->fbcEncInfo.headerAddr_l + pDst->fbcEncInfo.offset;
+                       pDst->fbcEncInfo.payloadAddr_h = pDst->fbcEncInfo.headerAddr_h;
+               } else {
+                       pDst->phyaddr_y_l  = addr & 0xFFFFFFFF;
+                       pDst->phyaddr_y_h  = (addr >> 32) & 0xFFFFFFFF;
+                       pDst->phyaddr_uv_l = pDst->offset ? (pDst->phyaddr_y_l+pDst->offset) : 0;
+                       pDst->phyaddr_uv_h = pDst->offset ? pDst->phyaddr_y_h : 0;
+               }
+       }
+
+       pInfo = &cfg->info[3];
+       if (pInfo->valid) {
+               ret = get_addr_from_dmabuf(v2dinfo, pInfo, &addr);
+               if (ret) return ret;
+               pMask->phyaddr_y_l = addr & 0xFFFFFFFF;
+               pMask->phyaddr_y_h = (addr >> 32) & 0xFFFFFFFF;
+       }
+       return ret;
+}
+
+/*
+ * Release every dma-buf held by a finished job: unmap the attachment,
+ * detach, and drop the buffer reference.  Entries whose attach/map never
+ * completed (any NULL field) are skipped.  Finally close the IOMMU mapping
+ * session opened while resolving addresses.
+ */
+static void v2d_put_dmabuf(struct v2d_info *v2dinfo, struct v2d_pending_post_task *cfg)
+{
+       int i;
+       struct dma_buf *dmabuf;
+       struct dma_buf_attachment *attach;
+       struct sg_table *sg_table;
+       struct v2d_dma_buf_info *pInfo;
+
+       for (i=0; i<4; i++) {
+               pInfo    = &cfg->info[i];
+               dmabuf   = pInfo->dmabuf;
+               attach   = pInfo->attach;
+               sg_table = pInfo->sgtable;
+
+               if (dmabuf && attach && sg_table) {
+                       dma_buf_unmap_attachment(attach, sg_table, DMA_BIDIRECTIONAL);
+                       dma_buf_detach(dmabuf, attach);
+                       dma_buf_put(dmabuf);
+               }
+       }
+       v2d_iommu_map_end();
+}
+
+int v2d_job_submit(struct v2d_info *info, V2D_SUBMIT_TASK_S *psubmit)
+{
+       int err = 0;
+       V2D_SUBMIT_TASK_S *pTask = NULL;
+       struct v2d_pending_post_task *cfg = NULL;
+       struct dma_fence *fence = NULL;
+       pTask = kzalloc(sizeof(V2D_SUBMIT_TASK_S), GFP_KERNEL);
+       if (!pTask){
+               err = -ENOMEM;
+               goto error;
+       }
+       memset(pTask,0,sizeof(V2D_SUBMIT_TASK_S));
+       if(copy_from_user(pTask,(uint32_t *)psubmit, sizeof(V2D_SUBMIT_TASK_S)) != 0) {
+               err = -EINVAL;
+               goto error;
+       }
+       if(v2d_fence_generate(info, &fence, &pTask->completeFencefd))
+       {
+               printk(KERN_ERR "%s" "-%s-Failed to generate fence(%pf),fd(%d)-slot1\n", "v2d", __func__,fence, pTask->completeFencefd);
+               err = -EINVAL;
+               goto error;
+       }
+       if (0 != copy_to_user((__user uint8_t *)psubmit+offsetof(V2D_SUBMIT_TASK_S, completeFencefd), &pTask->completeFencefd, sizeof(int32_t))) {
+               pTask->completeFencefd = -1;
+               err = -EINVAL;
+               goto error;
+       }
+       mutex_lock(&info->client_lock);
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg){
+               mutex_unlock(&info->client_lock);
+               err = -ENOMEM;
+               goto error;
+       }
+       memset(cfg,0,sizeof(struct v2d_pending_post_task));
+       INIT_LIST_HEAD(&cfg->head);
+       cfg->pTask = pTask;
+       if (pTask->completeFencefd>=0)
+       {
+               cfg->pCompleteFence = fence;
+       }
+       if (pTask->acquireFencefd>=0)
+       {
+               #ifdef CONFIG_SYNC_FILE
+               cfg->pAcquireFence = sync_file_get_fence(cfg->pTask->acquireFencefd);
+               #endif
+       }
+       err = v2d_get_dmabuf(info, cfg);
+       if (err) {
+               mutex_unlock(&info->client_lock);
+               kfree(cfg);
+               goto error;
+       }
+       mutex_lock(&info->post_lock);
+       list_add_tail(&cfg->head, &info->post_list);
+       kthread_queue_work(&info->post_worker, &info->post_work);
+       mutex_unlock(&info->post_lock);
+       mutex_unlock(&info->client_lock);
+       return 0;
+
+error:
+       if(pTask){
+               kfree(pTask);
+       }
+       return err;
+}
+
+/*
+ * Done-work (queued from the IRQ handler): for every finished job on the
+ * free list, signal and drop its completion fence, release its dma-bufs,
+ * drop the power refcount (resetting the engine first if the IRQ flagged
+ * an error), gate the IRQ and clocks when the last job drains, and free
+ * the element, releasing one submission-semaphore slot.
+ */
+void v2d_work_done(struct work_struct *data)
+{
+       struct v2d_pending_post_task *element, *tmp;
+       int refcount;
+       struct dma_fence *pCompleteFence = NULL;
+       struct v2d_info *info = container_of(data, struct v2d_info, work);
+
+       mutex_lock(&info->free_lock);
+       list_for_each_entry_safe(element, tmp, &info->free_list, head) {
+               if (element->pTask->completeFencefd>=0)
+               {
+                       pCompleteFence = element->pCompleteFence;
+                       if(NULL != pCompleteFence) {
+                               dma_fence_signal(pCompleteFence);
+                               dma_fence_put(pCompleteFence);
+                       }
+               }
+               v2d_put_dmabuf(info, element);
+               mutex_lock(&info->power_mutex);
+               info->refcount--;
+               refcount = info->refcount;
+               if (info->do_reset) {
+                       v2d_golbal_reset();
+                       info->do_reset = 0;
+               }
+               /* Last job gone: gate IRQ and clocks. */
+               if(!refcount)
+               {
+                       v2d_irq_disable();
+                       v2d_clk_off(info);
+               }
+               mutex_unlock(&info->power_mutex);
+               list_del(&element->head);
+               kfree_v2d_post_task(element);
+               up(&info->sem_lock);
+       }
+       mutex_unlock(&info->free_lock);
+}
+
+/*
+ * Timeout recovery: like v2d_work_done(), but unconditionally dumps the
+ * raw IRQ status and soft-resets the engine for every stuck job, then
+ * flushes the done-workqueue.  Called by the posting thread when it times
+ * out waiting for a submission slot.
+ */
+void do_softreset(void)
+{
+       struct v2d_pending_post_task *element, *tmp;
+       struct dma_fence *pCompleteFence = NULL;
+       int refcount;
+       unsigned long flags =  0;
+       struct v2d_info *info = v2dInfo;
+
+       mutex_lock(&info->free_lock);
+       list_for_each_entry_safe(element, tmp, &info->free_list, head) {
+               if (element->pTask->completeFencefd>=0)
+               {
+                       pCompleteFence = element->pCompleteFence;
+                       if(NULL != pCompleteFence) {
+                               dma_fence_signal(pCompleteFence);
+                               dma_fence_put(pCompleteFence);
+                       }
+               }
+               v2d_put_dmabuf(info, element);
+               mutex_lock(&info->power_mutex);
+               /* power_spinlock also excludes the IRQ handler during reset. */
+               spin_lock_irqsave(&info->power_spinlock, flags);
+               info->refcount--;
+               refcount = info->refcount;
+               v2d_dump_irqraw_status();
+               v2d_golbal_reset();
+               spin_unlock_irqrestore(&info->power_spinlock, flags);
+               if(!refcount)
+               {
+                       v2d_irq_disable();
+                       v2d_clk_off(info);
+               }
+               mutex_unlock(&info->power_mutex);
+               list_del(&element->head);
+               kfree_v2d_post_task(element);
+               up(&info->sem_lock);
+       }
+       mutex_unlock(&info->free_lock);
+       flush_workqueue(info->v2d_job_done_wq);
+}
+
+/*
+ * Posting-kthread work: for each queued job, take a submission slot
+ * (soft-resetting the engine on a 2.5 s timeout), wait on the optional
+ * acquire fence, move the job to the free list, bump the power refcount
+ * (powering clocks/IRQ on for the first job), then resolve DMA addresses
+ * and program the hardware.  An address-resolution failure skips the
+ * hardware and goes straight to the done-work for cleanup.
+ */
+void v2d_post_work_func(struct kthread_work *work)
+{
+       struct v2d_info *info = container_of(work, struct v2d_info, post_work);
+       struct v2d_pending_post_task *post, *next;
+       int refcount;
+       unsigned long flags = 0;
+       struct dma_fence *pAcquireFence = NULL;
+       mutex_lock(&info->post_lock);
+       list_for_each_entry_safe(post, next, &info->post_list, head) {
+               while(down_timeout(&info->sem_lock, msecs_to_jiffies(2500)))
+               {
+                       printk(KERN_ERR "%s hang do softreset\n", "v2d");
+                       do_softreset();
+               }
+               if (post->pTask->acquireFencefd>=0)
+               {
+                       pAcquireFence = post->pAcquireFence;
+                       v2d_fence_wait(info, pAcquireFence);
+                       dma_fence_put(pAcquireFence);
+               }
+               list_del(&post->head);
+               mutex_lock(&info->free_lock);
+               list_add_tail(&post->head, &info->free_list);
+               mutex_unlock(&info->free_lock);
+               mutex_lock(&info->power_mutex);
+               spin_lock_irqsave(&info->power_spinlock, flags);
+               refcount = info->refcount;
+               info->refcount++;
+               spin_unlock_irqrestore(&info->power_spinlock, flags);
+               /* First job in: power up clocks and enable the IRQ. */
+               if(!refcount)
+               {
+                       v2d_clk_on(info);
+                       v2d_irq_enable();
+               }
+               if (v2d_get_dma_addr(info, post)) {
+                       queue_work(info->v2d_job_done_wq, &info->work);
+               } else {
+                       config_v2d_hw(post->pTask);
+               }
+               mutex_unlock(&info->power_mutex);
+       }
+       mutex_unlock(&info->post_lock);
+}
+
+
+static DEFINE_MUTEX(v2d_wr_lock);
+static DEFINE_MUTEX(v2d_dev_lock);
+static int v2d_dev_ref = 0;
+/* Open: hand every client the single global v2d_info and count the open. */
+static int v2d_dev_open(struct inode *inode, struct file *filp)
+{
+       mutex_lock(&v2d_dev_lock);
+       filp->private_data = v2dInfo;
+       v2d_dev_ref++;
+       mutex_unlock(&v2d_dev_lock);
+       return 0;
+}
+
+/* Release: drop this client's open count. */
+static int v2d_dev_release(struct inode *inode, struct file *filp)
+{
+       mutex_lock(&v2d_dev_lock);
+       v2d_dev_ref--;
+       mutex_unlock(&v2d_dev_lock);
+       return 0;
+}
+
+/* Read: the device produces no data; always report EOF. */
+ssize_t v2d_dev_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+       return 0;
+}
+
+/*
+ * Write: treat the user buffer as a V2D_SUBMIT_TASK_S and submit it as a
+ * job.  Returns count on success, -EIO when submission fails.
+ */
+ssize_t v2d_dev_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
+{
+       struct v2d_info *v2d = filp->private_data;
+       int ret;
+
+       mutex_lock(&v2d_wr_lock);
+       ret = v2d_job_submit(v2d, (V2D_SUBMIT_TASK_S*)buf);
+       mutex_unlock(&v2d_wr_lock);
+       if (ret) {
+               V2DLOGE("v2d faild to write msg %d\n", ret);
+               return -EIO;
+       }
+       return count;
+}
+
+/*
+ * Map device memory into userspace, uncached, via remap_pfn_range().
+ * NOTE(review): the bounds check against P4D_SHIFT does not obviously
+ * correspond to the device's MMIO window size -- confirm the intended
+ * limit.
+ */
+static int v2d_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       int err;
+       unsigned long size = vma->vm_end - vma->vm_start;
+
+       if ((vma->vm_pgoff + (size >> PAGE_SHIFT)) > (1 + (P4D_SHIFT >> PAGE_SHIFT))) {
+                       pr_err("out of physical memory\n");
+                       return -EINVAL;
+       }
+
+       /* Device memory must be mapped uncached. */
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       vma->vm_ops = NULL;
+       err = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                                                                       size, vma->vm_page_prot);
+       if (err) {
+                       pr_err("failed to v2d map memroy\n");
+                       return -ENXIO;
+       }
+
+       return 0;
+}
+
+/* File operations exposed through the "v2d_dev" misc character device. */
+static const struct file_operations v2d_dev_fops = {
+       .owner  = THIS_MODULE,
+       .open   = v2d_dev_open,
+       .release = v2d_dev_release,
+       .read   = v2d_dev_read,
+       .write  = v2d_dev_write,
+       .poll   = NULL,
+       .mmap   = v2d_mmap,
+};
+
+extern struct v2d_iommu_res sV2dIommuRes;
+/*
+ * Allocate the shared TTB backing store and initialise each TBU instance.
+ * All TTBs come from one dma_alloc_coherent() region, sliced in
+ * MAX_SIZE_PER_TTB steps.  Returns 0 on success, -1 on allocation failure.
+ *
+ * Fix: removed the always-true "i < TBU_INSTANCES_NUM" re-check inside the
+ * loop (the loop bound already guarantees it), which also removes the
+ * theoretical NULL-tbu dereference path it implied.
+ */
+static int v2d_iommu_init(struct platform_device *pdev, void __iomem *base)
+{
+       struct device *dev = &pdev->dev;
+       int i;
+       struct v2d_iommu_res *v2d_res = &sV2dIommuRes;
+       void *va_temp;
+       dma_addr_t pa_temp;
+
+       v2d_res->base = base;
+       v2d_res->page_size = SZ_4K;
+       va_temp = dma_alloc_coherent(dev, MAX_SIZE_PER_TTB*TBU_INSTANCES_NUM, &pa_temp, GFP_KERNEL|GFP_DMA);
+       if (!va_temp) {
+               pr_err("v2d iommu no memory for %d tbu_ins!\n",
+                       TBU_INSTANCES_NUM);
+               return -1;
+       }
+
+       for (i = 0; i < TBU_INSTANCES_NUM; i++) {
+               struct tbu_instance *tbu = &v2d_res->tbu_ins[i];
+
+               tbu->ins_id = i;
+               tbu->ttb_va = va_temp + i * MAX_SIZE_PER_TTB;
+               tbu->ttb_pa = pa_temp + i * MAX_SIZE_PER_TTB;
+       }
+
+       v2d_res->va_base = BASE_VIRTUAL_ADDRESS;
+       v2d_res->va_end  = BASE_VIRTUAL_ADDRESS + TBU_NUM * VA_STEP_PER_TBU;
+       v2d_res->time_out_cycs = DEFAULT_TIMEOUT_CYCS;
+
+       return 0;
+}
+
+/* Free the coherent TTB region allocated by v2d_iommu_init(). */
+static int v2d_iommu_deinit(struct platform_device *pdev)
+{
+       struct v2d_iommu_res *res = &sV2dIommuRes;
+
+       dma_free_coherent(&pdev->dev, MAX_SIZE_PER_TTB*TBU_INSTANCES_NUM,
+                         res->tbu_ins[0].ttb_va, res->tbu_ins[0].ttb_pa);
+       return 0;
+}
+
+
+static u64 v2d_dmamask = 0xffffffffffUL;
+static int v2d_probe(struct platform_device *pdev)
+{
+       struct v2d_info *info;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       void __iomem *base;
+       int i, rval = 0;
+       struct sched_param param;
+       int ret;
+
+       info = devm_kzalloc(dev, sizeof(struct v2d_info), GFP_KERNEL);
+       if (info == NULL) {
+               return -ENOMEM;
+       }
+       dev->dma_mask = &v2d_dmamask;
+       dev->coherent_dma_mask = 0xffffffffffull;
+
+       info->clkcore = devm_clk_get(dev, "v2d-core");
+       if (IS_ERR(info->clkcore)) {
+               V2DLOGE("Could not get v2d core clk!\n");
+               return -EINVAL;
+       }
+
+       info->v2d_reset = devm_reset_control_get_optional_shared(&pdev->dev, "v2d_reset");
+       if (IS_ERR_OR_NULL(info->v2d_reset)) {
+               V2DLOGE("Could not get v2d reset!\n");
+               return -EINVAL;
+       }
+
+       ret = reset_control_deassert(info->v2d_reset);
+       if (ret < 0) {
+               V2DLOGI("Failed to deassert v2d_reset\n");
+       }
+       clk_prepare_enable(info->clkcore);
+       clk_set_rate(info->clkcore, 409600000);
+
+       info->clkio = devm_clk_get(dev, "v2d-io");
+       if (IS_ERR(info->clkio)) {
+               V2DLOGE("Could not get v2d io clk!\n");
+               return -EINVAL;
+       }
+       /* get v2d regs base */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "v2dreg");
+       if (res == NULL) {
+                return -ENOENT;
+       }
+       base = devm_ioremap(dev, res->start, resource_size(res));
+       if (base == NULL) {
+               return -EIO;
+       }
+       info->v2dreg_iomem_base = base;
+       info->irq = platform_get_irq(pdev, 0);
+       //V2DLOGI("v2d irq num = %d\n", info->irq);
+       if (info->irq < 0) {
+               return -ENOENT;
+       }
+       rval = devm_request_irq(dev, info->irq, v2d_irq_handler, IRQF_SHARED, "v2d-irq", info);
+       if (rval) {
+               return rval;
+       }
+       v2d_iommu_init(pdev, base);
+
+       for (i = 0; i < ARRAY_SIZE(v2d_sysfs_files); i++) {
+               rval = device_create_file(dev, &v2d_sysfs_files[i]);
+               if (rval)
+                       return rval;
+       }
+       mutex_init(&info->power_mutex);
+       spin_lock_init(&info->power_spinlock);
+       info->refcount = 0;
+       info->pdev = pdev;
+       platform_set_drvdata(pdev, info);
+       info->mdev.minor  = MISC_DYNAMIC_MINOR;
+       info->mdev.name   = "v2d_dev";
+       info->mdev.fops   = &v2d_dev_fops;
+       rval = misc_register(&info->mdev);
+       if (rval) {
+               V2DLOGE("failed register v2d misc device ret=%d\n", rval);
+               goto err_misc;
+       }
+       sema_init(&info->sem_lock, 1);
+       info->context = dma_fence_context_alloc(1);
+       info->v2d_job_done_wq = alloc_workqueue("spacemit_v2d", WQ_HIGHPRI | WQ_UNBOUND, 1);
+       if (NULL == info->v2d_job_done_wq) {
+               V2DLOGE( "%s: alloc_workqueue failed\n", __func__);
+               goto err;
+       }
+       INIT_WORK(&info->work, v2d_work_done);
+       mutex_init(&info->client_lock);
+       INIT_LIST_HEAD(&info->post_list);
+       mutex_init(&info->post_lock);
+       INIT_LIST_HEAD(&info->free_list);
+       mutex_init(&info->free_lock);
+       kthread_init_worker(&info->post_worker);
+       info->post_thread = kthread_run(kthread_worker_fn, &info->post_worker, "v2d");
+       if (IS_ERR(info->post_thread)) {
+               rval = PTR_ERR(info->post_thread);
+               info->post_thread = NULL;
+               V2DLOGE("%s: failed to run config posting thread: %d\n", __func__, rval);
+               goto err;
+       }
+       param.sched_priority = 1;
+       sched_setscheduler(info->post_thread, SCHED_FIFO, &param);
+       kthread_init_work(&info->post_work, v2d_post_work_func);
+#ifdef CONFIG_SPACEMIT_DEBUG
+       info->is_v2d_running = check_v2d_running_status;
+       info->nb.notifier_call = v2d_clkoffdet_notifier_handler;
+       clk_notifier_register(info->clkcore, &info->nb);
+#endif
+       //V2DLOGI("probe v2d driver done!\n");
+       v2dInfo = info;
+
+       return 0;
+
+err:
+       if(info->post_thread)
+                kthread_stop(info->post_thread);
+       if(info->v2d_job_done_wq)
+               destroy_workqueue(info->v2d_job_done_wq);
+
+err_misc:
+       for (i = 0; i < ARRAY_SIZE(v2d_sysfs_files); i++) {
+               device_remove_file(dev, &v2d_sysfs_files[i]);
+       }
+       misc_deregister(&info->mdev);
+
+       return rval;
+}
+
+/*
+ * Remove: tear down in reverse probe order -- IOMMU TTBs, IRQ, posting
+ * thread, sysfs files, clk notifier, misc device, workqueue, clocks, and
+ * finally assert the reset and clear the global v2dInfo.
+ */
+static int v2d_remove(struct platform_device *pdev)
+{
+       struct v2d_info *info = platform_get_drvdata(pdev);
+       struct device *dev = &info->pdev->dev;
+       int i;
+       int ret;
+
+       //V2DLOGI("remove v2d driver!\n");
+       v2d_iommu_deinit(pdev);
+       devm_free_irq(dev, info->irq, info);
+       /* Drain any queued posting work before stopping the thread. */
+       kthread_flush_worker(&info->post_worker);
+       kthread_stop(info->post_thread);
+       for (i = 0; i < ARRAY_SIZE(v2d_sysfs_files); i++) {
+               device_remove_file(dev, &v2d_sysfs_files[i]);
+       }
+#ifdef CONFIG_SPACEMIT_DEBUG
+       info->is_v2d_running = NULL;
+       info->nb.notifier_call = NULL;
+       clk_notifier_unregister(info->clkcore, &info->nb);
+#endif
+       misc_deregister(&info->mdev);
+       if(info->v2d_job_done_wq)
+               destroy_workqueue(info->v2d_job_done_wq);
+
+       if (__clk_is_enabled(info->clkcore)) {
+               clk_disable_unprepare(info->clkcore);
+       }
+       ret = reset_control_assert(info->v2d_reset);
+       if (ret < 0) {
+               V2DLOGI("Failed to assert v2d_reset\n");
+       }
+
+       v2dInfo = NULL;
+
+       return 0;
+}
+
+/* Device-tree match table: binds to "spacemit,v2d" nodes. */
+static const struct of_device_id v2d_drv_match_table[] = {
+       { .compatible = "spacemit,v2d" },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, v2d_drv_match_table);
+
+/* Platform driver glue for the Spacemit V2D engine. */
+static struct platform_driver v2d_driver = {
+       .driver         = {
+               .name   = V2D_DRV_NAME,
+               .of_match_table = of_match_ptr(v2d_drv_match_table),
+               .pm             = &v2d_pm_ops,
+       },
+       .probe          = v2d_probe,
+       .remove         = v2d_remove,
+};
+
+/*
+ * Module init/exit boilerplate replaced with module_platform_driver(),
+ * which expands to exactly the register/unregister pair it replaces.
+ */
+module_platform_driver(v2d_driver);
+
+MODULE_DESCRIPTION("Spacemit V2D driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/spacemit/v2d/v2d_drv.h b/drivers/soc/spacemit/v2d/v2d_drv.h
new file mode 100644 (file)
index 0000000..5faec61
--- /dev/null
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _V2D_DRV_H_
+#define _V2D_DRV_H_
+#include <linux/types.h>
+
+/* Scaler direction applied to an input layer. */
+typedef enum SPACEMIT_V2D_SCALER_MODE_E {
+    V2D_NO_SCALE    =0,
+    V2D_SCALE_DOWN  =1,
+    V2D_SCALE_UP    =2,
+} V2D_SCALER_MODE_E;
+
+/* Hardware input layer selector; V2D_INPUT_LAYER_NUM is the layer count. */
+typedef enum SPACEMIT_V2D_INPUT_LAYER_E {
+    V2D_INPUT_LAYER0    =0,
+    V2D_INPUT_LAYER1    =1,
+    V2D_INPUT_LAYER_NUM =2,
+} V2D_INPUT_LAYER_E;
+
+/* Generic on/off value used in several register fields. */
+typedef enum SPACEMIT_V2D_FUNCTION_MODE_E {
+    V2D_FUNC_DISABLE=0,
+    V2D_FUNC_ENABLE =1,
+} V2D_FUNCTION_MODE_E;
+
+/* Output dithering kernel size. */
+typedef enum SPACEMIT_V2D_DITHER_E {
+    V2D_NO_DITHER   =0,
+    V2D_DITHER_4X4  =1,
+    V2D_DITHER_8X8  =2,
+} V2D_DITHER_E;
+
+/* Rotation / mirroring applied to an input layer. */
+typedef enum SPACEMIT_V2D_ROTATE_ANGLE {
+    V2D_ROT_0       =0,
+    V2D_ROT_90      =1,
+    V2D_ROT_180     =2,
+    V2D_ROT_270     =3,
+    V2D_ROT_MIRROR  =4,
+    V2D_ROT_FLIP    =5,
+} V2D_ROTATE_ANGLE_E;
+
+/* Blend engine command: alpha blending or binary raster op (ROP2). */
+typedef enum SPACEMIT_V2D_BLENDCMD_E {
+    V2D_BLENDCMD_ALPHA  = 0,
+    V2D_BLENDCMD_ROP2   = 1,
+    V2D_BLENDCMD_BUTT
+} V2D_BLENDCMD_E;
+
+/* How the mask surface participates in blending ("_BUTT" = sentinel). */
+typedef enum SPACEMIT_V2D_MASKCMD_E {
+    V2D_MASKCMD_DISABLE     = 0,
+    V2D_MASKCMD_NORMAL      = 1,
+    V2D_MASKCMD_AS_VALUE    = 2,
+    V2D_MASKCMD_BUTT
+} V2D_MASKCMD_E;
+
+/* Source of the alpha value used by the blender (note: "GOLBAL" typo
+ * is part of the ABI name). */
+typedef enum SPACEMIT_V2D_BLENDALPHA_SOURCE_E {
+    V2D_BLENDALPHA_SOURCE_PIXEL     = 0,
+    V2D_BLENDALPHA_SOURCE_GOLBAL    = 1,
+    V2D_BLENDALPHA_SOURCE_MASK      = 2,
+    V2D_BLENDALPHA_SOURCE_BUTT
+} V2D_BLENDALPHA_SOURCE_E;
+
+/* Optional pre-multiplication of source pixels before blending. */
+typedef enum SPACEMIT_V2D_BLEND_PRE_ALPHA_FUNC_E {
+    V2D_BLEND_PRE_ALPHA_FUNC_DISABLE                = 0,
+    V2D_BLEND_PRE_ALPHA_FUNC_GLOBAL_MULTI_SOURCE    = 1,
+    V2D_BLEND_PRE_ALPHA_FUNC_MASK_MULTI_SOURCE      = 2,
+    V2D_BLEND_PRE_ALPHA_FUNC_BUTT
+} V2D_BLEND_PRE_ALPHA_FUNC_E;
+
+/* Porter-Duff style blend factors for color/alpha channels. */
+typedef enum SPACEMIT_V2D_BLEND_MODE_E {
+    V2D_BLEND_ZERO = 0x0,
+    V2D_BLEND_ONE,
+    V2D_BLEND_SRC_ALPHA,
+    V2D_BLEND_ONE_MINUS_SRC_ALPHA,
+    V2D_BLEND_DST_ALPHA,
+    V2D_BLEND_ONE_MINUS_DST_ALPHA,
+    V2D_BLEND_BUTT
+}V2D_BLEND_MODE_E;
+
+/* The 16 classic binary raster operations (GDI ROP2 naming). */
+typedef enum SPACEMIT_V2D_ROP2_MODE_E {
+    V2D_ROP2_BLACK      =0,
+    V2D_ROP2_NOTMERGEPEN=1,
+    V2D_ROP2_MASKNOTPEN =2,
+    V2D_ROP2_NOTCOPYPEN =3,
+    V2D_ROP2_MASKPENNOT =4,
+    V2D_ROP2_NOT        =5,
+    V2D_ROP2_XORPEN     =6,
+    V2D_ROP2_NOTMASKPEN =7,
+    V2D_ROP2_MASKPEN    =8,
+    V2D_ROP2_NOTXORPEN  =9,
+    V2D_ROP2_NOP        =10,
+    V2D_ROP2_MERGENOTPEN=11,
+    V2D_ROP2_COPYPEN    =12,
+    V2D_ROP2_MERGEPENNOT=13,
+    V2D_ROP2_MERGEPEN   =14,
+    V2D_ROP2_WHITE      =15,
+    V2D_ROP2_BUTT       =16
+}V2D_ROP2_MODE_E;
+
+/*
+ * Pixel formats. NOTE: the BGR variants (13..20) deliberately mirror the
+ * RGB variants (0..7) in the same order — fmt_convert() in v2d_hw.c
+ * relies on the fixed offset V2D_COLOR_FORMAT_BGR888 between the two
+ * runs. Keep the ordering intact when extending this enum.
+ */
+typedef enum SPACEMIT_V2D_COLOR_FORMAT_E {
+    V2D_COLOR_FORMAT_RGB888     =0,
+    V2D_COLOR_FORMAT_RGBX8888   =1,
+    V2D_COLOR_FORMAT_RGBA8888   =2,
+    V2D_COLOR_FORMAT_ARGB8888   =3,
+    V2D_COLOR_FORMAT_RGB565     =4,
+    V2D_COLOR_FORMAT_NV12       =5,
+    V2D_COLOR_FORMAT_RGBA5658   =6,
+    V2D_COLOR_FORMAT_ARGB8565   =7,
+    V2D_COLOR_FORMAT_A8         =8,
+    V2D_COLOR_FORMAT_Y8         =9,
+    V2D_COLOR_FORMAT_L8_RGBA8888=10,
+    V2D_COLOR_FORMAT_L8_RGB888  =11,
+    V2D_COLOR_FORMAT_L8_RGB565  =12,
+    V2D_COLOR_FORMAT_BGR888     =13,
+    V2D_COLOR_FORMAT_BGRX8888   =14,
+    V2D_COLOR_FORMAT_BGRA8888   =15,
+    V2D_COLOR_FORMAT_ABGR8888   =16,
+    V2D_COLOR_FORMAT_BGR565     =17,
+    V2D_COLOR_FORMAT_NV21       =18,
+    V2D_COLOR_FORMAT_BGRA5658   =19,
+    V2D_COLOR_FORMAT_ABGR8565   =20,
+    V2D_COLOR_FORMAT_L8_BGRA8888=21,
+    V2D_COLOR_FORMAT_L8_BGR888  =22,
+    V2D_COLOR_FORMAT_L8_BGR565  =23,
+    V2D_COLOR_FORMAT_BUTT,
+}V2D_COLOR_FORMAT_E;
+
+/* Color-space conversion paths (BT.601/BT.709, wide/narrow range). */
+typedef enum SPACEMIT_V2D_CSC_MODE_E {
+    V2D_CSC_MODE_RGB_2_BT601WIDE            =0,
+    V2D_CSC_MODE_BT601WIDE_2_RGB            =1,
+    V2D_CSC_MODE_RGB_2_BT601NARROW          =2,
+    V2D_CSC_MODE_BT601NARROW_2_RGB          =3,
+    V2D_CSC_MODE_RGB_2_BT709WIDE            =4,
+    V2D_CSC_MODE_BT709WIDE_2_RGB            =5,
+    V2D_CSC_MODE_RGB_2_BT709NARROW          =6,
+    V2D_CSC_MODE_BT709NARROW_2_RGB          =7,
+    V2D_CSC_MODE_BT601WIDE_2_BT709WIDE      =8,
+    V2D_CSC_MODE_BT601WIDE_2_BT709NARROW    =9,
+    V2D_CSC_MODE_BT601WIDE_2_BT601NARROW    =10,
+    V2D_CSC_MODE_BT601NARROW_2_BT709WIDE    =11,
+    V2D_CSC_MODE_BT601NARROW_2_BT709NARROW  =12,
+    V2D_CSC_MODE_BT601NARROW_2_BT601WIDE    =13,
+    V2D_CSC_MODE_BT709WIDE_2_BT601WIDE      =14,
+    V2D_CSC_MODE_BT709WIDE_2_BT601NARROW    =15,
+    V2D_CSC_MODE_BT709WIDE_2_BT709NARROW    =16,
+    V2D_CSC_MODE_BT709NARROW_2_BT601WIDE    =17,
+    V2D_CSC_MODE_BT709NARROW_2_BT601NARROW  =18,
+    V2D_CSC_MODE_BT709NARROW_2_BT709WIDE    =19,
+    V2D_CSC_MODE_RGB_2_GREY                 =20,
+    V2D_CSC_MODE_RGB_2_RGB                  =21,
+    V2D_CSC_MODE_BUTT                       =22,
+} V2D_CSC_MODE_E;
+
+/* Frame-buffer-compression decoder tile/scan layout. */
+typedef enum SPACEMIT_FBC_DECODER_MODE_E {
+    FBC_DECODER_MODE_SCAN_LINE            =0,
+    FBC_DECODER_MODE_LDC_Y                =1,
+    FBC_DECODER_MODE_LDC_UV               =2,
+    FBC_DECODER_MODE_H264_32x16           =3,
+    FBC_DECODER_MODE_H265_32x32           =4,
+    FBC_DECODER_MODE_BUTT                 =5,
+} FBC_DECODER_MODE_E;
+
+/* Pixel formats understood by the FBC decoder (and encoder, via typedef
+ * below). */
+typedef enum SPACEMIT_FBC_DECODER_FORMAT_E {
+    FBC_DECODER_FORMAT_NV12               =0,
+    FBC_DECODER_FORMAT_RGB888             =1,
+    FBC_DECODER_FORMAT_ARGB8888           =2,
+    FBC_DECODER_FORMAT_RGB565             =3,
+    FBC_DECODER_FORMAT_BUTT               =4,
+} FBC_DECODER_FORMAT_E;
+
+/* Rectangle in pixels: (x, y) is the top-left corner. */
+typedef struct {
+    uint16_t x; /* left */
+    uint16_t y; /* top */
+    uint16_t w; /* crop width */
+    uint16_t h; /* crop height */
+} V2D_AREA_S;
+
+/* Fill color packed according to 'format'; split_fillcolor() in
+ * v2d_hw.c decodes it into R/G/B/A channels. */
+typedef struct SPACEMIT_V2D_FILLCOLOR_S {
+    uint32_t colorvalue;
+    V2D_COLOR_FORMAT_E format;
+} V2D_FILLCOLOR_S;
+
+/* Optional background fill applied by the blender. */
+typedef struct SPACEMIT_V2D_BACKGROUND_S {
+    V2D_FILLCOLOR_S fillcolor;
+    bool enable;
+} V2D_BACKGROUND_S;
+
+/* Optional per-layer solid color (replaces the layer's pixel data). */
+typedef struct SPACEMIT_V2D_SOLIDCOLOR_S {
+    V2D_FILLCOLOR_S fillcolor;
+    bool enable;
+} V2D_SOLIDCOLOR_S;
+
+/* L8 palette: up to 256 entries x up to 4 bytes each (1024 bytes);
+ * 'len' is the number of valid bytes in palVal. */
+typedef struct SPACEMIT_V2D_PALETTE_S {
+    uint8_t palVal[1024];
+    int len;
+} V2D_PALETTE_S;
+
+/* FBC decoder parameters for a compressed input surface.
+ * headerAddr_h/_l are the high/low 32 bits of the header buffer address;
+ * the bbox* fields describe the compressed bounding box in pixels. */
+typedef struct SPACEMIT_FBC_DECODER_S {
+    int fd;
+    uint32_t headerAddr_h;
+    uint32_t headerAddr_l;
+    uint16_t bboxLeft;
+    uint16_t bboxRight;
+    uint16_t bboxTop;
+    uint16_t bboxBottom;
+    bool rgb_pack_en;
+    bool is_split;
+    FBC_DECODER_MODE_E   enFbcdecMode;
+    FBC_DECODER_FORMAT_E enFbcdecFmt;
+} FBC_DECODER_S;
+
+/* Encoder reuses the decoder format enum. */
+typedef FBC_DECODER_FORMAT_E FBC_ENCODER_FORMAT_E;
+/* FBC encoder parameters for a compressed output surface; header and
+ * payload buffers are addressed by split 64-bit (high/low) pairs. */
+typedef struct SPACEMIT_FBC_ENCODER_S {
+    int fd;
+    int offset;
+    uint32_t headerAddr_h;
+    uint32_t headerAddr_l;
+    uint32_t payloadAddr_h;
+    uint32_t payloadAddr_l;
+    uint16_t bboxLeft;
+    uint16_t bboxRight;
+    uint16_t bboxTop;
+    uint16_t bboxBottom;
+    bool is_split;
+    FBC_ENCODER_FORMAT_E enFbcencFmt;
+} FBC_ENCODER_S;
+
+/* One input or output surface. Either a linear buffer (addressed by
+ * fd/physical address pair, or by solid color), or an FBC-compressed
+ * buffer described by the union below. */
+typedef struct SPACEMIT_V2D_SURFACE_S {
+    struct {
+        bool fbc_enable;
+        int fd;
+        int offset;
+        uint32_t phyaddr_y_l;
+        uint32_t phyaddr_y_h;
+        uint32_t phyaddr_uv_l;
+        uint32_t phyaddr_uv_h;
+        uint16_t w;
+        uint16_t h;
+        uint16_t stride;
+        V2D_COLOR_FORMAT_E format;
+    };
+    union {
+        FBC_DECODER_S fbcDecInfo;  /* valid when used as decode input */
+        FBC_ENCODER_S fbcEncInfo;  /* valid when used as encode output */
+    };
+    V2D_SOLIDCOLOR_S solidcolor;
+} V2D_SURFACE_S;
+
+/* Blend factors for the alpha-blend path (V2D_BLENDCMD_ALPHA). */
+typedef struct {
+    V2D_BLEND_MODE_E srcColorFactor;
+    V2D_BLEND_MODE_E dstColorFactor;
+    V2D_BLEND_MODE_E srcAlphaFactor;
+    V2D_BLEND_MODE_E dstAlphaFactor;
+} V2D_BLEND_FACTOR_S;
+
+/* ROP2 opcodes for the raster-op path (V2D_BLENDCMD_ROP2). */
+typedef struct {
+    V2D_ROP2_MODE_E colorRop2Code;
+    V2D_ROP2_MODE_E alphaRop2Code;
+} V2D_ROP2_CODE_S;
+
+/* Per-layer blend configuration. The union holds blend factors or ROP2
+ * codes depending on V2D_BLEND_CONF_S.blend_cmd. */
+typedef struct {
+    V2D_BLENDALPHA_SOURCE_E blend_alpha_source;
+    V2D_BLEND_PRE_ALPHA_FUNC_E blend_pre_alpha_func;
+    uint8_t global_alpha;
+    union {
+        V2D_BLEND_FACTOR_S stBlendFactor;
+        V2D_ROP2_CODE_S stRop2Code;
+    };
+    V2D_AREA_S blend_area;
+} V2D_BLEND_LAYER_CONF_S;
+
+/* Global blend configuration covering both input layers. */
+typedef struct {
+    V2D_BLENDCMD_E blend_cmd;
+    V2D_BACKGROUND_S bgcolor;
+    V2D_MASKCMD_E mask_cmd;
+    V2D_AREA_S blend_mask_area;
+    V2D_BLEND_LAYER_CONF_S blendlayer[V2D_INPUT_LAYER_NUM];
+} V2D_BLEND_CONF_S;
+
+/* Full description of one V2D operation: two input layers, optional
+ * mask, destination, per-layer rotation/CSC, dithering and palette. */
+typedef struct {
+    V2D_SURFACE_S layer0;
+    V2D_SURFACE_S layer1;
+    V2D_SURFACE_S mask;
+    V2D_SURFACE_S dst;
+    V2D_AREA_S l0_rect;
+    V2D_AREA_S l1_rect;
+    V2D_AREA_S mask_rect;
+    V2D_AREA_S dst_rect;
+    V2D_BLEND_CONF_S blendconf;
+    V2D_ROTATE_ANGLE_E l0_rt;
+    V2D_ROTATE_ANGLE_E l1_rt;
+    V2D_CSC_MODE_E l0_csc;
+    V2D_CSC_MODE_E l1_csc;
+    V2D_DITHER_E dither;
+    V2D_PALETTE_S palette;
+} V2D_PARAM_S;
+
+/* Userspace submission: parameters plus sync-fence fds (acquire is
+ * waited on before the job; complete is signalled afterwards). */
+typedef struct  {
+    V2D_PARAM_S param;
+    int32_t acquireFencefd;
+    int32_t completeFencefd;
+} V2D_SUBMIT_TASK_S;
+
+/* Per-task dma-buf bookkeeping for one mapped buffer. */
+struct v2d_dma_buf_info {
+    struct dma_buf *dmabuf;
+    struct dma_buf_attachment *attach;
+    struct sg_table *sgtable;
+    int tbu_id;
+    int valid;
+};
+
+/* A queued job awaiting execution on the post worker thread; info[]
+ * holds up to 4 mapped dma-bufs (layer0/layer1/mask/dst). */
+struct v2d_pending_post_task {
+    struct list_head head;
+    V2D_SUBMIT_TASK_S *pTask;
+    struct dma_fence *pCompleteFence;
+    struct dma_fence *pAcquireFence;
+    struct v2d_dma_buf_info info[4];
+};
+
+/* Hardware-facing API implemented in v2d_hw.c.
+ * NOTE: "golbal" is a typo but part of the exported name; it must stay
+ * in sync with the definition. */
+void v2d_golbal_reset(void);
+uint32_t v2d_irq_status(void);
+uint32_t v2d_irqerr_status(void);
+void v2d_dump_irqraw_status(void);
+/* The *_clear() helpers write the previously read status bits back to
+ * the status registers to acknowledge them. */
+void v2d_irq_clear(uint32_t irqstaus);
+void v2d_irqerr_clear(uint32_t irqerr);
+void v2d_irq_enable(void);
+void v2d_irq_disable(void);
+void config_v2d_hw(V2D_SUBMIT_TASK_S *pTask);
+int v2d_iommu_map_sg(unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot);
+void v2d_iommu_map_end(void);
+void iommu_irq_reset(void);
+#endif
diff --git a/drivers/soc/spacemit/v2d/v2d_hw.c b/drivers/soc/spacemit/v2d/v2d_hw.c
new file mode 100644 (file)
index 0000000..998146b
--- /dev/null
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+* V2D hardware driver for Spacemit
+* Copyright (C) 2023 Spacemit Co., Ltd.
+*
+*/
+
+#include "v2d_priv.h"
+#include "v2d_drv.h"
+#include "v2d_reg.h"
+#include "csc_matrix.h"
+#include <linux/dma-buf.h>
+
+/* Channel indices into the chl[4] array filled by split_fillcolor(). */
+enum {
+       R = 0,
+       G,
+       B,
+       A
+};
+
+/*
+ * Bicubic scaler coefficient table: 16 rows x 3 taps (48 values total),
+ * consumed pairwise by v2d_scaler_coef_init(). Presumably one row per
+ * fractional phase (1/16 steps); each row sums to ~999 — TODO confirm
+ * against the V2D hardware documentation.
+ */
+static int32_t bicubic_coef[16][3] = {
+       { 246, 507, 246 },
+       { 222, 505, 270 },
+       { 199, 501, 294 },
+       { 177, 494, 318 },
+       { 155, 486, 342 },
+       { 136, 474, 365 },
+       { 117, 461, 387 },
+       { 100, 445, 408 },
+       { 85, 427, 427 },
+       { 71, 408, 445 },
+       { 59, 387, 461 },
+       { 49, 365, 474 },
+       { 41, 343, 485 },
+       { 35, 318, 494 },
+       { 30, 294, 501 },
+       { 27, 270, 505 }
+};
+
+/* Driver-global state, defined in v2d_drv.c; holds the ioremapped
+ * register base used by all accessors below. */
+extern struct v2d_info *v2dInfo;
+
+/* Write 'val' to the register at byte offset 'reg' from the V2D base. */
+static void v2d_write(uint32_t reg, uint32_t val)
+{
+       writel(val, v2dInfo->v2dreg_iomem_base+reg);
+}
+
+/* Read the register at byte offset 'reg' from the V2D base. */
+static uint32_t v2d_read(uint32_t reg)
+{
+       return readl(v2dInfo->v2dreg_iomem_base+reg);
+}
+
+/* Read-modify-write: set the given bits, leaving others untouched. */
+static void v2d_set_bits(uint32_t reg, uint32_t bits)
+{
+       v2d_write(reg, v2d_read(reg) | bits);
+}
+
+/* Dead code kept from the vendor tree; candidate for removal. */
+#if 0
+static void v2d_clear_bits(uint32_t reg, uint32_t bits)
+{
+       v2d_write(reg, v2d_read(reg) & ~bits);
+}
+#endif
+
+/*
+ * Replace the field (mask << shifts) of 'reg' with 'value' << shifts.
+ * NOTE(review): 'value' is not masked before OR-ing, so a value wider
+ * than 'mask' would corrupt neighboring bits — callers must pass
+ * in-range values.
+ */
+static void v2d_write_bits(uint32_t reg, uint32_t value, uint32_t mask, uint32_t shifts)
+{
+       uint32_t reg_val;
+
+       reg_val = v2d_read(reg);
+       reg_val &= ~(mask << shifts);
+       reg_val |= (value << shifts);
+       v2d_write(reg, reg_val);
+}
+
+/*
+ * Trigger a global soft reset of the V2D block and busy-wait until the
+ * hardware clears the reset bit.
+ * NOTE(review): the poll loop has no timeout — a hung device would spin
+ * forever. "golbal" is a typo, but the name is declared in v2d_drv.h
+ * and used by callers, so it cannot be renamed here alone.
+ */
+void v2d_golbal_reset(void)
+{
+       v2d_set_bits(V2D_AUTO_CLK_REG, V2D_GLOBAL_RESET);
+       while (v2d_read(V2D_AUTO_CLK_REG) & V2D_GLOBAL_RESET);
+       V2DLOGD("v2d golbal reset done!\n");
+}
+
+/* Read the pending (masked) IRQ status bits. */
+uint32_t v2d_irq_status(void)
+{
+       return v2d_read(V2D_IRQ_STATUS);
+}
+
+/* Dump the raw (unmasked) error/decoder/encoder IRQ registers for
+ * debugging a failed job. */
+void v2d_dump_irqraw_status(void)
+{
+       uint32_t irqerr_raw, encirq, dec0_irqraw, dec1_irqraw;
+       irqerr_raw  = v2d_read(V2D_ERR_IRQ_RAW);
+       dec0_irqraw = v2d_read(V2D_L0_DEC_REG8);
+       dec1_irqraw = v2d_read(V2D_L1_DEC_REG8);
+       encirq      = v2d_read(V2D_ENC_REG18);
+       printk(KERN_ERR "v2d dump core:0x%x, dec0:0x%x, dec1:0x%x, enc:0x%x\n",  irqerr_raw, dec0_irqraw, dec1_irqraw, encirq);
+}
+
+/* Read the pending error-IRQ status bits. */
+uint32_t v2d_irqerr_status(void)
+{
+       return v2d_read(V2D_ERR_IRQ_STATUS);
+}
+
+/* Acknowledge error IRQs by writing the read bits back. */
+void v2d_irqerr_clear(uint32_t irqerr)
+{
+       v2d_write(V2D_ERR_IRQ_STATUS, irqerr);
+}
+
+/* Acknowledge core IRQs; the encoder status register is cleared
+ * wholesale (all 18 bits) regardless of 'irqstaus'. */
+void v2d_irq_clear(uint32_t irqstaus)
+{
+       v2d_write(V2D_ENC_REG18, 0x3ffff);
+       v2d_write(V2D_IRQ_STATUS, irqstaus);
+}
+
+/* Unmask EOF + FBC-encode IRQs plus the error IRQ set (0xE07), and set
+ * bit 0 of the debug control register. */
+void v2d_irq_enable(void)
+{
+       v2d_write(V2D_IRQ_MASK, V2D_EOF_IRQ_MASK|V2D_FBCENC_IRQ_MASK);
+       v2d_write(V2D_ERR_IRQ_MASK, 0xE07);
+       v2d_write_bits(V2D_DEBUG_REG0, 0x1, 0x1, 0);
+}
+
+/* Mask all core and error IRQs. */
+void v2d_irq_disable(void)
+{
+       v2d_write(V2D_IRQ_MASK, 0x00);
+       v2d_write(V2D_ERR_IRQ_MASK, 0x00);
+}
+
+/* Program the AXI bus control register: QoS 2 for both read and write
+ * masters, and enable shadow-register mode. */
+static void ConfigAxiBus(void)
+{
+       v2d_axi_bus_ctrl_reg_t ctrl;
+
+       ctrl.overlay = 0;
+       ctrl.field.arqos_m = 2;
+       ctrl.field.awqos_m = 2;
+       ctrl.field.shadow_mode = 1;
+       v2d_write(V2D_AXI_BUS_CTRL, ctrl.overlay);
+}
+
+/*
+ * Bytes per pixel for a given pixel format. For the planar NV12/NV21
+ * formats this is the luma-plane value (1). Returns 0 (after logging)
+ * for formats not handled here — callers must not divide by the result
+ * without checking.
+ */
+int getBytePerPixel(V2D_COLOR_FORMAT_E enFormat)
+{
+       int Bpp=0;
+
+       switch(enFormat){
+       case V2D_COLOR_FORMAT_NV12:
+       case V2D_COLOR_FORMAT_NV21:
+               Bpp = 1;
+               break;
+       case V2D_COLOR_FORMAT_RGB888:
+       case V2D_COLOR_FORMAT_BGR888:
+               Bpp = 3;
+               break;
+       case V2D_COLOR_FORMAT_RGBX8888:
+       case V2D_COLOR_FORMAT_RGBA8888:
+       case V2D_COLOR_FORMAT_ARGB8888:
+       case V2D_COLOR_FORMAT_BGRX8888:
+       case V2D_COLOR_FORMAT_BGRA8888:
+       case V2D_COLOR_FORMAT_ABGR8888:
+               Bpp = 4;
+               break;
+       case V2D_COLOR_FORMAT_RGB565:
+       case V2D_COLOR_FORMAT_BGR565:
+               Bpp = 2;
+               break;
+       /* 5658/8565: 16-bit color + 8-bit alpha = 3 bytes. */
+       case V2D_COLOR_FORMAT_RGBA5658:
+       case V2D_COLOR_FORMAT_ARGB8565:
+       case V2D_COLOR_FORMAT_BGRA5658:
+       case V2D_COLOR_FORMAT_ABGR8565:
+               Bpp = 3;
+               break;
+       case V2D_COLOR_FORMAT_A8:
+       case V2D_COLOR_FORMAT_Y8:
+               Bpp = 1;
+               break;
+       default:
+               V2DLOGE("err format:%d not supported\n",enFormat);
+       }
+       return Bpp;
+}
+
+/*
+ * Load the bicubic coefficient table into the scaler, packing two
+ * coefficients per register.
+ * NOTE(review): this walks the 16x3 (= 48 entry) table linearly and
+ * assumes SCALER_COEF_REG_NUM == 24 so that exactly 48 values are
+ * consumed; a larger value would read past the table — verify the
+ * constant in v2d_reg.h.
+ */
+static void v2d_scaler_coef_init(void)
+{
+       int i;
+       int32_t *pCoef;
+       v2d_scaler_coef_reg_t scaler_coef;
+
+       scaler_coef.overlay = 0;
+       pCoef = &bicubic_coef[0][0];
+       for (i=0; i<SCALER_COEF_REG_NUM; i++) {
+               scaler_coef.field.scaler_coef0 = *(pCoef + 2*i);
+               scaler_coef.field.scaler_coef1 = *(pCoef + 2*i+1);
+               v2d_write(V2D_SCALER_COEF_REG(i), scaler_coef.overlay);
+       }
+}
+
+/*
+ * Decode a packed fill color into separate channels.
+ * @fillcolor:  color packed according to @enFormatIn (little-endian
+ *              field order: the first component occupies the low bits).
+ * @enFormatIn: pixel format describing the packing.
+ * @pChl:       out array indexed by R/G/B/A.
+ *
+ * Formats without an alpha field get opaque alpha (0xFF). 565-packed
+ * components are NOT expanded to 8 bits here — the 5/6-bit values are
+ * stored as-is. Unknown formats fall back to opaque white.
+ */
+static void split_fillcolor(uint32_t fillcolor, V2D_COLOR_FORMAT_E enFormatIn, uint8_t *pChl)
+{
+       uint8_t r, g, b, a;
+
+       switch (enFormatIn) {
+               case V2D_COLOR_FORMAT_NV12:
+                       r = fillcolor & 0xFF;
+                       g = (fillcolor >> 8) & 0xFF;
+                       b = (fillcolor >> 16) & 0xFF;
+                       a = 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_NV21:
+                       r = fillcolor & 0xFF;
+                       b = (fillcolor >> 8) & 0xFF;
+                       g = (fillcolor >> 16) & 0xFF;
+                       a = 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_RGB888:
+               case V2D_COLOR_FORMAT_RGBX8888:
+                       r = fillcolor & 0xFF;
+                       g = (fillcolor >> 8) & 0xFF;
+                       b = (fillcolor >> 16) & 0xFF;
+                       a = 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_RGBA8888:
+                       r = fillcolor & 0xFF;
+                       g = (fillcolor >> 8) & 0xFF;
+                       b = (fillcolor >> 16) & 0xFF;
+                       a = (fillcolor >> 24) & 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_ARGB8888:
+                       a = fillcolor & 0xFF;
+                       r = (fillcolor >> 8) & 0xFF;
+                       g = (fillcolor >> 16) & 0xFF;
+                       b = (fillcolor >> 24) & 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_RGB565:
+                       a = 0xFF;
+                       r = fillcolor & 0x1F;
+                       g = (fillcolor >> 5) & 0x3F;
+                       b = (fillcolor >> 11) & 0x1F;
+                       break;
+               case V2D_COLOR_FORMAT_RGBA5658:
+                       r = fillcolor & 0x1F;
+                       g = (fillcolor >> 5) & 0x3F;
+                       b = (fillcolor >> 11) & 0x1F;
+                       a = (fillcolor >> 16) & 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_ARGB8565:
+                       a = fillcolor & 0xFF;
+                       r = (fillcolor >> 8) & 0x1F;
+                       g = (fillcolor >> 13) & 0x3F;
+                       b = (fillcolor >> 19) & 0x1F;
+                       break;
+               case V2D_COLOR_FORMAT_BGR888:
+               case V2D_COLOR_FORMAT_BGRX8888:
+                       b = fillcolor & 0xFF;
+                       g = (fillcolor >> 8) & 0xFF;
+                       r = (fillcolor >> 16) & 0xFF;
+                       a = 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_BGRA8888:
+                       b = fillcolor & 0xFF;
+                       g = (fillcolor >> 8) & 0xFF;
+                       r = (fillcolor >> 16) & 0xFF;
+                       a = (fillcolor >> 24) & 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_ABGR8888:
+                       a = fillcolor & 0xFF;
+                       b = (fillcolor >> 8) & 0xFF;
+                       g = (fillcolor >> 16) & 0xFF;
+                       r = (fillcolor >> 24) & 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_BGR565:
+                       a = 0xFF;
+                       b = fillcolor & 0x1F;
+                       g = (fillcolor >> 5) & 0x3F;
+                       r = (fillcolor >> 11) & 0x1F;
+                       break;
+               case V2D_COLOR_FORMAT_BGRA5658:
+                       b = fillcolor & 0x1F;
+                       g = (fillcolor >> 5) & 0x3F;
+                       r = (fillcolor >> 11) & 0x1F;
+                       a = (fillcolor >> 16) & 0xFF;
+                       break;
+               case V2D_COLOR_FORMAT_ABGR8565:
+                       a = fillcolor & 0xFF;
+                       b = (fillcolor >> 8) & 0x1F;
+                       g = (fillcolor >> 13) & 0x3F;
+                       r = (fillcolor >> 19) & 0x1F;
+                       break;
+               default:
+                       /* Unknown packing: fall back to opaque white. */
+                       r = 0xFF;
+                       g = 0xFF;
+                       b = 0xFF;
+                       a = 0xFF;
+                       break;
+       }
+       pChl[R] = r;
+       pChl[G] = g;
+       pChl[B] = b;
+       pChl[A] = a;
+}
+
+/*
+ * Whether the hardware's R/B (or U/V) channel-swap must be enabled for
+ * this format, i.e. whether it is one of the BGR/NV21/L8-BGR variants.
+ */
+static bool do_swap(V2D_COLOR_FORMAT_E enFormatIn)
+{
+       switch (enFormatIn) {
+               case V2D_COLOR_FORMAT_BGR888:
+               case V2D_COLOR_FORMAT_BGRX8888:
+               case V2D_COLOR_FORMAT_BGRA8888:
+               case V2D_COLOR_FORMAT_ABGR8888:
+               case V2D_COLOR_FORMAT_BGR565:
+               case V2D_COLOR_FORMAT_BGRA5658:
+               case V2D_COLOR_FORMAT_ABGR8565:
+               case V2D_COLOR_FORMAT_NV21:
+               case V2D_COLOR_FORMAT_L8_BGR565:
+               case V2D_COLOR_FORMAT_L8_BGR888:
+               case V2D_COLOR_FORMAT_L8_BGRA8888:
+                       return V2D_TRUE;
+               default:
+                       return V2D_FALSE;
+       }
+}
+
+/*
+ * True if either layer's CSC mode converts into a narrow-range (limited
+ * range) color space, i.e. the destination side of the conversion is
+ * BT.601-narrow or BT.709-narrow.
+ * NOTE(review): the two switches are identical; a small helper checking
+ * one mode would remove the duplication.
+ */
+static bool do_narrow(V2D_CSC_MODE_E enForeCSCMode, V2D_CSC_MODE_E enBackCSCMode)
+{
+       int ret = V2D_FALSE;
+
+       switch (enForeCSCMode) {
+               case V2D_CSC_MODE_RGB_2_BT601NARROW:
+               case V2D_CSC_MODE_RGB_2_BT709NARROW:
+               case V2D_CSC_MODE_BT601WIDE_2_BT709NARROW:
+               case V2D_CSC_MODE_BT601WIDE_2_BT601NARROW:
+               case V2D_CSC_MODE_BT601NARROW_2_BT709NARROW:
+               case V2D_CSC_MODE_BT709WIDE_2_BT601NARROW:
+               case V2D_CSC_MODE_BT709WIDE_2_BT709NARROW:
+               case V2D_CSC_MODE_BT709NARROW_2_BT601NARROW:
+                       ret = V2D_TRUE;
+                       break;
+               default:
+                       break;
+       }
+       switch (enBackCSCMode) {
+               case V2D_CSC_MODE_RGB_2_BT601NARROW:
+               case V2D_CSC_MODE_RGB_2_BT709NARROW:
+               case V2D_CSC_MODE_BT601WIDE_2_BT709NARROW:
+               case V2D_CSC_MODE_BT601WIDE_2_BT601NARROW:
+               case V2D_CSC_MODE_BT601NARROW_2_BT709NARROW:
+               case V2D_CSC_MODE_BT709WIDE_2_BT601NARROW:
+               case V2D_CSC_MODE_BT709WIDE_2_BT709NARROW:
+               case V2D_CSC_MODE_BT709NARROW_2_BT601NARROW:
+                       ret = V2D_TRUE;
+                       break;
+               default:
+                       break;
+       }
+       return ret;
+}
+
+/*
+ * Normalize a user-visible pixel format to the hardware's canonical
+ * format: BGR variants map to their RGB counterparts (the channel swap
+ * is handled separately by do_swap()), and every L8 palette variant
+ * collapses to L8_RGBA8888.
+ */
+static V2D_COLOR_FORMAT_E fmt_convert(V2D_COLOR_FORMAT_E enFormatIn)
+{
+       V2D_COLOR_FORMAT_E enFormatOut = 0;
+
+       switch (enFormatIn) {
+               case V2D_COLOR_FORMAT_RGB888:
+               case V2D_COLOR_FORMAT_RGBX8888:
+               case V2D_COLOR_FORMAT_RGBA8888:
+               case V2D_COLOR_FORMAT_ARGB8888:
+               case V2D_COLOR_FORMAT_RGB565:
+               case V2D_COLOR_FORMAT_NV12:
+               case V2D_COLOR_FORMAT_RGBA5658:
+               case V2D_COLOR_FORMAT_ARGB8565:
+               case V2D_COLOR_FORMAT_A8:
+               case V2D_COLOR_FORMAT_Y8:
+                       enFormatOut = enFormatIn;
+                       break;
+               case V2D_COLOR_FORMAT_BGR888:
+               case V2D_COLOR_FORMAT_BGRX8888:
+               case V2D_COLOR_FORMAT_BGRA8888:
+               case V2D_COLOR_FORMAT_ABGR8888:
+               case V2D_COLOR_FORMAT_BGR565:
+               case V2D_COLOR_FORMAT_NV21:
+               case V2D_COLOR_FORMAT_BGRA5658:
+               case V2D_COLOR_FORMAT_ABGR8565:
+                       /* Relies on the BGR run (13..20) mirroring the RGB
+                        * run (0..7) in the same order in the enum. */
+                       enFormatOut = enFormatIn - V2D_COLOR_FORMAT_BGR888;
+                       break;
+               case V2D_COLOR_FORMAT_L8_RGBA8888:
+               case V2D_COLOR_FORMAT_L8_RGB888:
+               case V2D_COLOR_FORMAT_L8_RGB565:
+               case V2D_COLOR_FORMAT_L8_BGRA8888:
+               case V2D_COLOR_FORMAT_L8_BGR888:
+               case V2D_COLOR_FORMAT_L8_BGR565:
+                       enFormatOut = V2D_COLOR_FORMAT_L8_RGBA8888;
+                       break;
+               default:
+                       break;
+       }
+       return enFormatOut;
+}
+
+/*
+ * Bytes per palette entry implied by the L8 format of the background
+ * and/or foreground surface (4 for *A8888, 3 for *888, 2 for *565).
+ * Either pointer may be NULL; if both layers use an L8 format, the
+ * foreground's value wins. Defaults to 4 when neither layer is L8.
+ */
+static int Get_L8_Palette_bytePerPixel(V2D_SURFACE_S *pstBackGround, V2D_SURFACE_S *pstForeGround)
+{
+       int bpp = 4;
+
+       if (pstBackGround) {
+               if (pstBackGround->format == V2D_COLOR_FORMAT_L8_RGBA8888 || pstBackGround->format == V2D_COLOR_FORMAT_L8_BGRA8888) {
+                       bpp = 4;
+               }
+               else if (pstBackGround->format == V2D_COLOR_FORMAT_L8_RGB888 || pstBackGround->format == V2D_COLOR_FORMAT_L8_BGR888) {
+                       bpp = 3;
+               }
+               else if (pstBackGround->format == V2D_COLOR_FORMAT_L8_RGB565 || pstBackGround->format == V2D_COLOR_FORMAT_L8_BGR565) {
+                       bpp = 2;
+               }
+       }
+       if (pstForeGround) {
+               if (pstForeGround->format == V2D_COLOR_FORMAT_L8_RGBA8888 || pstForeGround->format == V2D_COLOR_FORMAT_L8_BGRA8888) {
+                       bpp = 4;
+               }
+               else if (pstForeGround->format == V2D_COLOR_FORMAT_L8_RGB888 || pstForeGround->format == V2D_COLOR_FORMAT_L8_BGR888) {
+                       bpp = 3;
+               }
+               else if (pstForeGround->format == V2D_COLOR_FORMAT_L8_RGB565 || pstForeGround->format == V2D_COLOR_FORMAT_L8_BGR565) {
+                       bpp = 2;
+               }
+       }
+       return bpp;
+}
+
+/*
+ * Program the FBC decoder register bank for one compressed input layer:
+ * header buffer address, bounding box, image size and decode mode.
+ * The DMA-control (0xffff1a02) and IRQ-mask (0x17) values are fixed
+ * magic constants from the vendor tree — presumably tuned defaults;
+ * verify against the FBC decoder register documentation.
+ */
+static void ConfigV2dFbcDecoder(V2D_SURFACE_S *pstV2DSurface, V2D_INPUT_LAYER_E enInputLayer)
+{
+       v2d_fbc_decoder_bbox_reg_t bbox_x, bbox_y;
+       v2d_fbc_decoder_imgae_size_reg_t img_size;
+       v2d_fbc_decoder_mode_reg_t dec_mode;
+       v2d_fbc_decoder_dma_ctrl_reg_t dmac;
+       v2d_fbc_decoder_irq_ctrl_reg_t irqmask;
+       FBC_DECODER_S *pDecInfo = &pstV2DSurface->fbcDecInfo;
+
+       V2DLOGD("config %s fbc decoder \n", (enInputLayer > 0 ? "layer1" : "layer0"));
+       bbox_x.field.bbox_start = pDecInfo->bboxLeft;
+       bbox_x.field.bbox_end   = pDecInfo->bboxRight;
+       bbox_y.field.bbox_start = pDecInfo->bboxTop;
+       bbox_y.field.bbox_end   = pDecInfo->bboxBottom;
+       img_size.field.width    = pstV2DSurface->w;
+       img_size.field.height   = pstV2DSurface->h;
+       dec_mode.field.mode         = pDecInfo->enFbcdecMode;
+       dec_mode.field.format   = pDecInfo->enFbcdecFmt;
+       dec_mode.field.is_split = pDecInfo->is_split;
+       dec_mode.field.rgb_pack_en = pDecInfo->rgb_pack_en;
+       dmac.overlay = 0xffff1a02;
+       irqmask.overlay = 0x00000017;
+
+       v2d_write(V2D_LAYER_DEC_REG0_L(enInputLayer), pDecInfo->headerAddr_l);
+       v2d_write(V2D_LAYER_DEC_REG1_L(enInputLayer), pDecInfo->headerAddr_h);
+       v2d_write(V2D_LAYER_DEC_REG2_L(enInputLayer), bbox_x.overlay);
+       v2d_write(V2D_LAYER_DEC_REG3_L(enInputLayer), bbox_y.overlay);
+       v2d_write(V2D_LAYER_DEC_REG4_L(enInputLayer), img_size.overlay);
+       v2d_write(V2D_LAYER_DEC_REG5_L(enInputLayer), dec_mode.overlay);
+       v2d_write(V2D_LAYER_DEC_REG6_L(enInputLayer), dmac.overlay);
+       v2d_write(V2D_LAYER_DEC_REG7_L(enInputLayer), irqmask.overlay);
+}
+
+/*
+ * Program the FBC encoder register bank for the compressed destination
+ * surface: header/payload buffer addresses, bounding box, per-plane
+ * buffer sizes, IRQ mask and encode mode. The 0x9e00ffff and burst
+ * length 0x10 values are fixed vendor defaults.
+ */
+static void ConfigV2dFbcEncoder(V2D_SURFACE_S *pstV2DSurface)
+{
+       v2d_fbc_encoder_bbox_reg_t bbox_x, bbox_y;
+       v2d_fbc_encoder_buf_size_reg_t y_buf_size, uv_buf_size;
+       v2d_fbc_encoder_irq_reg_t irqmask;
+       v2d_fbc_encoder_mode_reg_t enc_mode;
+       v2d_fbc_encoder_dmac_burst_reg_t dmac_burst;
+       FBC_ENCODER_S *pEncInfo = &pstV2DSurface->fbcEncInfo;
+
+       V2DLOGD("config fbc encoder \n");
+       bbox_x.field.bbox_start = pEncInfo->bboxLeft;
+       bbox_x.field.bbox_end   = pEncInfo->bboxRight;
+       bbox_y.field.bbox_start = pEncInfo->bboxTop;
+       bbox_y.field.bbox_end   = pEncInfo->bboxBottom;
+       y_buf_size.field.x_size = pstV2DSurface->w;
+       y_buf_size.field.y_size = pstV2DSurface->h;
+       /*
+        * Chroma plane of a 4:2:0 surface is subsampled in both axes.
+        * The vendor code set y_size from the *width* (w >> 1), which is
+        * only correct for square surfaces — looks like a copy-paste
+        * slip; use the height. TODO: confirm against FBC encoder docs.
+        */
+       uv_buf_size.field.x_size = pstV2DSurface->w >> 1;
+       uv_buf_size.field.y_size = pstV2DSurface->h >> 1;
+       irqmask.overlay = 0x0001ffff;
+       enc_mode.field.encode_enable = 1;
+       enc_mode.field.split_mode_en = pEncInfo->is_split;
+       enc_mode.field.img_pix_format = pEncInfo->enFbcencFmt;
+       dmac_burst.field.burst_length = 0x10;
+
+       v2d_write(V2D_ENC_REG0, pEncInfo->headerAddr_l);
+       v2d_write(V2D_ENC_REG1, pEncInfo->headerAddr_h);
+       v2d_write(V2D_ENC_REG2, pEncInfo->payloadAddr_l);
+       v2d_write(V2D_ENC_REG3, pEncInfo->payloadAddr_h);
+       v2d_write(V2D_ENC_REG4, bbox_x.overlay);
+       v2d_write(V2D_ENC_REG5, bbox_y.overlay);
+       v2d_write(V2D_ENC_REG10, y_buf_size.overlay);
+       v2d_write(V2D_ENC_REG11, uv_buf_size.overlay);
+       v2d_write(V2D_ENC_REG13, irqmask.overlay);
+       v2d_write(V2D_ENC_REG15, 0x9e00ffff);
+       v2d_write(V2D_ENC_REG16, enc_mode.overlay);
+       v2d_write(V2D_ENC_REG17, dmac_burst.overlay);
+}
+
+static void ConfigV2dInputLayer(V2D_SURFACE_S *pstV2DSurface,
+                                                               V2D_AREA_S *pstV2DArea,
+                                                               V2D_BLEND_CONF_S *pstBlendConf,
+                                                               V2D_ROTATE_ANGLE_E enRotateAngle,
+                                                               V2D_CSC_MODE_E enCSCMode,
+                                                               V2D_INPUT_LAYER_E enInputLayer)
+{
+       int *pCscMatrix, i;
+       V2D_SCALER_MODE_E enScaleMode = V2D_NO_SCALE;
+       uint32_t width = 0, height = 0, tmp;
+       V2D_FILLCOLOR_S *pFillColor; uint8_t chl[4];
+       V2D_BLEND_LAYER_CONF_S *pBlendLayerConf;
+       v2d_blend_layer_ctrl0_reg_t bld_layer_ctrl0;
+       v2d_blend_layer_ctrl1_reg_t bld_layer_ctrl1;
+       v2d_blend_layer_ctrl2_reg_t bld_layer_ctrl2;
+       v2d_blend_layer_ctrl3_reg_t bld_layer_ctrl3;
+       v2d_blend_layer_factor_reg_t bld_layer_factor;
+       v2d_solid_color_ctrl0_reg_t solidcolor_ctrl0;
+       v2d_solid_color_ctrl1_reg_t solidcolor_ctrl1;
+       v2d_input_layer_width_height_reg_t layer_in_ori_w_h;
+       v2d_input_layer_ctrl_reg_t layer_in_ctrl;
+       v2d_input_layer_crop0_reg_t layer_in_crop0;
+       v2d_input_layer_crop1_reg_t layer_in_crop1;
+       v2d_input_layer_csc_ctrl0_reg_t layer_in_csc_ctrl0;
+       v2d_input_layer_csc_ctrl1_reg_t layer_in_csc_ctrl1;
+       v2d_input_layer_csc_ctrl2_reg_t layer_in_csc_ctrl2;
+       v2d_input_layer_scale_mode_reg_t layer_scale_mode;
+       v2d_input_layer_scale_delta_x_reg_t layer_scale_delta_x;
+       v2d_input_layer_scale_delta_y_reg_t layer_scale_delta_y;
+
+       bld_layer_ctrl1.overlay = 0;
+       bld_layer_ctrl3.overlay = 0;
+       bld_layer_factor.overlay = 0;
+       solidcolor_ctrl0.overlay = 0;
+       solidcolor_ctrl1.overlay = 0;
+       layer_in_ctrl.overlay = 0;
+       layer_in_csc_ctrl0.overlay = 0;
+       layer_in_csc_ctrl1.overlay = 0;
+       layer_in_csc_ctrl2.overlay = 0;
+       layer_scale_mode.overlay = 0;
+       layer_scale_delta_x.overlay = 0;
+       layer_scale_delta_y.overlay = 0;
+
+       if ((!pstV2DSurface->solidcolor.enable) && (pstV2DSurface->phyaddr_y_l == 0) && (!pstV2DSurface->fbc_enable)) {
+                       V2DLOGD("%s disable\n", (enInputLayer > 0 ? "layer1" : "layer0"));
+                       bld_layer_ctrl1.field.blend_en = V2D_FUNC_DISABLE;
+                       v2d_write(V2D_LAYER_BLD_CTRL1_LAYER(enInputLayer), bld_layer_ctrl1.overlay);
+       } else {
+               V2DLOGD("config %s\n", (enInputLayer > 0 ? "layer1" : "layer0"));
+               V2DLOGD("rot:%d,csc:%d\n", enRotateAngle, enCSCMode);
+               //blendlayer
+               pBlendLayerConf = &pstBlendConf->blendlayer[enInputLayer];
+               bld_layer_ctrl0.field.bld_alpha_source = pBlendLayerConf->blend_alpha_source;
+               bld_layer_ctrl0.field.bld_pre_alp_func = pBlendLayerConf->blend_pre_alpha_func;
+               bld_layer_ctrl0.field.bld_glb_alp = pBlendLayerConf->global_alpha;
+               bld_layer_ctrl0.field.scl_delta_y = 0;
+               bld_layer_ctrl1.field.blend_en = V2D_FUNC_ENABLE;
+               bld_layer_ctrl1.field.bld_rect_ltop_x = pBlendLayerConf->blend_area.x;
+               bld_layer_ctrl2.field.bld_rect_ltop_y = pBlendLayerConf->blend_area.y;
+               bld_layer_ctrl2.field.bld_rect_width = pBlendLayerConf->blend_area.w;
+               bld_layer_ctrl3.field.bld_rect_height = pBlendLayerConf->blend_area.h;
+               V2DLOGD("bld alpha_src:%d,pre_func:%d,glb_alpha:%d\n", pBlendLayerConf->blend_alpha_source, pBlendLayerConf->blend_pre_alpha_func, pBlendLayerConf->global_alpha);
+               V2DLOGD("bld_rect:(%d,%d,%d,%d)\n", pBlendLayerConf->blend_area.x, pBlendLayerConf->blend_area.y, pBlendLayerConf->blend_area.w, pBlendLayerConf->blend_area.h);
+               v2d_write(V2D_LAYER_BLD_CTRL0_LAYER(enInputLayer), bld_layer_ctrl0.overlay);
+               v2d_write(V2D_LAYER_BLD_CTRL1_LAYER(enInputLayer), bld_layer_ctrl1.overlay);
+               v2d_write(V2D_LAYER_BLD_CTRL2_LAYER(enInputLayer), bld_layer_ctrl2.overlay);
+               v2d_write(V2D_LAYER_BLD_CTRL3_LAYER(enInputLayer), bld_layer_ctrl3.overlay);
+
+               if (pstBlendConf->blend_cmd) {
+                       bld_layer_factor.field.bld_color_rop2_code = pBlendLayerConf->stRop2Code.colorRop2Code;
+                       bld_layer_factor.field.bld_alpha_rop2_code = pBlendLayerConf->stRop2Code.alphaRop2Code;
+               }
+               else {
+                       bld_layer_factor.field.bld_src_color_factor = pBlendLayerConf->stBlendFactor.srcColorFactor;
+                       bld_layer_factor.field.bld_src_alpha_factor = pBlendLayerConf->stBlendFactor.srcAlphaFactor;
+                       bld_layer_factor.field.bld_dst_color_factor = pBlendLayerConf->stBlendFactor.dstColorFactor;
+                       bld_layer_factor.field.bld_dst_alpha_factor = pBlendLayerConf->stBlendFactor.dstAlphaFactor;
+               }
+               V2DLOGD("bld factor:src_c=%d,src_a=%d,dst_c=%d,dst_a=%d\n", pBlendLayerConf->stBlendFactor.srcColorFactor, pBlendLayerConf->stBlendFactor.srcAlphaFactor,
+                               pBlendLayerConf->stBlendFactor.dstColorFactor, pBlendLayerConf->stBlendFactor.dstAlphaFactor);
+               v2d_write(V2D_LAYER_BLD_FACTOR_LAYER(enInputLayer), bld_layer_factor.overlay);
+
+               if (pstV2DSurface->solidcolor.enable) {//solid color
+                       pFillColor = &pstV2DSurface->solidcolor.fillcolor;
+                       split_fillcolor(pFillColor->colorvalue, pFillColor->format, chl);
+                       solidcolor_ctrl0.field.solid_en = V2D_FUNC_ENABLE;
+                       solidcolor_ctrl0.field.solid_R = chl[R];
+                       solidcolor_ctrl0.field.solid_G = chl[G];
+                       solidcolor_ctrl0.field.solid_B = chl[B];
+                       solidcolor_ctrl1.field.solid_A = chl[A];
+                       solidcolor_ctrl1.field.csc_en = V2D_FUNC_DISABLE;
+                       v2d_write(V2D_LAYER_SOLIDCOLOR_CTRL0_LAYER(enInputLayer), solidcolor_ctrl0.overlay);
+                       v2d_write(V2D_LAYER_SOLIDCOLOR_CTRL1_LAYER(enInputLayer), solidcolor_ctrl1.overlay);
+               }
+               else {  //input layer
+                       solidcolor_ctrl0.field.solid_en = V2D_FUNC_DISABLE;
+                       v2d_write(V2D_LAYER_SOLIDCOLOR_CTRL0_LAYER(enInputLayer), solidcolor_ctrl0.overlay);
+                       if (pstV2DSurface->fbc_enable) {
+                               ConfigV2dFbcDecoder(pstV2DSurface, enInputLayer);
+                       } else {
+                               v2d_write(V2D_LAYER_Y_ADDR_L_LAYER(enInputLayer),  pstV2DSurface->phyaddr_y_l);
+                               v2d_write(V2D_LAYER_Y_ADDR_H_LAYER(enInputLayer),  pstV2DSurface->phyaddr_y_h);
+                               v2d_write(V2D_LAYER_UV_ADDR_L_LAYER(enInputLayer), pstV2DSurface->phyaddr_uv_l);
+                               tmp = v2d_read(V2D_LAYER_UV_ADDR_H_LAYER(enInputLayer)) | (pstV2DSurface->phyaddr_uv_h & V2D_H_ADDR_MASK);
+                               v2d_write(V2D_LAYER_UV_ADDR_H_LAYER(enInputLayer), tmp);
+                       }
+                       layer_in_ori_w_h.field.layer_in_ori_width = pstV2DSurface->w;
+                       layer_in_ori_w_h.field.layer_in_ori_height = pstV2DSurface->h;
+                       v2d_write(V2D_LAYER_WIDTH_HEIGHT_LAYER(enInputLayer), layer_in_ori_w_h.overlay);
+
+                       layer_in_ctrl.field.stride = pstV2DSurface->stride;
+                       layer_in_ctrl.field.swap = do_swap(pstV2DSurface->format);
+                       layer_in_ctrl.field.format = fmt_convert(pstV2DSurface->format);
+                       layer_in_ctrl.field.rotation = enRotateAngle;
+                       layer_in_ctrl.field.fbc_en = pstV2DSurface->fbc_enable;
+                       v2d_write(V2D_LAYER_CTRL_LAYER(enInputLayer), layer_in_ctrl.overlay);
+                       //crop
+                       layer_in_crop0.field.layer_in_crop_ltop_x = pstV2DArea->x;
+                       layer_in_crop0.field.layer_in_crop_ltop_y = pstV2DArea->y;
+                       layer_in_crop1.field.layer_in_crop_width = pstV2DArea->w;
+                       layer_in_crop1.field.layer_in_crop_height = pstV2DArea->h;
+                       V2DLOGD("crop:(%d,%d,%d,%d)\n", pstV2DArea->x, pstV2DArea->y, pstV2DArea->w, pstV2DArea->h);
+                       v2d_write(V2D_LAYER_CROP_REG0_LAYER(enInputLayer), layer_in_crop0.overlay);
+                       v2d_write(V2D_LAYER_CROP_REG1_LAYER(enInputLayer), layer_in_crop1.overlay);
+                       //csc
+                       if (enCSCMode < V2D_CSC_MODE_BUTT) {
+                               layer_in_csc_ctrl0.field.csc_en = V2D_FUNC_ENABLE;
+                               pCscMatrix = &cscmatrix[enCSCMode][0][0];
+                               layer_in_csc_ctrl0.field.csc_matrix0 = pCscMatrix[0];
+                               for (i = 0; i < 5; i++) {
+                                       layer_in_csc_ctrl1.field.csc_matrix1 = pCscMatrix[2 * i + 1];
+                                       layer_in_csc_ctrl1.field.csc_matrix2 = pCscMatrix[2 * i + 2];
+                                       v2d_write(V2D_LAYER_CSC_CRTL1_LAYER(enInputLayer) + 0x4 * i, layer_in_csc_ctrl1.overlay);
+                               }
+                               layer_in_csc_ctrl2.field.csc_matrix11 = pCscMatrix[11];
+                               v2d_write(V2D_LAYER_CSC_CRTL6_LAYER(enInputLayer), layer_in_csc_ctrl2.overlay);
+                       }
+                       else {
+                               layer_in_csc_ctrl0.field.csc_en = V2D_FUNC_DISABLE;
+                       }
+                       v2d_write(V2D_LAYER_CSC_CRTL0_LAYER(enInputLayer), layer_in_csc_ctrl0.overlay);
+                       //scale
+                       if (enRotateAngle == V2D_ROT_90 || enRotateAngle == V2D_ROT_270) {
+                               width = pstV2DArea->h; height = pstV2DArea->w;
+                       }
+                       else {
+                               width = pstV2DArea->w; height = pstV2DArea->h;
+                       }
+                       if (width == pBlendLayerConf->blend_area.w && height == pBlendLayerConf->blend_area.h) {
+                               enScaleMode = V2D_NO_SCALE;
+                       }
+                       else if (width > pBlendLayerConf->blend_area.w || height > pBlendLayerConf->blend_area.h) {
+                               enScaleMode = V2D_SCALE_DOWN;
+                       }
+                       else if (width < pBlendLayerConf->blend_area.w || height < pBlendLayerConf->blend_area.h) {
+                               enScaleMode = V2D_SCALE_UP;
+                       }
+                       V2DLOGD("scale:%d\n", enScaleMode);
+                       layer_scale_mode.overlay = layer_in_csc_ctrl2.overlay;
+                       layer_scale_mode.field.scl_mode = enScaleMode;
+                       v2d_write(V2D_LAYER_SCALE_MODE_LAYER(enInputLayer), layer_scale_mode.overlay);
+                       if (enScaleMode) {
+                               layer_scale_delta_x.field.scl_delta_x = (width << 16) / pBlendLayerConf->blend_area.w;
+                               layer_scale_delta_y.overlay = bld_layer_ctrl0.overlay;
+                               layer_scale_delta_y.field.scl_delta_y = (height << 16) / pBlendLayerConf->blend_area.h;
+                               v2d_write(V2D_LAYER_SCALE_DELTA_X_LAYER(enInputLayer), layer_scale_delta_x.overlay);
+                               v2d_write(V2D_LAYER_SCALE_DELTA_Y_LAYER(enInputLayer), layer_scale_delta_y.overlay);
+                       }
+                       else {
+                               layer_scale_delta_x.field.scl_delta_x = (1 << 16);
+                               layer_scale_delta_y.overlay = bld_layer_ctrl0.overlay;
+                               layer_scale_delta_y.field.scl_delta_y = (1 << 16);
+                               v2d_write(V2D_LAYER_SCALE_DELTA_X_LAYER(enInputLayer), layer_scale_delta_x.overlay);
+                               v2d_write(V2D_LAYER_SCALE_DELTA_Y_LAYER(enInputLayer), layer_scale_delta_y.overlay);
+                       }
+               }
+       }
+}
+
+/* Program the optional mask layer and its blend window.
+ * A zero mask Y address means "no mask": only the enable bit is cleared.
+ */
+static void ConfigV2dMaskLayer(V2D_SURFACE_S *pstMask, V2D_AREA_S *pstMaskRect, V2D_BLEND_CONF_S *pstBlendConf)
+{
+	v2d_mask_width_reg_t mask_in_width;
+	v2d_mask_height_reg_t mask_in_height;
+	v2d_mask_crop0_reg_t mask_in_crop0;
+	v2d_mask_crop1_reg_t mask_in_crop1;
+	v2d_blend_mask_ctrl0_reg_t bld_mask_ctrl0;
+	v2d_blend_mask_ctrl1_reg_t bld_mask_ctrl1;
+	v2d_blend_mask_ctrl2_reg_t bld_mask_ctrl2;
+
+	/*
+	 * Zero every register image up front: these are stack variables, and
+	 * any bit-field not explicitly assigned below would otherwise carry
+	 * indeterminate bits into the hardware write (previously only three
+	 * of the seven overlays were initialized).
+	 */
+	mask_in_width.overlay = 0;
+	mask_in_height.overlay = 0;
+	mask_in_crop0.overlay = 0;
+	mask_in_crop1.overlay = 0;
+	bld_mask_ctrl0.overlay = 0;
+	bld_mask_ctrl1.overlay = 0;
+	bld_mask_ctrl2.overlay = 0;
+
+	if (pstMask->phyaddr_y_l != 0) {
+		V2DLOGD("ConfigV2dMaskLayer\n");
+		/* Source surface geometry; the two high address bits ride in the
+		 * width register. */
+		mask_in_width.field.mask_addr_33_32 = (pstMask->phyaddr_y_h & V2D_H_ADDR_MASK);
+		mask_in_width.field.mask_ori_width = pstMask->w;
+		mask_in_height.field.mask_ori_height = pstMask->h;
+		mask_in_height.field.mask_ori_stride = pstMask->stride;
+		/* Crop window inside the mask surface. */
+		mask_in_crop0.field.mask_crop_ltop_x = pstMaskRect->x;
+		mask_in_crop0.field.mask_crop_ltop_y = pstMaskRect->y;
+		mask_in_crop1.field.mask_crop_width = pstMaskRect->w;
+		mask_in_crop1.field.mask_crop_height = pstMaskRect->h;
+		/* Destination rectangle the mask applies to during blending. */
+		bld_mask_ctrl0.field.bld_mask_enable = pstBlendConf->mask_cmd;
+		bld_mask_ctrl0.field.bld_mask_rect_ltop_x = pstBlendConf->blend_mask_area.x;
+		bld_mask_ctrl1.field.bld_mask_rect_ltop_y = pstBlendConf->blend_mask_area.y;
+		bld_mask_ctrl1.field.bld_mask_rect_width = pstBlendConf->blend_mask_area.w;
+		bld_mask_ctrl2.field.bld_mask_rect_height = pstBlendConf->blend_mask_area.h;
+		v2d_write(V2D_MASK_ADDR_L, pstMask->phyaddr_y_l);
+		v2d_write(V2D_MASK_WIDTH,  mask_in_width.overlay);
+		v2d_write(V2D_MASK_HEIGHT, mask_in_height.overlay);
+		v2d_write(V2D_MASK_CROP_REG0, mask_in_crop0.overlay);
+		v2d_write(V2D_MASK_CROP_REG1, mask_in_crop1.overlay);
+		v2d_write(V2D_BLD_MASK_REG0,  bld_mask_ctrl0.overlay);
+		v2d_write(V2D_BLD_MASK_REG1,  bld_mask_ctrl1.overlay);
+		v2d_write(V2D_BLD_MASK_REG2,  bld_mask_ctrl2.overlay);
+	}
+	else {
+		V2DLOGD("mask layer disable\n");
+		bld_mask_ctrl0.field.bld_mask_enable = V2D_FUNC_DISABLE;
+		v2d_write(V2D_BLD_MASK_REG0,  bld_mask_ctrl0.overlay);
+	}
+}
+
+/* Load the L8 palette lookup table into hardware.
+ * The palette blob is an array of 4- (RGBA), 3- (RGB) or 2-byte (packed
+ * 5-6-5 -- TODO confirm channel order against hw spec) entries; each entry
+ * is expanded to a 32-bit R|G<<8|B<<16|A<<24 word. No-op when the palette
+ * is empty.
+ */
+static void ConfigPaletteTable(V2D_SURFACE_S *pstBackGround, V2D_SURFACE_S *pstForeGround, V2D_PALETTE_S *pstPalette)
+{
+	int i, bpp;
+	uint32_t val;
+	uint8_t *pTmp, r, g, b, a;
+
+	if (pstPalette->len != 0) {
+		V2DLOGD("ConfigPaletteTable\n");
+		pTmp = pstPalette->palVal;
+		bpp = Get_L8_Palette_bytePerPixel(pstBackGround, pstForeGround);
+		V2DLOGD("bpp:%d, palette len:%d\n", bpp, pstPalette->len);
+		/*
+		 * Only 2/3/4 bytes-per-pixel palettes are supported.  Bail out
+		 * on anything else: the loop below would otherwise divide by
+		 * zero (bpp == 0) or read r/g/b/a uninitialized.
+		 */
+		if (bpp != 2 && bpp != 3 && bpp != 4) {
+			V2DLOGE("unsupported palette bpp:%d\n", bpp);
+			return;
+		}
+		for (i = 0; i < (pstPalette->len / bpp); i++) {
+			if (bpp == 4) {
+				r = *(pTmp + 4 * i);  g = *(pTmp + 4 * i + 1);
+				b = *(pTmp + 4 * i + 2); a = *(pTmp + 4 * i + 3);
+			}
+			else if (bpp == 3) {
+				/* RGB palette: alpha defaults to opaque. */
+				r = *(pTmp + 3 * i);   g = *(pTmp + 3 * i + 1);
+				b = *(pTmp + 3 * i + 2); a = 0xFF;
+			}
+			else {	/* bpp == 2: 16-bit packed 5/6/5 channels */
+				r = *(pTmp + 2 * i) & 0x1F;
+				g = ((*(pTmp + 2 * i) >> 5) & 0x7) | ((*(pTmp + 2 * i + 1) & 0x7) << 3);
+				b = (*(pTmp + 2 * i + 1) >> 3) & 0x1F;
+				a = 0xFF;
+			}
+			/* Widen before shifting: 0xFF << 24 overflows signed int. */
+			val = r | ((uint32_t)g << 8) | ((uint32_t)b << 16) | ((uint32_t)a << 24);
+			v2d_write(V2D_PALETTE_TABLE(i), val);
+		}
+	}
+}
+
+/* Program the global blend mode and the background fill color registers. */
+static void ConfigV2dBlendMode_And_BgColor(V2D_BLEND_CONF_S *pstBlendConf)
+{
+	V2D_FILLCOLOR_S *pFillColor = &pstBlendConf->bgcolor.fillcolor;
+	uint8_t rgba[4];
+	v2d_blend_ctrl0_reg_t ctrl0;
+	v2d_blend_ctrl1_reg_t ctrl1;
+
+	V2DLOGD("ConfigV2dBlendMode_And_BgColor\n");
+	ctrl0.overlay = 0;
+	ctrl1.overlay = 0;
+
+	/* The background color register always takes RGBA8888, regardless of
+	 * the layer formats, so split the fill value accordingly. */
+	split_fillcolor(pFillColor->colorvalue, V2D_COLOR_FORMAT_RGBA8888, rgba);
+	V2DLOGD("bgcolor_en:%d,r:%d,g:%d,b:%d,alpha:%d\n",  pstBlendConf->bgcolor.enable, rgba[R], rgba[G], rgba[B], rgba[A]);
+
+	ctrl0.field.bld_mode = pstBlendConf->blend_cmd;
+	ctrl0.field.bld_bg_enable = pstBlendConf->bgcolor.enable;
+	ctrl0.field.bld_bg_r = rgba[R];
+	ctrl0.field.bld_bg_g = rgba[G];
+	ctrl0.field.bld_bg_b = rgba[B];
+	ctrl1.field.bld_bg_a = rgba[A];
+
+	v2d_write(V2D_BLEND_REG0, ctrl0.overlay);
+	v2d_write(V2D_BLEND_REG1, ctrl1.overlay);
+}
+
+/* Program the output (write-back) surface: geometry, format, crop window,
+ * dithering and either the FBC encoder or the plain output addresses.
+ */
+static void ConfigV2dOutput(V2D_SURFACE_S *pstDst,
+							V2D_AREA_S *pstDstRect,
+							V2D_CSC_MODE_E enForeCSCMode,
+							V2D_CSC_MODE_E enBackCSCMode,
+							V2D_DITHER_E dither)
+{
+	v2d_output_width_reg_t output_width;
+	v2d_output_height_reg_t output_height;
+	v2d_output_ctrl0_reg_t output_ctrl0;
+	v2d_output_ctrl1_reg_t output_ctrl1;
+	v2d_output_ctrl2_reg_t output_ctrl2;
+
+	V2DLOGD("config output\n");
+	/*
+	 * Zero all five register images: output_height and output_ctrl1 were
+	 * previously left uninitialized, so bits outside the assigned fields
+	 * carried stack garbage into the hardware write.
+	 */
+	output_width.overlay = 0;
+	output_height.overlay = 0;
+	output_ctrl0.overlay = 0;
+	output_ctrl1.overlay = 0;
+	output_ctrl2.overlay = 0;
+	/* Surface geometry; the two high UV address bits ride in the width
+	 * register. */
+	output_width.field.out_ori_width = pstDst->w;
+	output_width.field.out_addr_uv_33_32 = (pstDst->phyaddr_uv_h & V2D_H_ADDR_MASK);
+	output_height.field.out_ori_height = pstDst->h;
+	output_height.field.out_ori_stride = pstDst->stride;
+	output_ctrl0.field.format = fmt_convert(pstDst->format);
+	output_ctrl0.field.range = do_narrow(enForeCSCMode, enBackCSCMode);
+	output_ctrl0.field.dither = dither;
+	output_ctrl0.field.swap = do_swap(pstDst->format);
+	output_ctrl0.field.fbc_en = pstDst->fbc_enable;
+	/* Output crop rectangle. */
+	output_ctrl0.field.crop_ltop_x = pstDstRect->x;
+	output_ctrl1.field.crop_ltop_y = pstDstRect->y;
+	output_ctrl1.field.crop_width = pstDstRect->w;
+	output_ctrl2.field.crop_height = pstDstRect->h;
+	V2DLOGD("dst:w=%d,h=%d\n", pstDst->w, pstDst->h);
+	V2DLOGD("crop=(%d,%d,%d,%d)\n", pstDstRect->x, pstDstRect->y, pstDstRect->w, pstDstRect->h);
+	V2DLOGD("dst:fmt=%d, dither:%d,narrow=%d, swap=%d, stride=%d\n",
+			output_ctrl0.field.format, output_ctrl0.field.dither, output_ctrl0.field.range, output_ctrl0.field.swap, pstDst->stride);
+
+	v2d_write(V2D_OUTPUT_WIDTH,  output_width.overlay);
+	v2d_write(V2D_OUTPUT_HEIGHT, output_height.overlay);
+	v2d_write(V2D_OUTPUT_CRTL0,  output_ctrl0.overlay);
+	v2d_write(V2D_OUTPUT_CRTL1,  output_ctrl1.overlay);
+	v2d_write(V2D_OUTPUT_CRTL2,  output_ctrl2.overlay);
+	if (pstDst->fbc_enable) {
+		ConfigV2dFbcEncoder(pstDst);
+	} else {
+		v2d_write(V2D_OUTPUT_Y_ADDR_L, pstDst->phyaddr_y_l);
+		v2d_write(V2D_OUTPUT_Y_ADDR_H, pstDst->phyaddr_y_h);
+		v2d_write(V2D_OUTPUT_UV_ADDR_L, pstDst->phyaddr_uv_l);
+	}
+}
+
+/* Program the V2D DMA controller arbitration and QoS defaults. */
+static void ConfigV2dDmac(void)
+{
+	v2d_dma_ctrl_reg_t reg;
+
+	reg.overlay = 0;
+	reg.field.dmac_arb_mode    = 2;		/* arbitration mode */
+	reg.field.dmac_max_req_num = 7;		/* max outstanding requests */
+	reg.field.dmac_postwr_en   = 255;	/* posted-write enable mask */
+	reg.field.dmac_rst_n_pwr   = 1;
+	reg.field.dmac_arqos       = 2;		/* AXI read QoS */
+	reg.field.dmac_awqos       = 2;		/* AXI write QoS */
+	v2d_write(V2D_DMA_CTRL, reg.overlay);
+}
+
+/* Arm the FBC decoder/encoder triggers (when compression is in use) and
+ * finally kick the V2D engine via the main control register.
+ */
+static void TriggerV2dRun(V2D_PARAM_S *pParam)
+{
+	v2d_fbc_decoder_trigger_reg_t dec;
+	v2d_fbc_encoder_trigger_reg_t enc;
+	v2d_ctrl_reg_t ctrl;
+
+	/* Decoder trigger word is identical for both input layers. */
+	dec.overlay = 0;
+	dec.field.direct_swap = 1;
+	if (pParam->layer0.fbc_enable)
+		v2d_write(V2D_L0_DEC_REG10, dec.overlay);
+	if (pParam->layer1.fbc_enable)
+		v2d_write(V2D_L1_DEC_REG10, dec.overlay);
+
+	if (pParam->dst.fbc_enable) {
+		enc.overlay = 0;
+		enc.field.direct_swap = 1;
+		v2d_write(V2D_ENC_REG12, enc.overlay);
+	}
+
+	/* Burst lengths plus the trigger bit start the job. */
+	ctrl.overlay = 0;
+	ctrl.field.rdma_burst_len = 4;
+	ctrl.field.wdma_burst_len = 16;
+	ctrl.field.trigger        = 1;
+	v2d_write(V2D_CTRL_REG, ctrl.overlay);
+}
+
+/* Top-level hardware setup for one submitted V2D task: programs both input
+ * layers, palette, mask, blending, output, DMA/QoS, then triggers the run.
+ */
+void config_v2d_hw(V2D_SUBMIT_TASK_S *pTask)
+{
+	V2D_PARAM_S *p = &pTask->param;
+
+	/* Load the scaler coefficient tables before any layer setup. */
+	v2d_scaler_coef_init();
+	/* Background (layer0) and foreground (layer1) input layers. */
+	ConfigV2dInputLayer(&p->layer0, &p->l0_rect, &p->blendconf, p->l0_rt, p->l0_csc, V2D_INPUT_LAYER0);
+	ConfigV2dInputLayer(&p->layer1, &p->l1_rect, &p->blendconf, p->l1_rt, p->l1_csc, V2D_INPUT_LAYER1);
+	/* L8 palette lookup table (no-op when palette length is zero). */
+	ConfigPaletteTable(&p->layer0, &p->layer1, &p->palette);
+	/* Optional mask layer. */
+	ConfigV2dMaskLayer(&p->mask, &p->mask_rect, &p->blendconf);
+	/* Blend mode and background color. */
+	ConfigV2dBlendMode_And_BgColor(&p->blendconf);
+	/* Write-back surface (foreground CSC mode first, then background). */
+	ConfigV2dOutput(&p->dst, &p->dst_rect, p->l1_csc, p->l0_csc, p->dither);
+	/* DMA controller defaults. */
+	ConfigV2dDmac();
+	/* Raise AXI QoS for the V2D master. */
+	ConfigAxiBus();
+	/* Kick the engine. */
+	TriggerV2dRun(p);
+	V2DLOGD("v2d config done\n");
+}
+
diff --git a/drivers/soc/spacemit/v2d/v2d_iommu.c b/drivers/soc/spacemit/v2d/v2d_iommu.c
new file mode 100644 (file)
index 0000000..f26ea81
--- /dev/null
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+* V2D mmu driver for Spacemit
+* Copyright (C) 2023 Spacemit Co., Ltd.
+*
+*/
+#include "v2d_priv.h"
+#include "v2d_reg.h"
+
+struct v2d_iommu_res sV2dIommuRes;
+
+/* Translate a CPU physical address into the V2D master's view of the bus:
+ * addresses at or above 4 GiB are shifted down by 2 GiB; everything below
+ * passes through unchanged.
+ */
+unsigned long phys_cpu2v2d(unsigned long phys_addr)
+{
+	return (phys_addr >= 0x100000000UL) ? phys_addr - 0x80000000UL
+					    : phys_addr;
+}
+
+/* MMIO read relative to the IOMMU register window.
+ * Marked static inline for consistency with the sibling __write_reg
+ * accessor (both are trivial wrappers around readl/writel).
+ */
+static inline u32 __read_reg(struct v2d_iommu_res *res, u64 offset)
+{
+	return readl(res->base + offset + V2D_IOMMU_BASE_OFFSET);
+}
+
+/* MMIO write relative to the IOMMU register window. */
+static inline void __write_reg(struct v2d_iommu_res *res, u64 offset, u32 val)
+{
+	writel(val, res->base + V2D_IOMMU_BASE_OFFSET + offset);
+}
+
+/* Read-modify-write: turn the requested bits on in an IOMMU register. */
+static void __set_reg_bits(struct v2d_iommu_res *res, u64 offset, u32 bits)
+{
+	u32 cur = __read_reg(res, offset);
+
+	__write_reg(res, offset, cur | bits);
+}
+
+/* Read-modify-write: turn the requested bits off in an IOMMU register. */
+static void __clear_reg_bits(struct v2d_iommu_res *res, u64 offset, u32 bits)
+{
+	u32 cur = __read_reg(res, offset);
+
+	__write_reg(res, offset, cur & ~bits);
+}
+
+/* One-time bring-up of the V2D IOMMU hardware.
+ * Resets the software state of every TBU instance, programs the IOVA base
+ * and timeout registers, and marks the block enabled. Idempotent: does
+ * nothing when already enabled. Always returns 0.
+ */
+static int __enable_spacemit_iommu_hw(struct v2d_iommu_res *res)
+{
+	int i;
+	struct tbu_instance *tbu;
+
+	if (res->is_hw_enable == false) {
+		/* Reset per-TBU software defaults (nothing written to hw yet). */
+		for (i = 0; i < TBU_INSTANCES_NUM; i++) {
+			tbu = &res->tbu_ins[i];
+			tbu->ttb_size = 0;
+			tbu->always_preload = false;
+			tbu->enable_preload = true;
+			tbu->nsaid = 0;
+			tbu->qos = 2;
+			tbu->secure_enable = false;
+		}
+		/* No TBU instance selected for mapping yet. */
+		res->tbu_ins_map = -1;
+
+		/* Set V2D_MMU iova base */
+		__write_reg(res, V2D_MMU_BVA_LO, res->va_base&0xFFFFFFFF);
+		__write_reg(res, V2D_MMU_BVA_HI, res->va_base>>32);
+
+		/* Set V2D_MMU timeout cycles */
+		__write_reg(res, V2D_MMU_TIMEOUT_VALUE, res->time_out_cycs);
+
+		/* Enable V2D_MMU irq */
+		/* NOTE(review): OR-ing 0x00 into the register is a no-op, so no
+		 * interrupt sources are actually enabled here; the disable path
+		 * clears 0x1FF. Confirm whether 0x1FF was intended. */
+		__set_reg_bits(res, V2D_MMU_IRQ_ENABLE, 0x00);
+
+		res->is_hw_enable = true;
+	}
+
+	return 0;
+}
+
+/* Shut the V2D IOMMU down: drop the software enable flag, forget every
+ * cached TTB size, disable all hardware TBU slots and mask the MMU IRQs.
+ */
+static void __disable_spacemit_iommu_hw(struct v2d_iommu_res *res)
+{
+	int i;
+
+	/* Waiting for post done. */
+	res->is_hw_enable = false;
+	for (i = 0; i < TBU_INSTANCES_NUM; i++)
+		res->tbu_ins[i].ttb_size = 0;
+
+	/* Disable all TBUs. */
+	for (i = 0; i < TBU_NUM; i++)
+		__write_reg(res, V2D_MMU_TCR0_BASE + V2D_MMU_TBUx_STEP * i, 0);
+
+	/* Disable V2D_MMU irq. */
+	__clear_reg_bits(res, V2D_MMU_IRQ_ENABLE, 0x1FF);
+}
+
+/* Fill translation-table entries for a physically contiguous chunk.
+ * One 32-bit entry per page: the page frame number (PA >> TTB_ENTRY_SHIFT),
+ * masked to 21 bits. The PA is first folded into the low 32 bits (see
+ * phys_cpu2v2d for the bus-view offset -- TODO confirm this truncation is
+ * intended for the >4G case).
+ */
+static void __write_tbu_table(struct v2d_iommu_res *res, struct tbu_instance *tbu,
+	unsigned long iova, phys_addr_t paddr, size_t size)
+{
+	u32 *ttb_entry;
+	uint64_t mask  = 0;
+	uint32_t val;
+
+	/* Entry mask depends on the translation granule (4K vs 64K pages). */
+	mask = (res->page_size == 4096) ? 0xFFFFFFFFFFFFF000 : 0xFFFFFFFFFFFF0000;
+	ttb_entry = tbu->ttb_va + (iova - tbu->va_base) / res->page_size;
+	/*
+	 * Iterate whole pages. The previous `while (size != 0)` combined with
+	 * `size -= page_size` would wrap the unsigned size_t (and run nearly
+	 * forever, scribbling past the table) if size were ever not a
+	 * page-size multiple; callers only validate the starting offset.
+	 */
+	while (size >= res->page_size) {
+		paddr = paddr & 0xFFFFFFFF;
+		val = ((paddr & mask) >> TTB_ENTRY_SHIFT) & 0x1FFFFF;
+		*ttb_entry = val;
+		size -= res->page_size;
+		ttb_entry++;
+		paddr += res->page_size;
+	}
+}
+
+/* End a mapping session on the global V2D IOMMU instance: disables all
+ * TBUs and masks the MMU interrupts (see __disable_spacemit_iommu_hw).
+ */
+void v2d_iommu_map_end(void)
+{
+	__disable_spacemit_iommu_hw(&sV2dIommuRes);
+}
+
+/* Commit `num` TBU instances (indices in ins_id[]) to the hardware.
+ * Each instance is placed in the hardware slot derived from its VA window
+ * position; slots not mentioned are left untouched. The MMIO programming
+ * order (TTB base lo/hi, then TCR) is preserved as-is.
+ */
+static void v2d_iommu_post(struct v2d_iommu_res *res, int *ins_id, int num)
+{
+	u32 reg;
+	struct tbu_instance *tbu;
+	int i, tbu_slot[TBU_NUM];
+
+	/* -1 marks a slot with nothing to post. */
+	for (i = 0; i < TBU_NUM; i++)
+		tbu_slot[i] = -1;
+
+	/* Map each posted instance to its hardware slot by VA window. */
+	for (i = 0; i < num; i++) {
+		int index;
+		tbu = &res->tbu_ins[ins_id[i]];
+		index = (tbu->va_base - res->va_base) / VA_STEP_PER_TBU;
+		tbu_slot[index] = ins_id[i];
+	}
+
+	/* Nothing to program while the IOMMU block is disabled. */
+	if (!res->is_hw_enable) {
+		return;
+	}
+
+	for (i = 0; i < TBU_NUM; i++) {
+		if (tbu_slot[i] != -1) {
+			tbu = &res->tbu_ins[tbu_slot[i]];
+			if (tbu->ttb_size == 0) {
+				/* Empty table: disable this TBU slot. */
+				__write_reg(res, V2D_MMU_TCR0_BASE+i*V2D_MMU_TBUx_STEP, 0);
+			} else {
+				/* Translation-table base, low then high half. */
+				__write_reg(res, V2D_MMU_TTBLR_BASE+i*V2D_MMU_TBUx_STEP, tbu->ttb_pa & 0xFFFFFFFF);
+				__write_reg(res, V2D_MMU_TTBHR_BASE+i*V2D_MMU_TBUx_STEP, tbu->ttb_pa >> 32);
+
+				/* TCR layout (per the bit usage below): entry
+				 * count-1 in [31:16], qos in [7:4], bit3
+				 * always-preload, bit2 preload enable, bit1
+				 * 64K granule, bit0 enable. */
+				reg = (tbu->ttb_size - 1) << 16;
+				if (tbu->always_preload)
+					reg |= BIT(3);
+				if (tbu->enable_preload)
+					reg |= BIT(2);
+				reg |= (tbu->qos << 4);
+				if (res->page_size == SZ_64K)
+					reg |= BIT(1);
+				reg |= BIT(0);
+				__write_reg(res, V2D_MMU_TCR0_BASE+i*V2D_MMU_TBUx_STEP, reg);
+			}
+		}
+	}
+}
+
+/* Map a scatter-gather list at `iova` into the V2D IOMMU.
+ * Returns the number of bytes mapped, or 0 on any error. A single-entry
+ * list whose iova is already beyond the managed window is passed through
+ * untranslated (returns its length). `prot` is currently unused.
+ */
+int v2d_iommu_map_sg(unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot)
+{
+	struct v2d_iommu_res *res = &sV2dIommuRes;
+	struct tbu_instance *tbu;
+	struct scatterlist *s;
+	unsigned int i;
+	phys_addr_t paddr;
+	size_t size;
+	unsigned long orig_iova = iova;
+
+	/* Outside the IOVA window: treat as identity-mapped. */
+	if ((iova >= res->va_end) && (nents == 1))
+		return sg->length;
+
+	__enable_spacemit_iommu_hw(res);
+	/* Select the TBU instance whose 32 MiB window contains iova. */
+	res->tbu_ins_map = (iova - BASE_VIRTUAL_ADDRESS) / VA_STEP_PER_TBU;
+	pr_debug("tbu ins map:%d\n", res->tbu_ins_map);
+
+	if (res->tbu_ins_map < 0 || res->tbu_ins_map >= TBU_INSTANCES_NUM)
+		goto out_id_err;
+
+	tbu = &res->tbu_ins[res->tbu_ins_map];
+
+	/* First use of this instance: pin its VA window. */
+	if (tbu->ttb_size == 0) {
+		int index;
+		if (iova < res->va_base || iova >= res->va_end)
+			goto out_iova_err;
+
+		index = (iova - res->va_base) / VA_STEP_PER_TBU;
+		tbu->va_base = res->va_base + index * VA_STEP_PER_TBU;
+		tbu->va_end = tbu->va_base + VA_STEP_PER_TBU;
+	}
+
+	if (iova < tbu->va_base || iova >= tbu->va_end)
+		goto out_iova_err;
+
+	/* Write one run of table entries per SG segment. */
+	for_each_sg(sg, s, nents, i) {
+		paddr = phys_cpu2v2d(page_to_phys(sg_page(s))) + s->offset;
+		size = s->length;
+		/* %zx: size is size_t (the previous %lx mismatched the type). */
+		if (!IS_ALIGNED(s->offset, res->page_size)) {
+			pr_err("v2d iommu paddr not aligned: iova %lx, paddr %llx, size %zx\n",
+				iova, paddr, size);
+			goto out_region_err;
+		}
+
+		if (iova+size > tbu->va_end || size == 0)
+			goto out_region_err;
+
+		__write_tbu_table(res, tbu, iova, paddr, size);
+		iova += size;
+	}
+
+	/* Grow the cached table size if this mapping extended it. */
+	if (iova > tbu->va_base + res->page_size * tbu->ttb_size)
+		tbu->ttb_size = (iova - tbu->va_base) / res->page_size;
+
+	/* Push the updated table to hardware. */
+	v2d_iommu_post(res, &res->tbu_ins_map, 1);
+
+	return (iova - orig_iova);
+
+out_region_err:
+	pr_err("v2d map_sg is wrong: iova %lx, paddr %llx, size %zx\n",
+			iova, paddr, size);
+	return 0;
+
+out_iova_err:
+	pr_err("v2d map_sg is wrong: iova %lx\n", iova);
+
+	return 0;
+
+out_id_err:
+	pr_err("v2d tbu ins_id is wrong: %d\n", res->tbu_ins_map);
+
+	return 0;
+}
+
+/* Decode, log and clear a V2D IOMMU fault interrupt.
+ * Reads the IRQ status, prints the last PA/VA the MMU touched, the timeout
+ * VA (bit 8) and any per-TBU fault addresses (bits 0..TBU_NUM-1), clears
+ * the DMA error latch, then acknowledges the status register. No-op when
+ * no status bits are set.
+ */
+void iommu_irq_reset(void)
+{
+	u64 last_va, last_pa;
+	u32 IRQ_status;
+	u32 reg;
+	int i;
+	struct v2d_iommu_res *res = &sV2dIommuRes;
+
+	IRQ_status = __read_reg(res, V2D_MMU_IRQ_STATUS);
+
+	if (IRQ_status == 0) {
+		return;
+	}
+
+	/* Addresses are 33 bits: one high bit plus a 32-bit low half. */
+	reg = __read_reg(res, V2D_MMU_LAST_PA_ADDR_HI);
+	last_pa = reg & 0x1;
+	reg = __read_reg(res, V2D_MMU_LAST_PA_ADDR_LO);
+	last_pa = (last_pa << 32) | reg;
+	reg = __read_reg(res, V2D_MMU_LAST_VA_ADDR_HI);
+	last_va = reg & 0x1;
+	reg = __read_reg(res, V2D_MMU_LAST_VA_ADDR_LO);
+	last_va = (last_va << 32) | reg;
+
+	/* Print IRQ status. */
+	pr_err("V2d Iommu Unexpected fault: IRQ status 0x%x, last PA 0x%09llx, last VA 0x%09llx\n", IRQ_status, last_pa, last_va);
+
+	/* Bit 8: translation timeout. */
+	if (IRQ_status & BIT(8)) {
+		u64 timeout_va_addr;
+		reg = __read_reg(res, V2D_MMU_TIMEOUT_VA_ADDR_HI);
+		timeout_va_addr = reg & 0x1;
+		reg = __read_reg(res, V2D_MMU_TIMEOUT_VA_ADDR_LO);
+		timeout_va_addr = (timeout_va_addr << 32) | reg;
+		pr_err("v2d iommu timeout error: timeout_va 0x%09llx\n", timeout_va_addr);
+	}
+
+	/* Bits 0..TBU_NUM-1: per-TBU faults.
+	 * NOTE(review): the read-address mask (0xFFF) and write-address mask
+	 * (0x1FFF) differ -- confirm field widths against the hw spec. */
+	for (i = 0; i < TBU_NUM; i++) {
+		if (IRQ_status & BIT(i)) {
+			reg = __read_reg(res,
+				V2D_MMU_TBU_STATUS_BASE+i*V2D_MMU_TBUx_STEP);
+			pr_err("V2d Iommu TBU%d error: read addr 0x%08x, write addr 0x%08x\n",
+					i, ((reg >> 16) & 0xFFF), reg &0x1FFF);
+		}
+	}
+
+	/* clear DMA error */
+	if (IRQ_status & 0xFF)
+		__set_reg_bits(res, V2D_MMU_ERROR_CLEAR, BIT(1));
+
+	/* reset IRQ status */
+	/* Write-1-to-clear acknowledge of the handled bits. */
+	__write_reg(res, V2D_MMU_IRQ_STATUS, IRQ_status);
+}
+
diff --git a/drivers/soc/spacemit/v2d/v2d_priv.h b/drivers/soc/spacemit/v2d/v2d_priv.h
new file mode 100644 (file)
index 0000000..6a88604
--- /dev/null
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __SPACEMIT_V2D_PRIV_H__
+#define __SPACEMIT_V2D_PRIV_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/miscdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/genalloc.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/pm_qos.h>
+#include <linux/reset.h>
+
+/*
+ * Compile-time log-level selection: defining V2D_PRINT_<LEVEL> enables that
+ * level plus every more severe one via the cascade below. Debug (the most
+ * verbose) is enabled here. V2DLOGD maps onto pr_debug, so debug output
+ * still requires dynamic debug / DEBUG at runtime.
+ */
+#define V2D_PRINT_DEBUG
+#define V2D_FALSE 0
+#define V2D_TRUE  1
+
+#ifdef V2D_PRINT_ERROR
+#define V2D_LOG_LEVEL_ERROR
+#endif
+
+#ifdef V2D_PRINT_WARNING
+#define V2D_LOG_LEVEL_ERROR
+#define V2D_LOG_LEVEL_WARNING
+#endif
+
+#ifdef V2D_PRINT_INFO
+#define V2D_LOG_LEVEL_ERROR
+#define V2D_LOG_LEVEL_WARNING
+#define V2D_LOG_LEVEL_INFO
+#endif
+
+#ifdef V2D_PRINT_DEBUG
+#define V2D_LOG_LEVEL_ERROR
+#define V2D_LOG_LEVEL_WARNING
+#define V2D_LOG_LEVEL_INFO
+#define V2D_LOG_LEVEL_DEBUG
+#endif
+
+/* Per-level wrappers; disabled levels compile to nothing. */
+#ifdef V2D_LOG_LEVEL_ERROR
+#define V2DLOGE(fmt, ...) pr_err(fmt, ## __VA_ARGS__)
+#else
+#define V2DLOGE(fmt, ...)
+#endif
+
+#ifdef V2D_LOG_LEVEL_WARNING
+#define V2DLOGW(fmt, ...) pr_warn(fmt, ## __VA_ARGS__)
+#else
+#define V2DLOGW(fmt, ...)
+#endif
+
+#ifdef V2D_LOG_LEVEL_INFO
+#define V2DLOGI(fmt, ...) pr_info(fmt, ## __VA_ARGS__)
+#else
+#define V2DLOGI(fmt, ...)
+#endif
+
+#ifdef V2D_LOG_LEVEL_DEBUG
+#define V2DLOGD(fmt, ...) pr_debug(fmt, ## __VA_ARGS__)
+#else
+#define V2DLOGD(fmt, ...)
+#endif
+/* Fence wait timeouts in milliseconds. */
+#define V2D_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
+#define V2D_LONG_FENCE_TIMEOUT (2 * MSEC_PER_SEC)
+/* When defined, the DDR bandwidth accounting hooks are compiled out. */
+#define V2D_DISABLE_BAND_CAL
+/* Per-device driver state for the V2D engine. */
+struct v2d_info {
+	struct platform_device  *pdev;		/* owning platform device */
+	struct miscdevice       mdev;		/* character-device interface */
+	int32_t                 irq;		/* V2D interrupt line */
+	void __iomem            *v2dreg_iomem_base;	/* mapped register window */
+	struct clk              *clkcore;	/* core clock */
+	struct clk              *clkio;		/* I/O clock */
+	struct reset_control    *v2d_reset;
+	int                     refcount;	/* open/use count -- TODO confirm locking */
+	int                     do_reset;	/* request a reset on next power cycle -- presumably; verify against users */
+	struct mutex            power_mutex;
+	spinlock_t              power_spinlock;
+	struct work_struct      work;
+	struct workqueue_struct *v2d_job_done_wq;	/* completion work queue */
+	uint64_t                context;	/* fence context id -- TODO confirm */
+	atomic_t                seqno;		/* fence sequence counter */
+	struct semaphore        sem_lock;
+	struct mutex            client_lock;
+	struct list_head        post_list;	/* jobs queued for submission */
+	struct mutex            post_lock;
+	struct kthread_worker   post_worker;
+	struct task_struct      *post_thread;
+	struct kthread_work     post_work;
+	struct list_head        free_list;	/* completed jobs awaiting free */
+	struct mutex            free_lock;
+#if IS_ENABLED(CONFIG_SPACEMIT_DDR_FC) && defined(CONFIG_PM)
+#ifndef V2D_DISABLE_BAND_CAL
+	struct spacemit_bw_con *ddr_qos_cons;	/* DDR bandwidth constraint handle */
+#endif
+#endif
+#ifdef CONFIG_SPACEMIT_DEBUG
+	bool b_v2d_running;
+	bool (*is_v2d_running)(struct v2d_info *pV2dInfo);
+	struct notifier_block nb;
+#endif
+};
+
+/* V2D IOMMU address-space layout. */
+#define BASE_VIRTUAL_ADDRESS 0x80000000	/* start of the V2D IOVA window */
+#define VA_STEP_PER_TBU 0x2000000	/* 32 MiB of IOVA per TBU slot */
+/* NOTE(review): 8096 looks like a typo for 8192 (32 MiB / 4 KiB pages);
+ * confirm against the hardware table size before changing. */
+#define MAX_ENTRIES_PER_TTB 8096
+#define ENTRY_SIZE 4			/* bytes per translation-table entry */
+#define MAX_SIZE_PER_TTB (MAX_ENTRIES_PER_TTB*ENTRY_SIZE)
+#define DEFAULT_TIMEOUT_CYCS 0x80000	/* MMU translation timeout, in cycles */
+#define V2D_MMU_PGSIZE_BITMAP 0x02FFF000 /* 4K~32M */
+#define TBU_INSTANCES_NUM (3)
+#define TTB_ENTRY_SHIFT 12		/* entries store PA >> 12 */
+#define AQUIRE_TIMEOUT_MS 100
+
+/* One translation-buffer unit: owns a VA_STEP_PER_TBU-sized IOVA window
+ * backed by a flat translation table in DMA-coherent memory.
+ */
+struct tbu_instance {
+	int ins_id;
+	u32 *ttb_va;		/* CPU mapping of the translation table */
+	dma_addr_t ttb_pa;	/* bus address programmed into TTBLR/TTBHR */
+	u64 ttb_size;		/* valid entries (pages currently mapped) */
+	u64 va_base;		/* first IOVA covered by this TBU */
+	u64 va_end;		/* one past the last IOVA covered */
+	bool always_preload;
+	bool enable_preload;
+	u32 nsaid;
+	u32 qos;
+	bool secure_enable;
+};
+
+/* Global state for the single V2D IOMMU block. */
+struct v2d_iommu_res {
+	void __iomem *base;	/* register window (IOMMU regs at V2D_IOMMU_BASE_OFFSET) */
+	u32 time_out_cycs;	/* programmed into V2D_MMU_TIMEOUT_VALUE */
+	u32 page_size;		/* translation granule: 4K or 64K */
+	u64 va_base;		/* start of the managed IOVA range */
+	u64 va_end;		/* end of the managed IOVA range */
+	struct tbu_instance tbu_ins[TBU_INSTANCES_NUM];
+	int tbu_ins_map;	/* instance selected by the current map_sg, -1 if none */
+	bool is_hw_enable;	/* set once __enable_spacemit_iommu_hw has run */
+};
+#endif  /* __SPACEMIT_V2D_PRIV_H__*/
diff --git a/drivers/soc/spacemit/v2d/v2d_reg.h b/drivers/soc/spacemit/v2d/v2d_reg.h
new file mode 100644 (file)
index 0000000..9e00fdf
--- /dev/null
@@ -0,0 +1,824 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _V2D_REG_H_
+#define _V2D_REG_H_
+
+#define V2D_REG_BASE       0xC0100000 /* V2D controller MMIO base */
+#define PMUA_REG_BASE      0xd4282800 /* PMUA (clock/reset control) MMIO base */
+/* Sub-block offsets within the V2D register space. */
+#define V2D_TOP_BASE       (0x000)
+#define V2D_CORE_BASE      (0x100)
+#define V2D_ENC_BASE       (0x800)
+#define V2D_L0_DEC_BASE    (0x900)
+#define V2D_L1_DEC_BASE    (0xa00)
+
+//v2d clk offset
+#define V2D_CLK_RES_CTRL0  (0x44)
+#define V2D_CLK_RES_CTRL1  (0x4c)
+
+//v2d top offset
+#define V2D_AUTO_CLK_REG               (0x00 + V2D_TOP_BASE)
+#define V2D_ERR_IRQ_MASK               (0x04 + V2D_TOP_BASE)
+#define V2D_IRQ_MASK                   (0x08 + V2D_TOP_BASE)
+#define V2D_ERR_IRQ_STATUS             (0x0C + V2D_TOP_BASE)
+#define V2D_IRQ_STATUS                 (0x10 + V2D_TOP_BASE)
+#define V2D_ERR_IRQ_RAW                (0x14 + V2D_TOP_BASE)
+#define V2D_IRQ_RAW                    (0x18 + V2D_TOP_BASE)
+#define V2D_AXI_BUS_CTRL               (0x1C + V2D_TOP_BASE)
+
+/* Bit definitions used in the top-level control/status registers above. */
+#define V2D_GLOBAL_RESET               BIT(8)
+#define V2D_AUTO_CLK_EN                BIT(9)
+#define V2D_ENC_AUTO_CLK_EN            BIT(10)
+#define V2D_TOP_AUTO_CLK_EN            BIT(11)
+#define V2D_EOF_IRQ_STATUS             BIT(0)
+#define V2D_FBCENC_IRQ_STATUS          BIT(1)
+#define V2D_EOF_IRQ_MASK               BIT(0)
+#define V2D_FBCENC_IRQ_MASK            BIT(1)
+
+/* 2-bit mask for address bits [33:32] (cf. the *_33_32 register fields below). */
+#define V2D_H_ADDR_MASK                (0x3)
+//v2d core offset
+#define V2D_CTRL_REG                   (0x00 + V2D_CORE_BASE)
+/*
+ * Scaler coefficient registers 0..23.  Odd-numbered names are spelled
+ * "RGG" [sic] instead of "REG" in the vendor code; they are kept as-is
+ * because other files reference them by these exact names.
+ */
+#define V2D_SCALER_COEF_REG0           (0x04 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG1           (0x08 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG2           (0x0C + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG3           (0x10 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG4           (0x14 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG5           (0x18 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG6           (0x1C + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG7           (0x20 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG8           (0x24 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG9           (0x28 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG10          (0x2C + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG11          (0x30 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG12          (0x34 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG13          (0x38 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG14          (0x3C + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG15          (0x40 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG16          (0x44 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG17          (0x48 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG18          (0x4C + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG19          (0x50 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG20          (0x54 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG21          (0x58 + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_REG22          (0x5C + V2D_CORE_BASE)
+#define V2D_SCALER_COEF_RGG23          (0x60 + V2D_CORE_BASE)
+/*
+ * Generic accessor: coefficient register i at offset 0x04*(i+1).  The
+ * argument is parenthesized so expressions such as
+ * V2D_SCALER_COEF_REG(a + b) expand correctly (CERT PRE01-C).
+ */
+#define V2D_SCALER_COEF_REG(i)         (0x04 * ((i) + 1) + V2D_CORE_BASE)
+#define SCALER_COEF_REG_NUM            (24)
+
+#define V2D_BLEND_REG0                 (0x64 + V2D_CORE_BASE)
+#define V2D_BLEND_REG1                 (0x68 + V2D_CORE_BASE)
+#define V2D_BLD_MASK_REG0              (0x6C + V2D_CORE_BASE)
+#define V2D_BLD_MASK_REG1              (0x70 + V2D_CORE_BASE)
+#define V2D_BLD_MASK_REG2              (0x74 + V2D_CORE_BASE)
+
+/*
+ * NOTE: some adjacent names below share one offset (OUTPUT_UV_ADDR_H and
+ * OUTPUT_WIDTH are both 0x84; MASK_ADDR_H and MASK_WIDTH are both 0x9C):
+ * a single 32-bit register packs both fields -- see the corresponding
+ * union layouts (v2d_output_width_reg_t, v2d_mask_width_reg_t) below.
+ */
+#define V2D_OUTPUT_Y_ADDR_L            (0x78 + V2D_CORE_BASE)
+#define V2D_OUTPUT_Y_ADDR_H            (0x7C + V2D_CORE_BASE)
+#define V2D_OUTPUT_UV_ADDR_L           (0x80 + V2D_CORE_BASE)
+#define V2D_OUTPUT_UV_ADDR_H           (0x84 + V2D_CORE_BASE)
+#define V2D_OUTPUT_WIDTH               (0x84 + V2D_CORE_BASE)
+#define V2D_OUTPUT_HEIGHT              (0x88 + V2D_CORE_BASE)
+#define V2D_OUTPUT_CRTL0               (0x8C + V2D_CORE_BASE)
+#define V2D_OUTPUT_CRTL1               (0x90 + V2D_CORE_BASE)
+#define V2D_OUTPUT_CRTL2               (0x94 + V2D_CORE_BASE)
+
+#define V2D_MASK_ADDR_L                (0x98 + V2D_CORE_BASE)
+#define V2D_MASK_ADDR_H                (0x9C + V2D_CORE_BASE)
+#define V2D_MASK_WIDTH                 (0x9C + V2D_CORE_BASE)
+#define V2D_MASK_HEIGHT                (0xA0 + V2D_CORE_BASE)
+#define V2D_MASK_CROP_REG0             (0xA4 + V2D_CORE_BASE)
+#define V2D_MASK_CROP_REG1             (0xA8 + V2D_CORE_BASE)
+
+/*
+ * Layer 0 registers.  As above, several names alias the same offset
+ * (UV_ADDR_H/BLD_FACTOR at 0xB8, SOLIDCOLOR_CTRL1/CSC_CRTL0 at 0xD0,
+ * CSC_CRTL6/SCALE_MODE at 0xE8, SCALE_DELTA_Y/BLD_CTRL0 at 0xF0):
+ * one register packs both fields.  "CRTL" [sic] kept as in vendor code.
+ */
+#define V2D_LAYER0_Y_ADDR_L            (0xAC + V2D_CORE_BASE)
+#define V2D_LAYER0_Y_ADDR_H            (0xB0 + V2D_CORE_BASE)
+#define V2D_LAYER0_UV_ADDR_L           (0xB4 + V2D_CORE_BASE)
+#define V2D_LAYER0_UV_ADDR_H           (0xB8 + V2D_CORE_BASE)
+#define V2D_LAYER0_BLD_FACTOR          (0xB8 + V2D_CORE_BASE)
+#define V2D_LAYER0_WIDTH_HEIGHT        (0xBC + V2D_CORE_BASE)
+#define V2D_LAYER0_CTRL                (0xC0 + V2D_CORE_BASE)
+#define V2D_LAYER0_CROP_REG0           (0xC4 + V2D_CORE_BASE)
+#define V2D_LAYER0_CROP_REG1           (0xC8 + V2D_CORE_BASE)
+#define V2D_LAYER0_SOLIDCOLOR_CTRL0    (0xCC + V2D_CORE_BASE)
+#define V2D_LAYER0_SOLIDCOLOR_CTRL1    (0xD0 + V2D_CORE_BASE)
+#define V2D_LAYER0_CSC_CRTL0           (0xD0 + V2D_CORE_BASE)
+#define V2D_LAYER0_CSC_CRTL1           (0xD4 + V2D_CORE_BASE)
+#define V2D_LAYER0_CSC_CRTL2           (0xD8 + V2D_CORE_BASE)
+#define V2D_LAYER0_CSC_CRTL3           (0xDC + V2D_CORE_BASE)
+#define V2D_LAYER0_CSC_CRTL4           (0xE0 + V2D_CORE_BASE)
+#define V2D_LAYER0_CSC_CRTL5           (0xE4 + V2D_CORE_BASE)
+#define V2D_LAYER0_CSC_CRTL6           (0xE8 + V2D_CORE_BASE)
+#define V2D_LAYER0_SCALE_MODE          (0xE8 + V2D_CORE_BASE)
+#define V2D_LAYER0_SCALE_DELTA_X       (0xEC + V2D_CORE_BASE)
+#define V2D_LAYER0_SCALE_DELTA_Y       (0xF0 + V2D_CORE_BASE)
+#define V2D_LAYER0_BLD_CTRL0           (0xF0 + V2D_CORE_BASE)
+#define V2D_LAYER0_BLD_CTRL1           (0xF4 + V2D_CORE_BASE)
+#define V2D_LAYER0_BLD_CTRL2           (0xF8 + V2D_CORE_BASE)
+#define V2D_LAYER0_BLD_CTRL3           (0xFC + V2D_CORE_BASE)
+
+/* Layer 1 registers: identical layout to layer 0, shifted by +0x54. */
+#define V2D_LAYER1_Y_ADDR_L            (0x100 + V2D_CORE_BASE)
+#define V2D_LAYER1_Y_ADDR_H            (0x104 + V2D_CORE_BASE)
+#define V2D_LAYER1_UV_ADDR_L           (0x108 + V2D_CORE_BASE)
+#define V2D_LAYER1_UV_ADDR_H           (0x10C + V2D_CORE_BASE)
+#define V2D_LAYER1_BLD_FACTOR          (0x10C + V2D_CORE_BASE)
+#define V2D_LAYER1_WIDTH_HEIGHT        (0x110 + V2D_CORE_BASE)
+#define V2D_LAYER1_CTRL                (0x114 + V2D_CORE_BASE)
+#define V2D_LAYER1_CROP_REG0           (0x118 + V2D_CORE_BASE)
+#define V2D_LAYER1_CROP_REG1           (0x11C + V2D_CORE_BASE)
+#define V2D_LAYER1_SOLIDCOLOR_CTRL0    (0x120 + V2D_CORE_BASE)
+#define V2D_LAYER1_SOLIDCOLOR_CTRL1    (0x124 + V2D_CORE_BASE)
+#define V2D_LAYER1_CSC_CRTL0           (0x124 + V2D_CORE_BASE)
+#define V2D_LAYER1_CSC_CRTL1           (0x128 + V2D_CORE_BASE)
+#define V2D_LAYER1_CSC_CRTL2           (0x12C + V2D_CORE_BASE)
+#define V2D_LAYER1_CSC_CRTL3           (0x130 + V2D_CORE_BASE)
+#define V2D_LAYER1_CSC_CRTL4           (0x134 + V2D_CORE_BASE)
+#define V2D_LAYER1_CSC_CRTL5           (0x138 + V2D_CORE_BASE)
+#define V2D_LAYER1_CSC_CRTL6           (0x13C + V2D_CORE_BASE)
+#define V2D_LAYER1_SCALE_MODE          (0x13C + V2D_CORE_BASE)
+#define V2D_LAYER1_SCALE_DELTA_X       (0x140 + V2D_CORE_BASE)
+#define V2D_LAYER1_SCALE_DELTA_Y       (0x144 + V2D_CORE_BASE)
+#define V2D_LAYER1_BLD_CTRL0           (0x144 + V2D_CORE_BASE)
+#define V2D_LAYER1_BLD_CTRL1           (0x148 + V2D_CORE_BASE)
+#define V2D_LAYER1_BLD_CTRL2           (0x14C + V2D_CORE_BASE)
+#define V2D_LAYER1_BLD_CTRL3           (0x150 + V2D_CORE_BASE)
+
+/*
+ * Parameterized per-layer accessors: layer i lives at +i*0x54 from the
+ * layer-0 register block.  Macro arguments are parenthesized so that
+ * expressions like V2D_LAYER_CTRL_LAYER(a + b) expand correctly
+ * (CERT PRE01-C).
+ */
+#define V2D_LAYER_Y_ADDR_L_LAYER(i)           (0xAC + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_Y_ADDR_H_LAYER(i)           (0xB0 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_UV_ADDR_L_LAYER(i)          (0xB4 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_UV_ADDR_H_LAYER(i)          (0xB8 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_BLD_FACTOR_LAYER(i)         (0xB8 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_WIDTH_HEIGHT_LAYER(i)       (0xBC + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CTRL_LAYER(i)               (0xC0 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CROP_REG0_LAYER(i)          (0xC4 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CROP_REG1_LAYER(i)          (0xC8 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_SOLIDCOLOR_CTRL0_LAYER(i)   (0xCC + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_SOLIDCOLOR_CTRL1_LAYER(i)   (0xD0 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CSC_CRTL0_LAYER(i)          (0xD0 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CSC_CRTL1_LAYER(i)          (0xD4 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CSC_CRTL2_LAYER(i)          (0xD8 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CSC_CRTL3_LAYER(i)          (0xDC + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CSC_CRTL4_LAYER(i)          (0xE0 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CSC_CRTL5_LAYER(i)          (0xE4 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_CSC_CRTL6_LAYER(i)          (0xE8 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_SCALE_MODE_LAYER(i)         (0xE8 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_SCALE_DELTA_X_LAYER(i)      (0xEC + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_SCALE_DELTA_Y_LAYER(i)      (0xF0 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_BLD_CTRL0_LAYER(i)          (0xF0 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_BLD_CTRL1_LAYER(i)          (0xF4 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_BLD_CTRL2_LAYER(i)          (0xF8 + (i) * 0x54 + V2D_CORE_BASE)
+#define V2D_LAYER_BLD_CTRL3_LAYER(i)          (0xFC + (i) * 0x54 + V2D_CORE_BASE)
+
+#define V2D_DEBUG_REG0                 (0x1FC + V2D_CORE_BASE)
+#define V2D_DEBUG_REG1                 (0x200 + V2D_CORE_BASE)
+#define V2D_DMA_CTRL                   (0x204 + V2D_CORE_BASE)
+/* Palette entry i; argument parenthesized for safe expansion. */
+#define V2D_PALETTE_TABLE(i)           (0x208 + (i) * 0x4 + V2D_CORE_BASE)
+
+#define V2D_L0_DEC_REG0       (V2D_L0_DEC_BASE + 0x000)  //register hdr_base_addr_low
+#define V2D_L0_DEC_REG1       (V2D_L0_DEC_BASE + 0x004)  //register hdr_base_addr_high
+#define V2D_L0_DEC_REG2       (V2D_L0_DEC_BASE + 0x008)  //register bbox_coor_x
+#define V2D_L0_DEC_REG3       (V2D_L0_DEC_BASE + 0x00c)  //register bbox_coor_y
+#define V2D_L0_DEC_REG4       (V2D_L0_DEC_BASE + 0x010)  //register image_size
+#define V2D_L0_DEC_REG5       (V2D_L0_DEC_BASE + 0x014)  //register dec_mode
+#define V2D_L0_DEC_REG6       (V2D_L0_DEC_BASE + 0x018)  //register dmac_ctrl
+#define V2D_L0_DEC_REG7       (V2D_L0_DEC_BASE + 0x01c)  //register irq_mask
+#define V2D_L0_DEC_REG8       (V2D_L0_DEC_BASE + 0x020)  //register irq_raw
+#define V2D_L0_DEC_REG9       (V2D_L0_DEC_BASE + 0x024)  //register irq_status
+#define V2D_L0_DEC_REG10      (V2D_L0_DEC_BASE + 0x028)  //register trig_ctrl
+#define V2D_L0_DEC_REG11      (V2D_L0_DEC_BASE + 0x02c)  //register output_ybase
+#define V2D_L0_DEC_REG12      (V2D_L0_DEC_BASE + 0x030)  //register output_cbase
+#define V2D_L0_DEC_REG13      (V2D_L0_DEC_BASE + 0x034)  //register output_stride
+
+/* Layer-1 FBC decoder: same register layout as the L0 decoder above. */
+#define V2D_L1_DEC_REG0       (V2D_L1_DEC_BASE + 0x000)
+#define V2D_L1_DEC_REG1       (V2D_L1_DEC_BASE + 0x004)
+#define V2D_L1_DEC_REG2       (V2D_L1_DEC_BASE + 0x008)
+#define V2D_L1_DEC_REG3       (V2D_L1_DEC_BASE + 0x00c)
+#define V2D_L1_DEC_REG4       (V2D_L1_DEC_BASE + 0x010)
+#define V2D_L1_DEC_REG5       (V2D_L1_DEC_BASE + 0x014)
+#define V2D_L1_DEC_REG6       (V2D_L1_DEC_BASE + 0x018)
+#define V2D_L1_DEC_REG7       (V2D_L1_DEC_BASE + 0x01c)
+#define V2D_L1_DEC_REG8       (V2D_L1_DEC_BASE + 0x020)
+#define V2D_L1_DEC_REG9       (V2D_L1_DEC_BASE + 0x024)
+#define V2D_L1_DEC_REG10      (V2D_L1_DEC_BASE + 0x028)
+#define V2D_L1_DEC_REG11      (V2D_L1_DEC_BASE + 0x02c)
+#define V2D_L1_DEC_REG12      (V2D_L1_DEC_BASE + 0x030)
+#define V2D_L1_DEC_REG13      (V2D_L1_DEC_BASE + 0x034)
+
+/*
+ * Parameterized decoder accessors: layer i's decoder block lives at
+ * +i*0x100 from the L0 decoder.  Arguments parenthesized so expressions
+ * like V2D_LAYER_DEC_REG0_L(a + b) expand correctly (CERT PRE01-C).
+ */
+#define V2D_LAYER_DEC_REG0_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x000)
+#define V2D_LAYER_DEC_REG1_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x004)
+#define V2D_LAYER_DEC_REG2_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x008)
+#define V2D_LAYER_DEC_REG3_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x00c)
+#define V2D_LAYER_DEC_REG4_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x010)
+#define V2D_LAYER_DEC_REG5_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x014)
+#define V2D_LAYER_DEC_REG6_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x018)
+#define V2D_LAYER_DEC_REG7_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x01c)
+#define V2D_LAYER_DEC_REG8_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x020)
+#define V2D_LAYER_DEC_REG9_L(i)       (V2D_L0_DEC_BASE + (i) * 0x100 + 0x024)
+#define V2D_LAYER_DEC_REG10_L(i)      (V2D_L0_DEC_BASE + (i) * 0x100 + 0x028)
+#define V2D_LAYER_DEC_REG11_L(i)      (V2D_L0_DEC_BASE + (i) * 0x100 + 0x02c)
+#define V2D_LAYER_DEC_REG12_L(i)      (V2D_L0_DEC_BASE + (i) * 0x100 + 0x030)
+#define V2D_LAYER_DEC_REG13_L(i)      (V2D_L0_DEC_BASE + (i) * 0x100 + 0x034)
+
+#define V2D_ENC_REG0       (V2D_ENC_BASE + 0x000)  //REGISTER HEADER_BASE_ADDR_LOW
+#define V2D_ENC_REG1       (V2D_ENC_BASE + 0x004)  //REGISTER HEADER_BASE_ADDR_HIGH
+#define V2D_ENC_REG2       (V2D_ENC_BASE + 0x008)  //REGISTER PAYLOAD_BASE_ADDR_LOW
+#define V2D_ENC_REG3       (V2D_ENC_BASE + 0x00c)  //REGISTER PAYLOAD_BASE_ADDR_HIGH
+#define V2D_ENC_REG4       (V2D_ENC_BASE + 0x010)  //REGISTER Bbox_coor_x
+#define V2D_ENC_REG5       (V2D_ENC_BASE + 0x014)  //REGISTER Bbox_coor_y
+#define V2D_ENC_REG6       (V2D_ENC_BASE + 0x018)  //REGISTER Y_BUF_BASE_ADDR
+#define V2D_ENC_REG7       (V2D_ENC_BASE + 0x01c)  //REGISTER Y_BUF_PITCH
+#define V2D_ENC_REG8       (V2D_ENC_BASE + 0x020)  //REGISTER UV_BUF_BASE_ADDR
+#define V2D_ENC_REG9       (V2D_ENC_BASE + 0x024)  //REGISTER UV_BUF_PITCH
+#define V2D_ENC_REG10      (V2D_ENC_BASE + 0x028)  //REGISTER Y_BUF_SIZE
+#define V2D_ENC_REG11      (V2D_ENC_BASE + 0x02c)  //REGISTER UV_BUF_SIZE
+#define V2D_ENC_REG12      (V2D_ENC_BASE + 0x030)  //REGISTER REG_SHADOW_CTRL
+#define V2D_ENC_REG13      (V2D_ENC_BASE + 0x034)  //REGISTER IRQ_MASK
+#define V2D_ENC_REG14      (V2D_ENC_BASE + 0x038)  //REGISTER IRQ_CLEAR
+#define V2D_ENC_REG15      (V2D_ENC_BASE + 0x03c)  //REGISTER DMAC_CTRL_0
+#define V2D_ENC_REG16      (V2D_ENC_BASE + 0x040)  //REGISTER ENC_MODE
+#define V2D_ENC_REG17      (V2D_ENC_BASE + 0x044)  //REGISTER DMAC_LENGTH
+#define V2D_ENC_REG18      (V2D_ENC_BASE + 0x048)  //REGISTER IRQ_STATUS
+//v2d iommu
+/* NOTE(review): TBU_NUM (32) differs from TBU_INSTANCES_NUM (3) -- confirm intent. */
+#define TBU_NUM 32
+/* Per-TBU register offsets; TBU x is at base + x * V2D_MMU_TBUx_STEP. */
+#define V2D_MMU_TTBLR_BASE         (0x40)
+#define V2D_MMU_TTBHR_BASE         (0x44)
+#define V2D_MMU_TCR0_BASE          (0x48)
+#define V2D_MMU_TCR1_BASE          (0x4c)
+#define V2D_MMU_TBU_STATUS_BASE    (0x50)
+#define V2D_MMU_TBUx_STEP          (0x20)
+/* Global IOMMU registers. */
+#define V2D_MMU_BVA_LO             (0x00)
+#define V2D_MMU_BVA_HI             (0x04)
+#define V2D_MMU_TIMEOUT_VA_ADDR_LO (0x08)
+#define V2D_MMU_TIMEOUT_VA_ADDR_HI (0x0C)
+#define V2D_MMU_IRQ_STATUS         (0x10)
+#define V2D_MMU_IRQ_ENABLE         (0x14)
+#define V2D_MMU_TIMEOUT_VALUE      (0x18)
+#define V2D_MMU_ERROR_CLEAR        (0x1C)
+#define V2D_MMU_LAST_VA_ADDR_LO    (0x20)
+#define V2D_MMU_LAST_VA_ADDR_HI    (0x24)
+#define V2D_MMU_LAST_PA_ADDR_LO    (0x28)
+#define V2D_MMU_LAST_PA_ADDR_HI    (0x2C)
+#define V2D_MMU_VERSION            (0x3C)
+#define V2D_IOMMU_BASE_OFFSET      (0xB00)
+
+/**
+ *@brief V2D Control register
+ */
+typedef union {
+    struct {
+        unsigned int trigger                  : 1;      /**< trigger v2d to work */
+        unsigned int rdma_burst_len           : 3;      /**< set rdma burst length */
+        unsigned int reserved1                : 4;      /**< Reserved */
+        unsigned int wdma_burst_len           : 5;      /**< set wdma burst length */
+        unsigned int reserved2                : 19;     /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_ctrl_reg_t;
+
+/**
+ *@brief V2D DMA Control register
+ */
+typedef union {
+    struct {
+        unsigned int dmac_arb_mode            : 2;
+        unsigned int dmac_arqos               : 4;
+        unsigned int reserved                 : 2;   /**< Reserved */
+        unsigned int dmac_awqos               : 4;
+        unsigned int dmac_axi_sec             : 1;
+        unsigned int dmac_max_req_num         : 3;
+        unsigned int dmac_postwr_en           : 8;
+        unsigned int dmac_rst_n_pwr           : 1;
+        unsigned int dmac_rst_req             : 1;
+        unsigned int dmac_user_id             : 4;
+        unsigned int damc_rd_int_clr          : 1;   /**< "damc" [sic] -- presumably dmac read-interrupt clear */
+        unsigned int damc_wr_int_clr          : 1;   /**< "damc" [sic] -- presumably dmac write-interrupt clear */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_dma_ctrl_reg_t;
+
+/**
+ *@brief V2D Scaler Coefficient register
+ */
+typedef union {
+    struct {
+        int scaler_coef0                        : 12;   /**< scaler coefficient0 */
+        unsigned int reserved1                  : 4;    /**< Reserved */
+        int scaler_coef1                        : 12;   /**< scaler coefficient1 */
+        unsigned int reserved2                  : 4;    /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_scaler_coef_reg_t;
+
+/**
+ *@brief V2D Blend Control0 register
+ */
+typedef union {
+    struct {
+        unsigned int bld_mode                  :  1;    /**< blend mode alpha blending or ROP operation */
+        unsigned int bld_bg_enable             :  1;    /**< background color enable */
+        unsigned int reserved                  :  6;    /**< Reserved */
+        unsigned int bld_bg_r                  :  8;    /**< background color R value */
+        unsigned int bld_bg_g                  :  8;    /**< background color G value */
+        unsigned int bld_bg_b                  :  8;    /**< background color B value */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_blend_ctrl0_reg_t;
+
+/**
+ *@brief V2D Blend Control1 register
+ */
+typedef union {
+    struct {
+        unsigned int bld_bg_a                  :  8;    /**< background color alpha value */
+        unsigned int reserved                  :  24;   /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_blend_ctrl1_reg_t;
+
+/**
+ *@brief V2D Blend Mask Control0 register
+ */
+typedef union {
+    struct {
+        unsigned int bld_mask_enable           : 2;     /**< blend mask enable */
+        unsigned int reserved1                 : 6;     /**< Reserved */
+        unsigned int bld_mask_rect_ltop_x      : 16;    /**< blend mask rectangle left-top point x-axis coordinate */
+        unsigned int reserved2                 : 8;     /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_blend_mask_ctrl0_reg_t;
+
+/**
+ *@brief V2D Blend Mask Control1 register
+ */
+typedef union {
+    struct {
+        unsigned int bld_mask_rect_ltop_y      : 16;    /**< blend mask rectangle left-top point y-axis coordinate */
+        unsigned int bld_mask_rect_width       : 16;    /**< blend mask rectangle width */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_blend_mask_ctrl1_reg_t;
+
+/**
+ *@brief V2D Blend Mask Control2 register
+ */
+typedef union {
+    struct {
+        unsigned int bld_mask_rect_height      : 16;    /**< blend mask rectangle height */
+        unsigned int reserved                  : 16;    /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_blend_mask_ctrl2_reg_t;
+
+/**
+ *@brief V2D Output Width register
+ * (also carries bits [33:32] of the output UV address -- this register
+ * is aliased by both V2D_OUTPUT_UV_ADDR_H and V2D_OUTPUT_WIDTH)
+ */
+typedef union {
+    struct {
+        unsigned int out_addr_uv_33_32         : 2;     /**< output uv address_h_bit */
+        unsigned int reserved1                 : 6;     /**< Reserved */
+        unsigned int out_ori_width             : 16;    /**< output width */
+        unsigned int reserved2                 : 8;     /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_output_width_reg_t;
+
+/**
+ *@brief V2D Output Height register
+ */
+typedef union {
+    struct {
+        unsigned int out_ori_height            : 16;    /**< output height */
+        unsigned int out_ori_stride            : 16;    /**< output stride */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_output_height_reg_t;
+
+/**
+ *@brief V2D Output Control0 register
+ */
+typedef union {
+    struct {
+        unsigned int format                  : 4;   /**< output format */
+        unsigned int range                   : 1;   /**< output range yuv narrow/wide */
+        unsigned int dither                  : 2;   /**< output dither mode */
+        unsigned int swap                    : 1;   /**< output swap */
+        unsigned int fbc_en                  : 1;   /**< output fbc enable */
+        unsigned int reserved                : 7;   /**< Reserved */
+        unsigned int crop_ltop_x             : 16;  /**< output crop left-top point x-axis coordinate */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_output_ctrl0_reg_t;
+
+/**
+ *@brief V2D Output Control1 register
+ */
+typedef union {
+    struct {
+        unsigned int crop_ltop_y             : 16;  /**< output crop left-top point y-axis coordinate */
+        unsigned int crop_width              : 16;  /**< output crop width */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_output_ctrl1_reg_t;
+
+/**
+ *@brief V2D Output Control2 register
+ */
+typedef union {
+    struct {
+        unsigned int crop_height             : 16;  /**< output crop height */
+        unsigned int reserved                : 16;  /**< Reserved */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_output_ctrl2_reg_t;
+
+/**
+ *@brief V2D mask input Width register
+ * (also carries bits [33:32] of the mask address -- aliased by
+ * V2D_MASK_ADDR_H and V2D_MASK_WIDTH)
+ */
+typedef union {
+    struct {
+        unsigned int mask_addr_33_32         : 2;   /**< mask address_h_bit */
+        unsigned int reserved1               : 6;   /**< Reserved */
+        unsigned int mask_ori_width          : 16;  /**< mask in width */
+        unsigned int reserved2               : 8;   /**< Reserved */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_mask_width_reg_t;
+
+/**
+ *@brief V2D mask input Height register
+ */
+typedef union {
+    struct {
+        unsigned int mask_ori_height            : 16;   /**< mask in height */
+        unsigned int mask_ori_stride            : 16;   /**< mask in stride */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_mask_height_reg_t;
+
+/**
+ *@brief V2D mask input crop0 register
+ */
+typedef union {
+    struct {
+        unsigned int mask_crop_ltop_x           : 16;   /**< mask crop left-top point x-axis coordinate */
+        unsigned int mask_crop_ltop_y           : 16;   /**< mask crop left-top point y-axis coordinate */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_mask_crop0_reg_t;
+
+/**
+ *@brief V2D mask input crop1 register
+ */
+typedef union {
+    struct {
+        unsigned int mask_crop_width            : 16;   /**< mask crop in width */
+        unsigned int mask_crop_height           : 16;   /**< mask crop in height */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_mask_crop1_reg_t;
+
+/**
+ *@brief V2D Blend Layer Factor register
+ */
+typedef union {
+    struct {
+        unsigned int in_addr_uv_33_32               : 2;    /**< input layer uv address_h_bit */
+        unsigned int bld_src_color_factor           : 3;    /**< blend source color factor */
+        unsigned int bld_dst_color_factor           : 3;    /**< blend dst color factor */
+        unsigned int bld_src_alpha_factor           : 3;    /**< blend source alpha factor */
+        unsigned int bld_dst_alpha_factor           : 3;    /**< blend dst alpha factor */
+        unsigned int reserved1                      : 2;    /**< Reserved */
+        unsigned int bld_color_rop2_code            : 4;    /**< ROP color code */
+        unsigned int bld_alpha_rop2_code            : 4;    /**< ROP alpha code */
+        unsigned int reserved2                      : 8;    /**< Reserved */
+    } field;                                                /**< Fields view */
+    unsigned int overlay;                                   /**< Overlay view */
+} v2d_blend_layer_factor_reg_t;
+
+/**
+ *@brief V2D Input Layer width/height register
+ */
+typedef union {
+    struct {
+        unsigned int layer_in_ori_width            : 16;    /**< input layer width */
+        unsigned int layer_in_ori_height           : 16;    /**< input layer height */
+    } field;                                                /**< Fields view */
+    unsigned int overlay;                                   /**< Overlay view */
+} v2d_input_layer_width_height_reg_t;
+
+/**
+ *@brief V2D Input Layer Control register
+ */
+typedef union {
+    struct {
+        unsigned int stride               : 16; /**< input layer stride */
+        unsigned int format               : 4;  /**< input layer format */
+        unsigned int rotation             : 3;  /**< input layer rotation */
+        unsigned int swap                 : 1;  /**< input layer swap */
+        unsigned int fbc_en               : 1;  /**< input layer fbc enable */
+        unsigned int reserved             : 7;  /**< Reserved */
+    } field;                                    /**< Fields view */
+    unsigned int overlay;                       /**< Overlay view */
+} v2d_input_layer_ctrl_reg_t;
+
+/**
+ *@brief V2D input layer crop0 register
+ */
+typedef union {
+    struct {
+        unsigned int layer_in_crop_ltop_x           : 16;   /**< input layer crop left-top point x-axis coordinate */
+        unsigned int layer_in_crop_ltop_y           : 16;   /**< input layer crop left-top point y-axis coordinate */
+    } field;                                                /**< Fields view */
+    unsigned int overlay;                                   /**< Overlay view */
+} v2d_input_layer_crop0_reg_t;
+
+/**
+ *@brief V2D input layer crop1 register
+ */
+typedef union {
+    struct {
+        unsigned int layer_in_crop_width            : 16;   /**< input layer crop in width */
+        unsigned int layer_in_crop_height           : 16;   /**< input layer crop in height */
+    } field;                                                /**< Fields view */
+    unsigned int overlay;                                   /**< Overlay view */
+} v2d_input_layer_crop1_reg_t;
+
+/**
+ *@brief V2D input solid color control0 register
+ */
+typedef union {
+    struct {
+        unsigned int solid_en              : 1;     /**< input layer solid color enable */
+        unsigned int reserved              : 7;     /**< Reserved */
+        unsigned int solid_R               : 8;     /**< solid color R channel value */
+        unsigned int solid_G               : 8;     /**< solid color G channel value */
+        unsigned int solid_B               : 8;     /**< solid color B channel value */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_solid_color_ctrl0_reg_t;
+
+/**
+ *@brief V2D input solid color control1 register
+ * (same register also starts the CSC matrix -- hence the typedef alias)
+ */
+typedef union {
+    struct {
+        unsigned int solid_A               : 8;     /**< solid color alpha channel value */
+        unsigned int csc_en                : 1;     /**< input layer csc enable */
+        unsigned int reserved1             : 7;     /**< Reserved */
+        int csc_matrix0           : 13;             /**< input layer csc matrix0 */
+        unsigned int reserved2             : 3;     /**< Reserved */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_solid_color_ctrl1_reg_t;
+typedef v2d_solid_color_ctrl1_reg_t v2d_input_layer_csc_ctrl0_reg_t;
+
+/**
+ *@brief V2D input layer csc control1~5 register
+ */
+typedef union {
+    struct {
+        int csc_matrix1                    : 13;         /**< input layer csc matrix 2*i-1 */
+        unsigned int reserved1             : 3; /**< Reserved */
+        int csc_matrix2                    : 13;         /**< input layer csc matrix 2*i */
+        unsigned int reserved2             : 3; /**< Reserved */
+    } field;                                    /**< Fields view */
+    unsigned int overlay;                       /**< Overlay view */
+} v2d_input_layer_csc_ctrl1_reg_t;
+
+/**
+ *@brief V2D input layer csc control6 register
+ * (also holds the scaler mode -- hence the typedef alias)
+ */
+typedef union {
+    struct {
+        int csc_matrix11                   : 13;             /**< input layer csc matrix11  */
+        unsigned int scl_mode              : 2;     /**< scaler mode 0:bypass, 1:scale down, 2:scale up */
+        unsigned int reserved1             : 17;    /**< Reserved */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_input_layer_csc_ctrl2_reg_t;
+typedef v2d_input_layer_csc_ctrl2_reg_t v2d_input_layer_scale_mode_reg_t;
+
+/**
+ *@brief V2D input layer scale delta x register
+ */
+typedef union {
+    struct {
+        unsigned int scl_delta_x           : 20;    /**< input layer scale delta x, (in_width<<16) /bld_rectWidth  */
+        unsigned int reserved              : 12;    /**< Reserved */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_input_layer_scale_delta_x_reg_t;
+
+/**
+ *@brief V2D input layer scale delta y register
+ * (also carries blend alpha controls -- hence the typedef alias)
+ */
+typedef union {
+    struct {
+        unsigned int scl_delta_y           : 20;    /**< input layer scale delta y, (in_height<<16) /bld_rectHeight  */
+        unsigned int bld_alpha_source      : 2;     /**< blend alpha source, 0:pixel, 1:global, 2: mask value */
+        unsigned int bld_pre_alp_func      : 2;     /**< blend premultiplied function, 0:disable, 1:global alpha* src_alpha, 2:mask*src_a */
+        unsigned int bld_glb_alp           : 8;     /**< global alpha value */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_input_layer_scale_delta_y_reg_t;
+typedef v2d_input_layer_scale_delta_y_reg_t v2d_blend_layer_ctrl0_reg_t;
+
+/**
+ * @brief V2D Blend Layer Control1 register
+ */
+typedef union {
+    struct {
+        unsigned int blend_en             : 1;  /**< blend layer enable */
+        unsigned int reserved1            : 7;  /**< Reserved */
+        unsigned int bld_rect_ltop_x      : 16; /**< blend layer rectangle left-top point x-axis coordinate (y is in Control2) */
+        unsigned int reserved2            : 8;  /**< Reserved */
+    } field;                                    /**< Fields view */
+    unsigned int overlay;                       /**< Overlay view */
+} v2d_blend_layer_ctrl1_reg_t;
+
+/**
+ * @brief V2D Blend Layer Control2 register
+ */
+typedef union {
+    struct {
+        unsigned int bld_rect_ltop_y       : 16;    /**< blend layer rectangle left-top point y-axis coordinate (x is in Control1) */
+        unsigned int bld_rect_width        : 16;    /**< blend layer rectangle width */
+    } field;                                        /**< Fields view */
+    unsigned int overlay;                           /**< Overlay view */
+} v2d_blend_layer_ctrl2_reg_t;
+
+/**
+ * @brief V2D Blend Layer Control3 register
+ */
+typedef union {
+    struct {
+        unsigned int bld_rect_height            : 16;   /**< blend layer rectangle height (width is in Control2) */
+        unsigned int reserved                   : 16;   /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_blend_layer_ctrl3_reg_t;
+
+/**
+ * @brief V2D FBC decoder bbox register (13-bit start/end; encoder bbox uses 16-bit fields)
+ */
+typedef union {
+    struct {
+        unsigned int bbox_start            : 13;      /**< v2d fbc decoder bbox start */
+        unsigned int reserved1             : 3;       /**< Reserved */
+        unsigned int bbox_end              : 13;      /**< v2d fbc decoder bbox end */
+        unsigned int reserved2             : 3;       /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_fbc_decoder_bbox_reg_t;
+
+/**
+ * @brief V2D FBC decoder image size register
+ */
+typedef union {
+    struct {
+        unsigned int width               : 16;          /**< v2d fbc decoder image width  */
+        unsigned int height              : 16;          /**< v2d fbc decoder image height */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_fbc_decoder_imgae_size_reg_t; /* NOTE(review): "imgae" typo in type name ("image"); kept as-is for compatibility with existing users */
+
+/**
+ * @brief V2D FBC decoder mode register
+ */
+typedef union {
+    struct {
+        unsigned int mode                : 3;          /**< v2d fbc decoder mode  */
+        unsigned int format              : 3;          /**< v2d fbc decoder pixel format */
+        unsigned int is_split            : 1;          /**< v2d fbc decoder split mode enable */
+        unsigned int rgb_pack_en         : 1;          /**< v2d fbc decoder rgb pack enable */
+        unsigned int reserved            : 24;         /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_fbc_decoder_mode_reg_t;
+
+/**
+ * @brief V2D FBC decoder dma control register
+ */
+typedef union {
+    struct {
+        unsigned int dmac_arqos                : 4;    /**< v2d fbc decoder dma qos  */
+        unsigned int damc_axi_sec              : 1;    /**< v2d fbc decoder dma axi sec (NOTE(review): "damc" likely a typo for "dmac"; kept as-is) */
+        unsigned int dmac_user_id              : 4;    /**< v2d fbc decoder dma user id */
+        unsigned int dmac_rstn_pwr             : 1;    /**< v2d fbc decoder dma rstn pwr */
+        unsigned int dmac_rst_req              : 1;    /**< v2d fbc decoder dma rst req */
+        unsigned int dmac_max_req_num          : 3;    /**< v2d fbc decoder dma max req num */
+        unsigned int dmac_arb_mode             : 2;    /**< v2d fbc decoder dma arb mode */
+        unsigned int rdma_timeout_num          : 16;   /**< v2d fbc decoder dma timeout num */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_fbc_decoder_dma_ctrl_reg_t;
+
+/**
+ * @brief V2D FBC decoder irq mask/raw/status register
+ */
+typedef union {
+    struct {
+        unsigned int decode_eof               : 1;    /**< v2d fbc decoder eof irq mask  */
+        unsigned int cfg_swaped               : 1;    /**< v2d fbc decoder cfg swap irq mask (NOTE(review): "swaped" spelling kept to match hardware field name) */
+        unsigned int dmac_err                 : 1;    /**< v2d fbc decoder dmac err irq mask */
+        unsigned int rdma_timeout             : 1;    /**< v2d fbc decoder rdma timeout mask */
+        unsigned int dec_err                  : 1;    /**< v2d fbc decoder decode err irq mask */
+        unsigned int reserved                 : 27;   /**< Reserved */
+    } field;                                               /**< Fields view */
+    unsigned int overlay;                                  /**< Overlay view */
+} v2d_fbc_decoder_irq_ctrl_reg_t;
+
+/**
+ * @brief V2D FBC decoder trigger register
+ */
+typedef union {
+    struct {
+        unsigned int direct_swap               : 1;    /**< v2d fbc decoder direct swap */
+        unsigned int pending_swap              : 1;    /**< v2d fbc decoder pending swap */
+        unsigned int reserved                  : 30;   /**< Reserved */
+    } field;                                               /**< Fields view */
+    unsigned int overlay;                                  /**< Overlay view */
+} v2d_fbc_decoder_trigger_reg_t;
+
+/**
+ * @brief V2D FBC encoder bbox register (16-bit start/end; decoder bbox uses 13-bit fields)
+ */
+typedef union {
+    struct {
+        unsigned int bbox_start            : 16;      /**< v2d fbc encoder bbox start */
+        unsigned int bbox_end              : 16;      /**< v2d fbc encoder bbox end */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_fbc_encoder_bbox_reg_t;
+
+/**
+ * @brief V2D FBC encoder y or uv buf size register
+ */
+typedef union {
+    struct {
+        unsigned int x_size              : 16;      /**< v2d fbc encoder buf x size  */
+        unsigned int y_size              : 16;      /**< v2d fbc encoder buf y size */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_fbc_encoder_buf_size_reg_t;
+
+/**
+ * @brief V2D FBC encoder trigger register (same layout as the decoder trigger register)
+ */
+typedef union {
+    struct {
+        unsigned int direct_swap               : 1;    /**< v2d fbc encoder direct swap */
+        unsigned int pending_swap              : 1;    /**< v2d fbc encoder pending swap */
+        unsigned int reserved                  : 30;   /**< Reserved */
+    } field;                                               /**< Fields view */
+    unsigned int overlay;                                  /**< Overlay view */
+} v2d_fbc_encoder_trigger_reg_t;
+
+/**
+ * @brief V2D FBC encoder irq mask/raw/status register
+ */
+typedef union {
+    struct {
+        unsigned int dma_wr_err                    : 16;   /**< v2d fbc encoder dma wr err  */
+        unsigned int dma_wr_eof                    : 1;    /**< v2d fbc encoder dma wr eof */
+        unsigned int cfg_update_done               : 1;    /**< v2d fbc encoder cfg update done */
+        unsigned int reserved                      : 14;   /**< Reserved */
+    } field;                                               /**< Fields view */
+    unsigned int overlay;                                  /**< Overlay view */
+} v2d_fbc_encoder_irq_reg_t;
+
+/**
+ * @brief V2D FBC encoder mode register
+ */
+typedef union {
+    struct {
+        unsigned int encode_enable                    : 1;   /**< v2d fbc encoder enable */
+        unsigned int split_mode_en                    : 1;   /**< v2d fbc encoder split mode enable */
+        unsigned int img_pix_format                   : 2;   /**< v2d fbc encoder pixel format */
+        unsigned int reserved                         : 28;  /**< Reserved */
+    } field;                                               /**< Fields view */
+    unsigned int overlay;                                  /**< Overlay view */
+} v2d_fbc_encoder_mode_reg_t;
+
+/**
+ * @brief V2D FBC encoder dmac burst length register
+ */
+typedef union {
+    struct {
+        unsigned int burst_length                     : 7;   /**< v2d fbc encoder dmac burst length */
+        unsigned int reserved                         : 25;  /**< Reserved */
+    } field;                                               /**< Fields view */
+    unsigned int overlay;                                  /**< Overlay view */
+} v2d_fbc_encoder_dmac_burst_reg_t;
+
+/**
+*@brief V2D Top AXI bus control register
+*/
+typedef union {
+    struct {
+        unsigned int arqos_m                      : 4; /**< v2d axi bus read qos */
+        unsigned int aruser_m                     : 4;  /**< v2d axi bus read user */
+        unsigned int awqos_m                      : 4;  /**< v2d axi bus write qos */
+        unsigned int awuser_m                     : 4;  /**< v2d axi bus write user */
+        unsigned int shadow_mode                  : 1;  /**< v2d reg cfg is shadow mode */
+        unsigned int reserved                     : 15; /**< Reserved */
+    } field;                                            /**< Fields view */
+    unsigned int overlay;                               /**< Overlay view */
+} v2d_axi_bus_ctrl_reg_t;
+#endif
diff --git a/include/dt-bindings/pmu/k1x_pmu.h b/include/dt-bindings/pmu/k1x_pmu.h
new file mode 100644 (file)
index 0000000..b424ca9
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_PMU_K1X_H__
+#define __DT_BINDINGS_PMU_K1X_H__
+
+/* Power-domain indices for the SpacemiT K1-X PMU devicetree bindings */
+#define K1X_PMU_BUS_PWR_DOMAIN 0
+#define K1X_PMU_VPU_PWR_DOMAIN 1
+#define K1X_PMU_GPU_PWR_DOMAIN 2
+#define K1X_PMU_LCD_PWR_DOMAIN 3
+#define K1X_PMU_ISP_PWR_DOMAIN 4
+#define K1X_PMU_AUD_PWR_DOMAIN 5
+#define K1X_PMU_GNSS_PWR_DOMAIN        6
+#define K1X_PMU_HDMI_PWR_DOMAIN        7
+#define K1X_PMU_DUMMY_PWR_DOMAIN       8
+#endif /* __DT_BINDINGS_PMU_K1X_H__ */