drm/exynos/ipp: move node cleanup to a separate function
author Andrzej Hajda <a.hajda@samsung.com>
Thu, 28 Aug 2014 09:07:31 +0000 (11:07 +0200)
committer Chanho Park <chanho61.park@samsung.com>
Tue, 18 Nov 2014 03:00:45 +0000 (12:00 +0900)
The patch introduces the ipp_clean_mem_nodes() function, which replaces
redundant code. Additionally, the memory node function definitions are
moved up to increase their visibility.

Change-Id: I3143c773be9646f28f8f3c55b711e17e21f67a8a
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Reviewed-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
drivers/gpu/drm/exynos/exynos_drm_ipp.c

index 8b311ef..cc2fed8 100644
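
For orientation before reading the diff: the new ipp_clean_mem_nodes() helper
centralizes the usual "walk a list and free every node" idiom under the command
node's mem_lock. A minimal, self-contained sketch of that idiom follows; it is
illustrative only and not part of the patch, and demo_node/demo_clean_nodes are
hypothetical names.

	/*
	 * Illustrative sketch of the list_for_each_entry_safe() idiom used by
	 * ipp_clean_mem_nodes(): the "_safe" variant keeps a pointer to the
	 * next entry (tmp), so the current node can be unlinked and freed
	 * without corrupting the walk.
	 */
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct demo_node {
		struct list_head list;
		/* payload fields would live here */
	};

	static void demo_clean_nodes(struct list_head *head, struct mutex *lock)
	{
		struct demo_node *node, *tmp;

		mutex_lock(lock);
		list_for_each_entry_safe(node, tmp, head, list) {
			list_del(&node->list);
			kfree(node);
		}
		mutex_unlock(lock);
	}

The actual helper in the diff below additionally reports failures from
ipp_put_mem_node(), which also drops the GEM buffer references before freeing
each node.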
@@ -545,6 +545,129 @@ err_clear:
        return ret;
 }
 
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node)
+{
+       int i;
+
+       DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+
+       if (!m_node) {
+               DRM_ERROR("invalid dequeue node.\n");
+               return -EFAULT;
+       }
+
+       DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
+
+       /* put gem buffer */
+       for_each_ipp_planar(i) {
+               unsigned long handle = m_node->buf_info.handles[i];
+               if (handle)
+                       exynos_drm_gem_put_dma_addr(drm_dev, handle,
+                                                       c_node->filp);
+       }
+
+       list_del(&m_node->list);
+       kfree(m_node);
+
+       return 0;
+}
+
+static struct drm_exynos_ipp_mem_node
+               *ipp_get_mem_node(struct drm_device *drm_dev,
+               struct drm_file *file,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_buf_info buf_info;
+       void *addr;
+       unsigned long size;
+       int i;
+
+       mutex_lock(&c_node->mem_lock);
+
+       m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+       if (!m_node)
+               goto err_unlock;
+
+       /* clear base address for error handling */
+       memset(&buf_info, 0x0, sizeof(buf_info));
+
+       /* operations, buffer id */
+       m_node->ops_id = qbuf->ops_id;
+       m_node->prop_id = qbuf->prop_id;
+       m_node->buf_id = qbuf->buf_id;
+       INIT_LIST_HEAD(&m_node->list);
+
+       DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+               (int)m_node, qbuf->ops_id);
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+               qbuf->prop_id, m_node->buf_id);
+
+       for_each_ipp_planar(i) {
+               DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+                       i, qbuf->handle[i]);
+
+               /* get dma address by handle */
+               if (qbuf->handle[i]) {
+                       addr = exynos_drm_gem_get_dma_addr(drm_dev,
+                                       qbuf->handle[i], file);
+                       if (IS_ERR(addr)) {
+                               DRM_ERROR("failed to get addr.\n");
+                               goto err_clear;
+                       }
+
+                       size = exynos_drm_gem_get_size(drm_dev,
+                                               qbuf->handle[i], file);
+                       if (!size) {
+                               DRM_ERROR("failed to get size.\n");
+                               goto err_clear;
+                       }
+
+                       buf_info.handles[i] = qbuf->handle[i];
+                       buf_info.base[i] = *(dma_addr_t *) addr;
+                       buf_info.size[i] = (uint64_t) size;
+                       DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]sz[%d]\n",
+                               __func__, i, buf_info.base[i],
+                               (int)buf_info.handles[i],
+                               (int)buf_info.size[i]);
+               }
+       }
+
+       m_node->buf_info = buf_info;
+       list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+       mutex_unlock(&c_node->mem_lock);
+       return m_node;
+
+err_clear:
+       ipp_put_mem_node(drm_dev, c_node, m_node);
+err_unlock:
+       mutex_unlock(&c_node->mem_lock);
+       return ERR_PTR(-EFAULT);
+}
+
+static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node, int ops)
+{
+       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+       struct list_head *head = &c_node->mem_list[ops];
+
+       mutex_lock(&c_node->mem_lock);
+
+       list_for_each_entry_safe(m_node, tm_node, head, list) {
+               int ret;
+
+               ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+               if (ret)
+                       DRM_ERROR("failed to put m_node.\n");
+       }
+
+       mutex_unlock(&c_node->mem_lock);
+}
+
 static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
 {
        /* cancel works */
@@ -682,111 +805,6 @@ err_unlock:
        return ret;
 }
 
-static int ipp_put_mem_node(struct drm_device *drm_dev,
-               struct drm_exynos_ipp_cmd_node *c_node,
-               struct drm_exynos_ipp_mem_node *m_node)
-{
-       int i;
-
-       DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
-
-       if (!m_node) {
-               DRM_ERROR("invalid dequeue node.\n");
-               return -EFAULT;
-       }
-
-       DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
-
-       /* put gem buffer */
-       for_each_ipp_planar(i) {
-               unsigned long handle = m_node->buf_info.handles[i];
-               if (handle)
-                       exynos_drm_gem_put_dma_addr(drm_dev, handle,
-                                                       c_node->filp);
-       }
-
-       list_del(&m_node->list);
-       kfree(m_node);
-
-       return 0;
-}
-
-static struct drm_exynos_ipp_mem_node
-               *ipp_get_mem_node(struct drm_device *drm_dev,
-               struct drm_file *file,
-               struct drm_exynos_ipp_cmd_node *c_node,
-               struct drm_exynos_ipp_queue_buf *qbuf)
-{
-       struct drm_exynos_ipp_mem_node *m_node;
-       struct drm_exynos_ipp_buf_info buf_info;
-       void *addr;
-       unsigned long size;
-       int i;
-
-       mutex_lock(&c_node->mem_lock);
-
-       m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
-       if (!m_node)
-               goto err_unlock;
-
-       /* clear base address for error handling */
-       memset(&buf_info, 0x0, sizeof(buf_info));
-
-       /* operations, buffer id */
-       m_node->ops_id = qbuf->ops_id;
-       m_node->prop_id = qbuf->prop_id;
-       m_node->buf_id = qbuf->buf_id;
-       INIT_LIST_HEAD(&m_node->list);
-
-       DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
-               (int)m_node, qbuf->ops_id);
-       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
-               qbuf->prop_id, m_node->buf_id);
-
-       for_each_ipp_planar(i) {
-               DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
-                       i, qbuf->handle[i]);
-
-               /* get dma address by handle */
-               if (qbuf->handle[i]) {
-                       addr = exynos_drm_gem_get_dma_addr(drm_dev,
-                                       qbuf->handle[i], file);
-                       if (IS_ERR(addr)) {
-                               DRM_ERROR("failed to get addr.\n");
-                               goto err_clear;
-                       }
-
-                       size = exynos_drm_gem_get_size(drm_dev,
-                                               qbuf->handle[i], file);
-                       if (!size) {
-                               DRM_ERROR("failed to get size.\n");
-                               goto err_clear;
-                       }
-
-                       buf_info.handles[i] = qbuf->handle[i];
-                       buf_info.base[i] = *(dma_addr_t *) addr;
-                       buf_info.size[i] = (uint64_t) size;
-                       DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]sz[%d]\n",
-                               __func__, i, buf_info.base[i],
-                               (int)buf_info.handles[i],
-                               (int)buf_info.size[i]);
-               }
-       }
-
-       m_node->buf_info = buf_info;
-       list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
-
-       mutex_unlock(&c_node->mem_lock);
-       return m_node;
-
-err_clear:
-       ipp_put_mem_node(drm_dev, c_node, m_node);
-err_unlock:
-       mutex_unlock(&c_node->mem_lock);
-       return ERR_PTR(-EFAULT);
-}
-
-
 static void ipp_free_event(struct drm_pending_event *event)
 {
        kfree(event);
@@ -1362,9 +1380,7 @@ static int ipp_stop_property(struct drm_device *drm_dev,
                struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_node *c_node)
 {
-       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
        struct drm_exynos_ipp_property *property = &c_node->property;
-       struct list_head *head;
        int ret = 0, i;
 
        DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
@@ -1375,60 +1391,14 @@ static int ipp_stop_property(struct drm_device *drm_dev,
        /* check command */
        switch (property->cmd) {
        case IPP_CMD_M2M:
-               for_each_ipp_ops(i) {
-                       /* source/destination memory list */
-                       head = &c_node->mem_list[i];
-
-                       if (list_empty(head)) {
-                               DRM_DEBUG_KMS("%s:mem_list is empty.\n",
-                                       __func__);
-                               break;
-                       }
-
-                       list_for_each_entry_safe(m_node, tm_node,
-                               head, list) {
-                               ret = ipp_put_mem_node(drm_dev, c_node,
-                                       m_node);
-                               if (ret) {
-                                       DRM_ERROR("failed to put m_node.\n");
-                                       goto err_clear;
-                               }
-                       }
-               }
+               for_each_ipp_ops(i)
+                       ipp_clean_mem_nodes(drm_dev, c_node, i);
                break;
        case IPP_CMD_WB:
-               /* destination memory list */
-               head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
-
-               if (list_empty(head)) {
-                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
-                       break;
-               }
-
-               list_for_each_entry_safe(m_node, tm_node, head, list) {
-                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
-                       if (ret) {
-                               DRM_ERROR("failed to put m_node.\n");
-                               goto err_clear;
-                       }
-               }
+               ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
                break;
        case IPP_CMD_OUTPUT:
-               /* source memory list */
-               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
-
-               if (list_empty(head)) {
-                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
-                       break;
-               }
-
-               list_for_each_entry_safe(m_node, tm_node, head, list) {
-                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
-                       if (ret) {
-                               DRM_ERROR("failed to put m_node.\n");
-                               goto err_clear;
-                       }
-               }
+               ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
                break;
        default:
                DRM_ERROR("invalid operations.\n");