test: Drop armnn and mlapi test apps
author	Inki Dae <inki.dae@samsung.com>
Fri, 25 Sep 2020 07:47:36 +0000 (16:47 +0900)
committer	Inki Dae <inki.dae@samsung.com>
Fri, 25 Sep 2020 07:47:36 +0000 (16:47 +0900)
Change-Id: I96d817a890df6541f928b5eaf5eb5a7e9e3204aa
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/testsuites/stream_infer/stream_infer_armnn.c [deleted file]
test/testsuites/stream_infer/stream_infer_mlapi.c [deleted file]

diff --git a/test/testsuites/stream_infer/stream_infer_armnn.c b/test/testsuites/stream_infer/stream_infer_armnn.c
deleted file mode 100644 (file)
index d803bfb..0000000
+++ /dev/null
@@ -1,1881 +0,0 @@
-/**
- * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define _USE_MATH_DEFINES
-#include <fcntl.h>
-#include <mv_common.h>
-#include <mv_inference.h>
-
-#include <mv_testsuite_common.h>
-
-#include <image_helper.h>
-#include <mv_video_helper.h>
-
-#include <mv_log_cfg.h>
-
-#include <math.h>
-#include <ctype.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <dirent.h>
-#include <string.h>
-#include <limits.h>
-#include <time.h>
-#define MAX(a, b) \
-({ __typeof__ (a) _a = (a); \
-   __typeof__ (b) _b = (b); \
-   _a > _b ? _a : _b; })
-
-#define MIN(a, b) \
-({ __typeof__ (a) _a = (a); \
-   __typeof__ (b) _b = (b); \
-   _a < _b ? _a : _b; })
-
-#include <glib-2.0/glib.h>
-#include <gst/gst.h>
-#include <gst/video/video.h>
-#include <cairo.h>
-#include <cairo-gobject.h>
-
-#include <Elementary.h>
-#include <appcore-efl.h>
-#include <Ecore.h>
-#include <Ecore_Evas.h>
-#include <Ecore_Wl2.h>
-#include <tizen-extension-client-protocol.h>
-#include <efl_util.h>
-
-#include <gst/gst.h>
-#include <gst/video/videooverlay.h>
-#include <unistd.h>
-#include <time.h>
-#include <stream_infer_common_util.h>
-
-#define WIDTH  (480)
-#define HEIGHT (270)
-
-#ifdef PACKAGE
-#undef PACKAGE
-#endif
-#define PACKAGE "test"
-
-static char* gHandGestureLabel[] = { "None", "One", "Two", "Three", "Four", "Five",
-                                       "Six", "Seven", "SmallHeart", "OK", "DisLike", "Like",
-                                       "Fist", "Rock", "Love"};
-static int st = 0;
-static Evas_Object *g_eo = NULL;
-static Evas_Object *icon = NULL;
-
-/* for video display */
-static Evas_Object *g_win_id;
-static Evas_Object *selected_win_id;
-
-enum {
-       MODEL_TYPE_POSE_CPM = 0,
-       MODEL_TYPE_POSE_HAND_AICLite,   // posenet lite 224
-       MODEL_TYPE_POSE_HAND_AICLite2,  // posenet 0709 f
-       MODEL_TYPE_POSE_HAND_AICLite2Q  // posenet 0709 q
-};
-
-typedef struct {
-       gchar *filename;
-       gchar *filename2;
-       int numbuffers;
-       int modelType;
-       Evas_Object *win;
-       Evas_Object *layout_main;       /* layout widget based on EDJ */
-       /* add more variables here */
-
-} appdata;
-
-static mv_rectangle_s poseRoi;
-
-static appdata ad;
-static GstBus *bus;
-static guint bus_watch_id;
-
-#define FILE_PATH_SIZE 1024
-
-// pose estimation
-#define PE_TFLITE_CPM_WEIGHT_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pld-tflite-001.tflite"
-#define PE_POSE_LABEL_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pose-label.txt"
-
-
-#define PE_TFLITE_AIC_1_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite"
-#define PE_TFLITE_AIC_2_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite"
-
-
-#define PE_TFLITE_AICLite_1_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet1_0709_f.tflite"
-#define PE_TFLITE_AICLite_2_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet2_0709_f.tflite"
-
-#define PE_TFLITE_AICLiteQ_1_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet1_0709_dq.tflite"
-#define PE_TFLITE_AICLiteQ_2_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet2_0709_f.tflite"
-
-#define PLD_MOTION_CAPTURE_FILE_PATH "/usr/share/capi-media-vision/models/PLD/mocap/example.bvh"
-#define PLD_MOTION_CAPTURE_MAPPING_FILE_PATH "/usr/share/capi-media-vision/models/PLD/mocap/example-mocap-mapping.txt"
-
-#define POSE_RESET_COUNT 5
-
-static float thPoseScore = 0.3f;
-static int thResetCount = 5;
-static float thCustom = 0.0f;
-
-typedef struct _rect {
-  int left;
-  int top;
-  int right;
-  int bottom;
-  int type;
-  bool updated;
-  bool cr_updated;
-} rect;
-
-
-typedef struct _humanSkeleton {
-  mv_point_s pose[21/*14*/];
-  mv_point_s prevPose[21/*14*/];
-  mv_rectangle_s loc;
-  mv_rectangle_s prevLoc;
-  mv_rectangle_s handRoi;
-  float scores[21/*14*/];
-  bool isPrevPose;
-  bool updated;    // detection found and pose is good, so update the drawing
-  bool locUpdated; // track previous detection
-  bool IsDetected; // detection
-  int label;
-} HumanSkeleton;
-
-typedef struct
-{
-  gboolean valid;
-  GstVideoInfo vinfo;
-} CairoOverlayState;
-
-typedef struct
-{
-  GstBuffer *buffer;
-  gpointer user_data;
-} HandOffData;
-
-//gchar *gesturenames;
-
-static HandOffData hdata_p;
-
-static GMutex pose_mutex;
-static guint old_timeout = 0;
-static guint nFrames = 0;
-
-// Gstreamer
-GstElement *pipeline, *source, *flip, *filter, *toverlay, *sink, *sink2;
-GstElement *tee, *vscale, *vsfilter, *vconv, *vcfilter;
-GstElement *vrate, *vrfilter, *fsink, *vrsink;
-GstElement *queue1, *queue2, *queue3;
-GstElement *oconv, *coverlay;
-GstElement *vcrop, *vcrscale, *vcrsfilter, *vcrsconv, *vcrscfilter, *vcrssink;
-CairoOverlayState *overlay_state;
-
-GstElement *dbin, *dscale, *dconv;
-GstElement *dsfilter, *dcfilter;
-
-GstElement *tee2, *enc, *muxmp4, *fsink2, *queue4, *queue5, *encconv;
-
-//static HandSkeleton handSkeleton;
-static HumanSkeleton humanSkeleton;
-gulong handler_p;
-GList *line_list = NULL;
-
-mv_source_h mv_src_p;
-mv_source_h mv_src_p2;
-
-// Human pose
-mv_engine_config_h hp_mv_engine_cfg;
-mv_inference_h hp_mv_infer;
-
-mv_engine_config_h hp_mv_engine_cfg2;
-mv_inference_h hp_mv_infer2;
-
-// pose comparison
-mv_pose_h hpPoser;
-float hpPoseScore;
-int hpPoseHoldTime;
-int hpPoseCount;
-
-static void * outputTensorData;
-
-FILE *fp;
-
-static bool IsGestureMode;
-
-static int poseCropSize = 0;
-
-#define IMAGE_SIZE_WIDTH 640
-#define IMAGE_SIZE_HEIGHT 480
-
-#define NANO_PER_SEC ((__clock_t) 1000000000)
-#define NANO_PER_MILLI  ((__clock_t) 1000000)
-#define MILLI_PER_SEC  ((__clock_t) 1000)
-
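-/* Return end - start as a normalized timespec, borrowing from tv_sec when tv_nsec underflows. */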
-struct timespec diff(struct timespec start, struct timespec end)
-{
-    struct timespec temp;
-    if ((end.tv_nsec - start.tv_nsec) < 0) {
-        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
-        temp.tv_nsec = NANO_PER_SEC + end.tv_nsec - start.tv_nsec;
-    }
-    else {
-        temp.tv_sec = end.tv_sec - start.tv_sec;
-        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
-    }
-    return temp;
-}
-
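-/* Convert a timespec into total milliseconds. */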
-unsigned long gettotalmillisec(const struct timespec time)
-{
-    return time.tv_sec * MILLI_PER_SEC + time.tv_nsec / NANO_PER_MILLI;
-}
-
-
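-/* SIGINT handler: on confirmation, send EOS through the pipeline and release
- * every media vision source, inference, engine config and pose handle. */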
-void int_handler(int sig)
-{
-       char c;
-
-       signal(sig, SIG_IGN);
-       while ((getchar()) != '\n');
-
-       printf(TEXT_YELLOW "Do you want to quit? [y/n]\n" TEXT_RESET);
-       c = getchar();
-       if (c == 'y' || c == 'Y') {
-
-               g_signal_handler_disconnect(vrsink, handler_p);
-#if 0
-               g_signal_handler_disconnect(vcrssink, handler_gp);
-#endif
-
-               gst_element_send_event(pipeline, gst_event_new_eos());
-
-               sleep(4);
-
-
-               if (mv_src_p)
-                       mv_destroy_source(mv_src_p);
-
-               if (hp_mv_infer)
-                       mv_inference_destroy(hp_mv_infer);
-
-               if (hp_mv_engine_cfg)
-                       mv_destroy_engine_config(hp_mv_engine_cfg);
-
-               if (mv_src_p2)
-                       mv_destroy_source(mv_src_p2);
-
-               if (hp_mv_infer2)
-                       mv_inference_destroy(hp_mv_infer2);
-
-               if (hp_mv_engine_cfg2)
-                       mv_destroy_engine_config(hp_mv_engine_cfg2);
-
-               if (outputTensorData) {
-                       free(outputTensorData);
-                       outputTensorData = NULL;
-               }
-
-               if (hpPoser)
-                       mv_pose_destroy(hpPoser);
-
-               printf(TEXT_YELLOW "exit..\n" TEXT_RESET);
-               signal(SIGINT, SIG_DFL);
-               exit(0);
-       } else {
-               printf("no");
-               signal(SIGINT, int_handler);
-       }
-
-       getchar(); // Get new line character
-}
-
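-/* Hand pose callback: scale each landmark from the model input size up to the
- * 640x480 overlay coordinates and remember the gesture label. */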
-void _hand_pose_cb (
-       mv_source_h source,
-       int number_of_pose_estimation,
-       mv_inference_pose_s *locations,
-       int label,
-    void *user_data)
-{
-       printf("%d landmarks\n", number_of_pose_estimation);
-       int width, height;
-       mv_source_get_height(source, &width);
-       mv_source_get_height(source, &height);
-       for (int n = 0; n < number_of_pose_estimation; ++n) {
-               humanSkeleton.pose[n].x = (int)(640.f*(float)locations->landmarks[n].point.x/(float)width);
-               humanSkeleton.pose[n].y = (int)(480.f*(float)locations->landmarks[n].point.y/(float)height);
-               humanSkeleton.scores[n] = 1.0f; /* confidences[n];*/
-
-               //printf("(%d,%d): %f\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y, confidences[n]);
-               //printf("(%d,%d)\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y);
-       }
-       humanSkeleton.label = label;
-       humanSkeleton.IsDetected = true;
-}
-
-static void _hand_detected_cb (
-        mv_source_h source,
-        const int number_of_hands,
-        const float *confidences,
-        const mv_rectangle_s *locations,
-        void *user_data) //user_data  can be mv_source?
-{
-
-#if 0
-       if (0 /*confidences[1] < thValNeck*/) {
-               printf("lost pose\n");
-               humanSkeleton.IsDetected = false;
-               humanSkeleton.isPrevPose = false;
-               return;
-       }
-       
-       printf("%d landmarks, %d crop\n", number_of_landmarks, poseCropSize);
-       for (int n = 0; n < number_of_landmarks; ++n) {
-
-               humanSkeleton.pose[n].x = (int)((float)(locations[n].x + poseRoi.point.x) / (float)poseCropSize * 640.f);
-               humanSkeleton.pose[n].y = (int)((float)(locations[n].y + poseRoi.point.y) / (float)poseCropSize * 480.f);
-               humanSkeleton.scores[n] = 1.0f; /* confidences[n];*/
-
-               //printf("(%d,%d): %f\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y, confidences[n]);
-               printf("(%d,%d)\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y);
-       }
-       humanSkeleton.IsDetected = true;
-#else
-
-       if (number_of_hands <= 0) {
-               humanSkeleton.IsDetected = false;
-               humanSkeleton.label = -1;
-               return;
-       }
-
-
-       struct timespec s_tspec;
-       struct timespec e_tspec;
-
-       clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-       mv_source_clear(mv_src_p2);
-       mv_source_fill_by_tensor_buffer(mv_src_p2, user_data,
-                                       MV_INFERENCE_DATA_FLOAT32,
-                                       56 * 56 * 21 * sizeof(float),
-                                       56, 56, 21, 3);
-
-       clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
-       struct timespec diffspec = diff(s_tspec, e_tspec);
-       unsigned long timeDiff = gettotalmillisec(diffspec);
-       printf("memcpy time: %lu(ms)\n", timeDiff);
-
-       clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-       mv_inference_pose_landmark_detect(mv_src_p2, hp_mv_infer2, NULL,  _hand_pose_cb, NULL);
-
-       clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
-       diffspec = diff(s_tspec, e_tspec);
-       timeDiff = gettotalmillisec(diffspec);
-       printf("pose_estimation time: %lu(ms)\n", timeDiff);
-       //humanSkeleton.IsDetected = true;
-#endif
-       return;
-}
-
-
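-/* Body pose callback: compare the detected pose against the mocap reference
- * (legs only) and track the hold time / repetition count. */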
-void _human_pose_cb (
-       mv_source_h source,
-       int number_of_pose_estimation,
-       mv_inference_pose_s *locations,
-       int label,
-    void *user_data)
-{
-       printf("%d landmarks\n", number_of_pose_estimation);
-
-       mv_pose_compare(hpPoser, locations,
-                                       (MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT | MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT),
-                                       &hpPoseScore);
-
-
-       if (hpPoseScore >= thPoseScore) {
-               hpPoseHoldTime++;
-       } else {
-               if (hpPoseHoldTime > thResetCount) {
-                       hpPoseCount++;
-               }
-               hpPoseHoldTime = 0;
-       }
-
-       printf("Score: %.4f, HoldTime: %d, Count: %d\n", hpPoseScore, hpPoseHoldTime, hpPoseCount);
-       for (int n = 0; n < number_of_pose_estimation; ++n) {
-
-               humanSkeleton.pose[n].x = (int)(640.f*(float)(locations->landmarks[n].point.x)/192.f);
-               humanSkeleton.pose[n].y = (int)(480.f*(float)(locations->landmarks[n].point.y)/192.f);
-               humanSkeleton.scores[n] = locations->landmarks[n].score;
-
-       }
-       humanSkeleton.IsDetected = true;
-}
-
-static gboolean
-run_pose (void *user_data)
-{
-       while (1) {
-               // We must guarantee that a new camera buffer is not pushed onto
-               // the stack until the latest camera buffer on the stack has been
-               // popped and copied to mv_src_p.
-               g_mutex_lock(&pose_mutex);
-
-               user_stack_t *s = PopBuffer();
-               if (s == NULL) {
-                       g_mutex_unlock(&pose_mutex);
-                       usleep(10);
-                       continue;
-               }
-
-               if (!GST_IS_BUFFER((GstBuffer *)s->buffer)) {
-                       g_mutex_unlock(&pose_mutex);
-                       usleep(10);
-                       return FALSE;
-               }
-
-               GstMapInfo map;
-
-               gst_buffer_map((GstBuffer *)s->buffer, &map, GST_MAP_READ);
-
-               mv_source_clear(mv_src_p);
-
-               mv_source_fill_by_buffer(mv_src_p, map.data, 192*192*3, 192, 192, MEDIA_VISION_COLORSPACE_RGB888);
-
-               gst_buffer_unmap((GstBuffer *)s->buffer, &map);
-
-               g_mutex_unlock(&pose_mutex);
-
-               struct timespec s_tspec;
-               struct timespec e_tspec;
-
-               clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-               mv_inference_pose_landmark_detect(mv_src_p, hp_mv_infer, &poseRoi, _human_pose_cb, NULL);
-
-               clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-               struct timespec diffspec = diff(s_tspec, e_tspec);
-               unsigned long timeDiff = gettotalmillisec(diffspec);
-               printf("detect + pose time: %lu(ms)\n", timeDiff);
-       }
-
-       return FALSE;
-}
-
-static gboolean
-run_hand (void *user_data)
-{
-       while (1) {
-               // We must guarantee that a new camera buffer is not pushed onto
-               // the stack until the latest camera buffer on the stack has been
-               // popped and copied to mv_src_p.
-               g_mutex_lock(&pose_mutex);
-
-               user_stack_t *s = PopBuffer();
-               if (s == NULL) {
-                       g_mutex_unlock(&pose_mutex);
-                       usleep(10);
-                       continue;
-               }
-
-               if (!GST_IS_BUFFER((GstBuffer *)s->buffer)) {
-                       g_mutex_unlock(&pose_mutex);
-                       usleep(10);
-                       continue;
-               }
-
-               GstMapInfo map;
-
-               gst_buffer_map((GstBuffer *)s->buffer, &map, GST_MAP_READ);
-
-               mv_source_clear(mv_src_p);
-
-               mv_source_fill_by_buffer(mv_src_p, map.data, 224*224*3, 224, 224, MEDIA_VISION_COLORSPACE_RGB888);
-
-               gst_buffer_unmap((GstBuffer *)s->buffer, &map);
-
-               g_mutex_unlock(&pose_mutex);
-
-               struct timespec s_tspec;
-               struct timespec e_tspec;
-
-               void *outputTensorBuffer = s->user_data;
-
-               clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-               // invoke tflite -> _hand_detected_cb -> memcpy output -> invoke tflite -> _pose_cb
-               mv_inference_hand_detect(mv_src_p, hp_mv_infer, _hand_detected_cb, outputTensorBuffer);
-
-               clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-               struct timespec diffspec = diff(s_tspec, e_tspec);
-               unsigned long timeDiff = gettotalmillisec(diffspec);
-               printf("detect + pose time: %lu(ms)\n", timeDiff);
-       }
-
-       return FALSE;
-}
-
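-/* fakesink "handoff" callbacks: push each scaled camera buffer onto the stack
- * that run_pose()/run_hand() pops from the main loop idle handler. */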
-static void
-_pose_est_handoff(GstElement *object, GstBuffer *buffer, GstPad *pad, gpointer user_data)
-{
-
-       nFrames++;
-       user_stack_t *s = GetNewStack();
-       s->buffer = buffer;
-       s->user_data = user_data;
-
-       g_mutex_lock(&pose_mutex);
-       PushBuffer(s);
-       g_mutex_unlock(&pose_mutex);
-       hdata_p.buffer = buffer;
-       hdata_p.user_data = user_data;
-}
-
-static void
-_hand_est_handoff(GstElement *object, GstBuffer *buffer, GstPad *pad, gpointer user_data)
-{
-       nFrames++;
-       user_stack_t *s = GetNewStack();
-       s->buffer = buffer;
-       s->user_data = user_data;
-
-       g_mutex_lock(&pose_mutex);
-       PushBuffer(s);
-       g_mutex_unlock(&pose_mutex);
-}
-
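-/* cairooverlay "caps-changed" callback: cache the negotiated video info used while drawing. */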
-static void
-prepare_overlay (GstElement * overlay, GstCaps * caps, gpointer user_data)
-{
-       CairoOverlayState *state = (CairoOverlayState *) user_data;
-
-       state->valid = gst_video_info_from_caps (&state->vinfo, caps);
-}
-
-static void
-draw_overlay_pose (GstElement * overlay, cairo_t * cr, guint64 timestamp,
-    guint64 duration, gpointer user_data)
-{
-       CairoOverlayState *s = (CairoOverlayState *) user_data;
-
-       if (!s->valid) {
-               printf("not ready draw_overlay");
-               return;
-       }
-
-       cairo_set_source_rgba(cr, 0.1, 0.9, 0.0, 0.7);
-       cairo_set_line_width(cr, 2.0);
-
-       float poseWeight = 0.7f;
-       float prevPoseWeight = 0.3f;
-       if (!humanSkeleton.IsDetected)
-               return;
-
-       for (int k = 0; k < 16; ++k) {
-               if (humanSkeleton.isPrevPose == false) {
-                       humanSkeleton.prevPose[k] = humanSkeleton.pose[k];
-               } else {
-
-                       humanSkeleton.prevPose[k].x = (poseWeight * humanSkeleton.pose[k].x +
-                                                                               prevPoseWeight * humanSkeleton.prevPose[k].x);
-                       humanSkeleton.prevPose[k].y = (poseWeight * humanSkeleton.pose[k].y +
-                                                                               prevPoseWeight * humanSkeleton.prevPose[k].y);
-               }
-       }
-
-       for (int k = 0; k < 16; ++k) {
-               if (humanSkeleton.scores[k] > 0.0f) {
-                       if (k > 9)
-                               cairo_set_source_rgba(cr, 0.9, 0.1, 0.0, 0.7);
-
-                       cairo_arc(cr, humanSkeleton.prevPose[k].x, humanSkeleton.prevPose[k].y, 3,0, 2*M_PI);
-                       cairo_stroke(cr);
-               }
-       }
-
-
-       cairo_select_font_face(cr, "sans-serif", CAIRO_FONT_SLANT_NORMAL, CAIRO_FONT_WEIGHT_BOLD);
-       cairo_set_font_size(cr,15);
-
-       char howToText[2][1024];
-       char tmpText[3][1024];
-       snprintf(howToText[0], 1024, "Until %d hold time over Score: %.2f", thResetCount, thPoseScore);
-       snprintf(howToText[1], 1024, "Green-Hold, Blue-Release");
-       snprintf(tmpText[0], 1024, "Score: %.4f", hpPoseScore);
-       snprintf(tmpText[1], 1024, "Hold time: %d", hpPoseHoldTime > thResetCount ? thResetCount : hpPoseHoldTime);
-       snprintf(tmpText[2], 1024, "Count: %2d", hpPoseCount);
-
-
-       cairo_set_source_rgba(cr, 1.0, 1.0, 1.0, 0.7);
-       cairo_move_to(cr, 400.0, 30.0);
-       cairo_show_text(cr, howToText[0]);
-       cairo_move_to(cr, 420.0, 45.0);
-       cairo_show_text(cr, howToText[1]);
-       if (hpPoseScore >= thPoseScore) {
-               if (hpPoseHoldTime > thResetCount)
-                       cairo_set_source_rgba(cr, 0.0, 0.1, 0.9, 0.7);
-               else
-                       cairo_set_source_rgba(cr, 0.1, 0.9, 0.0, 0.7);
-
-               cairo_move_to(cr, 420.0, 60.0);
-               cairo_show_text(cr, tmpText[0]);
-               cairo_move_to(cr, 420.0, 75.0);
-               cairo_show_text(cr, tmpText[1]);
-               cairo_move_to(cr, 420.0, 90.0);
-               cairo_show_text(cr, tmpText[2]);
-
-       } else {
-               cairo_set_source_rgba(cr, 0.9, 0.1, 0.0, 0.7);
-               cairo_move_to(cr, 420.0, 60.0);
-               cairo_show_text(cr, tmpText[0]);
-               cairo_move_to(cr, 420.0, 75.0);
-               cairo_show_text(cr, tmpText[1]);
-               cairo_move_to(cr, 420.0, 90.0);
-               cairo_show_text(cr, tmpText[2]);
-       }
-}
-
-static void
-draw_overlay_hand (GstElement * overlay, cairo_t * cr, guint64 timestamp,
-    guint64 duration, gpointer user_data)
-{
-       CairoOverlayState *s = (CairoOverlayState *) user_data;
-
-       if (!s->valid) {
-               printf("not ready draw_overlay");
-               return;
-       }
-
-       cairo_set_source_rgba(cr, 0.0, 0.0, 1.0, 0.7);
-       cairo_set_line_width(cr, 2.0);
-
-       if (!humanSkeleton.IsDetected)
-               return;
-
-       cairo_select_font_face(cr, "sans-serif", CAIRO_FONT_SLANT_NORMAL, CAIRO_FONT_WEIGHT_BOLD);
-       cairo_set_font_size(cr,15);
-       cairo_move_to(cr, 320.0, 48.0);
-       cairo_show_text(cr, gHandGestureLabel[humanSkeleton.label]);
-
-       cairo_set_source_rgba(cr, 0.1, 0.9, 0.0, 0.7);
-       cairo_set_line_width(cr, 2.0);
-
-    // thumb - red
-       cairo_set_source_rgba (cr, 0.9, 0.1, 0.0, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 1 ; k < 5; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // fore - red
-       cairo_set_source_rgba (cr, 0.9, 0.1, 0.0, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 5 ; k < 9; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // middle - green
-       cairo_set_source_rgba (cr, 0.1, 0.9, 0.0, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 9 ; k < 13; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // ring - blue
-       cairo_set_source_rgba (cr, 0.1, 0.0, 0.9, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 13 ; k < 17; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // little - purple
-       cairo_set_source_rgba (cr, 0.5, 0.0, 0.5, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 17 ; k < 21; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-}
-
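-/* GStreamer bus watch: report end-of-stream and error messages. */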
-static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data)
-{
-
-  switch (GST_MESSAGE_TYPE (msg)) {
-
-    case GST_MESSAGE_EOS:
-      printf ("End of stream\n");
-      break;
-
-    case GST_MESSAGE_ERROR: {
-      gchar  *debug;
-      GError *error;
-
-      gst_message_parse_error (msg, &error, &debug);
-      g_free (debug);
-
-      printf ("Error: %s\n", error->message);
-      g_error_free (error);
-
-      break;
-    }
-    default:
-      break;
-  }
-
-  return TRUE;
-}
-
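-/* Configure a CPM body-pose model (192x192x3 float input) on the ARMNN backend with the GPU target. */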
-int perform_armnn_human_pose_cpm_configure(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-       char *inputNodeName = "image";
-    char *outputNodeName[1] = {"Convolutional_Pose_Machine/stage_5_out"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_CPM_WEIGHT_PATH);
-
-       mv_engine_config_set_string_attribute(
-                                               mv_engine_cfg,
-                                               MV_INFERENCE_MODEL_USER_FILE_PATH,
-                                               PE_POSE_LABEL_PATH);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_TYPE,
-                        MV_INFERENCE_TARGET_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        1);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_human_pose_cpm_configure(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-       char *inputNodeName = "image";
-    char *outputNodeName[1] = {"Convolutional_Pose_Machine/stage_5_out"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_CPM_WEIGHT_PATH);
-
-       mv_engine_config_set_string_attribute(
-                                               mv_engine_cfg,
-                                               MV_INFERENCE_MODEL_USER_FILE_PATH,
-                                               PE_POSE_LABEL_PATH);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_TYPE,
-                        MV_INFERENCE_TARGET_CPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        1);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
-
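-/* Stage 1 hand detector: 224x224x3 float input producing bounding box and heatmap outputs. */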
-int perform_tflite_hand_detection_AIC(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/boundingbox", "mobilenetv2/heatmap"};
-
-    //outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AIC_1_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
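-/* Stage 2 hand-pose refiner: consumes the 56x56x21 heatmap produced by stage 1. */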
-int perform_tflite_hand_detection_AIC2(mv_engine_config_h mv_engine_cfg)
-{
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/coord_refine", "mobilenetv2/gesture"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AIC_2_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        21);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLite(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/boundingbox", "mobilenetv2/heatmap"};
-
-    //outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLite_1_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLite2(mv_engine_config_h mv_engine_cfg)
-{
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/coord_refine", "mobilenetv2/gesture"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLite_2_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        21);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLiteQ(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/boundingbox", "mobilenetv2/heatmap"};
-
-    //outputTensorData = (void*)calloc(56*56*21, sizeof(char));
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLiteQ_1_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLiteQ2(mv_engine_config_h mv_engine_cfg)
-{
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/coord_refine", "mobilenetv2/gesture"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLiteQ_2_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ARMNN);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        21);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-static void __global(void *data, struct wl_registry *registry,
-       uint32_t name, const char *interface, uint32_t version)
-{
-       struct tizen_surface **tz_surface = NULL;
-
-       if (!data) {
-               printf("NULL data\n");
-               return;
-       }
-
-       tz_surface = (struct tizen_surface **)data;
-
-       if (!interface) {
-               printf("NULL interface\n");
-               return;
-       }
-
-       if (strcmp(interface, "tizen_surface") == 0) {
-               printf("binding tizen surface for wayland\n");
-
-               *tz_surface = wl_registry_bind(registry, name, &tizen_surface_interface, 1);
-               if (*tz_surface == NULL)
-                       printf("failed to bind\n");
-
-               printf("done\n");
-       }
-
-       return;
-}
-
-static void __global_remove(void *data, struct wl_registry *wl_registry, uint32_t name)
-{
-       printf("enter\n");
-       return;
-}
-
-static const struct wl_registry_listener _wl_registry_listener = {
-       __global,
-       __global_remove
-};
-
-void __parent_id_getter(void *data, struct tizen_resource *tizen_resource, uint32_t id)
-{
-       if (!data) {
-               printf("NULL data\n");
-               return;
-       }
-
-       *((unsigned int *)data) = id;
-
-       printf("[CLIENT] got parent_id [%u] from server\n", id);
-
-       return;
-}
-
-static const struct tizen_resource_listener _tz_resource_listener = {
-       __parent_id_getter
-};
-
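-/* Bind tizen_surface through the wayland registry, fetch the window's global
- * parent id, and hand it to tizenwlsink so video is composited into this window. */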
-static void set_overlay(Ecore_Evas *ee)
-{
-   Ecore_Wl2_Window *window = NULL;
-   Ecore_Wl2_Display *e_wl2_display = NULL;
-
-   struct wl_display *display = NULL;
-   struct wl_display *display_wrapper = NULL;
-   struct wl_surface *surface = NULL;
-   struct wl_registry *registry = NULL;
-   struct wl_event_queue *queue = NULL;
-   struct tizen_surface *tz_surface = NULL;
-   struct tizen_resource *tz_resource = NULL;
-
-       window = ecore_evas_wayland2_window_get(ee);
-       if (!window) {
-               printf("failed to get wayland window\n");
-               goto _DONE;
-       }
-
-       /* set video_has flag to a video application window */
-       ecore_wl2_window_video_has(window, EINA_TRUE);
-
-       surface = (struct wl_surface *)ecore_wl2_window_surface_get(window);
-       if (!surface) {
-               printf("failed to get wayland surface\n");
-               goto _DONE;
-       }
-
-       e_wl2_display = ecore_wl2_connected_display_get(NULL);
-       if (!e_wl2_display) {
-               printf("failed to get ecore wl2 display\n");
-               goto _DONE;
-       }
-
-       display = (struct wl_display *)ecore_wl2_display_get(e_wl2_display);
-       if (!display) {
-               printf("failed to get wayland display\n");
-               goto _DONE;
-       }
-
-       display_wrapper = wl_proxy_create_wrapper(display);
-       if (!display_wrapper) {
-               printf("failed to create wl display wrapper\n");
-               goto _DONE;
-       }
-
-       queue = wl_display_create_queue(display);
-       if (!queue) {
-               printf("failed to create wl display queue\n");
-               goto _DONE;
-       }
-
-       wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
-
-       registry = wl_display_get_registry(display_wrapper);
-       if (!registry) {
-               printf("failed to get wayland registry\n");
-               goto _DONE;
-       }
-
-       wl_registry_add_listener(registry, &_wl_registry_listener, &tz_surface);
-
-       wl_display_dispatch_queue(display, queue);
-       wl_display_roundtrip_queue(display, queue);
-
-       if (!tz_surface) {
-               printf("failed to get tizen surface\n");
-               goto _DONE;
-       }
-
-       /* Get parent_id, which is unique across the entire system. */
-       tz_resource = tizen_surface_get_tizen_resource(tz_surface, surface);
-       if (!tz_resource) {
-               printf("failed to get tizen resurce\n");
-               goto _DONE;
-       }
-
-       int parent_id = 0;
-
-       tizen_resource_add_listener(tz_resource, &_tz_resource_listener, &parent_id);
-
-       wl_display_roundtrip_queue(display, queue);
-
-       if (parent_id > 0) {
-               printf("parent id : %d\n", parent_id);
-
-               gst_video_overlay_set_wl_window_wl_surface_id(GST_VIDEO_OVERLAY(sink2), parent_id);
-               gst_element_set_state(pipeline, GST_STATE_PLAYING);
-       } else {
-               printf("failed to get parent id\n");
-       }
-
-_DONE:
-       if (tz_surface) {
-               tizen_surface_destroy(tz_surface);
-               tz_surface = NULL;
-       }
-
-       if (tz_resource) {
-               tizen_resource_destroy(tz_resource);
-               tz_resource = NULL;
-       }
-
-       if (registry) {
-               wl_registry_destroy(registry);
-               registry = NULL;
-       }
-
-       if (queue) {
-               wl_event_queue_destroy(queue);
-               queue = NULL;
-       }
-
-       if (display_wrapper) {
-               wl_proxy_wrapper_destroy(display_wrapper);
-               display_wrapper = NULL;
-       }
-}
-
-static void win_resize_cb (void *data, Evas *e, Evas_Object *obj, void *event_info)
-{
-   int w, h;
-
-   printf("win_resize_cb\n");
-
-   Evas * _e = evas_object_evas_get(obj);
-   Ecore_Evas *ee = ecore_evas_ecore_evas_get(_e);
-   ecore_evas_geometry_get(ee, NULL, NULL, &w, &h);
-
-   if (!st) {
-      set_overlay(ee);
-
-      int x, y;
-      elm_win_screen_position_get(obj, &x, &y);
-      printf("x = %d, y = %d\n", x, y);
-   }
-   st++;
-}
-
-static void cb_new_pad (GstElement *element, GstPad *pad, gpointer data)
-{
-  gchar *name;
-  GstElement *other = data;
-
-  name = gst_pad_get_name (pad);
-  printf ("A new pad %s was created for %s\n", name, gst_element_get_name(element));
-  g_free (name);
-
-  printf ("element %s will be linked to %s\n",
-           gst_element_get_name(element),
-           gst_element_get_name(dscale));
-  gst_element_link(element, dscale);
-}
-
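-/* appcore create callback: configure the media vision engines, then build and
- * wire up the GStreamer pipeline. */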
-static int app_create(void *data)
-{
-       appdata *ad = data;
-       Evas_Object *win = NULL;
-
-       InitProfiler();
-
-       // GST
-       g_mutex_init(&pose_mutex);
-
-       signal(SIGINT, int_handler);
-
-       /* initialization */
-       gst_init(NULL, NULL);
-
-       /* mediavision configuration*/
-       IsGestureMode = false;
-       if (hp_mv_engine_cfg == NULL) {
-               mv_create_engine_config(&hp_mv_engine_cfg);
-       }
-
-       if (hp_mv_engine_cfg2 == NULL) {
-               mv_create_engine_config(&hp_mv_engine_cfg2);
-       }
-
-       int err = MEDIA_VISION_ERROR_NONE;
-
-
-       if (ad->modelType == MODEL_TYPE_POSE_CPM) {
-               err = perform_armnn_human_pose_cpm_configure(hp_mv_engine_cfg);
-               //err = perform_tflite_human_pose_cpm_configure(hp_mv_engine_cfg);
-
-               mv_pose_create(&hpPoser);
-               mv_pose_set_from_file(hpPoser,
-                                               PLD_MOTION_CAPTURE_FILE_PATH,
-                                               PLD_MOTION_CAPTURE_MAPPING_FILE_PATH);
-
-       } else if (ad->modelType == MODEL_TYPE_POSE_HAND_AICLite) {
-               outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-               err = perform_tflite_hand_detection_AIC(hp_mv_engine_cfg);
-
-               err = perform_tflite_hand_detection_AIC2(hp_mv_engine_cfg2);
-       } else if (ad->modelType == MODEL_TYPE_POSE_HAND_AICLite2) {
-               outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-               err = perform_tflite_hand_detection_AICLite(hp_mv_engine_cfg);
-
-               err = perform_tflite_hand_detection_AICLite2(hp_mv_engine_cfg2);
-       } else {
-               outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-               err = perform_tflite_hand_detection_AICLiteQ(hp_mv_engine_cfg);
-
-               err = perform_tflite_hand_detection_AICLiteQ2(hp_mv_engine_cfg2);
-       }
-
-       if (err != MEDIA_VISION_ERROR_NONE) {
-               printf("Error on inference engine configuration\n");
-       }
-
-       printf("configuration done\n");
-
-       printf("loading pose model: ");
-       mv_inference_create(&hp_mv_infer);
-
-       mv_inference_configure(hp_mv_infer, hp_mv_engine_cfg);
-
-       clock_t start = clock();
-       mv_inference_prepare(hp_mv_infer);
-       clock_t end = clock();
-       printf("time: %2.3f\n", (double)(end - start)/CLOCKS_PER_SEC);
-
-       /* mediavision source */
-       mv_create_source(&mv_src_p);
-
-       if (ad->modelType >= MODEL_TYPE_POSE_HAND_AICLite) {
-               mv_inference_create(&hp_mv_infer2);
-
-               mv_inference_configure(hp_mv_infer2, hp_mv_engine_cfg2);
-
-               start = clock();
-               mv_inference_prepare(hp_mv_infer2);
-               end = clock();
-               printf("time: %2.3f\n", (double)(end - start)/CLOCKS_PER_SEC);
-
-               mv_create_source(&mv_src_p2);
-       }
-
-       pipeline = gst_pipeline_new("app");
-
-       overlay_state = g_new0 (CairoOverlayState, 1);
-
-       /* create gstreamer elements */
-       if (!ad->filename) {
-               source = gst_element_factory_make("v4l2src", "src");
-               filter = gst_element_factory_make("capsfilter", "filter");
-       } else {
-               source = gst_element_factory_make("filesrc", "src");
-               
-               dbin = gst_element_factory_make("decodebin", "dbin");
-               dscale = gst_element_factory_make("videoscale", "dscale");
-               dconv = gst_element_factory_make("videoconvert", "dconv");
-               dsfilter = gst_element_factory_make("capsfilter", "dsfilter");
-               dcfilter = gst_element_factory_make("capsfilter", "dcfilter");
-       }
-
-       if (ad->filename2) {
-               tee2 = gst_element_factory_make("tee", "tee2");
-               enc = gst_element_factory_make("avenc_mpeg4", "enc");
-               muxmp4 = gst_element_factory_make("mp4mux", "muxmp4");
-               fsink2 = gst_element_factory_make("filesink", "fsink2");
-               queue4 = gst_element_factory_make("queue", "queue4");
-               queue5 = gst_element_factory_make("queue", "queue5");
-               encconv = gst_element_factory_make("videoconvert", "encconv");
-               g_object_set(G_OBJECT(enc), "bitrate", 800000, NULL);
-       }
-
-       flip = gst_element_factory_make("videoflip", "vflip" );
-       tee = gst_element_factory_make("tee", "tee");
-       queue1 = gst_element_factory_make("queue", "queue1");
-       queue2 = gst_element_factory_make("queue", "queue2");
-
-       if (0 /*ad->modelType == MODEL_TYPE_POSE_HAND_AIC*/) {
-               queue3 = gst_element_factory_make("queue", "queue3");
-       }
-
-       // queue1 - videoscale - capsfilter - videoconvert - capsfilter - videorate - capsfilter - fakesink
-       vscale = gst_element_factory_make("videoscale", "scale");
-       vsfilter = gst_element_factory_make("capsfilter", "vsfilter");
-       vconv = gst_element_factory_make("videoconvert", "convert");
-       vcfilter = gst_element_factory_make("capsfilter", "vcfilter");
-       vrate = gst_element_factory_make("videorate", "rate");
-       vrfilter = gst_element_factory_make("capsfilter", "vrfilter");
-       vrsink = gst_element_factory_make("fakesink", "vrsink");
-
-       // queue2 - videoconvert - cairooveray - tizenwlsink
-       oconv = gst_element_factory_make("videoconvert", "oconv");
-       coverlay = gst_element_factory_make("cairooverlay", "coverlay");
-       sink = gst_element_factory_make("fpsdisplaysink", "vsink");
-       sink2 = gst_element_factory_make("tizenwlsink", "vsink2");
-
-       // after detection, crop using video crop
-       // queue3 - videocrop - videoscale -capsfilter - videoconvert - capsfilter -fakesink
-
-       if (0/*ad->modelType == MODEL_TYPE_POSE_HAND_AIC*/) {
-               vcrop = gst_element_factory_make("videocrop", "crop");
-               vcrscale = gst_element_factory_make("videoscale", "crscale");
-               vcrsfilter = gst_element_factory_make("capsfilter", "vcrsfilter");
-               vcrsconv = gst_element_factory_make("videoconvert", "vcrsconvert");
-               vcrscfilter = gst_element_factory_make("capsfilter", "vcrscfilter");
-               vcrssink = gst_element_factory_make("fakesink", "vcrssink");
-       }
-
-       if (!pipeline || !source ||
-               !tee || !queue1 || !vscale || !vsfilter || !vconv || !vcfilter ||
-               !vrate || !vrfilter || !vrsink ||
-               !queue2 || !oconv || !coverlay || !sink || !sink2) {
-               printf(TEXT_RED "One element(queue1 or queue2) might be not created. Exiting.\n" TEXT_RESET);
-               return -1;
-       }
-
-       if (0 /*ad->modelType == MODEL_TYPE_POSE_HAND_AIC && (!pipeline || !queue3 
-               || !vcrop || !vcrscale || !vcrsfilter || !vcrsconv || !vcrscfilter || !vcrssink*/) {
-               printf(TEXT_RED "One element(queue3) might be not created. Exiting.\n" TEXT_RESET);
-               return -1;
-       }
-
-       if (!ad->filename) {
-               if (!filter) {
-                       printf(TEXT_RED "One element might be not created. Existing.\n" TEXT_RESET);
-                       return -1;
-               }
-               g_object_set(G_OBJECT(filter), "caps", gst_caps_from_string("video/x-raw, format=YUY2, width=640, height=480"), NULL);
-       } else {
-               if (!dbin || !dscale || !dconv) {
-                       printf(TEXT_RED "One element might be not created. Exiting.\n" TEXT_RESET);
-                       return -1;
-               }
-               g_object_set(G_OBJECT(dsfilter), "caps", gst_caps_from_string("video/x-raw, width=640, height=480"), NULL);
-               g_object_set(G_OBJECT(dcfilter), "caps", gst_caps_from_string("video/x-raw, format=YUY2, width=640, height=480"), NULL);
-               g_signal_connect (dbin, "pad-added", G_CALLBACK (cb_new_pad), NULL);
-       }
-
-       if (ad->filename2) {
-               if (!tee2 || !enc || !muxmp4 || !queue4 || !queue5 || !fsink2 || !encconv) {
-                       printf(TEXT_RED "One element might be not created. Exiting.\n" TEXT_RESET);
-                       printf("%p, %p, %p, %p, %p, %p, %p\n", tee2, enc, muxmp4, queue4, queue5, fsink2, encconv);
-                       return -1;
-               }
-               g_object_set(G_OBJECT(fsink2), "location", ad->filename2, NULL);
-
-       }
-
-       /* set up the pipeline */
-       //g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay), overlay_state);
-       if (ad->modelType == MODEL_TYPE_POSE_CPM) {
-               g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay_pose), overlay_state);
-       } else {
-               g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay_hand), overlay_state);
-       }
-       
-       g_signal_connect (coverlay, "caps-changed", G_CALLBACK (prepare_overlay), overlay_state);
-
-       if (!ad->filename) {
-               g_object_set(G_OBJECT(source), "device", "/dev/video252", NULL); // 252
-
-       } else {
-               g_object_set(G_OBJECT(source), "location", ad->filename, NULL);
-       }
-       g_object_set(G_OBJECT(flip), "method", 4, NULL);
-       g_object_set(G_OBJECT(sink2), "use-tbm", FALSE, NULL);
-       g_object_set(G_OBJECT(sink2), "sync", FALSE, NULL);
-       g_object_set(G_OBJECT(sink), "video-sink", sink2, NULL);
-       g_object_set(G_OBJECT(sink), "sync", FALSE, NULL);
-
-       if (ad->modelType == MODEL_TYPE_POSE_HAND_AICLite ||
-               ad->modelType == MODEL_TYPE_POSE_HAND_AICLite2 ||
-               ad->modelType == MODEL_TYPE_POSE_HAND_AICLite2Q) {
-               g_object_set(G_OBJECT(vsfilter), "caps", gst_caps_from_string("video/x-raw, width=224, height=224"), NULL);
-               poseCropSize = 224;
-       } else {
-               g_object_set(G_OBJECT(vsfilter), "caps", gst_caps_from_string("video/x-raw, width=192, height=192"), NULL);
-               poseCropSize = 192;
-       }
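-       /* Note: the scaler caps above must match the model input geometry set up
-        * in app_create(): 224x224 for the hand models, 192x192 for CPM.
-        * poseCropSize records the active size for later coordinate scaling. */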
-
-       g_object_set(G_OBJECT(vcfilter), "caps", gst_caps_from_string("video/x-raw, format=RGB"), NULL);
-       g_object_set(G_OBJECT(vrfilter), "caps", gst_caps_from_string("video/x-raw, framerate=15/1"), NULL);
-
-       //g_object_set(G_OBJECT(vrate), "drop-only", TRUE, NULL);
-
-       //g_object_set(G_OBJECT(queue2), "leaky", 2, NULL);
-#if 0
-       g_object_set(G_OBJECT(queue3), "max-size-buffers", 0, NULL);
-       g_object_set(G_OBJECT(queue3), "max-size-time", 0, NULL);
-       g_object_set(G_OBJECT(queue3), "max-size-bytes", 0, NULL);
-#endif
-       //g_object_set(G_OBJECT(queue3), "leaky", 2, NULL);
-
-       // continue here: wire up the inference sink
-       printf("vrsink signal-handoffs\n");
-       g_object_set(G_OBJECT(vrsink), "signal-handoffs", TRUE, NULL);
-
-       if (ad->modelType == MODEL_TYPE_POSE_CPM) {
-               g_idle_add (run_pose, NULL);
-               handler_p = g_signal_connect (vrsink, "handoff", G_CALLBACK(_pose_est_handoff), NULL);
-       } else {
-               g_idle_add (run_hand, NULL);
-               handler_p = g_signal_connect (vrsink, "handoff", G_CALLBACK(_hand_est_handoff), outputTensorData);
-       }
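-       /* The fakesink "handoff" signal fires on GStreamer's streaming thread for
-        * every buffer; the _*_handoff callbacks only stash the buffer and defer
-        * the actual inference to the GLib main loop, so video rendering is not
-        * blocked while the network runs. */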
-
-       g_object_set(G_OBJECT(vrsink), "sync", FALSE, NULL);
-
-
-       g_object_set(G_OBJECT(vcrssink), "sync", FALSE, NULL);
-
-
-       /*  add a message handler */
-       bus = gst_pipeline_get_bus (GST_PIPELINE(pipeline));
-       bus_watch_id = gst_bus_add_watch(bus, bus_call, NULL);
-       gst_object_unref(bus);
-
-       printf("elements will be added.\n");
-       /* add elements into the pipeline */
-       gst_bin_add_many(GST_BIN(pipeline),
-                                       source,
-                                       flip, tee, queue1, vscale, vsfilter, vconv, vcfilter,
-                                       vrate, vrfilter, vrsink,
-                                       queue2, oconv, coverlay, sink,
-                                       NULL);
-
-       if (0 /*ad->modelType == MODEL_TYPE_POSE_HAND_AIC*/) {
-               gst_bin_add_many(GST_BIN(pipeline),
-                                       queue3, vcrop, vcrscale, vcrsfilter, vcrsconv, vcrscfilter, vcrssink,
-                                       NULL);
-               gst_element_link_many (tee, queue3, vcrop, vcrscale, vcrsfilter, vcrsconv, vcrscfilter, vcrssink, NULL);
-       }
-
-       /* link elements */
-       if (!ad->filename) {
-               printf("take !ad->filename 1\n");
-               gst_bin_add(GST_BIN(pipeline), filter);
-               gst_element_link_many(source, flip, filter, tee, NULL);
-       } else {
-               gst_bin_add_many(GST_BIN(pipeline), dbin, dscale, dconv, dsfilter, dcfilter, NULL);
-               gst_element_link_many(source, dbin, NULL);
-               gst_element_link_many(dscale, dsfilter, dconv, dcfilter, tee, NULL);
-       }
-
-       if (!ad->filename2) {
-               // display
-               gst_element_link_many (tee, queue2, oconv, coverlay, /*toverlay,*/ sink, NULL);
-               // pose
-               gst_element_link_many (tee, queue1, vrate, vrfilter, vconv, vcfilter, vscale, vsfilter, vrsink, NULL);
-       } else {
-
-               gst_bin_add_many(GST_BIN(pipeline), tee2, enc, muxmp4, fsink2, queue4, queue5, encconv, NULL);
-
-               // display
-               gst_element_link_many (tee, queue2, oconv, coverlay, tee2, NULL);
-               gst_element_link_many (tee2, queue4, sink, NULL);
-               gst_element_link_many (tee2, queue5, encconv, enc, muxmp4, fsink2, NULL);
-
-               // pose
-               gst_element_link_many (tee, queue1, vrate, vrfilter, vconv, vcfilter, vscale, vsfilter, vrsink, NULL);
-       }
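-       /* Resulting topology, roughly:
-        *   source -> (flip/capsfilter | decodebin/scale/convert) -> tee
-        *   tee -> queue2 -> videoconvert -> cairooverlay -> (tee2 ->) display sink
-        *   tee -> queue1 -> videorate -> convert/scale -> vrsink (inference tap)
-        * with the optional tee2 branch also encoding to MP4 when filename2 is set. */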
-       
-       /* set the pipeline state to "playing" state */
-       //gst_element_set_state(pipeline, GST_STATE_PLAYING);
-
-       /* loop */
-       humanSkeleton.IsDetected = false;
-       humanSkeleton.isPrevPose = false;
-       humanSkeleton.label = -1;
-       printf(TEXT_GREEN "Running.....\n" TEXT_RESET);
-       // GST_END
-#if 0
-       /* use gl backend */
-       elm_config_accel_preference_set("opengl");
-
-       /* create window */
-       //win = elm_win_add(NULL, PACKAGE, ELM_WIN_SPLASH );
-       win = elm_win_add(NULL, PACKAGE, ELM_WIN_BASIC);
-       if (win) {
-               elm_win_title_set(win, PACKAGE);
-               elm_win_borderless_set(win, EINA_TRUE);
-               elm_win_autodel_set(win, EINA_TRUE);
-               elm_win_alpha_set(win, EINA_FALSE);
-         evas_object_show(win);
-       }
-       elm_win_layer_set(win, 9);
-       elm_win_prop_focus_skip_set(win, EINA_TRUE);
-
-       ad->win = win;
-       g_win_id = win;
-       selected_win_id = g_win_id;
-
-       Evas_Object *bg = elm_bg_add(win);
-       elm_win_resize_object_add(win, bg);
-       evas_object_size_hint_min_set(bg, WIDTH, HEIGHT);
-       evas_object_size_hint_max_set(bg, WIDTH, HEIGHT);
-       evas_object_show(bg);
-
-       elm_win_activate(win);
-
-
-       evas_object_event_callback_add(win, EVAS_CALLBACK_RESIZE, win_resize_cb, NULL);
-#else
-       gst_element_set_state (pipeline, GST_STATE_PLAYING);
-#endif
-
-       return 0;
-}
-
-
-static int app_terminate(void *data)
-{
-       appdata *ad = data;
-       int i = 0;
-
-       DeinitProfiler();
-
-       /* out of loop */
-       printf(TEXT_GREEN "Stopping.....\n" TEXT_RESET);
-       gst_element_set_state(pipeline, GST_STATE_NULL);
-
-       printf(TEXT_GREEN "pipe unref.....\n" TEXT_RESET);
-       gst_object_unref(GST_OBJECT(pipeline));
-
-       printf(TEXT_GREEN "remove bus_watch id.....\n" TEXT_RESET);
-       g_source_remove(bus_watch_id);
-
-       if (overlay_state != NULL) {
-               printf(TEXT_GREEN "g_free overlay.....\n" TEXT_RESET);
-               g_free(overlay_state);
-       }
-
-       g_mutex_clear(&pose_mutex);
-       printf(TEXT_GREEN "done.....\n" TEXT_RESET);
-
-       if (g_win_id) {
-               evas_object_del(g_win_id);
-               g_win_id = NULL;
-       }
-       ad->win = NULL;
-       selected_win_id = NULL;
-
-       return 0;
-}
-struct appcore_ops ops = {
-       .create = app_create,
-       .terminate = app_terminate,
-};
-
-int main (int argc, char *argv[])
-{
-       memset(&ad, 0x0, sizeof(appdata));
-       ops.data = &ad;
-
-       if (argc >= 6) {
-               ad.filename = g_strdup(argv[5]);
-               printf("launch with file source (%s)\n", ad.filename);
-               if (argc > 6) {
-                       ad.filename2 = g_strdup(argv[6]);
-                       ad.numbuffers = -1;
-                       printf("records output(%s)\n", ad.filename2);
-               }
-               if (argc > 7) {
-                       ad.numbuffers = atoi(argv[7]);
-               }
-       } else {
-               printf("launch with camera source\n");
-       }
-
-       if (argc < 2) {
-               printf("usage: mv_stream_infer model [thPoseScore, thResetCount, thCustom, [filename]]\n");
-               printf("model: 0(CPM), 1(AIC Hand), 2(AIC Lite Hand), 3(AIC Lite Q Hand)\n");
-               return -1;
-       }
-
-       ad.modelType = atoi(argv[1]);
-       if (ad.modelType < 0 || ad.modelType > 3) {
-               printf("not supported model type [%d]\n", ad.modelType);
-               return -1;
-       }
-
-       if (ad.modelType != MODEL_TYPE_POSE_HAND_AICLite &&
-               ad.modelType != MODEL_TYPE_POSE_HAND_AICLite2 &&
-               ad.modelType != MODEL_TYPE_POSE_HAND_AICLite2Q) {
-               if (argc > 2 && atoi(argv[2]) > 0) {
-                       thPoseScore = (float)atoi(argv[2]) / 100.f;
-               } else {
-                       if (thPoseScore != 0)
-                               thPoseScore = thPoseScore / 100.f;
-               }
-               if (argc > 3)
-                       thResetCount = atoi(argv[3]);
-               if (argc > 4 && atoi(argv[4]) > 0) {
-                       thCustom = (float)atoi(argv[4]) / 100.f;
-               } else {
-                       if (thCustom != 0)
-                               thCustom = thCustom / 100.f;
-               }
-
-               poseRoi.point.x = 50;
-               poseRoi.point.y = 0;
-               poseRoi.width = 100;
-               poseRoi.height = 192;
-       } else {
-               if (argc > 2) {
-                       ad.filename2 = g_strdup(argv[2]);
-               }
-               poseRoi.point.x = 0;
-               poseRoi.point.y = 0;
-               poseRoi.width = 0;
-               poseRoi.height = 0;
-       }
-
-       return appcore_efl_main(PACKAGE, &argc, &argv, &ops);
-}
diff --git a/test/testsuites/stream_infer/stream_infer_mlapi.c b/test/testsuites/stream_infer/stream_infer_mlapi.c
deleted file mode 100644 (file)
index 2cc2169..0000000
+++ /dev/null
@@ -1,1846 +0,0 @@
-/**
- * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define _USE_MATH_DEFINES
-#include <fcntl.h>
-#include <mv_common.h>
-#include <mv_inference.h>
-
-#include <mv_testsuite_common.h>
-
-#include <image_helper.h>
-#include <mv_video_helper.h>
-
-#include <mv_log_cfg.h>
-
-#include <math.h>
-#include <ctype.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <dirent.h>
-#include <string.h>
-#include <limits.h>
-#include <time.h>
-#define MAX(a, b) \
-({ __typeof__ (a) _a = (a); \
-__typeof__ (b) _b = (b); \
-_a > _b ? _a : _b; })
-
-#define MIN(a,b) \
-({ __typeof__ (a) _a = (a); \
-__typeof__ (b) _b = (b); \
-_a < _b ? _a : _b; })
-
-#include <glib-2.0/glib.h>
-#include <gst/gst.h>
-#include <gst/video/video.h>
-#include <cairo.h>
-#include <cairo-gobject.h>
-
-#include <Elementary.h>
-#include <appcore-efl.h>
-#include <Ecore.h>
-#include <Ecore_Evas.h>
-#include <Ecore_Wl2.h>
-#include <tizen-extension-client-protocol.h>
-#include <efl_util.h>
-
-#include <gst/gst.h>
-#include <gst/video/videooverlay.h>
-#include <unistd.h>
-#include <time.h>
-
-#define WIDTH  (480)
-#define HEIGHT (270)
-
-#ifdef PACKAGE
-#undef PACKAGE
-#endif
-#define PACKAGE "test"
-
-static char* gHandGestureLabel[] = { "None", "One", "Two", "Three", "Four", "Five",
-                                       "Six", "Seven", "SmallHeart", "OK", "DisLike", "Like",
-                                       "Fist", "Rock", "Love"};
-static int st = 0;
-static Evas_Object *g_eo = NULL;
-static Evas_Object *icon = NULL;
-
-/* for video display */
-static Evas_Object *g_win_id;
-static Evas_Object *selected_win_id;
-
-enum {
-       MODEL_TYPE_POSE_CPM = 0,
-       MODEL_TYPE_POSE_HAND_AICLite, // posenet lite 224
-       MODEL_TYPE_POSE_HAND_AICLite2, // posenet 0709 f
-       MODEL_TYPE_POSE_HAND_AICLite2Q // posenet 0709 q
-};
-
-typedef struct {
-       gchar *filename;
-       gchar *filename2;
-       int numbuffers;
-       int modelType;
-       Evas_Object *win;
-       Evas_Object *layout_main;       /* layout widget based on EDJ */
-       /* add more variables here */
-
-} appdata;
-
-static mv_rectangle_s poseRoi;
-
-static appdata ad;
-static GstBus *bus;
-static guint bus_watch_id;
-
-#define FILE_PATH_SIZE 1024
-
-// pose estimation
-#define PE_TFLITE_CPM_WEIGHT_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pld-tflite-001.tflite"
-#define PE_POSE_LABEL_PATH "/usr/share/capi-media-vision/models/PLD/tflite/pose-label.txt"
-
-
-#define PE_TFLITE_AIC_1_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite"
-#define PE_TFLITE_AIC_2_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite"
-
-
-#define PE_TFLITE_AICLite_1_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet1_0709_f.tflite"
-#define PE_TFLITE_AICLite_2_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet2_0709_f.tflite"
-
-#define PE_TFLITE_AICLiteQ_1_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet1_0709_dq.tflite"
-#define PE_TFLITE_AICLiteQ_2_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE/tflite/posenet2_0709_f.tflite"
-
-#define PLD_MOTION_CAPTURE_FILE_PATH "/usr/share/capi-media-vision/models/PLD/mocap/example.bvh"
-#define PLD_MOTION_CAPTURE_MAPPING_FILE_PATH "/usr/share/capi-media-vision/models/PLD/mocap/example-mocap-mapping.txt"
-
-#define POSE_RESET_COUNT 5
-
-static float thPoseScore = 0.3f;
-static int thResetCount = 5;
-static float thCustom = 0.0f;
-
-typedef struct _rect {
-  int left;
-  int top;
-  int right;
-  int bottom;
-  int type;
-  bool updated;
-  bool cr_updated;
-} rect;
-
-
-typedef struct _humanSkeleton {
-  mv_point_s pose[21/*14*/];
-  mv_point_s prevPose[21/*14*/];
-  mv_rectangle_s loc;
-  mv_rectangle_s prevLoc;
-  mv_rectangle_s handRoi;
-  float scores[21/*14*/];
-  bool isPrevPose;
-  bool updated;    // detection is fine and the pose is also good, then update drawing
-  bool locUpdated; // track previous detection
-  bool IsDetected; // detection
-  int label;
-} HumanSkeleton;
-
-typedef struct
-{
-  gboolean valid;
-  GstVideoInfo vinfo;
-} CairoOverlayState;
-
-typedef struct
-{
-  GstBuffer *buffer;
-  gpointer user_data;
-} HandOffData;
-
-//gchar *gesturenames;
-
-static HandOffData hdata_p;
-
-static GMutex pose_mutex;
-static guint old_timeout = 0;
-static guint nFrames = 0;
-
-// Gstreamer
-GstElement *pipeline, *source, *flip, *filter, *toverlay, *sink, *sink2;
-GstElement *tee, *vscale, *vsfilter, *vconv, *vcfilter;
-GstElement *vrate, *vrfilter, *fsink, *vrsink;
-GstElement *queue1, *queue2, *queue3;
-GstElement *oconv, *coverlay;
-GstElement *vcrop, *vcrscale, *vcrsfilter, *vcrsconv, *vcrscfilter, *vcrssink;
-CairoOverlayState *overlay_state;
-
-GstElement *dbin, *dscale, *dconv;
-GstElement *dsfilter, *dcfilter;
-
-GstElement *tee2, *enc, *muxmp4, *fsink2, *queue4, *queue5, *encconv;
-
-//static HandSkeleton handSkeleton;
-static HumanSkeleton humanSkeleton;
-gulong handler_p;
-GList *line_list = NULL;
-
-mv_source_h mv_src_p;
-mv_source_h mv_src_p2;
-
-// Human pose
-mv_engine_config_h hp_mv_engine_cfg;
-mv_inference_h hp_mv_infer;
-
-mv_engine_config_h hp_mv_engine_cfg2;
-mv_inference_h hp_mv_infer2;
-
-// pose comparison
-mv_pose_h hpPoser;
-float hpPoseScore;
-int hpPoseHoldTime;
-int hpPoseCount;
-
-static void * outputTensorData;
-
-FILE *fp;
-
-static bool IsGestureMode;
-
-static int poseCropSize = 0;
-
-#define IMAGE_SIZE_WIDTH 640
-#define IMAGE_SIZE_HEIGHT 480
-
-#define NANO_PER_SEC ((__clock_t) 1000000000)
-#define NANO_PER_MILLI  ((__clock_t) 1000000)
-#define MILLI_PER_SEC  ((__clock_t) 1000)
-
-struct timespec diff(struct timespec start, struct timespec end)
-{
-    struct timespec temp;
-    if ((end.tv_nsec - start.tv_nsec) < 0) {
-        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
-        temp.tv_nsec = NANO_PER_SEC + end.tv_nsec - start.tv_nsec;
-    }
-    else {
-        temp.tv_sec = end.tv_sec - start.tv_sec;
-        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
-    }
-    return temp;
-}
-
-unsigned long gettotalmillisec(const struct timespec time)
-{
-    return time.tv_sec * MILLI_PER_SEC + time.tv_nsec / NANO_PER_MILLI;
-}
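-/* Typical use of the two helpers above, as done around each inference call:
- *
- *   struct timespec s, e;
- *   clock_gettime(CLOCK_MONOTONIC, &s);
- *   ... work ...
- *   clock_gettime(CLOCK_MONOTONIC, &e);
- *   printf("%lu ms\n", gettotalmillisec(diff(s, e)));
- */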
-
-
-void int_handler(int sig)
-{
-       char c;
-
-       signal(sig, SIG_IGN);
-       while ((getchar()) != '\n');
-
-       printf(TEXT_YELLOW "Do you want to quit? [y/n]\n" TEXT_RESET);
-       c = getchar();
-       if (c == 'y' || c == 'Y') {
-
-               g_signal_handler_disconnect(vrsink, handler_p);
-#if 0
-               g_signal_handler_disconnect(vcrssink, handler_gp);
-#endif
-
-               gst_element_send_event(pipeline, gst_event_new_eos());
-
-               sleep(4);
-
-
-               if (mv_src_p)
-                       mv_destroy_source(mv_src_p);
-
-               if (hp_mv_infer)
-                       mv_inference_destroy(hp_mv_infer);
-
-               if (hp_mv_engine_cfg)
-                       mv_destroy_engine_config(hp_mv_engine_cfg);
-
-               if (mv_src_p2)
-                       mv_destroy_source(mv_src_p2);
-
-               if (hp_mv_infer2)
-                       mv_inference_destroy(hp_mv_infer2);
-
-               if (hp_mv_engine_cfg2)
-                       mv_destroy_engine_config(hp_mv_engine_cfg2);
-
-               if (outputTensorData) {
-                       free(outputTensorData);
-                       outputTensorData = NULL;
-               }
-
-               if (hpPoser)
-                       mv_pose_destroy(hpPoser);
-
-               printf(TEXT_YELLOW "exit..\n" TEXT_RESET);
-               signal(SIGINT, SIG_DFL);
-               exit(0);
-       } else {
-               printf("no\n");
-               signal(SIGINT, int_handler);
-       }
-
-       getchar(); // Get new line character
-}
-
-void _hand_pose_cb (
-       mv_source_h source,
-       int number_of_pose_estimation,
-       mv_inference_pose_s *locations,
-       int label,
-    void *user_data)
-{
-       printf("%d landmarks\n", number_of_pose_estimation);
-       int width, height;
-       mv_source_get_width(source, &width);
-       mv_source_get_height(source, &height);
-       for (int n = 0; n < number_of_pose_estimation; ++n) {
-               humanSkeleton.pose[n].x = (int)(640.f*(float)locations->landmarks[n].point.x/(float)width);
-               humanSkeleton.pose[n].y = (int)(480.f*(float)locations->landmarks[n].point.y/(float)height);
-               humanSkeleton.scores[n] = 1.0f; /* confidences[n];*/
-
-               //printf("(%d,%d): %f\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y, confidences[n]);
-               //printf("(%d,%d)\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y);
-       }
-       humanSkeleton.label = label;
-       humanSkeleton.IsDetected = true;
-}
-
-static void _hand_detected_cb (
-        mv_source_h source,
-        const int number_of_hands,
-        const float *confidences,
-        const mv_rectangle_s *locations,
-        void *user_data) //user_data  can be mv_source?
-{
-
-#if 0
-       if (0 /*confidences[1] < thValNeck*/) {
-               printf("lost pose\n");
-               humanSkeleton.IsDetected = false;
-               humanSkeleton.isPrevPose = false;
-               return;
-       }
-       
-       printf("%d landmarks, %d crop\n", number_of_landmarks, poseCropSize);
-       for (int n = 0; n < number_of_landmarks; ++n) {
-
-               humanSkeleton.pose[n].x = (int)((float)(locations[n].x + poseRoi.point.x) / (float)poseCropSize * 640.f);
-               humanSkeleton.pose[n].y = (int)((float)(locations[n].y + poseRoi.point.y) / (float)poseCropSize * 480.f);
-               humanSkeleton.scores[n] = 1.0f; /* confidences[n];*/
-
-               //printf("(%d,%d): %f\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y, confidences[n]);
-               printf("(%d,%d)\n", humanSkeleton.pose[n].x, humanSkeleton.pose[n].y);
-       }
-       humanSkeleton.IsDetected = true;
-#else
-
-       if (number_of_hands <= 0) {
-               humanSkeleton.IsDetected = false;
-               humanSkeleton.label = -1;
-               return;
-       }
-
-
-       struct timespec s_tspec;
-       struct timespec e_tspec;
-
-       clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-       mv_source_clear(mv_src_p2);
-       mv_source_fill_by_tensor_buffer(mv_src_p2, user_data,
-                                       MV_INFERENCE_DATA_FLOAT32,
-                                       56 * 56 * 21 * sizeof(float),
-                                       56, 56, 21, 3);
-
-       clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
-       struct timespec diffspec = diff(s_tspec, e_tspec);
-       unsigned long timeDiff = gettotalmillisec(diffspec);
-       printf("memcpy time: %lu(ms)\n", timeDiff);
-
-       clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-       mv_inference_pose_landmark_detect(mv_src_p2, hp_mv_infer2, NULL,  _hand_pose_cb, NULL);
-
-       clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-
-       diffspec = diff(s_tspec, e_tspec);
-       timeDiff = gettotalmillisec(diffspec);
-       printf("pose_estimation time: %lu(ms)\n", timeDiff);
-       //humanSkeleton.IsDetected = true;
-#endif
-       return;
-}
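-/* The hand path is a two-stage cascade: the first network detects the hand and
- * writes a 56x56x21 float heatmap into the tensor buffer passed as user_data;
- * that buffer is wrapped as a new mv_source via mv_source_fill_by_tensor_buffer()
- * and fed to the second network, which reports landmarks to _hand_pose_cb(). */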
-
-
-void _human_pose_cb (
-       mv_source_h source,
-       int number_of_pose_estimation,
-       mv_inference_pose_s *locations,
-       int label,
-    void *user_data)
-{
-       printf("%d landmarks\n", number_of_pose_estimation);
-
-       mv_pose_compare(hpPoser, locations,
-                                       (MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT | MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT),
-                                       &hpPoseScore);
-
-
-       if (hpPoseScore >= thPoseScore) {
-               hpPoseHoldTime++;
-       } else {
-               if (hpPoseHoldTime > thResetCount) {
-                       hpPoseCount++;
-               }
-               hpPoseHoldTime = 0;
-       }
-
-       printf("Score: %.4f, HoldTime: %d, Count: %d\n", hpPoseScore, hpPoseHoldTime, hpPoseCount);
-       for (int n = 0; n < number_of_pose_estimation; ++n) {
-
-               humanSkeleton.pose[n].x = (int)(640.f*(float)(locations->landmarks[n].point.x)/192.f);
-               humanSkeleton.pose[n].y = (int)(480.f*(float)(locations->landmarks[n].point.y)/192.f);
-               humanSkeleton.scores[n] = locations->landmarks[n].score;
-
-       }
-       humanSkeleton.IsDetected = true;
-}
-
-static gboolean
-run_pose (void *user_data)
-{
-       HandOffData *udata = (HandOffData *)user_data;
-       if (!udata || !GST_IS_BUFFER(udata->buffer))
-               return FALSE;
-
-       GstMapInfo map;
-
-       gst_buffer_map(udata->buffer, &map, GST_MAP_READ);
-
-       mv_source_clear(mv_src_p);
-
-       mv_source_fill_by_buffer(mv_src_p, map.data, 192*192*3, 192, 192, MEDIA_VISION_COLORSPACE_RGB888);
-
-       gst_buffer_unmap(udata->buffer, &map);
-
-       struct timespec s_tspec;
-       struct timespec e_tspec;
-
-       clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-       mv_inference_pose_landmark_detect(mv_src_p, hp_mv_infer, &poseRoi, _human_pose_cb, NULL);
-
-       clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-       struct timespec diffspec = diff(s_tspec, e_tspec);
-       unsigned long timeDiff = gettotalmillisec(diffspec);
-       printf("detect + pose time: %lu(ms)\n", timeDiff);
-
-       return FALSE;
-
-}
-
-static gboolean
-run_hand (void *user_data)
-{
-       HandOffData *udata = (HandOffData *)user_data;
-       if (!udata || !GST_IS_BUFFER(udata->buffer))
-               return FALSE;
-
-       GstMapInfo map;
-
-       gst_buffer_map(udata->buffer, &map, GST_MAP_READ);
-
-       mv_source_clear(mv_src_p);
-
-       mv_source_fill_by_buffer(mv_src_p, map.data, 224*224*3, 224, 224, MEDIA_VISION_COLORSPACE_RGB888);
-
-       gst_buffer_unmap(udata->buffer, &map);
-
-
-       struct timespec s_tspec;
-       struct timespec e_tspec;
-
-       void * outputTensorBuffer = (void*)udata->user_data;
-
-       clock_gettime(CLOCK_MONOTONIC, &s_tspec);
-
-       // invoke tflite -> _hand_detected_cb -> copy output tensor -> invoke tflite -> _hand_pose_cb
-       mv_inference_hand_detect(mv_src_p, hp_mv_infer, _hand_detected_cb, outputTensorBuffer);
-
-       clock_gettime(CLOCK_MONOTONIC, &e_tspec);
-       struct timespec diffspec = diff(s_tspec, e_tspec);
-       unsigned long timeDiff = gettotalmillisec(diffspec);
-       printf("detect + pose time: %lu(ms)\n", timeDiff);
-
-       return FALSE;
-
-}
-
-static void
-_pose_est_handoff(GstElement *object, GstBuffer *buffer, GstPad *pad, gpointer user_data)
-{
-
-       nFrames++;
-       hdata_p.buffer = buffer;
-       hdata_p.user_data = user_data;
-
-#if 0
-       if (nFrames % 15 == 0) {
-               g_mutex_lock(&pose_mutex);
-               g_idle_add (run_pose, &hdata_p);
-               g_mutex_unlock(&pose_mutex);
-       }
-#else
-       g_mutex_lock(&pose_mutex);
-       g_idle_add (run_pose, &hdata_p);
-       g_mutex_unlock(&pose_mutex);
-#endif
-}
-
-static void
-_hand_est_handoff(GstElement *object, GstBuffer *buffer, GstPad *pad, gpointer user_data)
-{
-
-       nFrames++;
-       hdata_p.buffer = buffer;
-       hdata_p.user_data = user_data;
-
-#if 0
-       if (nFrames % 15 == 0) {
-               g_mutex_lock(&pose_mutex);
-               g_idle_add (run_hand, &hdata_p);
-               g_mutex_unlock(&pose_mutex);
-       }
-#else
-       g_mutex_lock(&pose_mutex);
-       g_idle_add (run_hand, &hdata_p);
-       g_mutex_unlock(&pose_mutex);
-#endif
-}
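-/* Both handoff callbacks run on the streaming thread and only record the buffer
- * in hdata_p before scheduling run_pose()/run_hand() on the GLib main loop.
- * The disabled #if 0 variants show a frame-skipping alternative that would
- * process only every 15th frame. */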
-
-
-static void
-prepare_overlay (GstElement * overlay, GstCaps * caps, gpointer user_data)
-{
-       CairoOverlayState *state = (CairoOverlayState *) user_data;
-
-       state->valid = gst_video_info_from_caps (&state->vinfo, caps);
-}
-
-static void
-draw_overlay_pose (GstElement * overlay, cairo_t * cr, guint64 timestamp,
-    guint64 duration, gpointer user_data)
-{
-       CairoOverlayState *s = (CairoOverlayState *) user_data;
-
-       if (!s->valid) {
-               printf("not ready draw_overlay\n");
-               return;
-       }
-
-       cairo_set_source_rgba(cr, 0.1, 0.9, 0.0, 0.7);
-       cairo_set_line_width(cr, 2.0);
-
-       float poseWeight = 0.7f;
-       float prevPoseWeight = 0.3f;
-       if (!humanSkeleton.IsDetected)
-               return;
-
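-       /* Exponential smoothing: blend the new pose into the previous one
-        * (0.7 * current + 0.3 * previous) to reduce jitter between frames. */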
-       for (int k = 0; k < 16; ++k) {
-               if (humanSkeleton.isPrevPose == false) {
-                       humanSkeleton.prevPose[k] = humanSkeleton.pose[k];
-               } else {
-
-                       humanSkeleton.prevPose[k].x = (poseWeight * humanSkeleton.pose[k].x +
-                                                                               prevPoseWeight * humanSkeleton.prevPose[k].x);
-                       humanSkeleton.prevPose[k].y = (poseWeight * humanSkeleton.pose[k].y +
-                                                                               prevPoseWeight * humanSkeleton.prevPose[k].y);
-               }
-       }
-
-       for (int k = 0; k < 16; ++k) {
-               if (humanSkeleton.scores[k] > 0.0f) {
-                       if (k > 9)
-                               cairo_set_source_rgba(cr, 0.9, 0.1, 0.0, 0.7);
-
-                       cairo_arc(cr, humanSkeleton.prevPose[k].x, humanSkeleton.prevPose[k].y, 3,0, 2*M_PI);
-                       cairo_stroke(cr);
-               }
-       }
-
-
-       cairo_select_font_face(cr, "sans-serif", CAIRO_FONT_SLANT_NORMAL, CAIRO_FONT_WEIGHT_BOLD);
-       cairo_set_font_size(cr,15);
-
-       char howToText[2][1024];
-       char tmpText[3][1024];
-       snprintf(howToText[0], 1024, "Until %d hold time over Score: %.2f", thResetCount, thPoseScore);
-       snprintf(howToText[1], 1024, "Green-Hold, Blue-Release");
-       snprintf(tmpText[0], 1024, "Score: %.4f", hpPoseScore);
-       snprintf(tmpText[1], 1024, "Hold time: %d", hpPoseHoldTime > thResetCount ? thResetCount : hpPoseHoldTime);
-       snprintf(tmpText[2], 1024, "Count: %2d", hpPoseCount);
-
-
-       cairo_set_source_rgba(cr, 1.0, 1.0, 1.0, 0.7);
-       cairo_move_to(cr, 400.0, 30.0);
-       cairo_show_text(cr, howToText[0]);
-       cairo_move_to(cr, 420.0, 45.0);
-       cairo_show_text(cr, howToText[1]);
-       if (hpPoseScore >= thPoseScore) {
-               if (hpPoseHoldTime > thResetCount)
-                       cairo_set_source_rgba(cr, 0.0, 0.1, 0.9, 0.7);
-               else
-                       cairo_set_source_rgba(cr, 0.1, 0.9, 0.0, 0.7);
-
-               cairo_move_to(cr, 420.0, 60.0);
-               cairo_show_text(cr, tmpText[0]);
-               cairo_move_to(cr, 420.0, 75.0);
-               cairo_show_text(cr, tmpText[1]);
-               cairo_move_to(cr, 420.0, 90.0);
-               cairo_show_text(cr, tmpText[2]);
-
-       } else {
-               cairo_set_source_rgba(cr, 0.9, 0.1, 0.0, 0.7);
-               cairo_move_to(cr, 420.0, 60.0);
-               cairo_show_text(cr, tmpText[0]);
-               cairo_move_to(cr, 420.0, 75.0);
-               cairo_show_text(cr, tmpText[1]);
-               cairo_move_to(cr, 420.0, 90.0);
-               cairo_show_text(cr, tmpText[2]);
-       }
-}
-
-static void
-draw_overlay_hand (GstElement * overlay, cairo_t * cr, guint64 timestamp,
-    guint64 duration, gpointer user_data)
-{
-       CairoOverlayState *s = (CairoOverlayState *) user_data;
-
-       if (!s->valid) {
-               printf("not ready draw_overlay\n");
-               return;
-       }
-
-
-       cairo_set_source_rgba(cr, 0.0, 0.0, 1.0, 0.7);
-       cairo_set_line_width(cr, 2.0);
-
-       if (!humanSkeleton.IsDetected)
-               return;
-
-       cairo_select_font_face(cr, "sans-serif", CAIRO_FONT_SLANT_NORMAL, CAIRO_FONT_WEIGHT_BOLD);
-       cairo_set_font_size(cr,15);
-       cairo_move_to(cr, 320.0, 48.0);
-       cairo_show_text(cr, gHandGestureLabel[humanSkeleton.label]);
-
-       cairo_set_source_rgba(cr, 0.1, 0.9, 0.0, 0.7);
-       cairo_set_line_width(cr, 2.0);
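-       /* The strokes below assume the 21-landmark hand layout: index 0 is the
-        * wrist and each finger is a 4-point chain drawn from it
-        * (thumb 1-4, fore 5-8, middle 9-12, ring 13-16, little 17-20). */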
-
-    // thumb - red
-       cairo_set_source_rgba (cr, 0.9, 0.1, 0.0, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 1 ; k < 5; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // fore - red
-       cairo_set_source_rgba (cr, 0.9, 0.1, 0.0, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 5 ; k < 9; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // middle - green
-       cairo_set_source_rgba (cr, 0.1, 0.9, 0.0, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 9 ; k < 13; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // ring - blue
-       cairo_set_source_rgba (cr, 0.1, 0.0, 0.9, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 13 ; k < 17; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-       // little - purple
-       cairo_set_source_rgba (cr, 0.5, 0.0, 0.5, 0.7);
-       cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
-       for (int k = 17 ; k < 21; ++k) {
-               cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
-       }
-       cairo_stroke(cr);
-
-
-}
-
-static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data)
-{
-
-  switch (GST_MESSAGE_TYPE (msg)) {
-
-    case GST_MESSAGE_EOS:
-      printf ("End of stream\n");
-      break;
-
-    case GST_MESSAGE_ERROR: {
-      gchar  *debug;
-      GError *error;
-
-      gst_message_parse_error (msg, &error, &debug);
-      g_free (debug);
-
-      printf ("Error: %s\n", error->message);
-      g_error_free (error);
-
-      break;
-    }
-    default:
-      break;
-  }
-
-  return TRUE;
-}
-
-int perform_armnn_human_pose_cpm_configure(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-       char *inputNodeName = "image";
-    char *outputNodeName[1] = {"Convolutional_Pose_Machine/stage_5_out"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_CPM_WEIGHT_PATH);
-
-       mv_engine_config_set_string_attribute(
-                                               mv_engine_cfg,
-                                               MV_INFERENCE_MODEL_USER_FILE_PATH,
-                                               PE_POSE_LABEL_PATH);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_TYPE,
-                        MV_INFERENCE_TARGET_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        1);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
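-/* The perform_*_configure helpers in this file all follow the same
- * mv_engine_config recipe: weight file path, input data type, mean/std,
- * backend and target, input tensor geometry, and input/output node names;
- * only the model file and tensor shape differ between variants. */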
-
-int perform_tflite_human_pose_cpm_configure(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-       char *inputNodeName = "image";
-    char *outputNodeName[1] = {"Convolutional_Pose_Machine/stage_5_out"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_CPM_WEIGHT_PATH);
-
-       mv_engine_config_set_string_attribute(
-                                               mv_engine_cfg,
-                                               MV_INFERENCE_MODEL_USER_FILE_PATH,
-                                               PE_POSE_LABEL_PATH);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_TYPE,
-                        MV_INFERENCE_TARGET_CPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        192);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        1);
-
-       return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AIC(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/boundingbox", "mobilenetv2/heatmap"};
-
-    //outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AIC_1_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AIC2(mv_engine_config_h mv_engine_cfg)
-{
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/coord_refine", "mobilenetv2/gesture"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AIC_2_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        21);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLite(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/boundingbox", "mobilenetv2/heatmap"};
-
-    //outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLite_1_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLite2(mv_engine_config_h mv_engine_cfg)
-{
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/coord_refine", "mobilenetv2/gesture"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLite_2_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        21);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLiteQ(mv_engine_config_h mv_engine_cfg)
-{
-       if (mv_engine_cfg == NULL) {
-               printf("mv_engine_cfg is null\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/boundingbox", "mobilenetv2/heatmap"};
-
-    //outputTensorData = (void*)calloc(56*56*21, sizeof(char));
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLiteQ_1_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        224);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-int perform_tflite_hand_detection_AICLiteQ2(mv_engine_config_h mv_engine_cfg)
-{
-    char *inputNodeName = "input";
-    char *outputNodeNames[2] = {"mobilenetv2/coord_refine", "mobilenetv2/gesture"};
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        PE_TFLITE_AICLiteQ_2_WEIGHT_PATH);
-
-       mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-
-    mv_engine_config_set_double_attribute(mv_engine_cfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_ONE);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_GPU);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        56);
-
-    mv_engine_config_set_int_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        21);
-
-    mv_engine_config_set_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-
-    mv_engine_config_set_array_string_attribute(mv_engine_cfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeNames,
-                        2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-static void __global(void *data, struct wl_registry *registry,
-       uint32_t name, const char *interface, uint32_t version)
-{
-       struct tizen_surface **tz_surface = NULL;
-
-       if (!data) {
-               printf("NULL data\n");
-               return;
-       }
-
-       tz_surface = (struct tizen_surface **)data;
-
-       if (!interface) {
-               printf("NULL interface\n");
-               return;
-       }
-
-       if (strcmp(interface, "tizen_surface") == 0) {
-               printf("binding tizen surface for wayland\n");
-
-               *tz_surface = wl_registry_bind(registry, name, &tizen_surface_interface, 1);
-               if (*tz_surface == NULL)
-                       printf("failed to bind\n");
-
-               printf("done\n");
-       }
-
-       return;
-}
-
-static void __global_remove(void *data, struct wl_registry *wl_registry, uint32_t name)
-{
-       printf("enter\n");
-       return;
-}
-
-static const struct wl_registry_listener _wl_registry_listener = {
-       __global,
-       __global_remove
-};
-
-void __parent_id_getter(void *data, struct tizen_resource *tizen_resource, uint32_t id)
-{
-       if (!data) {
-               printf("NULL data\n");
-               return;
-       }
-
-       *((unsigned int *)data) = id;
-
-       printf("[CLIENT] got parent_id [%u] from server\n", id);
-
-       return;
-}
-
-static const struct tizen_resource_listener _tz_resource_listener = {
-       __parent_id_getter
-};
-
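-/* set_overlay() performs the Tizen/Wayland handshake: bind tizen_surface from
- * the registry, resolve the window's global parent_id through tizen_resource,
- * then hand that id to the video sink via
- * gst_video_overlay_set_wl_window_wl_surface_id() before starting playback. */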
-static void set_overlay(Ecore_Evas *ee)
-{
-   Ecore_Wl2_Window *window = NULL;
-   Ecore_Wl2_Display *e_wl2_display = NULL;
-
-   struct wl_display *display = NULL;
-   struct wl_display *display_wrapper = NULL;
-   struct wl_surface *surface = NULL;
-   struct wl_registry *registry = NULL;
-   struct wl_event_queue *queue = NULL;
-   struct tizen_surface *tz_surface = NULL;
-   struct tizen_resource *tz_resource = NULL;
-
-       window = ecore_evas_wayland2_window_get(ee);
-       if (!window) {
-               printf("failed to get wayland window\n");
-               goto _DONE;
-       }
-
-       /* set video_has flag to a video application window */
-       ecore_wl2_window_video_has(window, EINA_TRUE);
-
-       surface = (struct wl_surface *)ecore_wl2_window_surface_get(window);
-       if (!surface) {
-               printf("failed to get wayland surface\n");
-               goto _DONE;
-       }
-
-       e_wl2_display = ecore_wl2_connected_display_get(NULL);
-       if (!e_wl2_display) {
-               printf("failed to get ecore wl2 display\n");
-               goto _DONE;
-       }
-
-       display = (struct wl_display *)ecore_wl2_display_get(e_wl2_display);
-       if (!display) {
-               printf("failed to get wayland display\n");
-               goto _DONE;
-       }
-
-       display_wrapper = wl_proxy_create_wrapper(display);
-       if (!display_wrapper) {
-               printf("failed to create wl display wrapper\n");
-               goto _DONE;
-       }
-
-       queue = wl_display_create_queue(display);
-       if (!queue) {
-               printf("failed to create wl display queue\n");
-               goto _DONE;
-       }
-
-       wl_proxy_set_queue((struct wl_proxy *)display_wrapper, queue);
-
-       registry = wl_display_get_registry(display_wrapper);
-       if (!registry) {
-               printf("failed to get wayland registry\n");
-               goto _DONE;
-       }
-
-       wl_registry_add_listener(registry, &_wl_registry_listener, &tz_surface);
-
-       wl_display_dispatch_queue(display, queue);
-       wl_display_roundtrip_queue(display, queue);
-
-       if (!tz_surface) {
-               printf("failed to get tizen surface\n");
-               goto _DONE;
-       }
-
-       /* Get parent_id, which is unique across the entire system. */
-       tz_resource = tizen_surface_get_tizen_resource(tz_surface, surface);
-       if (!tz_resource) {
-               printf("failed to get tizen resource\n");
-               goto _DONE;
-       }
-
-       int parent_id = 0;
-
-       tizen_resource_add_listener(tz_resource, &_tz_resource_listener, &parent_id);
-
-       wl_display_roundtrip_queue(display, queue);
-
-       if (parent_id > 0) {
-               printf("parent id : %d\n", parent_id);
-
-               gst_video_overlay_set_wl_window_wl_surface_id(GST_VIDEO_OVERLAY(sink2), parent_id);
-               gst_element_set_state(pipeline, GST_STATE_PLAYING);
-       } else {
-               printf("failed to get parent id\n");
-       }
-
-_DONE:
-       if (tz_surface) {
-               tizen_surface_destroy(tz_surface);
-               tz_surface = NULL;
-       }
-
-       if (tz_resource) {
-               tizen_resource_destroy(tz_resource);
-               tz_resource = NULL;
-       }
-
-       if (registry) {
-               wl_registry_destroy(registry);
-               registry = NULL;
-       }
-
-       if (queue) {
-               wl_event_queue_destroy(queue);
-               queue = NULL;
-       }
-
-       if (display_wrapper) {
-               wl_proxy_wrapper_destroy(display_wrapper);
-               display_wrapper = NULL;
-       }
-}
-
-static void win_resize_cb (void *data, Evas *e, Evas_Object *obj, void *event_info)
-{
-   int w, h;
-   int wid;
-
-   printf("win_resize_cb\n");
-
-   Evas * _e = evas_object_evas_get(obj);
-   Ecore_Evas *ee = ecore_evas_ecore_evas_get(_e);
-   ecore_evas_geometry_get(ee, NULL, NULL, &w, &h);
-
-   if (!st) {
-    set_overlay(ee);
-    int x, y;
-    elm_win_screen_position_get(obj, &x, &y);
-    printf("x = %d, y = %d\n", x, y);
-       } else
-           st++;
-}
-
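-/* decodebin exposes its source pad only once the stream type is known, so the
- * "pad-added" handler below links it to the downstream videoscale at runtime. */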
-static void cb_new_pad (GstElement *element, GstPad *pad, gpointer data)
-{
-  gchar *name;
-  GstElement *other = data;
-
-  name = gst_pad_get_name (pad);
-  printf ("A new pad %s was created for %s\n", name, gst_element_get_name(element));
-  g_free (name);
-
-  printf ("element %s will be linked to %s\n",
-           gst_element_get_name(element),
-           gst_element_get_name(dscale));
-  gst_element_link(element, dscale);
-}
-
-static int app_create(void *data)
-{
-       appdata *ad = data;
-       Evas_Object *win = NULL;
-
-       // GST
-       g_mutex_init(&pose_mutex);
-
-       signal(SIGINT, int_handler);
-
-       /* initialization */
-       gst_init(NULL, NULL);
-
-       /* mediavision configuration*/
-       IsGestureMode = false;
-       if (hp_mv_engine_cfg == NULL) {
-               mv_create_engine_config(&hp_mv_engine_cfg);
-       }
-
-       if (hp_mv_engine_cfg2 == NULL) {
-               mv_create_engine_config(&hp_mv_engine_cfg2);
-       }
-
-       int err = MEDIA_VISION_ERROR_NONE;
-
-
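-       /* Configure the inference engine for the selected model; hand models also set up a second engine config (hp_mv_engine_cfg2). */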
-       if (ad->modelType == MODEL_TYPE_POSE_CPM) {
-               err = perform_armnn_human_pose_cpm_configure(hp_mv_engine_cfg);
-               //err = perform_tflite_human_pose_cpm_configure(hp_mv_engine_cfg);
-
-               mv_pose_create(&hpPoser);
-               mv_pose_set_from_file(hpPoser,
-                                               PLD_MOTION_CAPTURE_FILE_PATH,
-                                               PLD_MOTION_CAPTURE_MAPPING_FILE_PATH);
-
-       } else if (ad->modelType == MODEL_TYPE_POSE_HAND_AICLite) {
-               outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-               err = perform_tflite_hand_detection_AIC(hp_mv_engine_cfg);
-
-               err = perform_tflite_hand_detection_AIC2(hp_mv_engine_cfg2);
-       } else if (ad->modelType == MODEL_TYPE_POSE_HAND_AICLite2) {
-               outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-               err = perform_tflite_hand_detection_AICLite(hp_mv_engine_cfg);
-
-               err = perform_tflite_hand_detection_AICLite2(hp_mv_engine_cfg2);
-       } else {
-               outputTensorData = (void*)calloc(56*56*21, sizeof(float));
-               err = perform_tflite_hand_detection_AICLiteQ(hp_mv_engine_cfg);
-
-               err = perform_tflite_hand_detection_AICLiteQ2(hp_mv_engine_cfg2);
-       }
-
-       if (err != MEDIA_VISION_ERROR_NONE) {
-               printf("Error on inference engine configuration\n");
-       }
-
-       printf("configuration done\n");
-
-       printf("loading pose model: ");
-       mv_inference_create(&hp_mv_infer);
-
-       mv_inference_configure(hp_mv_infer, hp_mv_engine_cfg);
-
-       clock_t start = clock();
-       mv_inference_prepare(hp_mv_infer);
-       clock_t end = clock();
-       printf("time: %2.3f\n", (double)(end - start)/CLOCKS_PER_SEC);
-
-       /* mediavision source */
-       mv_create_source(&mv_src_p);
-
-       if (ad->modelType >= MODEL_TYPE_POSE_HAND_AICLite) {
-               mv_inference_create(&hp_mv_infer2);
-
-               mv_inference_configure(hp_mv_infer2, hp_mv_engine_cfg2);
-
-               start = clock();
-               mv_inference_prepare(hp_mv_infer2);
-               end = clock();
-               printf("time: %2.3f\n", (double)(end - start)/CLOCKS_PER_SEC);
-
-               mv_create_source(&mv_src_p2);
-       }
-
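-       /* Pipeline layout: source (camera or file + decodebin) -> tee; one branch renders via
-        * cairooverlay, the other rate-limits, scales and converts frames for inference
-        * through a fakesink handoff. */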
-       pipeline = gst_pipeline_new("app");
-
-       overlay_state = g_new0 (CairoOverlayState, 1);
-
-       /* create gstreamer elements */
-       if (!ad->filename) {
-               source = gst_element_factory_make("v4l2src", "src");
-               filter = gst_element_factory_make("capsfilter", "filter");
-       } else {
-               source = gst_element_factory_make("filesrc", "src");
-
-               dbin = gst_element_factory_make("decodebin", "dbin");
-               dscale = gst_element_factory_make("videoscale", "dscale");
-               dconv = gst_element_factory_make("videoconvert", "dconv");
-               dsfilter = gst_element_factory_make("capsfilter", "dsfilter");
-               dcfilter = gst_element_factory_make("capsfilter", "dcfilter");
-       }
-
-       if (ad->filename2) {
-               tee2 = gst_element_factory_make("tee", "tee2");
-               enc = gst_element_factory_make("avenc_mpeg4", "enc");
-               muxmp4 = gst_element_factory_make("mp4mux", "muxmp4");
-               fsink2 = gst_element_factory_make("filesink", "fsink2");
-               queue4 = gst_element_factory_make("queue", "queue4");
-               queue5 = gst_element_factory_make("queue", "queue5");
-               encconv = gst_element_factory_make("videoconvert", "encconv");
-               g_object_set(G_OBJECT(enc), "bitrate", 800000, NULL);
-       }
-
-       flip = gst_element_factory_make("videoflip", "vflip");
-       tee = gst_element_factory_make("tee", "tee");
-       queue1 = gst_element_factory_make("queue", "queue1");
-       queue2 = gst_element_factory_make("queue", "queue2");
-
-       if (0 /*ad->modelType == MODEL_TYPE_POSE_HAND_AIC*/) {
-               queue3 = gst_element_factory_make("queue", "queue3");
-       }
-
-       // queue1 - videoscale - capsfilter - videoconvert - capsfilter - videorate - capsfilter - fakesink
-       vscale = gst_element_factory_make("videoscale", "scale");
-       vsfilter = gst_element_factory_make("capsfilter", "vsfilter");
-       vconv = gst_element_factory_make("videoconvert", "convert");
-       vcfilter = gst_element_factory_make("capsfilter", "vcfilter");
-       vrate = gst_element_factory_make("videorate", "rate");
-       vrfilter = gst_element_factory_make("capsfilter", "vrfilter");
-       vrsink = gst_element_factory_make("fakesink", "vrsink");
-
-       // queue2 - videoconvert - cairooveray - tizenwlsink
-       oconv = gst_element_factory_make("videoconvert", "oconv");
-       coverlay = gst_element_factory_make("cairooverlay", "coverlay");
-       sink = gst_element_factory_make("fpsdisplaysink", "vsink");
-       sink2 = gst_element_factory_make("tizenwlsink", "vsink2");
-
-       // after detection, crop using video crop
-       // queue3 - videocrop - videoscale -capsfilter - videoconvert - capsfilter -fakesink
-
-       if (0/*ad->modelType == MODEL_TYPE_POSE_HAND_AIC*/) {
-               vcrop = gst_element_factory_make("videocrop", "crop");
-               vcrscale = gst_element_factory_make("videoscale", "crscale");
-               vcrsfilter = gst_element_factory_make("capsfilter", "vcrsfilter");
-               vcrsconv = gst_element_factory_make("videoconvert", "vcrsconvert");
-               vcrscfilter = gst_element_factory_make("capsfilter", "vcrscfilter");
-               vcrssink = gst_element_factory_make("fakesink", "vcrssink");
-       }
-
-       if (!pipeline || !source ||
-               !tee || !queue1 || !vscale || !vsfilter || !vconv || !vcfilter ||
-               !vrate || !vrfilter || !vrsink ||
-               !queue2 || !oconv || !coverlay || !sink || !sink2) {
-               printf(TEXT_RED "One element (queue1 or queue2) might not be created. Exiting.\n" TEXT_RESET);
-               return -1;
-       }
-
-       if (0 /*ad->modelType == MODEL_TYPE_POSE_HAND_AIC && (!pipeline || !queue3 
-               || !vcrop || !vcrscale || !vcrsfilter || !vcrsconv || !vcrscfilter || !vcrssink*/) {
-               printf(TEXT_RED "One element (queue3) might not be created. Exiting.\n" TEXT_RESET);
-               return -1;
-       }
-
-       if (!ad->filename) {
-               if (!filter) {
-                       printf(TEXT_RED "One element might not be created. Exiting.\n" TEXT_RESET);
-                       return -1;
-               }
-               g_object_set(G_OBJECT(filter), "caps", gst_caps_from_string("video/x-raw, format=YUY2, width=640, height=480"), NULL);
-       } else {
-               if (!dbin || !dscale || !dconv) {
-                       printf(TEXT_RED "One element might not be created. Exiting.\n" TEXT_RESET);
-                       return -1;
-               }
-               g_object_set(G_OBJECT(dsfilter), "caps", gst_caps_from_string("video/x-raw, width=640, height=480"), NULL);
-               g_object_set(G_OBJECT(dcfilter), "caps", gst_caps_from_string("video/x-raw, format=YUY2, width=640, height=480"), NULL);
-               g_signal_connect (dbin, "pad-added", G_CALLBACK (cb_new_pad), NULL);
-       }
-
-       if (ad->filename2) {
-               if (!tee2 || !enc || !muxmp4 || !queue4 || !queue5 || !fsink2 || !encconv) {
-                       printf(TEXT_RED "One element might not be created. Exiting.\n" TEXT_RESET);
-                       printf("%p, %p, %p, %p, %p, %p, %p\n", tee2, enc, muxmp4, queue4, queue5, fsink2, encconv);
-                       return -1;
-               }
-               g_object_set(G_OBJECT(fsink2), "location", ad->filename2, NULL);
-
-       }
-
-       /* set up the pipeline */
-       //g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay), overlay_state);
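-       /* Pick the overlay draw callback for the model: body-pose skeleton (CPM) or hand landmarks. */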
-       if (ad->modelType == MODEL_TYPE_POSE_CPM) {
-               g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay_pose), overlay_state);
-       } else {
-               g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay_hand), overlay_state);
-       }
-
-       g_signal_connect (coverlay, "caps-changed", G_CALLBACK (prepare_overlay), overlay_state);
-
-       if (!ad->filename) {
-               g_object_set(G_OBJECT(source), "device", "/dev/video252", NULL); // 252
-
-       } else {
-               g_object_set(G_OBJECT(source), "location", ad->filename, NULL);
-       }
-       g_object_set(G_OBJECT(flip), "method", 4, NULL);
-       g_object_set(G_OBJECT(sink2), "use-tbm", FALSE, NULL);
-       g_object_set(G_OBJECT(sink2), "sync", FALSE, NULL);
-       g_object_set(G_OBJECT(sink), "video-sink", sink2, NULL);
-       g_object_set(G_OBJECT(sink), "sync", FALSE, NULL);
-
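-       /* Scale the inference branch to the network input resolution: 224x224 for hand models, 192x192 for CPM. */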
-       if (ad->modelType == MODEL_TYPE_POSE_HAND_AICLite ||
-               ad->modelType == MODEL_TYPE_POSE_HAND_AICLite2 ||
-               ad->modelType == MODEL_TYPE_POSE_HAND_AICLite2Q) {
-               g_object_set(G_OBJECT(vsfilter), "caps", gst_caps_from_string("video/x-raw, width=224, height=224"), NULL);
-               poseCropSize = 224;
-       } else {
-               g_object_set(G_OBJECT(vsfilter), "caps", gst_caps_from_string("video/x-raw, width=192, height=192"), NULL);
-               poseCropSize = 192;
-       }
-
-       g_object_set(G_OBJECT(vcfilter), "caps", gst_caps_from_string("video/x-raw, format=RGB"), NULL);
-       g_object_set(G_OBJECT(vrfilter), "caps", gst_caps_from_string("video/x-raw, framerate=15/1"), NULL);
-
-       //g_object_set(G_OBJECT(vrate), "drop-only", TRUE, NULL);
-
-       //g_object_set(G_OBJECT(queue2), "leaky", 2, NULL);
-#if 0
-       g_object_set(G_OBJECT(queue3), "max-size-buffers", 0, NULL);
-       g_object_set(G_OBJECT(queue3), "max-size-time", 0, NULL);
-       g_object_set(G_OBJECT(queue3), "max-size-bytes", 0, NULL);
-#endif
-       //g_object_set(G_OBJECT(queue3), "leaky", 2, NULL);
-
-       // enable handoff signals so each sampled frame is delivered to the inference callback
-       printf("vrsink signal-handoffs\n");
-       g_object_set(G_OBJECT(vrsink), "signal-handoffs", TRUE, NULL);
-
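-       /* Inference runs in the handoff callback; the hand variant receives the preallocated output tensor buffer. */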
-       if (ad->modelType == MODEL_TYPE_POSE_CPM) {
-               handler_p = g_signal_connect (vrsink, "handoff", G_CALLBACK(_pose_est_handoff), NULL);
-       } else {
-               handler_p = g_signal_connect (vrsink, "handoff", G_CALLBACK(_hand_est_handoff), outputTensorData);
-       }
-
-       g_object_set(G_OBJECT(vrsink), "sync", FALSE, NULL);
-
-       /* vcrssink only exists in the disabled crop path; guard against a NULL object */
-       if (vcrssink)
-               g_object_set(G_OBJECT(vcrssink), "sync", FALSE, NULL);
-
-       /*  add a message handler */
-       bus = gst_pipeline_get_bus (GST_PIPELINE(pipeline));
-       bus_watch_id = gst_bus_add_watch(bus, bus_call, NULL);
-       gst_object_unref(bus);
-
-       printf("elements will be added to the pipeline.\n");
-       /* add elements into the pipeline */
-       gst_bin_add_many(GST_BIN(pipeline),
-                                       source,
-                                       flip, tee, queue1, vscale, vsfilter, vconv, vcfilter,
-                                       vrate, vrfilter, vrsink,
-                                       queue2, oconv, coverlay, sink,
-                                       NULL);
-
-       if (0 /*ad->modelType == MODEL_TYPE_POSE_HAND_AIC*/) {
-               gst_bin_add_many(GST_BIN(pipeline),
-                                       queue3, vcrop, vcrscale, vcrsfilter, vcrsconv, vcrscfilter, vcrssink,
-                                       NULL);
-               gst_element_link_many (tee, queue3, vcrop, vcrscale, vcrsfilter, vcrsconv, vcrscfilter, vcrssink, NULL);
-       }
-
-       /* link elements */
-       if (!ad->filename) {
-               printf("take !ad->filename 1\n");
-               gst_bin_add(GST_BIN(pipeline), filter);
-               gst_element_link_many(source, flip, filter, tee, NULL);
-       } else {
-               gst_bin_add_many(GST_BIN(pipeline), dbin, dscale, dconv, dsfilter, dcfilter, NULL);
-               gst_element_link_many(source, dbin, NULL);
-               gst_element_link_many(dscale, dsfilter, dconv, dcfilter, tee, NULL);
-       }
-
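-       /* With a record file, tee2 splits the overlaid stream between the display sink and an avenc_mpeg4 -> mp4mux -> filesink branch. */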
-       if (!ad->filename2) {
-               // display
-               gst_element_link_many (tee, queue2, oconv, coverlay, /*toverlay,*/ sink, NULL);
-               // pose
-               gst_element_link_many (tee, queue1, vrate, vrfilter, vconv, vcfilter, vscale, vsfilter, vrsink, NULL);
-       } else {
-
-               gst_bin_add_many(GST_BIN(pipeline), tee2, enc, muxmp4, fsink2, queue4, queue5, encconv, NULL);
-
-               // display
-               gst_element_link_many (tee, queue2, oconv, coverlay, tee2, NULL);
-               gst_element_link_many (tee2, queue4, sink, NULL);
-               gst_element_link_many (tee2, queue5, encconv, enc, muxmp4, fsink2, NULL);
-
-               // pose
-               gst_element_link_many (tee, queue1, vrate, vrfilter, vconv, vcfilter, vscale, vsfilter, vrsink, NULL);
-       }
-
-       /* set the pipeline state to "playing" state */
-       //gst_element_set_state(pipeline, GST_STATE_PLAYING);
-
-       /* loop */
-       humanSkeleton.IsDetected = false;
-       humanSkeleton.isPrevPose = false;
-       humanSkeleton.label = -1;
-       printf(TEXT_GREEN "Running.....\n" TEXT_RESET);
-       // GST_END
-#if 0
-       /* use gl backend */
-       elm_config_accel_preference_set("opengl");
-
-       /* create window */
-       //win = elm_win_add(NULL, PACKAGE, ELM_WIN_SPLASH );
-       win = elm_win_add(NULL, PACKAGE, ELM_WIN_BASIC);
-       if (win) {
-               elm_win_title_set(win, PACKAGE);
-               elm_win_borderless_set(win, EINA_TRUE);
-               elm_win_autodel_set(win, EINA_TRUE);
-               elm_win_alpha_set(win, EINA_FALSE);
-         evas_object_show(win);
-       }
-       elm_win_layer_set(win, 9);
-       elm_win_prop_focus_skip_set(win, EINA_TRUE);
-
-       ad->win = win;
-       g_win_id = win;
-       selected_win_id = g_win_id;
-
-       Evas_Object *bg = elm_bg_add(win);
-       elm_win_resize_object_add(win, bg);
-       evas_object_size_hint_min_set(bg, WIDTH, HEIGHT);
-       evas_object_size_hint_max_set(bg, WIDTH, HEIGHT);
-       evas_object_show(bg);
-
-       elm_win_activate(win);
-
-
-       evas_object_event_callback_add(win, EVAS_CALLBACK_RESIZE, win_resize_cb, NULL);
-#else
-       gst_element_set_state (pipeline, GST_STATE_PLAYING);
-#endif
-       return 0;
-}
-
-
-static int app_terminate(void *data)
-{
-       appdata *ad = data;
-
-       /* out of loop */
-       printf(TEXT_GREEN "Stopping.....\n" TEXT_RESET);
-       gst_element_set_state(pipeline, GST_STATE_NULL);
-
-       printf(TEXT_GREEN "pipe unref.....\n" TEXT_RESET);
-       gst_object_unref(GST_OBJECT(pipeline));
-
-       printf(TEXT_GREEN "remove bus_watch id.....\n" TEXT_RESET);
-       g_source_remove(bus_watch_id);
-
-       if (overlay_state != NULL) {
-               printf(TEXT_GREEN "g_free overlay.....\n" TEXT_RESET);
-               g_free(overlay_state);
-       }
-
-       g_mutex_clear(&pose_mutex);
-       printf(TEXT_GREEN "done.....\n" TEXT_RESET);
-
-       if (g_win_id) {
-               evas_object_del(g_win_id);
-               g_win_id = NULL;
-       }
-       ad->win = NULL;
-       selected_win_id = NULL;
-
-       return 0;
-}
-
-struct appcore_ops ops = {
-       .create = app_create,
-       .terminate = app_terminate,
-};
-
-int main (int argc, char *argv[])
-{
-       memset(&ad, 0x0, sizeof(appdata));
-       ops.data = &ad;
-
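-       /* argv layout: model [thPoseScore thResetCount thCustom [filename [filename2 [numbuffers]]]];
-        * hand models may instead pass a record file as argv[2]. */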
-       if (argc >= 6) {
-               ad.filename = g_strdup(argv[5]);
-               printf("launch with file source (%s)\n", ad.filename);
-               if (argc > 6) {
-                       ad.filename2 = g_strdup(argv[6]);
-                       ad.numbuffers = -1;
-                       printf("recording output to %s\n", ad.filename2);
-               }
-               if (argc > 7) {
-                       ad.numbuffers = atoi(argv[7]);
-               }
-       } else {
-               printf("launch with camera source\n");
-       }
-
-       if (argc < 2) {
-               printf("usage: mv_stream_infer model [thPoseScore thResetCount thCustom [filename [filename2 [numbuffers]]]]\n");
-               printf("model: 0(CPM), 1(AIC Hand), 2(AIC Lite Hand), 3(AIC Lite Q Hand)\n");
-               return -1;
-       }
-
-       ad.modelType = atoi(argv[1]);
-       if (ad.modelType < 0 || ad.modelType > 3) {
-               printf("not supported model type [%d]\n", ad.modelType);
-               return -1;
-       }
-
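-       /* CPM takes thresholds from argv[2..4] and uses a fixed ROI; hand models use the full frame (zero ROI). */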
-       if (ad.modelType != MODEL_TYPE_POSE_HAND_AICLite &&
-               ad.modelType != MODEL_TYPE_POSE_HAND_AICLite2 &&
-               ad.modelType != MODEL_TYPE_POSE_HAND_AICLite2Q) {
-               thPoseScore = (float)atoi(argv[2])/100.f;
-               thResetCount = atoi(argv[3]);
-               thCustom = (float)atoi(argv[4])/100.f;
-
-               poseRoi.point.x = 50;
-               poseRoi.point.y = 0;
-               poseRoi.width = 100;
-               poseRoi.height = 192;
-       } else {
-               if (argc > 2) {
-                       ad.filename2 = g_strdup(argv[2]);
-               }
-               poseRoi.point.x = 0;
-               poseRoi.point.y = 0;
-               poseRoi.width = 0;
-               poseRoi.height = 0;
-       }
-
-       return appcore_efl_main(PACKAGE, &argc, &argv, &ops);
-}