GstElement *vrate, *vrfilter, *fsink, *vrsink;
GstElement *queue1, *queue2, *queue3;
GstElement *oconv, *coverlay;
-GstElement *vcrop, *vcrscale, *vcrsfilter, *vcrsconv, *vcrscfilter, *vcrssink;
+GstElement *vcrop, *vcrscale, *vcrsfilter, *vcrsconv, *vcrscfilter;
CairoOverlayState *overlay_state;
GstElement *dbin, *dscale, *dconv;
static int poseCropSize = 0;
-#define MAX_BACKEND_TYPE 3
+#define MAX_BACKEND_TYPE 4
const static int gBackendType[MAX_BACKEND_TYPE] = {
MV_INFERENCE_BACKEND_TFLITE,
MV_INFERENCE_BACKEND_ARMNN,
- MV_INFERENCE_BACKEND_ONE
+ MV_INFERENCE_BACKEND_ONE,
+ MV_INFERENCE_BACKEND_BEYOND
};
#define MAX_DEVICE_TYPE 2
{
printf("%d landmarks\n", number_of_pose_estimation);
int width, height;
- mv_source_get_height(source, &width);
+ mv_source_get_width(source, &width);
mv_source_get_height(source, &height);
for (int n = 0; n < number_of_pose_estimation; ++n) {
humanSkeleton.pose[n].x = (int)(640.f*(float)locations->landmarks[n].point.x/(float)width);
const int number_of_hands,
const float *confidences,
const mv_rectangle_s *locations,
- void *user_data) //user_data can be mv_source?
+ void *user_data)
{
if (number_of_hands <= 0) {
humanSkeleton.IsDetected = false;
return;
}
-
struct timespec s_tspec;
struct timespec e_tspec;
struct timespec diffspec = diff(s_tspec, e_tspec);
unsigned long timeDiff = gettotalmillisec(diffspec);
- printf("memcpy time: %lu(ms)\n", timeDiff);
+ //printf("memcpy time: %lu(ms)\n", timeDiff);
clock_gettime(CLOCK_MONOTONIC, &s_tspec);
- mv_inference_pose_landmark_detect(mv_src_p2, hp_mv_infer2, NULL, _hand_pose_cb, NULL);
+ mv_inference_pose_landmark_detect(mv_src_p2, hp_mv_infer2, NULL, _hand_pose_cb, NULL);
clock_gettime(CLOCK_MONOTONIC, &e_tspec);
diffspec = diff(s_tspec, e_tspec);
timeDiff = gettotalmillisec(diffspec);
- printf("pose_estimation time: %lu(ms)\n", timeDiff);
+ //printf("pose_estimation time: %lu(ms)\n", timeDiff);
return;
}
CairoOverlayState *s = (CairoOverlayState *) user_data;
if (!s->valid) {
- printf("not ready draw_overlay");
+ printf("not ready draw_overlay\n");
return;
}
-
cairo_set_source_rgba(cr, 0.0, 0.0, 1.0, 0.7);
cairo_set_line_width(cr, 2.0);
}
cairo_stroke(cr);
-
}
static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data)
return MEDIA_VISION_ERROR_NONE;
}
+
static void __global(void *data, struct wl_registry *registry,
uint32_t name, const char *interface, uint32_t version)
{
}
pipeline = gst_pipeline_new("app");
-
overlay_state = g_new0 (CairoOverlayState, 1);
/* create gstreamer elements */
// queue2 - videoconvert - cairooveray - tizenwlsink
oconv = gst_element_factory_make("videoconvert", "oconv");
coverlay = gst_element_factory_make("cairooverlay", "coverlay");
+
sink = gst_element_factory_make("fpsdisplaysink", "vsink");
sink2 = gst_element_factory_make("tizenwlsink", "vsink2");
printf(TEXT_RED "One element might be not created. Exiting.\n" TEXT_RESET);
return -1;
}
- g_object_set(G_OBJECT(dsfilter), "caps", gst_caps_from_string("video/x-raw, width=640, height=480"), NULL);
- g_object_set(G_OBJECT(dcfilter), "caps", gst_caps_from_string("video/x-raw, format=YUY2, width=640, height=480"), NULL);
+ g_object_set(G_OBJECT(dsfilter), "caps", gst_caps_from_string("video/x-raw, width=224, height=224"), NULL);
+ g_object_set(G_OBJECT(dcfilter), "caps", gst_caps_from_string("video/x-raw, format=YUY2, width=224, height=224"), NULL);
g_signal_connect (dbin, "pad-added", G_CALLBACK (cb_new_pad), NULL);
}
-
if (ad->filename2) {
if (!tee2 || !enc || !muxmp4 || !queue4 || !queue5 || !fsink2 || !encconv) {
printf(TEXT_RED "One element might be not created. Exiting.\n" TEXT_RESET);
}
/* set up the pipeline */
- //g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay), overlay_state);
if (ad->modelType == MODEL_TYPE_POSE_CPM || ad->modelType == MODEL_TYPE_POSE_BLAZE) {
g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay_pose), overlay_state);
} else {
if (!ad->filename) {
#if NIKE_M
- g_object_set(G_OBJECT(source), "device", "/dev/video250", NULL);
-#else
g_object_set(G_OBJECT(source), "device", "/dev/video252", NULL);
+#else
+ g_object_set(G_OBJECT(source), "device", "/dev/video252", NULL); //rpi <->realsense
#endif
} else {
g_object_set(G_OBJECT(source), "location", ad->filename, NULL);
}
g_object_set(G_OBJECT(flip), "method", 4, NULL);
g_object_set(G_OBJECT(sink2), "use-tbm", FALSE, NULL);
- g_object_set(G_OBJECT(sink2), "sync", FALSE, NULL);
+ g_object_set(G_OBJECT(sink2), "sync", TRUE, NULL);
g_object_set(G_OBJECT(sink), "video-sink", sink2, NULL);
- g_object_set(G_OBJECT(sink), "sync", FALSE, NULL);
+ g_object_set(G_OBJECT(sink), "sync", TRUE, NULL);
+ g_object_set(G_OBJECT(sink), "text-overlay", FALSE, NULL);
if (ad->modelType == MODEL_TYPE_POSE_HAND_AICLite ||
ad->modelType == MODEL_TYPE_POSE_HAND_AICLite3 ||
}
g_object_set(G_OBJECT(vcfilter), "caps", gst_caps_from_string("video/x-raw, format=RGB"), NULL);
- g_object_set(G_OBJECT(vrfilter), "caps", gst_caps_from_string("video/x-raw, framerate=15/1"), NULL);
+ g_object_set(G_OBJECT(vrfilter), "caps", gst_caps_from_string("video/x-raw, framerate=10/1"), NULL);
#if NIKE_M
g_object_set(G_OBJECT(queue2), "max-size-buffers", 1, NULL);
#endif
-
// here to be continue
printf("vrsink signal-handoffs\n");
g_object_set(G_OBJECT(vrsink), "signal-handoffs", TRUE, NULL);
if (ad->modelType == MODEL_TYPE_POSE_CPM || ad->modelType == MODEL_TYPE_POSE_BLAZE) {
+
handler_p = g_signal_connect (vrsink, "handoff", G_CALLBACK(_pose_est_handoff), NULL);
} else {
handler_p = g_signal_connect (vrsink, "handoff", G_CALLBACK(_hand_est_handoff), outputTensorData);
}
- g_object_set(G_OBJECT(vrsink), "sync", FALSE, NULL);
-
-
- g_object_set(G_OBJECT(vcrssink), "sync", FALSE, NULL);
-
+ g_object_set(G_OBJECT(vrsink), "sync", TRUE, NULL);
/* add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE(pipeline));
gst_element_link_many(source, dbin, NULL);
gst_element_link_many(dscale, dsfilter, dconv, dcfilter, tee, NULL);
}
-
if (!ad->filename2) {
// display
gst_element_link_many (tee, queue2, oconv, coverlay, /*toverlay,*/ sink, NULL);
// pose
gst_element_link_many (tee, queue1, vrate, vrfilter, vconv, vcfilter, vscale, vsfilter, vrsink, NULL);
+
} else {
gst_bin_add_many(GST_BIN(pipeline), tee2, enc, muxmp4, fsink2, queue4, queue5, encconv);
/* create window */
win = elm_win_add(NULL, PACKAGE, ELM_WIN_SPLASH );
- //win = elm_win_add(NULL, PACKAGE, ELM_WIN_BASIC);
if (win) {
elm_win_title_set(win, PACKAGE);
elm_win_borderless_set(win, EINA_TRUE);
elm_win_activate(win);
-
evas_object_event_callback_add(win, EVAS_CALLBACK_RESIZE, win_resize_cb, NULL);
#else
gst_element_set_state (pipeline, GST_STATE_PLAYING);
+
#endif
return 0;
}
printf("usage: mv_stream_infer model backend device [thPoseScore, thResetCount, thCustom, [filename]]\n");
printf("model: 0(CPM), 5(BlazePose),\n");
printf("\t 1(AIC Lite Hand), 2(AIC Lite_3 Hand), 3(AIC Lite_2 Hand), 4(AIC Lite_2 Q Hand)\n");
- printf("backend: 0(TFLITE), 1(ARMNN), 2(ONE)\n");
+ printf("backend: 0(TFLITE), 1(ARMNN), 2(ONE), 3(BEYOND)\n");
printf("device: 0(CPU), 1(GPU)\n");
printf("ex: mv_stream_infer 5 0 0 0.8 5 0\n");
return -1;
}
ad.backendType = atoi(argv[2]);
- if (ad.backendType < 0 || ad.backendType > 2) {
+ if (ad.backendType < 0 || ad.backendType > 3) {
printf("not supported backend type [%d]\n", ad.backendType);
- printf("0(TFLITE), 1(ARMNN) or 2(ONE) are valid.\n");
+ printf("0(TFLITE), 1(ARMNN) or 2(ONE) or 3(BEYOND) are valid.\n");
return -1;
}
poseRoi.width = 0;
poseRoi.height = 0;
} else {
- if (argc > 4) {
+ if (argc > 8) {
ad.filename2 = g_strdup(argv[4]);
}
poseRoi.point.x = 0;
--- /dev/null
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _USE_MATH_DEFINES
+#include <fcntl.h>
+#include <mv_common.h>
+#include <mv_inference.h>
+
+#include <mv_testsuite_common.h>
+
+#include <image_helper.h>
+#include <mv_video_helper.h>
+
+#include <mv_log_cfg.h>
+
+#include <math.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <dirent.h>
+#include <string.h>
+#include <limits.h>
+#include <time.h>
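+/* GNU statement-expression MAX/MIN: each argument is evaluated exactly
+ * once, avoiding the double evaluation of a plain ternary macro. */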
+#define MAX(a, b) \
+({ __typeof__ (a) _a = (a); \
+__typeof__ (b) _b = (b); \
+_a > _b ? _a : _b; })
+
+#define MIN(a,b) \
+({ __typeof__ (a) _a = (a); \
+__typeof__ (b) _b = (b); \
+_a < _b ? _a : _b; })
+
+#include <glib-2.0/glib.h>
+#include <gst/gst.h>
+#include <gst/video/video.h>
+#include <cairo.h>
+#include <cairo-gobject.h>
+
+#include <Elementary.h>
+#include <appcore-efl.h>
+#include <Ecore.h>
+#include <Ecore_Evas.h>
+#include <Ecore_Wl2.h>
+#include <tizen-extension-client-protocol.h>
+#include <efl_util.h>
+
+#include <gst/gst.h>
+#include <gst/video/videooverlay.h>
+#include <unistd.h>
+#include <time.h>
+#include <argp.h>
+
+#ifdef PACKAGE
+#undef PACKAGE
+#endif
+#define PACKAGE "test"
+
+#define WIDTH 640.0
+#define HEIGHT 480.0
+
+#define SCREEN_WIDTH 2112.0
+#define SCREEN_HEIGHT 1188.0
+
+#define HEADED 1
+#define HEADLESS 0
+
+static int old_x = 0;
+static int old_y = 0;
+static int cnt = 0;
+
+static char* gHandGestureLabel[] = { "None", "One", "Two", "Three", "Four", "Five",
+ "Six", "Seven", "SmallHeart", "OK", "DisLike", "Like",
+ "Fist", "Rock", "Love"};
+static int st = 0;
+static Evas_Object *g_eo = NULL;
+static Evas_Object *icon = NULL;
+
+/* for video display */
+static Evas_Object *g_win_id;
+static Evas_Object *selected_win_id;
+
+typedef struct {
+ gchar *filename;
+ gchar *filename2;
+ int numbuffers;
+ int modelType;
+ int backendType;
+ int deviceType;
+ Evas_Object *win;
+ Evas_Object *layout_main; /* layout widget based on EDJ */
+ /* add more variables here */
+
+} appdata;
+
+static mv_rectangle_s poseRoi;
+
+static appdata ad;
+static GstBus *bus;
+static guint bus_watch_id;
+
+/* input */
+static efl_util_inputgen_h my_cursor = NULL;
+static efl_util_inputgen_h my_touch = NULL;
+static efl_util_inputgen_h my_keyboard = NULL;
+#define FILE_PATH_SIZE 1024
+
+#define PE_TFLITE_AIC_1_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite"
+#define PE_TFLITE_AIC_2_WEIGHT_PATH "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite"
+
+#define POSE_RESET_COUNT 5
+
+void send_mouse_event_efl(int x, int y, efl_util_input_pointer_type_e event){
+ efl_util_input_generate_pointer(my_cursor, 1, event, x, y);
+}
+
+void send_touch_event_efl(int x, int y, efl_util_input_touch_type_e event){
+ efl_util_input_generate_touch(my_touch, 0, event, x, y);
+}
+static float thPoseScore = 0.3f;
+static int thResetCount = 5;
+static float thCustom = 0.0f;
+
+typedef struct _rect {
+ int left;
+ int top;
+ int right;
+ int bottom;
+ int type;
+ bool updated;
+ bool cr_updated;
+} rect;
+
+
+typedef struct _humanSkeleton {
+ mv_point_s pose[21/*14*/];
+ mv_point_s prevPose[21/*14*/];
+ mv_rectangle_s loc;
+ mv_rectangle_s prevLoc;
+ mv_rectangle_s handRoi;
+ float scores[21/*14*/];
+ bool isPrevPose;
+ bool updated; // detection found and pose is good, so update the drawing
+ bool locUpdated; // track previous detection
+ bool IsPosed; // detection
+ bool IsDetected; // detection
+ int label;
+} HumanSkeleton;
+
+typedef struct
+{
+ gboolean valid;
+ GstVideoInfo vinfo;
+} CairoOverlayState;
+
+typedef struct
+{
+ GstBuffer *buffer;
+ gpointer user_data;
+ int modelType;
+} HandOffData;
+
+
+typedef struct
+{
+ int x;
+ int y;
+} MovePos;
+
+static MovePos movePos[5] = {0};
+//gchar *gesturenames;
+
+static HandOffData hdata_p;
+
+static GMutex pose_mutex;
+static guint old_timeout = 0;
+static guint nFrames = 0;
+
+// Gstreamer
+GstElement *pipeline, *source, *flip, *filter, *toverlay, *sink, *sink2;
+GstElement *tee, *vscale, *vsfilter, *vconv, *vcfilter;
+GstElement *vrate, *vrfilter, *fsink, *vrsink;
+GstElement *queue1, *queue2, *queue3;
+GstElement *oconv, *coverlay;
+GstElement *vcrop, *vcrscale, *vcrsfilter, *vcrsconv, *vcrscfilter;
+CairoOverlayState *overlay_state;
+
+GstElement *dbin, *dscale, *dconv;
+GstElement *dsfilter, *dcfilter;
+
+GstElement *tee2, *enc, *muxmp4, *fsink2, *queue4, *queue5, *encconv;
+
+static HumanSkeleton humanSkeleton;
+gulong handler_p;
+GList *line_list = NULL;
+
+mv_source_h mv_src_p;
+mv_source_h mv_src_p2;
+
+// Human pose
+mv_engine_config_h hp_mv_engine_cfg;
+mv_inference_h hp_mv_infer;
+
+mv_engine_config_h hp_mv_engine_cfg2;
+mv_inference_h hp_mv_infer2;
+
+// pose comparison
+mv_pose_h hpPoser;
+float hpPoseScore;
+int hpPoseHoldTime;
+int hpPoseCount;
+
+static void * outputTensorData;
+
+FILE *fp;
+
+static bool IsGestureMode;
+static region_s handRegion = {0.0f, {0.0f, 0.0f}, {0.0f, 0.0f}, {0.0f, 0.0f}};
+static int poseCropSize = 0;
+
+
+static int mode = HEADED;
+static const char doc[] = "mv_stream_infer_browser -- mediavision inference test";
+static const char args_doc[] = "";
+
+#define MAX_BACKEND_TYPE 3
+const static int gBackendType[MAX_BACKEND_TYPE] = {
+ MV_INFERENCE_BACKEND_TFLITE,
+ MV_INFERENCE_BACKEND_ARMNN,
+ MV_INFERENCE_BACKEND_ONE
+};
+
+#define MAX_DEVICE_TYPE 3
+const static int gDeviceType[MAX_DEVICE_TYPE] = {
+ MV_INFERENCE_TARGET_DEVICE_NONE,
+ MV_INFERENCE_TARGET_DEVICE_CPU,
+ MV_INFERENCE_TARGET_DEVICE_GPU
+};
+
+#define IMAGE_SIZE_WIDTH 640
+#define IMAGE_SIZE_HEIGHT 480
+
+#define NANO_PER_SEC ((__clock_t) 1000000000)
+#define NANO_PER_MILLI ((__clock_t) 1000000)
+#define MILLI_PER_SEC ((__clock_t) 1000)
+
+struct arguments
+{
+ int mode;
+ int debug;
+};
+
+static struct arguments arguments;
+
+static error_t parse_opt (int key, char *arg, struct argp_state *state)
+{
+ struct arguments *arguments = state->input;
+
+ switch (key) {
+ case 'h':
+ arguments->mode = arg ? atoi (arg) : 0;
+ break;
+ case 'd':
+ arguments->debug = arg ? atoi (arg) : 0;
+ break;
+ case 'm':
+ ad.modelType = arg ? atoi (arg) : 1;
+ break;
+ case ARGP_KEY_NO_ARGS:
+ /* do nothing */
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
+static struct argp_option arg_options[] = {
+ {"head", 'h', "HEAD", OPTION_ARG_OPTIONAL, "Run test by HEAd [0=headless|1=head] (default 0)"},
+ {"debug", 'd', "DEBUG", OPTION_ARG_OPTIONAL, "Run test by DEBUG [0=notdebug|1=debug] (default 0)"},
+ {"model", 'm', "MODEL", OPTION_ARG_OPTIONAL, "Run test by MODEL [1=AIC|6=GOOGLE] (default 1)"},
+ { 0 }
+};
+
+static struct argp argp = { arg_options, parse_opt, args_doc, doc };
+
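+/* Elapsed time end - start, borrowing a second when the nanosecond
+ * field would underflow. */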
+struct timespec diff(struct timespec start, struct timespec end)
+{
+ struct timespec temp;
+ if ((end.tv_nsec - start.tv_nsec) < 0) {
+ temp.tv_sec = end.tv_sec - start.tv_sec - 1;
+ temp.tv_nsec = NANO_PER_SEC + end.tv_nsec - start.tv_nsec;
+ }
+ else {
+ temp.tv_sec = end.tv_sec - start.tv_sec;
+ temp.tv_nsec = end.tv_nsec - start.tv_nsec;
+ }
+ return temp;
+}
+
+unsigned long gettotalmillisec(const struct timespec time)
+{
+ return time.tv_sec * MILLI_PER_SEC + time.tv_nsec / NANO_PER_MILLI;
+}
+
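+/* "pad-added" handler: dbin creates its source pads at runtime, so each
+ * new pad is linked to the dscale branch from here. */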
+static void cb_new_pad (GstElement *element, GstPad *pad, gpointer data)
+{
+ gchar *name;
+ GstElement *other = data;
+
+ name = gst_pad_get_name (pad);
+ printf ("A new pad %s was created for %s\n", name, gst_element_get_name(element));
+ g_free (name);
+
+ printf ("element %s will be linked to %s\n",
+ gst_element_get_name(element),
+ gst_element_get_name(dscale));
+ gst_element_link(element, dscale);
+}
+
+
+void int_handler(int sig)
+{
+ int c = 0; /* int, not char: getchar() returns EOF as an int */
+
+ signal(sig, SIG_IGN);
+ printf(TEXT_YELLOW "\nDo you want to quit? [y/n]\n" TEXT_RESET);
+ while (( c = getchar()) != EOF){
+ if (c == 'y' || c == 'Y') {
+
+ gst_element_send_event(pipeline, gst_event_new_eos());
+
+ sleep(4);
+
+
+ if (mv_src_p)
+ mv_destroy_source(mv_src_p);
+
+ if (hp_mv_infer)
+ mv_inference_destroy(hp_mv_infer);
+
+ if (hp_mv_engine_cfg)
+ mv_destroy_engine_config(hp_mv_engine_cfg);
+
+ if (mv_src_p2)
+ mv_destroy_source(mv_src_p2);
+
+ if (hp_mv_infer2)
+ mv_inference_destroy(hp_mv_infer2);
+
+ if (hp_mv_engine_cfg2)
+ mv_destroy_engine_config(hp_mv_engine_cfg2);
+
+ if (outputTensorData) {
+ free(outputTensorData);
+ outputTensorData = NULL;
+ }
+
+ if (hpPoser)
+ mv_pose_destroy(hpPoser);
+
+ printf(TEXT_YELLOW "exit..\n" TEXT_RESET);
+ signal(SIGINT, SIG_DFL);
+ exit(0);
+ } else {
+ printf("no");
+ signal(SIGINT, int_handler);
+ }
+
+ getchar(); // Get new line character
+ }
+
+}
+
+void _hand_pose_cb_bu (
+ mv_source_h source,
+ int number_of_pose_estimation,
+ mv_inference_pose_s *locations,
+ int label,
+ void *user_data)
+{
+ printf("%d landmarks\n", number_of_pose_estimation);
+#if 1
+ int width, height;
+ mv_source_get_width(source, &width);//aic model :56
+ mv_source_get_height(source, &height);//aic model :56
+ for (int n = 0; n < number_of_pose_estimation; ++n) {
+ humanSkeleton.pose[n].x = (int)(224.f*(float)locations->landmarks[n].point.x/(float)width);
+ humanSkeleton.pose[n].y = (int)(224.f*(float)locations->landmarks[n].point.y/(float)height);
+ humanSkeleton.scores[n] = 1.0f; /* confidences[n];*/
+ }
+ humanSkeleton.label = label;
+ humanSkeleton.IsDetected = true;
+ humanSkeleton.IsPosed = true;
+
+ int xpos = 1920-(int)(SCREEN_WIDTH *(float)locations->landmarks[9].point.x/width);
+ int ypos = (int)(SCREEN_HEIGHT *(float)locations->landmarks[9].point.y/height);
+
+ send_mouse_event_efl(xpos, ypos, EFL_UTIL_INPUT_POINTER_MOVE);
+
+ static struct timespec s_tspec2;
+ static struct timespec e_tspec2;
+ static bool first_time = TRUE;
+ if (first_time) {
+ first_time = FALSE;
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec2);
+ } else {
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec2);
+ struct timespec diffspec = diff(s_tspec2, e_tspec2);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec2);
+ printf("humanSkeleton.label %d \n", humanSkeleton.label);
+
+ if (humanSkeleton.label == 1){
+ printf("one is click\n");
+ send_mouse_event_efl(xpos,ypos,EFL_UTIL_INPUT_POINTER_BUTTON_DOWN);
+ send_mouse_event_efl(xpos,ypos,EFL_UTIL_INPUT_POINTER_BUTTON_UP);
+ } else if (humanSkeleton.label == 13){
+ printf("rock is right\n");
+ efl_util_input_generate_key(my_keyboard, "Right", 1);
+ efl_util_input_generate_key(my_keyboard, "Right", 0);
+ } else if (humanSkeleton.label == 14){
+ printf("love is left\n");
+ efl_util_input_generate_key(my_keyboard, "Left", 1);
+ efl_util_input_generate_key(my_keyboard, "Left", 0);
+ } else if (humanSkeleton.label == 7){
+ printf("seven is up\n");
+ efl_util_input_generate_key(my_keyboard, "Up", 1);
+ efl_util_input_generate_key(my_keyboard, "Up", 0);
+ } else if (humanSkeleton.label == 2){
+ printf("two is down\n");
+ efl_util_input_generate_key(my_keyboard, "Down", 1);
+ efl_util_input_generate_key(my_keyboard, "Down", 0);
+ } else if (humanSkeleton.label == 8){ // F5 key
+ printf("smalheart is F5\n");
+ efl_util_input_generate_key(my_keyboard, "XF86Home", 1);
+ efl_util_input_generate_key(my_keyboard, "XF86Home", 0);
+ }
+ }
+#endif
+}
+
+void _hand_pose_cb (
+ mv_source_h source,
+ int number_of_pose_estimation,
+ mv_inference_pose_s *locations,
+ int label,
+ void *user_data)
+{
+ printf("%d landmarks is genereated\n", number_of_pose_estimation);
+ int width, height;
+ mv_source_get_width(source, &width);
+ mv_source_get_height(source, &height);
+ for (int n = 0; n < number_of_pose_estimation; ++n) {
+ humanSkeleton.pose[n].x = (int)(640.f*(float)locations->landmarks[n].point.x/(float)width);
+ humanSkeleton.pose[n].y = (int)(480.f*(float)locations->landmarks[n].point.y/(float)height);
+ humanSkeleton.scores[n] = 1.0f; /* confidences[n];*/
+ }
+
+ humanSkeleton.label = label;
+ humanSkeleton.IsDetected = true;
+ humanSkeleton.IsPosed = true;
+
+ int xpos = (int)(SCREEN_WIDTH*((float)humanSkeleton.pose[9].x/(float)640));
+ int ypos = (int)(SCREEN_HEIGHT*((float)humanSkeleton.pose[9].y/(float)480));
+
+ send_mouse_event_efl(xpos, ypos, EFL_UTIL_INPUT_POINTER_MOVE);
+
+ printf("humanSkeleton.label %d \n", humanSkeleton.label);
+
+ if (humanSkeleton.label == 1){
+ printf("one is click\n");
+ send_mouse_event_efl(xpos,ypos,EFL_UTIL_INPUT_POINTER_BUTTON_DOWN);
+ send_mouse_event_efl(xpos,ypos,EFL_UTIL_INPUT_POINTER_BUTTON_UP);
+ } else if (humanSkeleton.label == 13){
+ printf("rock is right\n");
+ efl_util_input_generate_key(my_keyboard, "Right", 1);
+ efl_util_input_generate_key(my_keyboard, "Right", 0);
+ } else if (humanSkeleton.label == 14){
+ printf("love is left\n");
+ efl_util_input_generate_key(my_keyboard, "Left", 1);
+ efl_util_input_generate_key(my_keyboard, "Left", 0);
+ } else if (humanSkeleton.label == 7){
+ printf("seven is up\n");
+ efl_util_input_generate_key(my_keyboard, "Up", 1);
+ efl_util_input_generate_key(my_keyboard, "Up", 0);
+ } else if (humanSkeleton.label == 2){
+ printf("two is down\n");
+ efl_util_input_generate_key(my_keyboard, "Down", 1);
+ efl_util_input_generate_key(my_keyboard, "Down", 0);
+ } else if (humanSkeleton.label == 8){ // F5 key
+ printf("smalheart is F5\n");
+ efl_util_input_generate_key(my_keyboard, "XF86Home", 1);
+ efl_util_input_generate_key(my_keyboard, "XF86Home", 0);
+ }
+}
+
+static void _hand_detected_cb (
+ mv_source_h source,
+ const int number_of_hands,
+ const float *confidences,
+ const mv_rectangle_s *locations, // always exists for testing.
+ void *user_data)
+{
+ if (number_of_hands <= 0) {
+ humanSkeleton.IsDetected = false;
+ humanSkeleton.label = -1;
+ return;
+ }
+
+ struct timespec s_tspec;
+ struct timespec e_tspec;
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+
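+ /* Hand over the detector's 56x56x21 float heatmap (arriving via
+ * user_data) as the input tensor of the second-stage model. */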
+ mv_source_clear(mv_src_p2);
+ mv_source_fill_by_tensor_buffer(mv_src_p2, user_data,
+ MV_INFERENCE_DATA_FLOAT32,
+ 56 * 56 * 21 * sizeof(float),
+ 56, 56, 21, 3);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+
+ struct timespec diffspec = diff(s_tspec, e_tspec);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+ //printf("memcpy time: %lu(ms)\n", timeDiff);
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+ int width, height;
+ mv_source_get_width(mv_src_p2, &width);
+ mv_source_get_height(mv_src_p2, &height);
+
+ mv_inference_pose_landmark_detect(mv_src_p2, hp_mv_infer2, NULL, _hand_pose_cb, NULL);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+
+ diffspec = diff(s_tspec, e_tspec);
+ timeDiff = gettotalmillisec(diffspec);
+ //printf("pose_estimation time: %lu(ms)\n", timeDiff);
+ return;
+}
+
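+/* Runs in the main loop via g_idle_add(): wraps the queued 224x224 RGB
+ * frame in an mv_source and kicks off first-stage hand detection. */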
+static gboolean
+run_hand (void *user_data)
+{
+ HandOffData *udata = (HandOffData *)user_data;
+ if (!GST_IS_BUFFER(udata->buffer))
+ return FALSE;
+
+ GstMapInfo map;
+
+ gst_buffer_map(udata->buffer, &map, GST_MAP_READ);
+
+ mv_source_clear(mv_src_p);
+
+ mv_source_fill_by_buffer(mv_src_p, map.data, 224*224*3, 224, 224, MEDIA_VISION_COLORSPACE_RGB888);
+
+ gst_buffer_unmap(udata->buffer, &map);
+
+ struct timespec s_tspec;
+ struct timespec e_tspec;
+
+ void * outputTensorBuffer = (void*)udata->user_data;
+
+ clock_gettime(CLOCK_MONOTONIC, &s_tspec);
+
+ int width, height;
+ mv_source_get_width(mv_src_p, &width);
+ mv_source_get_height(mv_src_p, &height);
+ // invoke tflite -> _hand_detected_cb -> memcpy output -> invoke tflite -> _pose_cb
+ mv_inference_hand_detect(mv_src_p, hp_mv_infer, _hand_detected_cb, outputTensorBuffer);
+
+ clock_gettime(CLOCK_MONOTONIC, &e_tspec);
+ struct timespec diffspec = diff(s_tspec, e_tspec);
+ unsigned long timeDiff = gettotalmillisec(diffspec);
+ printf("detect + pose time: %lu(ms)\n", timeDiff);
+
+ return FALSE;
+}
+
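+/* fakesink "handoff" callback: stash the buffer and defer inference to
+ * the main loop so the streaming thread is not blocked. */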
+static void
+_hand_est_handoff(GstElement *object, GstBuffer *buffer, GstPad *pad, gpointer user_data)
+{
+ nFrames++;
+ hdata_p.buffer = buffer;
+ hdata_p.user_data = user_data;
+
+ g_mutex_lock(&pose_mutex);
+ g_idle_add (run_hand, &hdata_p);
+ g_mutex_unlock(&pose_mutex);
+}
+
+static void
+prepare_overlay (GstElement * overlay, GstCaps * caps, gpointer user_data)
+{
+ CairoOverlayState *state = (CairoOverlayState *) user_data;
+
+ state->valid = gst_video_info_from_caps (&state->vinfo, caps);
+}
+
+static void
+draw_overlay_hand (GstElement * overlay, cairo_t * cr, guint64 timestamp,
+ guint64 duration, gpointer user_data)
+{
+ CairoOverlayState *s = (CairoOverlayState *) user_data;
+
+ if (!s->valid) {
+ printf("not ready draw_overlay");
+ return;
+ }
+
+ if (!humanSkeleton.IsPosed) {
+ printf("\t\n Fail to DRAW!!!!!\n\t");
+ return;
+ }
+
+ cairo_select_font_face(cr, "sans-serif", CAIRO_FONT_SLANT_NORMAL, CAIRO_FONT_WEIGHT_BOLD);
+
+ cairo_set_line_width(cr, 1.0);
+
+ cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
+ for (int k = 1 ; k < 21; ++k) {
+ cairo_set_source_rgba (cr, 1.0, 1.0, 1.0, 1.0);
+ cairo_line_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
+ cairo_stroke(cr);
+ cairo_set_source_rgba (cr, 0.0, 1.0, 1.0, 1.0);
+ cairo_rectangle(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y, 1, 1);
+ cairo_stroke(cr);
+ cairo_move_to(cr, humanSkeleton.pose[k].x, humanSkeleton.pose[k].y);
+
+ if (k%4 == 0)
+ cairo_stroke(cr);
+ }
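+ /* Trace the palm outline: wrist (landmark 0) through landmarks
+ * 17, 13, 9, 5 and 1. */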
+ cairo_set_source_rgba (cr, 1.0, 1.0, 1.0, 1.0);
+ cairo_move_to(cr, humanSkeleton.pose[0].x, humanSkeleton.pose[0].y);
+ cairo_line_to(cr, humanSkeleton.pose[17].x, humanSkeleton.pose[17].y);
+ cairo_line_to(cr, humanSkeleton.pose[13].x, humanSkeleton.pose[13].y);
+ cairo_line_to(cr, humanSkeleton.pose[9].x, humanSkeleton.pose[9].y);
+ cairo_line_to(cr, humanSkeleton.pose[5].x, humanSkeleton.pose[5].y);
+ cairo_line_to(cr, humanSkeleton.pose[1].x, humanSkeleton.pose[1].y);
+ cairo_stroke(cr);
+
+ cairo_set_source_rgba(cr, 1.0, 0.0, 0.0, 0.7);
+
+
+ cairo_set_font_size(cr,30);
+ cairo_move_to(cr, 180, 40);
+
+
+ if (humanSkeleton.label < 0) {
+ cairo_show_text(cr, "None");
+ } else {
+ cairo_scale(cr, -1.0f, 1.0f);
+ cairo_show_text(cr, gHandGestureLabel[humanSkeleton.label]);
+ }
+}
+
+static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data)
+{
+
+ switch (GST_MESSAGE_TYPE (msg)) {
+
+ case GST_MESSAGE_EOS:
+ printf ("End of stream\n");
+ break;
+
+ case GST_MESSAGE_ERROR: {
+ gchar *debug;
+ GError *error;
+
+ gst_message_parse_error (msg, &error, &debug);
+ g_free (debug);
+
+ printf ("Error: %s\n", error->message);
+ g_error_free (error);
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ return TRUE;
+}
+
+int perform_tflite_hand_detection_AIC(mv_engine_config_h mv_engine_cfg)
+{
+ if (mv_engine_cfg == NULL) {
+ printf("mv_engine_cfg is null\n");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ char *inputNodeName = "input";
+ char *outputNodeNames[2] = {"mobilenetv2/boundingbox", "mobilenetv2/heatmap"};
+
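+ /* Stage 1 detector: 224x224x3 float input; two outputs, a bounding
+ * box and the heatmap that feeds the second-stage model. */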
+ mv_engine_config_set_string_attribute(mv_engine_cfg,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ PE_TFLITE_AIC_1_WEIGHT_PATH);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
+
+ mv_engine_config_set_double_attribute(mv_engine_cfg,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
+
+ mv_engine_config_set_double_attribute(mv_engine_cfg,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_BEYOND);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_TARGET_DEVICE_TYPE,
+ MV_INFERENCE_TARGET_DEVICE_CPU);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 224);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 224);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ 3);
+
+ mv_engine_config_set_string_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(mv_engine_cfg,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ outputNodeNames,
+ 2);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int perform_tflite_hand_detection_AIC2(mv_engine_config_h mv_engine_cfg)
+{
+ char *inputNodeName = "input";
+ char *outputNodeNames[2] = {"mobilenetv2/coord_refine", "mobilenetv2/gesture"};
+
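+ /* Stage 2: consumes the 56x56x21 heatmap and outputs refined landmark
+ * coordinates plus a gesture label. */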
+ mv_engine_config_set_string_attribute(mv_engine_cfg,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ PE_TFLITE_AIC_2_WEIGHT_PATH);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_DATA_TYPE,
+ MV_INFERENCE_DATA_FLOAT32);
+
+ mv_engine_config_set_double_attribute(mv_engine_cfg,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
+
+ mv_engine_config_set_double_attribute(mv_engine_cfg,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_BEYOND);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_TARGET_DEVICE_TYPE,
+ MV_INFERENCE_TARGET_DEVICE_CPU);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 56);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 56);
+
+ mv_engine_config_set_int_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ 21);
+
+ mv_engine_config_set_string_attribute(mv_engine_cfg,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(mv_engine_cfg,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ outputNodeNames,
+ 2);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
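+/* wl_registry listener: bind the tizen_surface interface once the
+ * compositor advertises it. */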
+static void __global(void *data, struct wl_registry *registry,
+ uint32_t name, const char *interface, uint32_t version)
+{
+ struct tizen_surface **tz_surface = NULL;
+
+ if (!data) {
+ printf("NULL data\n");
+ return;
+ }
+
+ tz_surface = (struct tizen_surface **)data;
+
+ if (!interface) {
+ printf("NULL interface\n");
+ return;
+ }
+
+ if (strcmp(interface, "tizen_surface") == 0) {
+ printf("binding tizen surface for wayland\n");
+
+ *tz_surface = wl_registry_bind(registry, name, &tizen_surface_interface, 1);
+ if (*tz_surface == NULL)
+ printf("failed to bind\n");
+
+ printf("done\n");
+ }
+ return;
+}
+
+static void __global_remove(void *data, struct wl_registry *wl_registry, uint32_t name)
+{
+ printf("enter\n");
+ return;
+}
+
+static const struct wl_registry_listener _wl_registry_listener = {
+ __global,
+ __global_remove
+};
+
+void __parent_id_getter(void *data, struct tizen_resource *tizen_resource, uint32_t id)
+{
+ if (!data) {
+ printf("NULL data\n");
+ return;
+ }
+
+ *((unsigned int *)data) = id;
+
+ printf("[CLIENT] got parent_id [%u] from server\n", id);
+
+ return;
+}
+
+static const struct tizen_resource_listener _tz_resource_listener = {
+ __parent_id_getter
+};
+
+static int app_create(void *data)
+{
+ appdata *ad = data;
+ Evas_Object *win = NULL;
+
+ // GST
+ g_mutex_init(&pose_mutex);
+
+ signal(SIGINT, int_handler);
+
+ /* initialization */
+ gst_init(NULL, NULL);
+
+ /* mediavision configuration*/
+ IsGestureMode = false;
+ if (hp_mv_engine_cfg == NULL) {
+ mv_create_engine_config(&hp_mv_engine_cfg);
+ }
+
+ if (hp_mv_engine_cfg2 == NULL) {
+ mv_create_engine_config(&hp_mv_engine_cfg2);
+ }
+
+ int err = MEDIA_VISION_ERROR_NONE;
+
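+ /* Scratch buffer shared with _hand_detected_cb: holds the stage-1
+ * 56x56x21 float heatmap handed to the stage-2 model. */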
+ outputTensorData = (void*)calloc(56*56*21, sizeof(float));
+ err = perform_tflite_hand_detection_AIC(hp_mv_engine_cfg);
+ if (err == MEDIA_VISION_ERROR_NONE)
+ err = perform_tflite_hand_detection_AIC2(hp_mv_engine_cfg2);
+
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Error on hand detection engine configuration\n");
+ }
+
+ printf("configuration done\n");
+
+ printf("loading pose model: ");
+ mv_inference_create(&hp_mv_infer);
+
+ mv_inference_configure(hp_mv_infer, hp_mv_engine_cfg);
+
+ clock_t start = clock();
+ mv_inference_prepare(hp_mv_infer);
+ clock_t end = clock();
+ printf("time: %2.3f\n", (double)(end - start)/CLOCKS_PER_SEC);
+
+ /* mediavision source */
+ mv_create_source(&mv_src_p);
+
+ mv_inference_create(&hp_mv_infer2);
+
+ mv_inference_configure(hp_mv_infer2, hp_mv_engine_cfg2);
+
+ start = clock();
+ mv_inference_prepare(hp_mv_infer2);
+ end = clock();
+ printf("time: %2.3f\n", (double)(end - start)/CLOCKS_PER_SEC);
+
+ mv_create_source(&mv_src_p2);
+
+ pipeline = gst_pipeline_new("app");
+ overlay_state = g_new0 (CairoOverlayState, 1);
+
+ /* create gstreamer elements */
+ source = gst_element_factory_make("v4l2src", "src");
+ filter = gst_element_factory_make("capsfilter", "filter");
+ flip = gst_element_factory_make("videoflip", "vflip" );
+ tee = gst_element_factory_make("tee", "tee");
+ queue1 = gst_element_factory_make("queue", "queue1");
+ queue2 = gst_element_factory_make("queue", "queue2");
+
+ // queue1 - videoscale - capsfilter - videoconvert - capsfilter - videorate - capsfilter - fakesink
+ vscale = gst_element_factory_make("videoscale", "scale");
+ vsfilter = gst_element_factory_make("capsfilter", "vsfilter");
+ vconv = gst_element_factory_make("videoconvert", "convert");
+ vcfilter = gst_element_factory_make("capsfilter", "vcfilter");
+ vrate = gst_element_factory_make("videorate", "rate");
+ vrfilter = gst_element_factory_make("capsfilter", "vrfilter");
+ vrsink = gst_element_factory_make("fakesink", "vrsink");
+
+ // queue2 - videoconvert - cairooveray - tizenwlsink
+ oconv = gst_element_factory_make("videoconvert", "oconv");
+ coverlay = gst_element_factory_make("cairooverlay", "coverlay");
+
+ sink = gst_element_factory_make("fpsdisplaysink", "vsink");
+ sink2 = gst_element_factory_make("tizenwlsink", "vsink2");
+
+ if (!pipeline || !source ||
+ !tee || !queue1 || !vscale || !vsfilter || !vconv || !vcfilter ||
+ !vrate || !vrfilter || !vrsink ||
+ !queue2 || !oconv || !coverlay || !sink || !sink2) {
+ printf(TEXT_RED "One element(queue1 or queue2) might be not created. Exiting.\n" TEXT_RESET);
+ return -1;
+ }
+
+ if (!filter) {
+ printf(TEXT_RED "One element might be not created. Existing.\n" TEXT_RESET);
+ return -1;
+ }
+ g_object_set(G_OBJECT(filter), "caps", gst_caps_from_string("video/x-raw, format=YUY2, width=640, height=480"), NULL);
+
+ g_signal_connect (coverlay, "draw", G_CALLBACK (draw_overlay_hand), overlay_state);
+
+ g_signal_connect (coverlay, "caps-changed", G_CALLBACK (prepare_overlay), overlay_state);
+
+ g_object_set(G_OBJECT(source), "device", "/dev/video252", NULL); //tv <->realsense
+
+ g_object_set(G_OBJECT(flip), "method", 4, NULL);
+ g_object_set(G_OBJECT(sink2), "use-tbm", FALSE, NULL);
+ g_object_set(G_OBJECT(sink2), "sync", TRUE, NULL);
+ g_object_set(G_OBJECT(sink), "video-sink", sink2, NULL);
+ g_object_set(G_OBJECT(sink), "sync", TRUE, NULL);
+ g_object_set(G_OBJECT(sink), "text-overlay", FALSE, NULL);
+
+ g_object_set(G_OBJECT(vsfilter), "caps", gst_caps_from_string("video/x-raw, width=224, height=224"), NULL);
+ poseCropSize = 224;
+
+ g_object_set(G_OBJECT(vcfilter), "caps", gst_caps_from_string("video/x-raw, format=RGB"), NULL);
+ g_object_set(G_OBJECT(vrfilter), "caps", gst_caps_from_string("video/x-raw, framerate=10/1"), NULL);
+
+ // inference branch: fakesink "handoff" drives hand detection
+ printf("vrsink signal-handoffs\n");
+ g_object_set(G_OBJECT(vrsink), "signal-handoffs", TRUE, NULL);
+ handler_p = g_signal_connect (vrsink, "handoff", G_CALLBACK(_hand_est_handoff), outputTensorData);
+ g_object_set(G_OBJECT(vrsink), "sync", TRUE, NULL);
+
+ /* add a message handler */
+ bus = gst_pipeline_get_bus (GST_PIPELINE(pipeline));
+ bus_watch_id = gst_bus_add_watch(bus, bus_call, NULL);
+ gst_object_unref(bus);
+
+ /* add elements into the pipeline */
+ gst_bin_add_many(GST_BIN(pipeline),
+ source,
+ flip, tee, queue1, vscale, vsfilter, vconv, vcfilter,
+ vrate, vrfilter, vrsink,
+ queue2, oconv, coverlay, sink,
+ //queue2, oconv, sink,
+ NULL);
+
+ /* link elements */
+ gst_bin_add(GST_BIN(pipeline), filter);
+ gst_element_link_many(source, flip, filter, tee, NULL);
+
+ // display
+ gst_element_link_many (tee, queue2, oconv, coverlay, /*toverlay,*/ sink, NULL);
+ // pose
+ gst_element_link_many (tee, queue1, vrate, vrfilter, vconv, vcfilter, vscale, vsfilter, vrsink, NULL);
+
+ /* loop */
+ humanSkeleton.IsDetected = false;
+ humanSkeleton.isPrevPose = false;
+ humanSkeleton.label = -1;
+ printf(TEXT_GREEN "Running.....\n" TEXT_RESET);
+ gst_element_set_state (pipeline, GST_STATE_PLAYING);
+
+ return 0;
+}
+
+
+static int app_terminate(void *data)
+{
+ appdata *ad = data;
+
+ /* out of loop */
+ printf(TEXT_GREEN "Stopping.....\n" TEXT_RESET);
+ gst_element_set_state(pipeline, GST_STATE_NULL);
+
+ printf(TEXT_GREEN "pipe unref.....\n" TEXT_RESET);
+ gst_object_unref(GST_OBJECT(pipeline));
+
+ printf(TEXT_GREEN "remove bus_watch id.....\n" TEXT_RESET);
+ g_source_remove(bus_watch_id);
+ g_mutex_clear(&pose_mutex);
+ printf(TEXT_GREEN "done.....\n" TEXT_RESET);
+ return 0;
+}
+struct appcore_ops ops = {
+ .create = app_create,
+ .terminate = app_terminate,
+};
+
+static void show_test_setting(void)
+{
+ printf("Backend : %s\n", ad.backendType == MV_INFERENCE_BACKEND_TFLITE ? "TFLITE" :
+ ad.backendType == MV_INFERENCE_BACKEND_ARMNN ? "ARMNN" : "ONE");
+ printf("Device : %s\n", ad.deviceType == MV_INFERENCE_TARGET_DEVICE_CPU ? "CPU" : "GPU");
+}
+
+int main (int argc, char *argv[])
+{
+ memset(&ad, 0x0, sizeof(appdata));
+ ops.data = &ad;
+
+ ad.backendType = MV_INFERENCE_BACKEND_TFLITE;
+ ad.deviceType = MV_INFERENCE_TARGET_DEVICE_GPU;
+
+ show_test_setting();
+
+ poseRoi.point.x = 0;
+ poseRoi.point.y = 0;
+ poseRoi.width = 0;
+ poseRoi.height = 0;
+ my_cursor = efl_util_input_initialize_generator(EFL_UTIL_INPUT_DEVTYPE_POINTER);
+ my_touch = efl_util_input_initialize_generator(EFL_UTIL_INPUT_DEVTYPE_TOUCHSCREEN);
+ my_keyboard = efl_util_input_initialize_generator(EFL_UTIL_INPUT_DEVTYPE_KEYBOARD);
+
+ arguments.mode = 0;
+ argp_parse (&argp, argc, argv, 0, 0, &arguments);
+
+ return appcore_efl_main(PACKAGE, &argc, &argv, &ops);
+}