[UTC][MediaVision][Non-ACR]: update mediavision utc and itc 06/282506/7
author    Inki Dae <inki.dae@samsung.com>
          Tue, 4 Oct 2022 05:32:06 +0000 (14:32 +0900)
committer Inki Dae <inki.dae@samsung.com>
          Wed, 11 Jan 2023 07:44:17 +0000 (16:44 +0900)
This patch separates the existing UTC into legacy and deep-learning test cases due to an
OOM issue caused by resourced - the MediaVision UTC can exceed the maximum memory
usage allowed by the resourced memcg policy - and also updates the meta files.
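
For reference, a minimal sketch (not part of this patch) of how the per-app memory ceiling
enforced by the resourced memcg policy can be inspected on a target; the cgroup v1 paths
below are an assumption and may differ per profile/image:

    # illustrative only (assumed cgroup v1 memory controller layout)
    # hard limit the test app's cgroup runs under
    cat /sys/fs/cgroup/memory/memory.limit_in_bytes
    # peak usage recorded for the cgroup; exceeding the limit above leads to an OOM kill
    cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes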

Change-Id: I3e67aa8559da1e51e032efe666b0e33866fd4c74
Signed-off-by: Inki Dae <inki.dae@samsung.com>
111 files changed:
packaging/utc/core-capi-media-vision-dl-tests.spec [new file with mode: 0755]
packaging/utc/core-capi-media-vision-dl-tests.xml [new file with mode: 0755]
packaging/utc/core-capi-media-vision-tests.spec
scripts_tpk/spec.sh
scripts_tpk/tizeniot_service_modules.txt
src/itc/media-vision/res/face_recognition/models/facenet.json [new file with mode: 0644]
src/itc/media-vision/res/inference/models/fd_tflite_model1_meta.json
src/itc/media-vision/res/inference/models/fld_tflite_model1_meta.json
src/itc/media-vision/res/inference/models/ic_tflite_model_meta.json
src/itc/media-vision/res/inference/models/od_tflite_model_meta.json
src/itc/media-vision/res/inference/models/pld_tflite_model_meta.json
src/utc/capi-media-vision-dl/CMakeLists.txt [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/00.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/01.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/02.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/03.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/04.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/05.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/06.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/07.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/08.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P1/09.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/00.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/01.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/02.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/03.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/04.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/05.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/06.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/07.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/08.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/images/P2/09.jpg [new file with mode: 0755]
src/utc/capi-media-vision-dl/res/face_recognition/models/facenet.json [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/face_recognition/models/facenet.tflite [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/images/banana.jpg [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/images/dog2.jpg [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/images/faceDetection.jpg [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/images/faceLandmark.jpg [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/images/poseLandmark.jpg [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/fd_tflite_model1.tflite [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/fd_tflite_model1_meta.json [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/fld_tflite_model1.tflite [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/fld_tflite_model1_meta.json [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/ic_label.txt [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/ic_tflite_model.tflite [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/ic_tflite_model_meta.json [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/od_label.txt [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/od_tflite_model.tflite [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/od_tflite_model_meta.json [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/pld_mocap.bvh [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/pld_mocap_mapping.txt [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/pld_pose_mapping.txt [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/pld_tflite_model.tflite [new file with mode: 0644]
src/utc/capi-media-vision-dl/res/inference/models/pld_tflite_model_meta.json [new file with mode: 0644]
src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core.c [new file with mode: 0644]
src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_mobile.h [new file with mode: 0755]
src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_tizeniot.h [new file with mode: 0755]
src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_tv.h [new file with mode: 0755]
src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_wearable.h [new file with mode: 0755]
src/utc/capi-media-vision-dl/utc-mv_common.c [new file with mode: 0755]
src/utc/capi-media-vision-dl/utc-mv_face_recognition.c [new file with mode: 0755]
src/utc/capi-media-vision-dl/utc-mv_inference.c [new file with mode: 0755]
src/utc/capi-media-vision/CMakeLists.txt
src/utc/capi-media-vision/res/face_recognition/images/P1/00.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/01.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/02.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/03.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/04.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/05.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/06.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/07.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/08.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P1/09.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/00.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/01.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/02.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/03.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/04.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/05.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/06.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/07.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/08.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/images/P2/09.jpg [deleted file]
src/utc/capi-media-vision/res/face_recognition/models/facenet.tflite [deleted file]
src/utc/capi-media-vision/res/inference/images/banana.jpg [deleted file]
src/utc/capi-media-vision/res/inference/images/dog2.jpg [deleted file]
src/utc/capi-media-vision/res/inference/images/faceDetection.jpg [deleted file]
src/utc/capi-media-vision/res/inference/images/faceLandmark.jpg [deleted file]
src/utc/capi-media-vision/res/inference/images/poseLandmark.jpg [deleted file]
src/utc/capi-media-vision/res/inference/models/fd_tflite_model1.tflite [deleted file]
src/utc/capi-media-vision/res/inference/models/fd_tflite_model1_meta.json [deleted file]
src/utc/capi-media-vision/res/inference/models/fld_tflite_model1.tflite [deleted file]
src/utc/capi-media-vision/res/inference/models/fld_tflite_model1_meta.json [deleted file]
src/utc/capi-media-vision/res/inference/models/ic_label.txt [deleted file]
src/utc/capi-media-vision/res/inference/models/ic_tflite_model.tflite [deleted file]
src/utc/capi-media-vision/res/inference/models/ic_tflite_model_meta.json [deleted file]
src/utc/capi-media-vision/res/inference/models/od_label.txt [deleted file]
src/utc/capi-media-vision/res/inference/models/od_tflite_model.tflite [deleted file]
src/utc/capi-media-vision/res/inference/models/od_tflite_model_meta.json [deleted file]
src/utc/capi-media-vision/res/inference/models/pld_mocap.bvh [deleted file]
src/utc/capi-media-vision/res/inference/models/pld_mocap_mapping.txt [deleted file]
src/utc/capi-media-vision/res/inference/models/pld_pose_mapping.txt [deleted file]
src/utc/capi-media-vision/res/inference/models/pld_tflite_model.tflite [deleted file]
src/utc/capi-media-vision/res/inference/models/pld_tflite_model_meta.json [deleted file]
src/utc/capi-media-vision/tct-capi-media-vision-core.c
src/utc/capi-media-vision/tct-capi-media-vision-core_mobile.h
src/utc/capi-media-vision/tct-capi-media-vision-core_tizeniot.h
src/utc/capi-media-vision/tct-capi-media-vision-core_tv.h
src/utc/capi-media-vision/tct-capi-media-vision-core_wearable.h
src/utc/capi-media-vision/utc-mv_face_recognition.c [deleted file]
src/utc/capi-media-vision/utc-mv_inference.c [deleted file]

diff --git a/packaging/utc/core-capi-media-vision-dl-tests.spec b/packaging/utc/core-capi-media-vision-dl-tests.spec
new file mode 100755 (executable)
index 0000000..a587e82
--- /dev/null
@@ -0,0 +1,97 @@
+%define MODULE_NAME capi-media-vision-dl
+%define MODULE_LIBNAME capi-media-vision
+Name:       core-%{MODULE_NAME}-tests
+Summary:    Core API unit TC (%{name})
+Version:    0.1
+Release:    0
+Group:      Development/Tools
+License:    Apache License, Version 2.0
+Source0:    %{name}-%{version}.tar.gz
+BuildRequires: pkgconfig(%{MODULE_LIBNAME})
+BuildRequires: pkgconfig(glib-2.0)
+BuildRequires: cmake
+BuildRequires: pkgconfig(capi-appfw-application)
+BuildRequires: pkgconfig(elementary)
+BuildRequires:  pkgconfig(bundle)
+BuildRequires:  pkgconfig(storage)
+BuildRequires:  pkgconfig(json-glib-1.0)
+BuildRequires:  pkgconfig(capi-media-tool)
+BuildRequires:  pkgconfig(libtbm)
+BuildRequires:  pkgconfig(dlog)
+BuildRequires:  pkgconfig(capi-media-image-util)
+BuildRequires:  zint
+BuildRequires:  zint-devel
+BuildRequires:  zbar
+BuildRequires:  pkgconfig(capi-system-info)
+
+%description
+Core API unit TC (%{name})
+
+%prep
+%setup -q
+
+%build
+
+%define PREFIX "%{_libdir}/%{name}"
+
+export LDFLAGS+="-Wl,--rpath=%{PREFIX} -Wl,--as-needed"
+
+%if %{?ASAN_BUILD:1}0
+       %if %{?DEVICE_BUILD_TYPE_MOBILE:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="mobile" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+       %if %{?DEVICE_BUILD_TYPE_WEARABLE:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="wearable" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+       %if %{?DEVICE_BUILD_TYPE_TV:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tv" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+       %if %{?DEVICE_BUILD_TYPE_TIZENIOT:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tizeniot" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+%else
+       %if %{?DEVICE_BUILD_TYPE_MOBILE:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="mobile" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+       %if %{?DEVICE_BUILD_TYPE_WEARABLE:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="wearable" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+       %if %{?DEVICE_BUILD_TYPE_TV:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tv" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+       %if %{?DEVICE_BUILD_TYPE_TIZENIOT:1}0
+       cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tizeniot" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix}
+       %endif
+%endif
+
+make %{?jobs:-j%jobs}
+
+%install
+rm -rf %{buildroot}
+%make_install
+mkdir -p %{buildroot}/usr/share/license
+cp LICENSE %{buildroot}/usr/share/license/%{name}
+
+mkdir -p %{buildroot}/usr/share/packages/
+cp packaging/utc/%{name}*.xml %{buildroot}/usr/share/packages/
+
+mkdir -p %{buildroot}%{APP_PATH}%{name}/bin
+mkdir -p %{buildroot}%{APP_PATH}%{name}/data/
+mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/inference/models
+mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/inference/images
+cp src/utc/capi-media-vision-dl/res/inference/models/* %{buildroot}%{APP_PATH}%{name}/data/res/inference/models/
+cp src/utc/capi-media-vision-dl/res/inference/images/* %{buildroot}%{APP_PATH}%{name}/data/res/inference/images/
+
+mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/models
+mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/images
+cp src/utc/capi-media-vision-dl/res/face_recognition/models/* %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/models/
+cp src/utc/capi-media-vision-dl/res/face_recognition/images/* %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/images/
+
+%post
+
+%postun
+
+%files
+%{APP_PATH}%{name}/*
+/usr/share/packages/%{name}.xml
+/usr/share/license/%{name}
diff --git a/packaging/utc/core-capi-media-vision-dl-tests.xml b/packaging/utc/core-capi-media-vision-dl-tests.xml
new file mode 100755 (executable)
index 0000000..26c4609
--- /dev/null
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns="http://tizen.org/ns/packages" package="core-capi-media-vision-dl-tests" version="0.1.0" api-version="6.0">
+    <label>CoreCapiMediaVisionDlTest</label>
+    <author email="test@tizen.org" href="www.tizen.org">test</author>
+    <description>Core API test Application</description>
+    <ui-application appid="core.capi-media-vision-dl-tests" exec="/usr/apps/core-capi-media-vision-dl-tests/bin/tct-capi-media-vision-dl-core" nodisplay="false" multiple="false" type="capp" taskmanage="true">
+    <background-category value="background-network"/>
+       <background-category value="download"/>
+       <background-category value="iot-communication"/>
+       <background-category value="location"/>
+       <background-category value="media"/>
+       <background-category value="sensor"/>
+    </ui-application>
+    <privileges>
+        <privilege>http://tizen.org/privilege/externalstorage</privilege>
+        <privilege>http://tizen.org/privilege/mediastorage</privilege>
+    </privileges>
+</manifest>
diff --git a/packaging/utc/core-capi-media-vision-tests.spec b/packaging/utc/core-capi-media-vision-tests.spec
index 8d6a04b59962b44aa73f5079638c69f9b8754bec..e0d3f0c1fc0dc6d16a7b5ff1075e55062e7081c6 100755 (executable)
@@ -115,26 +115,15 @@ cp src/utc/capi-media-vision/res/image/tracking/*.jpg %{buildroot}%{APP_PATH}%{n
 mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/surv
 cp src/utc/capi-media-vision/res/surv/* %{buildroot}%{APP_PATH}%{name}/data/res/surv/
 
-mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/inference/models
-mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/inference/images
-cp src/utc/capi-media-vision/res/inference/models/* %{buildroot}%{APP_PATH}%{name}/data/res/inference/models/
-cp src/utc/capi-media-vision/res/inference/images/* %{buildroot}%{APP_PATH}%{name}/data/res/inference/images/
-
 mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/roi_tracker/
 cp src/utc/capi-media-vision/res/roi_tracker/* %{buildroot}%{APP_PATH}%{name}/data/res/roi_tracker/
 
-mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/models
-mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/images
-cp src/utc/capi-media-vision/res/face_recognition/models/* %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/models/
-cp src/utc/capi-media-vision/res/face_recognition/images/* %{buildroot}%{APP_PATH}%{name}/data/res/face_recognition/images/
-
 mkdir -p %{buildroot}%{APP_PATH}%{name}/data/res/3d/
 cp src/utc/capi-media-vision/res/3d/* %{buildroot}%{APP_PATH}%{name}/data/res/3d/
 %post
 
 %postun
 
-
 %files
 %{APP_PATH}%{name}/*
 /usr/share/packages/%{name}.xml
diff --git a/scripts_tpk/spec.sh b/scripts_tpk/spec.sh
index 5d04d002c753213f293dc769842111cfc70f4fb3..6bf6e395c11753efd4c74ed0c8417f25c043cce5 100644 (file)
@@ -861,6 +861,23 @@ case "$1" in
                        chsmack -a "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/roi_tracker/*
                        chsmack -e "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/roi_tracker/*
 
+                       mkdir -p $DEVICE_PHYSICAL_STORAGE_30/res/3d
+                       chsmack -a "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d
+                       chsmack -e "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d
+                       cp res/3d/* $DEVICE_PHYSICAL_STORAGE_30/res/3d/
+                       chsmack -a "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d/*
+                       chsmack -e "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d/*
+                       chmod -R 777 $DEVICE_SUITE_TARGET_30/res/3d
+                       chown -R 5000:5000 $DEVICE_SUITE_TARGET_30/res/3d
+                       ;;
+       "org.tizen.capi-media-vision-dl-native-utc")
+                       echo "Installing pre-requisites for the package $1"
+                       mkdir -p $DEVICE_SUITE_TARGET_30/data
+                       chsmack -a "User::App::Shared" $DEVICE_SUITE_TARGET_30/data
+                       chsmack -e "User::App::Shared" $DEVICE_SUITE_TARGET_30/data
+                       chmod -R 777 $DEVICE_SUITE_TARGET_30/data
+                       chown -R 5000:5000 $DEVICE_SUITE_TARGET_30/data
+
                        mkdir -p $DEVICE_PHYSICAL_STORAGE_30/res/inference/models
                        mkdir -p $DEVICE_PHYSICAL_STORAGE_30/res/inference/images
                        chsmack -a "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/inference
@@ -907,15 +924,6 @@ case "$1" in
                        chsmack -e "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/face_recognition/images/P1/*
                        chsmack -a "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/face_recognition/images/P2/*
                        chsmack -e "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/face_recognition/images/P2/*
-
-                       mkdir -p $DEVICE_PHYSICAL_STORAGE_30/res/3d
-                       chsmack -a "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d
-                       chsmack -e "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d
-                       cp res/3d/* $DEVICE_PHYSICAL_STORAGE_30/res/3d/
-                       chsmack -a "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d/*
-                       chsmack -e "User::App::Shared" $DEVICE_PHYSICAL_STORAGE_30/res/3d/*
-                       chmod -R 777 $DEVICE_SUITE_TARGET_30/res/3d
-                       chown -R 5000:5000 $DEVICE_SUITE_TARGET_30/res/3d
                        ;;
        "org.tizen.capi-message-port-native-utc")
                        echo "Installing pre-requisites for the package $1"
diff --git a/scripts_tpk/tizeniot_service_modules.txt b/scripts_tpk/tizeniot_service_modules.txt
index ec2159616e1f564067f9ccc55f3af93eb87f3aca..83ed6681ec87f547c882353d8cd3e9bd5ba61550 100755 (executable)
@@ -8,6 +8,7 @@ bluetooth
 bundle
 calendar-service2
 capi-media-vision
+capi-media-vision-dl
 capi-message-port
 connection
 contacts-service2
diff --git a/src/itc/media-vision/res/face_recognition/models/facenet.json b/src/itc/media-vision/res/face_recognition/models/facenet.json
new file mode 100644 (file)
index 0000000..f644f0d
--- /dev/null
@@ -0,0 +1,26 @@
+{
+  "input": [
+    {
+      "tensor1": {
+        "name": "input_1",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 160, 160, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
+    {
+      "tensor1": {
+        "name": "normalize/l2_normalize"
+      }
+    }
+  ]
+}
diff --git a/src/itc/media-vision/res/inference/models/fd_tflite_model1_meta.json b/src/itc/media-vision/res/inference/models/fd_tflite_model1_meta.json
index 86b7b3c456de676c8b2b506fcfaca116ccb6e8e4..40bf54fbada7f2e3d767ca6b507b9d85444acc60 100644 (file)
@@ -1,56 +1,68 @@
 {
-    "inputmetadata" :
+  "input": [
     {
-        "tensor_info" : [
-            {
-                "name" : "normalized_input_image_tensor",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 300, 300, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [127.5, 127.5, 127.5],
-                        "std" : [127.5, 127.5, 127.5]
-                    }
-                ]
-            }
-        ]
+      "tensor1": {
+        "name": "normalized_input_image_tensor",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 300, 300, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
+    {
+      "tensor1": {
+        "name": "TFLite_Detection_PostProcess:2",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1 ],
+            "top_number": 5,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          }
+        }
+      }
+    },
+    {
+      "tensor2": {
+        "name": "TFLite_Detection_PostProcess",
+        "postprocess": {
+          "box": {
+            "index": [ -1, -1, 1 ],
+            "box_type": "ORIGIN_LEFTTOP",
+            "box_order": [ 1, 0, 3, 2 ],
+            "box_coordinate": "RATIO",
+            "decoding_type": "BYPASS"
+          }
+        }
+      }
+    },
+    {
+      "tensor3": {
+        "name": "TFLite_Detection_PostProcess:1",
+        "postprocess": {
+          "label": {
+            "index": [ -1, 1 ]
+          }
+        }
+      }
     },
-    "outputmetadata" :
     {
-        "score" :
-            {
-                "name" : ["TFLite_Detection_PostProcess:2"],
-                "index" : [-1, 1],
-                "top_number" : 5,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            },
-        "box" :
-            {
-               "name" : ["TFLite_Detection_PostProcess"],
-               "index" : [-1, -1, 1],
-               "box_type" : "ORIGIN_LEFTTOP",
-               "box_order" : [1, 0, 3, 2],
-               "box_coordinate" : "RATIO",
-               "decoding_type": "BYPASS"
-            },
-        "label" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:1",
-                "index" : [-1, 1]
-            }
-        ],
-        "number" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:3",
-                "index" : [1]
-            }
-        ]
+      "tensor4": {
+        "name": "TFLite_Detection_PostProcess:3",
+        "postprocess": {
+          "number": {
+            "index": [ 1 ]
+          }
+        }
+      }
     }
-}
+  ]
+}
\ No newline at end of file
diff --git a/src/itc/media-vision/res/inference/models/fld_tflite_model1_meta.json b/src/itc/media-vision/res/inference/models/fld_tflite_model1_meta.json
index f0b4c902e6e6663de049d8fe9f161916326c9d6e..7f5d7f255ebf83bc8ae1cfc84978126fc193df91 100644 (file)
@@ -1,46 +1,41 @@
 {
-    "inputmetadata" :
+  "input": [
     {
-        "tensor_info" : [
-            {
-                "name" : "Placeholder",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 128, 128, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [0.0, 0.0, 0.0],
-                        "std" : [1.0, 1.0, 1.0]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
+      "tensor1": {
+        "name": "Placeholder",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 128, 128, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 0.0, 0.0, 0.0 ],
+            "std": [ 1.0, 1.0, 1.0 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
     {
-        "score" :
-            {
-                "name" : ["fanet8ss_inference/fully_connected_1/Sigmoid"],
-                "index" : [-1, -1, -1, -1],
-                "top_number" : 1,
-                "threshold" : 0.0,
-                "score_type" : "NORMAL"
-            }
-        ,
-        "landmark" : [
-            {
-                "name" : "fanet8ss_inference/fully_connected_1/Sigmoid",
-                "index" : [-1, 1],
-                "landmark_type" : "2D_SINGLE",
-                "landmark_coordinate" : "RATIO",
-                "landmark_offset" : 2,
-                "decoding_type" : "BYPASS"
-            }
-        ]
+      "tensor1": {
+        "name": "fanet8ss_inference/fully_connected_1/Sigmoid",
+        "postprocess": {
+          "score": {
+            "index": [ -1, -1, -1, -1 ],
+            "top_number": 1,
+            "threshold": 0.0,
+            "score_type": "NORMAL"
+          },
+          "landmark": {
+            "index": [ -1, 1 ],
+            "landmark_type": "2D_SINGLE",
+            "landmark_coordinate": "RATIO",
+            "landmark_offset": 2,
+            "decoding_type": "BYPASS"
+          }
+        }
+      }
     }
-}
+  ]
+}
\ No newline at end of file
diff --git a/src/itc/media-vision/res/inference/models/ic_tflite_model_meta.json b/src/itc/media-vision/res/inference/models/ic_tflite_model_meta.json
index 5b1782db3e089ca2234dce7c284ebfd882d50991..a22c544252d37a0e8b2513f021a5d8047c681487 100644 (file)
@@ -1,35 +1,34 @@
 {
-    "inputmetadata" :
+  "input": [
     {
-        "tensor_info" : [
-            {
-                "name" : "input_2",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 224, 224, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [127.5, 127.5, 127.5],
-                        "std" : [127.5, 127.5, 127.5]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
+      "tensor1": {
+        "name": "input_2",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 224, 224, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
     {
-        "score" :
-            {
-                "name" : ["dense_3/Softmax"],
-                "index" : [-1, 1],
-                               "top_number" : 5,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            }
+      "tensor1": {
+        "name": "dense_3/Softmax",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1 ],
+            "top_number": 5,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          }
+        }
+      }
     }
-}
+  ]
+}
\ No newline at end of file
diff --git a/src/itc/media-vision/res/inference/models/od_tflite_model_meta.json b/src/itc/media-vision/res/inference/models/od_tflite_model_meta.json
index 86b7b3c456de676c8b2b506fcfaca116ccb6e8e4..0d7837326adc13fa9faea058e2c16ec75015bca1 100644 (file)
@@ -1,56 +1,69 @@
 {
-    "inputmetadata" :
+  "input": [
     {
-        "tensor_info" : [
-            {
-                "name" : "normalized_input_image_tensor",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 300, 300, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [127.5, 127.5, 127.5],
-                        "std" : [127.5, 127.5, 127.5]
-                    }
-                ]
-            }
-        ]
+      "tensor1": {
+        "name": "normalized_input_image_tensor",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 300, 300, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+
+  "output": [
+    {
+      "tensor1": {
+        "name": "TFLite_Detection_PostProcess:2",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1 ],
+            "top_number": 5,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          }
+        }
+      }
+    },
+    {
+      "tensor2": {
+        "name": "TFLite_Detection_PostProcess",
+        "postprocess": {
+          "box": {
+            "index": [ -1, -1, 1 ],
+            "box_type": "ORIGIN_LEFTTOP",
+            "box_order": [ 1, 0, 3, 2 ],
+            "box_coordinate": "RATIO",
+            "decoding_type": "BYPASS"
+          }
+        }
+      }
+    },
+    {
+      "tensor3": {
+        "name": "TFLite_Detection_PostProcess:1",
+        "postprocess": {
+          "label": {
+            "index": [ -1, 1 ]
+          }
+        }
+      }
     },
-    "outputmetadata" :
     {
-        "score" :
-            {
-                "name" : ["TFLite_Detection_PostProcess:2"],
-                "index" : [-1, 1],
-                "top_number" : 5,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            },
-        "box" :
-            {
-               "name" : ["TFLite_Detection_PostProcess"],
-               "index" : [-1, -1, 1],
-               "box_type" : "ORIGIN_LEFTTOP",
-               "box_order" : [1, 0, 3, 2],
-               "box_coordinate" : "RATIO",
-               "decoding_type": "BYPASS"
-            },
-        "label" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:1",
-                "index" : [-1, 1]
-            }
-        ],
-        "number" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:3",
-                "index" : [1]
-            }
-        ]
+      "tensor4": {
+        "name": "TFLite_Detection_PostProcess:3",
+        "postprocess": {
+          "number": {
+            "index": [ 1 ]
+          }
+        }
+      }
     }
+  ]
 }
diff --git a/src/itc/media-vision/res/inference/models/pld_tflite_model_meta.json b/src/itc/media-vision/res/inference/models/pld_tflite_model_meta.json
index a6718d5e0547a27dfa4b4a6b9f58108da207d668..7b4e49d899300797bbf03d070fbeadf54b76bf62 100644 (file)
@@ -1,52 +1,45 @@
 {
-    "inputmetadata" :
+  "input": [
     {
-        "tensor_info" : [
-            {
-                "name" : "image",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 192, 192, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [0.0, 0.0, 0.0],
-                        "std" : [1.0, 1.0, 1.0]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
+      "tensor1": {
+        "name": "image",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 192, 192, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 0.0, 0.0, 0.0 ],
+            "std": [ 1.0, 1.0, 1.0 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
     {
-        "score" :
-            {
-                "name" : ["Convolutional_Pose_Machine/stage_5_out"],
-                "index" : [-1, 1, 1, 1],
-                "top_number" : 1 ,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            }
-        ,
-        "landmark" : [
-            {
-                "name" : "Convolutional_Pose_Machine/stage_5_out",
-                "index" : [-1, 1, 1, 1],
-                "landmark_type" : "2D_SINGLE",
-                "landmark_coordinate" : "PIXEL",
-                "decoding_type" : "HEATMAP",
-                "decoding_info" :
-                {
-                    "heatmap" :
-                     {
-                         "shape_type": "NHWC"
-                     }
-                }
+      "tensor1": {
+        "name": "Convolutional_Pose_Machine/stage_5_out",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1, 1, 1 ],
+            "top_number": 1,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          },
+          "landmark": {
+            "index": [ -1, 1, 1, 1 ],
+            "landmark_type": "2D_SINGLE",
+            "landmark_coordinate": "PIXEL",
+            "decoding_type": "HEATMAP",
+            "decoding_info": {
+              "heatmap": {
+                "shape_type": "NHWC"
+              }
             }
-        ]
+          }
+        }
+      }
     }
-}
+  ]
+}
\ No newline at end of file
diff --git a/src/utc/capi-media-vision-dl/CMakeLists.txt b/src/utc/capi-media-vision-dl/CMakeLists.txt
new file mode 100644 (file)
index 0000000..19b1f6f
--- /dev/null
@@ -0,0 +1,55 @@
+SET(PKG_NAME "capi-media-vision-dl")
+
+SET(EXEC_NAME "tct-${PKG_NAME}-core")
+SET(RPM_NAME "core-${PKG_NAME}-tests")
+
+SET(CAPI_LIB "capi-media-vision")
+SET(TC_SOURCES
+    utc-mv_common.c
+    utc-mv_inference.c
+)
+
+IF( DEFINED TIZENIOT OR DEFINED MOBILE OR DEFINED WEARABLE )
+SET(TC_SOURCES
+    utc-mv_face_recognition.c
+)
+ENDIF()
+
+PKG_CHECK_MODULES(${CAPI_LIB} REQUIRED
+    ${CAPI_LIB}
+    capi-appfw-application
+    capi-system-info
+    capi-media-image-util
+    bundle
+    storage
+    dlog
+    json-glib-1.0
+    libtbm
+    glib-2.0
+    dlog
+)
+
+INCLUDE_DIRECTORIES(
+    ${${CAPI_LIB}_INCLUDE_DIRS}
+)
+
+ADD_EXECUTABLE(${EXEC_NAME} ${EXEC_NAME}.c ${TC_SOURCES} ${COMMON_FILE})
+TARGET_LINK_LIBRARIES(${EXEC_NAME}
+    ${${CAPI_LIB}_LIBRARIES}
+    bundle
+    capi-media-tool
+    dlog
+    glib-2.0
+)
+
+INSTALL(PROGRAMS ${EXEC_NAME}
+    DESTINATION ${BIN_DIR}/${RPM_NAME}/bin
+)
+
+IF( DEFINED ASAN )
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC -Wall -pie -g -fsanitize=address -fsanitize-recover=address -U_FORTIFY_SOURCE -fno-omit-frame-pointer")
+SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=/usr/lib -Wl,-fsanitize=address")
+ELSE()
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g -fPIE -Wall")
+SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=/usr/lib -pie")
+ENDIF()
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/00.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/00.jpg
new file mode 100755 (executable)
index 0000000..3fa77e1
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/00.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/01.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/01.jpg
new file mode 100755 (executable)
index 0000000..f2b27ba
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/01.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/02.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/02.jpg
new file mode 100755 (executable)
index 0000000..27bf5f0
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/02.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/03.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/03.jpg
new file mode 100755 (executable)
index 0000000..3a317ad
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/03.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/04.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/04.jpg
new file mode 100755 (executable)
index 0000000..5b3f8c6
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/04.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/05.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/05.jpg
new file mode 100755 (executable)
index 0000000..bedc5f1
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/05.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/06.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/06.jpg
new file mode 100755 (executable)
index 0000000..180173a
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/06.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/07.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/07.jpg
new file mode 100755 (executable)
index 0000000..70a0f9d
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/07.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/08.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/08.jpg
new file mode 100755 (executable)
index 0000000..c6556a4
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/08.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/09.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/09.jpg
new file mode 100755 (executable)
index 0000000..4b510db
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P1/09.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/00.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/00.jpg
new file mode 100755 (executable)
index 0000000..54be030
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/00.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/01.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/01.jpg
new file mode 100755 (executable)
index 0000000..aa86935
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/01.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/02.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/02.jpg
new file mode 100755 (executable)
index 0000000..8650349
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/02.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/03.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/03.jpg
new file mode 100755 (executable)
index 0000000..1f8f469
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/03.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/04.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/04.jpg
new file mode 100755 (executable)
index 0000000..5a68372
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/04.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/05.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/05.jpg
new file mode 100755 (executable)
index 0000000..8e1300f
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/05.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/06.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/06.jpg
new file mode 100755 (executable)
index 0000000..b507918
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/06.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/07.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/07.jpg
new file mode 100755 (executable)
index 0000000..78d1e06
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/07.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/08.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/08.jpg
new file mode 100755 (executable)
index 0000000..1989d27
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/08.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/09.jpg b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/09.jpg
new file mode 100755 (executable)
index 0000000..5d8288c
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/images/P2/09.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/models/facenet.json b/src/utc/capi-media-vision-dl/res/face_recognition/models/facenet.json
new file mode 100644 (file)
index 0000000..f644f0d
--- /dev/null
@@ -0,0 +1,26 @@
+{
+  "input": [
+    {
+      "tensor1": {
+        "name": "input_1",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 160, 160, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
+    {
+      "tensor1": {
+        "name": "normalize/l2_normalize"
+      }
+    }
+  ]
+}
diff --git a/src/utc/capi-media-vision-dl/res/face_recognition/models/facenet.tflite b/src/utc/capi-media-vision-dl/res/face_recognition/models/facenet.tflite
new file mode 100644 (file)
index 0000000..4c19477
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/face_recognition/models/facenet.tflite differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/images/banana.jpg b/src/utc/capi-media-vision-dl/res/inference/images/banana.jpg
new file mode 100644 (file)
index 0000000..74a3527
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/images/banana.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/images/dog2.jpg b/src/utc/capi-media-vision-dl/res/inference/images/dog2.jpg
new file mode 100644 (file)
index 0000000..1d9e77c
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/images/dog2.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/images/faceDetection.jpg b/src/utc/capi-media-vision-dl/res/inference/images/faceDetection.jpg
new file mode 100644 (file)
index 0000000..faa36fc
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/images/faceDetection.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/images/faceLandmark.jpg b/src/utc/capi-media-vision-dl/res/inference/images/faceLandmark.jpg
new file mode 100644 (file)
index 0000000..375fb0b
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/images/faceLandmark.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/images/poseLandmark.jpg b/src/utc/capi-media-vision-dl/res/inference/images/poseLandmark.jpg
new file mode 100644 (file)
index 0000000..199db8f
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/images/poseLandmark.jpg differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/fd_tflite_model1.tflite b/src/utc/capi-media-vision-dl/res/inference/models/fd_tflite_model1.tflite
new file mode 100644 (file)
index 0000000..5425fcf
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/models/fd_tflite_model1.tflite differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/fd_tflite_model1_meta.json b/src/utc/capi-media-vision-dl/res/inference/models/fd_tflite_model1_meta.json
new file mode 100644 (file)
index 0000000..40bf54f
--- /dev/null
@@ -0,0 +1,68 @@
+{
+  "input": [
+    {
+      "tensor1": {
+        "name": "normalized_input_image_tensor",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 300, 300, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
+    {
+      "tensor1": {
+        "name": "TFLite_Detection_PostProcess:2",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1 ],
+            "top_number": 5,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          }
+        }
+      }
+    },
+    {
+      "tensor2": {
+        "name": "TFLite_Detection_PostProcess",
+        "postprocess": {
+          "box": {
+            "index": [ -1, -1, 1 ],
+            "box_type": "ORIGIN_LEFTTOP",
+            "box_order": [ 1, 0, 3, 2 ],
+            "box_coordinate": "RATIO",
+            "decoding_type": "BYPASS"
+          }
+        }
+      }
+    },
+    {
+      "tensor3": {
+        "name": "TFLite_Detection_PostProcess:1",
+        "postprocess": {
+          "label": {
+            "index": [ -1, 1 ]
+          }
+        }
+      }
+    },
+    {
+      "tensor4": {
+        "name": "TFLite_Detection_PostProcess:3",
+        "postprocess": {
+          "number": {
+            "index": [ 1 ]
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/fld_tflite_model1.tflite b/src/utc/capi-media-vision-dl/res/inference/models/fld_tflite_model1.tflite
new file mode 100644 (file)
index 0000000..2449c5e
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/models/fld_tflite_model1.tflite differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/fld_tflite_model1_meta.json b/src/utc/capi-media-vision-dl/res/inference/models/fld_tflite_model1_meta.json
new file mode 100644 (file)
index 0000000..7f5d7f2
--- /dev/null
@@ -0,0 +1,41 @@
+{
+  "input": [
+    {
+      "tensor1": {
+        "name": "Placeholder",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 128, 128, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 0.0, 0.0, 0.0 ],
+            "std": [ 1.0, 1.0, 1.0 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
+    {
+      "tensor1": {
+        "name": "fanet8ss_inference/fully_connected_1/Sigmoid",
+        "postprocess": {
+          "score": {
+            "index": [ -1, -1, -1, -1 ],
+            "top_number": 1,
+            "threshold": 0.0,
+            "score_type": "NORMAL"
+          },
+          "landmark": {
+            "index": [ -1, 1 ],
+            "landmark_type": "2D_SINGLE",
+            "landmark_coordinate": "RATIO",
+            "landmark_offset": 2,
+            "decoding_type": "BYPASS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/ic_label.txt b/src/utc/capi-media-vision-dl/res/inference/models/ic_label.txt
new file mode 100644 (file)
index 0000000..fe81123
--- /dev/null
@@ -0,0 +1,1001 @@
+background
+tench
+goldfish
+great white shark
+tiger shark
+hammerhead
+electric ray
+stingray
+cock
+hen
+ostrich
+brambling
+goldfinch
+house finch
+junco
+indigo bunting
+robin
+bulbul
+jay
+magpie
+chickadee
+water ouzel
+kite
+bald eagle
+vulture
+great grey owl
+European fire salamander
+common newt
+eft
+spotted salamander
+axolotl
+bullfrog
+tree frog
+tailed frog
+loggerhead
+leatherback turtle
+mud turtle
+terrapin
+box turtle
+banded gecko
+common iguana
+American chameleon
+whiptail
+agama
+frilled lizard
+alligator lizard
+Gila monster
+green lizard
+African chameleon
+Komodo dragon
+African crocodile
+American alligator
+triceratops
+thunder snake
+ringneck snake
+hognose snake
+green snake
+king snake
+garter snake
+water snake
+vine snake
+night snake
+boa constrictor
+rock python
+Indian cobra
+green mamba
+sea snake
+horned viper
+diamondback
+sidewinder
+trilobite
+harvestman
+scorpion
+black and gold garden spider
+barn spider
+garden spider
+black widow
+tarantula
+wolf spider
+tick
+centipede
+black grouse
+ptarmigan
+ruffed grouse
+prairie chicken
+peacock
+quail
+partridge
+African grey
+macaw
+sulphur-crested cockatoo
+lorikeet
+coucal
+bee eater
+hornbill
+hummingbird
+jacamar
+toucan
+drake
+red-breasted merganser
+goose
+black swan
+tusker
+echidna
+platypus
+wallaby
+koala
+wombat
+jellyfish
+sea anemone
+brain coral
+flatworm
+nematode
+conch
+snail
+slug
+sea slug
+chiton
+chambered nautilus
+Dungeness crab
+rock crab
+fiddler crab
+king crab
+American lobster
+spiny lobster
+crayfish
+hermit crab
+isopod
+white stork
+black stork
+spoonbill
+flamingo
+little blue heron
+American egret
+bittern
+crane
+limpkin
+European gallinule
+American coot
+bustard
+ruddy turnstone
+red-backed sandpiper
+redshank
+dowitcher
+oystercatcher
+pelican
+king penguin
+albatross
+grey whale
+killer whale
+dugong
+sea lion
+Chihuahua
+Japanese spaniel
+Maltese dog
+Pekinese
+Shih-Tzu
+Blenheim spaniel
+papillon
+toy terrier
+Rhodesian ridgeback
+Afghan hound
+basset
+beagle
+bloodhound
+bluetick
+black-and-tan coonhound
+Walker hound
+English foxhound
+redbone
+borzoi
+Irish wolfhound
+Italian greyhound
+whippet
+Ibizan hound
+Norwegian elkhound
+otterhound
+Saluki
+Scottish deerhound
+Weimaraner
+Staffordshire bullterrier
+American Staffordshire terrier
+Bedlington terrier
+Border terrier
+Kerry blue terrier
+Irish terrier
+Norfolk terrier
+Norwich terrier
+Yorkshire terrier
+wire-haired fox terrier
+Lakeland terrier
+Sealyham terrier
+Airedale
+cairn
+Australian terrier
+Dandie Dinmont
+Boston bull
+miniature schnauzer
+giant schnauzer
+standard schnauzer
+Scotch terrier
+Tibetan terrier
+silky terrier
+soft-coated wheaten terrier
+West Highland white terrier
+Lhasa
+flat-coated retriever
+curly-coated retriever
+golden retriever
+Labrador retriever
+Chesapeake Bay retriever
+German short-haired pointer
+vizsla
+English setter
+Irish setter
+Gordon setter
+Brittany spaniel
+clumber
+English springer
+Welsh springer spaniel
+cocker spaniel
+Sussex spaniel
+Irish water spaniel
+kuvasz
+schipperke
+groenendael
+malinois
+briard
+kelpie
+komondor
+Old English sheepdog
+Shetland sheepdog
+collie
+Border collie
+Bouvier des Flandres
+Rottweiler
+German shepherd
+Doberman
+miniature pinscher
+Greater Swiss Mountain dog
+Bernese mountain dog
+Appenzeller
+EntleBucher
+boxer
+bull mastiff
+Tibetan mastiff
+French bulldog
+Great Dane
+Saint Bernard
+Eskimo dog
+malamute
+Siberian husky
+dalmatian
+affenpinscher
+basenji
+pug
+Leonberg
+Newfoundland
+Great Pyrenees
+Samoyed
+Pomeranian
+chow
+keeshond
+Brabancon griffon
+Pembroke
+Cardigan
+toy poodle
+miniature poodle
+standard poodle
+Mexican hairless
+timber wolf
+white wolf
+red wolf
+coyote
+dingo
+dhole
+African hunting dog
+hyena
+red fox
+kit fox
+Arctic fox
+grey fox
+tabby
+tiger cat
+Persian cat
+Siamese cat
+Egyptian cat
+cougar
+lynx
+leopard
+snow leopard
+jaguar
+lion
+tiger
+cheetah
+brown bear
+American black bear
+ice bear
+sloth bear
+mongoose
+meerkat
+tiger beetle
+ladybug
+ground beetle
+long-horned beetle
+leaf beetle
+dung beetle
+rhinoceros beetle
+weevil
+fly
+bee
+ant
+grasshopper
+cricket
+walking stick
+cockroach
+mantis
+cicada
+leafhopper
+lacewing
+dragonfly
+damselfly
+admiral
+ringlet
+monarch
+cabbage butterfly
+sulphur butterfly
+lycaenid
+starfish
+sea urchin
+sea cucumber
+wood rabbit
+hare
+Angora
+hamster
+porcupine
+fox squirrel
+marmot
+beaver
+guinea pig
+sorrel
+zebra
+hog
+wild boar
+warthog
+hippopotamus
+ox
+water buffalo
+bison
+ram
+bighorn
+ibex
+hartebeest
+impala
+gazelle
+Arabian camel
+llama
+weasel
+mink
+polecat
+black-footed ferret
+otter
+skunk
+badger
+armadillo
+three-toed sloth
+orangutan
+gorilla
+chimpanzee
+gibbon
+siamang
+guenon
+patas
+baboon
+macaque
+langur
+colobus
+proboscis monkey
+marmoset
+capuchin
+howler monkey
+titi
+spider monkey
+squirrel monkey
+Madagascar cat
+indri
+Indian elephant
+African elephant
+lesser panda
+giant panda
+barracouta
+eel
+coho
+rock beauty
+anemone fish
+sturgeon
+gar
+lionfish
+puffer
+abacus
+abaya
+academic gown
+accordion
+acoustic guitar
+aircraft carrier
+airliner
+airship
+altar
+ambulance
+amphibian
+analog clock
+apiary
+apron
+ashcan
+assault rifle
+backpack
+bakery
+balance beam
+balloon
+ballpoint
+Band Aid
+banjo
+bannister
+barbell
+barber chair
+barbershop
+barn
+barometer
+barrel
+barrow
+baseball
+basketball
+bassinet
+bassoon
+bathing cap
+bath towel
+bathtub
+beach wagon
+beacon
+beaker
+bearskin
+beer bottle
+beer glass
+bell cote
+bib
+bicycle-built-for-two
+bikini
+binder
+binoculars
+birdhouse
+boathouse
+bobsled
+bolo tie
+bonnet
+bookcase
+bookshop
+bottlecap
+bow
+bow tie
+brass
+brassiere
+breakwater
+breastplate
+broom
+bucket
+buckle
+bulletproof vest
+bullet train
+butcher shop
+cab
+caldron
+candle
+cannon
+canoe
+can opener
+cardigan
+car mirror
+carousel
+carpenter's kit
+carton
+car wheel
+cash machine
+cassette
+cassette player
+castle
+catamaran
+CD player
+cello
+cellular telephone
+chain
+chainlink fence
+chain mail
+chain saw
+chest
+chiffonier
+chime
+china cabinet
+Christmas stocking
+church
+cinema
+cleaver
+cliff dwelling
+cloak
+clog
+cocktail shaker
+coffee mug
+coffeepot
+coil
+combination lock
+computer keyboard
+confectionery
+container ship
+convertible
+corkscrew
+cornet
+cowboy boot
+cowboy hat
+cradle
+crane
+crash helmet
+crate
+crib
+Crock Pot
+croquet ball
+crutch
+cuirass
+dam
+desk
+desktop computer
+dial telephone
+diaper
+digital clock
+digital watch
+dining table
+dishrag
+dishwasher
+disk brake
+dock
+dogsled
+dome
+doormat
+drilling platform
+drum
+drumstick
+dumbbell
+Dutch oven
+electric fan
+electric guitar
+electric locomotive
+entertainment center
+envelope
+espresso maker
+face powder
+feather boa
+file
+fireboat
+fire engine
+fire screen
+flagpole
+flute
+folding chair
+football helmet
+forklift
+fountain
+fountain pen
+four-poster
+freight car
+French horn
+frying pan
+fur coat
+garbage truck
+gasmask
+gas pump
+goblet
+go-kart
+golf ball
+golfcart
+gondola
+gong
+gown
+grand piano
+greenhouse
+grille
+grocery store
+guillotine
+hair slide
+hair spray
+half track
+hammer
+hamper
+hand blower
+hand-held computer
+handkerchief
+hard disc
+harmonica
+harp
+harvester
+hatchet
+holster
+home theater
+honeycomb
+hook
+hoopskirt
+horizontal bar
+horse cart
+hourglass
+iPod
+iron
+jack-o'-lantern
+jean
+jeep
+jersey
+jigsaw puzzle
+jinrikisha
+joystick
+kimono
+knee pad
+knot
+lab coat
+ladle
+lampshade
+laptop
+lawn mower
+lens cap
+letter opener
+library
+lifeboat
+lighter
+limousine
+liner
+lipstick
+Loafer
+lotion
+loudspeaker
+loupe
+lumbermill
+magnetic compass
+mailbag
+mailbox
+maillot
+maillot
+manhole cover
+maraca
+marimba
+mask
+matchstick
+maypole
+maze
+measuring cup
+medicine chest
+megalith
+microphone
+microwave
+military uniform
+milk can
+minibus
+miniskirt
+minivan
+missile
+mitten
+mixing bowl
+mobile home
+Model T
+modem
+monastery
+monitor
+moped
+mortar
+mortarboard
+mosque
+mosquito net
+motor scooter
+mountain bike
+mountain tent
+mouse
+mousetrap
+moving van
+muzzle
+nail
+neck brace
+necklace
+nipple
+notebook
+obelisk
+oboe
+ocarina
+odometer
+oil filter
+organ
+oscilloscope
+overskirt
+oxcart
+oxygen mask
+packet
+paddle
+paddlewheel
+padlock
+paintbrush
+pajama
+palace
+panpipe
+paper towel
+parachute
+parallel bars
+park bench
+parking meter
+passenger car
+patio
+pay-phone
+pedestal
+pencil box
+pencil sharpener
+perfume
+Petri dish
+photocopier
+pick
+pickelhaube
+picket fence
+pickup
+pier
+piggy bank
+pill bottle
+pillow
+ping-pong ball
+pinwheel
+pirate
+pitcher
+plane
+planetarium
+plastic bag
+plate rack
+plow
+plunger
+Polaroid camera
+pole
+police van
+poncho
+pool table
+pop bottle
+pot
+potter's wheel
+power drill
+prayer rug
+printer
+prison
+projectile
+projector
+puck
+punching bag
+purse
+quill
+quilt
+racer
+racket
+radiator
+radio
+radio telescope
+rain barrel
+recreational vehicle
+reel
+reflex camera
+refrigerator
+remote control
+restaurant
+revolver
+rifle
+rocking chair
+rotisserie
+rubber eraser
+rugby ball
+rule
+running shoe
+safe
+safety pin
+saltshaker
+sandal
+sarong
+sax
+scabbard
+scale
+school bus
+schooner
+scoreboard
+screen
+screw
+screwdriver
+seat belt
+sewing machine
+shield
+shoe shop
+shoji
+shopping basket
+shopping cart
+shovel
+shower cap
+shower curtain
+ski
+ski mask
+sleeping bag
+slide rule
+sliding door
+slot
+snorkel
+snowmobile
+snowplow
+soap dispenser
+soccer ball
+sock
+solar dish
+sombrero
+soup bowl
+space bar
+space heater
+space shuttle
+spatula
+speedboat
+spider web
+spindle
+sports car
+spotlight
+stage
+steam locomotive
+steel arch bridge
+steel drum
+stethoscope
+stole
+stone wall
+stopwatch
+stove
+strainer
+streetcar
+stretcher
+studio couch
+stupa
+submarine
+suit
+sundial
+sunglass
+sunglasses
+sunscreen
+suspension bridge
+swab
+sweatshirt
+swimming trunks
+swing
+switch
+syringe
+table lamp
+tank
+tape player
+teapot
+teddy
+television
+tennis ball
+thatch
+theater curtain
+thimble
+thresher
+throne
+tile roof
+toaster
+tobacco shop
+toilet seat
+torch
+totem pole
+tow truck
+toyshop
+tractor
+trailer truck
+tray
+trench coat
+tricycle
+trimaran
+tripod
+triumphal arch
+trolleybus
+trombone
+tub
+turnstile
+typewriter keyboard
+umbrella
+unicycle
+upright
+vacuum
+vase
+vault
+velvet
+vending machine
+vestment
+viaduct
+violin
+volleyball
+waffle iron
+wall clock
+wallet
+wardrobe
+warplane
+washbasin
+washer
+water bottle
+water jug
+water tower
+whiskey jug
+whistle
+wig
+window screen
+window shade
+Windsor tie
+wine bottle
+wing
+wok
+wooden spoon
+wool
+worm fence
+wreck
+yawl
+yurt
+web site
+comic book
+crossword puzzle
+street sign
+traffic light
+book jacket
+menu
+plate
+guacamole
+consomme
+hot pot
+trifle
+ice cream
+ice lolly
+French loaf
+bagel
+pretzel
+cheeseburger
+hotdog
+mashed potato
+head cabbage
+broccoli
+cauliflower
+zucchini
+spaghetti squash
+acorn squash
+butternut squash
+cucumber
+artichoke
+bell pepper
+cardoon
+mushroom
+Granny Smith
+strawberry
+orange
+lemon
+fig
+pineapple
+banana
+jackfruit
+custard apple
+pomegranate
+hay
+carbonara
+chocolate sauce
+dough
+meat loaf
+pizza
+potpie
+burrito
+red wine
+espresso
+cup
+eggnog
+alp
+bubble
+cliff
+coral reef
+geyser
+lakeside
+promontory
+sandbar
+seashore
+valley
+volcano
+ballplayer
+groom
+scuba diver
+rapeseed
+daisy
+yellow lady's slipper
+corn
+acorn
+hip
+buckeye
+coral fungus
+agaric
+gyromitra
+stinkhorn
+earthstar
+hen-of-the-woods
+bolete
+ear
+toilet tissue
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/ic_tflite_model.tflite b/src/utc/capi-media-vision-dl/res/inference/models/ic_tflite_model.tflite
new file mode 100644 (file)
index 0000000..db26630
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/models/ic_tflite_model.tflite differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/ic_tflite_model_meta.json b/src/utc/capi-media-vision-dl/res/inference/models/ic_tflite_model_meta.json
new file mode 100644 (file)
index 0000000..a22c544
--- /dev/null
@@ -0,0 +1,34 @@
+{
+  "input": [
+    {
+      "tensor1": {
+        "name": "input_2",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 224, 224, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
+    {
+      "tensor1": {
+        "name": "dense_3/Softmax",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1 ],
+            "top_number": 5,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
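The meta file above tells the inference engine how to feed the classification model (a 224x224x3 RGB input normalized with mean/std 127.5) and how to post-process its softmax output (top 5, threshold 0.3). The test code is expected to hand this path to the engine rather than parse the JSON itself. Below is a minimal sketch of that wiring, assuming the public Tizen mv_inference C API; the helper name, file-path handling and error handling are illustrative and not taken from this patch.

#include <stdio.h>
#include <mv_common.h>
#include <mv_inference.h>

/* Hypothetical callback: log each classified label with its confidence. */
static void on_classified(mv_source_h source, const int number_of_classes,
                          const int *indices, const char **names,
                          const float *confidences, void *user_data)
{
    for (int i = 0; i < number_of_classes; i++)
        printf("class[%d] %s (%.2f)\n", indices[i], names[i], confidences[i]);
}

/* Sketch: configure inference from the .tflite weights plus the meta JSON above.
 * 'source' is assumed to already hold a decoded image (e.g. banana.jpg).
 * Return-value checks on the attribute setters are omitted for brevity. */
static int classify_with_meta(const char *model_path, const char *meta_path,
                              mv_source_h source)
{
    mv_engine_config_h cfg = NULL;
    mv_inference_h infer = NULL;
    int ret = mv_engine_config_create(&cfg);
    if (ret != MEDIA_VISION_ERROR_NONE)
        return ret;

    mv_engine_config_set_string_attribute(cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, model_path);
    mv_engine_config_set_string_attribute(cfg, MV_INFERENCE_MODEL_META_FILE_PATH, meta_path);

    ret = mv_inference_create(&infer);
    if (ret == MEDIA_VISION_ERROR_NONE)
        ret = mv_inference_configure(infer, cfg);
    if (ret == MEDIA_VISION_ERROR_NONE)
        ret = mv_inference_prepare(infer);
    if (ret == MEDIA_VISION_ERROR_NONE)
        ret = mv_inference_image_classify(source, infer, NULL, on_classified, NULL);

    if (infer)
        mv_inference_destroy(infer);
    mv_engine_config_destroy(cfg);
    return ret;
}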
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/od_label.txt b/src/utc/capi-media-vision-dl/res/inference/models/od_label.txt
new file mode 100644 (file)
index 0000000..029bab8
--- /dev/null
@@ -0,0 +1,91 @@
+person
+bicycle
+car
+motorcycle
+airplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+street sign
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+hat
+backpack
+umbrella
+shoe
+eye glasses
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+plate
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+couch
+potted plant
+bed
+mirror
+dining table
+window
+desk
+toilet
+door
+tv
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+blender
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush
+hair brush
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/od_tflite_model.tflite b/src/utc/capi-media-vision-dl/res/inference/models/od_tflite_model.tflite
new file mode 100644 (file)
index 0000000..aee1d31
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/models/od_tflite_model.tflite differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/od_tflite_model_meta.json b/src/utc/capi-media-vision-dl/res/inference/models/od_tflite_model_meta.json
new file mode 100644 (file)
index 0000000..0d78373
--- /dev/null
@@ -0,0 +1,69 @@
+{
+  "input": [
+    {
+      "tensor1": {
+        "name": "normalized_input_image_tensor",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 300, 300, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 127.5, 127.5, 127.5 ],
+            "std": [ 127.5, 127.5, 127.5 ]
+          }
+        }
+      }
+    }
+  ],
+
+  "output": [
+    {
+      "tensor1": {
+        "name": "TFLite_Detection_PostProcess:2",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1 ],
+            "top_number": 5,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          }
+        }
+      }
+    },
+    {
+      "tensor2": {
+        "name": "TFLite_Detection_PostProcess",
+        "postprocess": {
+          "box": {
+            "index": [ -1, -1, 1 ],
+            "box_type": "ORIGIN_LEFTTOP",
+            "box_order": [ 1, 0, 3, 2 ],
+            "box_coordinate": "RATIO",
+            "decoding_type": "BYPASS"
+          }
+        }
+      }
+    },
+    {
+      "tensor3": {
+        "name": "TFLite_Detection_PostProcess:1",
+        "postprocess": {
+          "label": {
+            "index": [ -1, 1 ]
+          }
+        }
+      }
+    },
+    {
+      "tensor4": {
+        "name": "TFLite_Detection_PostProcess:3",
+        "postprocess": {
+          "number": {
+            "index": [ 1 ]
+          }
+        }
+      }
+    }
+  ]
+}
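This detection meta file maps the model's four TFLite_Detection_PostProcess outputs onto score, box, label and detection-count tensors, with boxes delivered as ratio coordinates (ORIGIN_LEFTTOP, BYPASS decoding). Results therefore reach the test already decoded into the standard detection callback. The sketch below shows that consumer side, assuming the public mv_inference_object_detected_cb signature; the body is illustrative only.

#include <stdio.h>
#include <mv_common.h>
#include <mv_inference.h>

/* Sketch of the detection consumer: each result pairs a label index/name with a
 * confidence and a rectangle in the coordinates of the source image. */
static void on_objects_detected(mv_source_h source, const int number_of_objects,
                                const int *indices, const char **names,
                                const float *confidences,
                                const mv_rectangle_s *locations, void *user_data)
{
    for (int i = 0; i < number_of_objects; i++)
        printf("%s (%.2f) at [%d,%d %dx%d]\n", names[i], confidences[i],
               locations[i].point.x, locations[i].point.y,
               locations[i].width, locations[i].height);
}

/* After the same configure/prepare sequence as in the classification sketch:
 *     mv_inference_object_detect(source, infer, on_objects_detected, NULL);
 */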
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/pld_mocap.bvh b/src/utc/capi-media-vision-dl/res/inference/models/pld_mocap.bvh
new file mode 100644 (file)
index 0000000..f93d086
--- /dev/null
@@ -0,0 +1,116 @@
+HIERARCHY
+ROOT Hips
+{
+       OFFSET 0.000000 0.000000 0.000000
+       CHANNELS 6 Xposition Yposition Zposition Zrotation Xrotation Yrotation
+       JOINT Chest
+       {
+               OFFSET 0.000000 5.210000 0.000000
+               CHANNELS 3 Zrotation Xrotation Yrotation
+               JOINT Neck
+               {
+                       OFFSET 0.000000 18.650002 0.000000
+                       CHANNELS 3 Zrotation Xrotation Yrotation
+                       JOINT Head
+                       {
+                               OFFSET 0.000000 5.450001 0.000000
+                               CHANNELS 3 Zrotation Xrotation Yrotation
+                               End Site
+                               {
+                                       OFFSET 0.000000 3.869999 0.000000
+                               }
+                       }
+               }
+               JOINT RightCollar
+               {
+                       OFFSET 1.120000 16.230000 1.870000
+                       CHANNELS 3 Zrotation Xrotation Yrotation
+                       JOINT RightUpArm
+                       {
+                               OFFSET 5.540000 0.000000 0.000000
+                               CHANNELS 3 Zrotation Xrotation Yrotation
+                               JOINT RightLowArm
+                               {
+                                       OFFSET 0.000000 -11.960000 0.000000
+                                       CHANNELS 3 Zrotation Xrotation Yrotation
+                                       JOINT RightHand
+                                       {
+                                               OFFSET 0.000000 -9.930000 0.000000
+                                               CHANNELS 3 Zrotation Xrotation Yrotation
+                                               End Site
+                                               {
+                                                       OFFSET 0.000000 -7.000000 0.000000
+                                               }
+                                       }
+                               }
+                       }
+               }
+               JOINT LeftCollar
+               {
+                       OFFSET -1.120000 16.230000 1.870000
+                       CHANNELS 3 Zrotation Xrotation Yrotation
+                       JOINT LeftUpArm
+                       {
+                               OFFSET -6.070000 0.000000 0.000000
+                               CHANNELS 3 Zrotation Xrotation Yrotation
+                               JOINT LeftLowArm
+                               {
+                                       OFFSET 0.000000 -11.820000 0.000000
+                                       CHANNELS 3 Zrotation Xrotation Yrotation
+                                       JOINT LeftHand
+                                       {
+                                               OFFSET 0.000000 -10.650000 0.000000
+                                               CHANNELS 3 Zrotation Xrotation Yrotation
+                                               End Site
+                                               {
+                                                       OFFSET 0.000000 -7.000000 0.000000
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+       JOINT RightUpLeg
+       {
+               OFFSET 3.910000 0.000000 0.000000
+               CHANNELS 3 Zrotation Xrotation Yrotation
+               JOINT RightLowLeg
+               {
+                       OFFSET 0.000000 -18.340000 0.000000
+                       CHANNELS 3 Zrotation Xrotation Yrotation
+                       JOINT RightFoot
+                       {
+                               OFFSET 0.000000 -17.369999 0.000000
+                               CHANNELS 3 Zrotation Xrotation Yrotation
+                               End Site
+                               {
+                                       OFFSET 0.000000 -3.459999 0.000000
+                               }
+                       }
+               }
+       }
+       JOINT LeftUpLeg
+       {
+               OFFSET -3.910000 0.000000 0.000000
+               CHANNELS 3 Zrotation Xrotation Yrotation
+               JOINT LeftLowLeg
+               {
+                       OFFSET 0.000000 -17.629999 0.000000
+                       CHANNELS 3 Zrotation Xrotation Yrotation
+                       JOINT LeftFoot
+                       {
+                               OFFSET 0.000000 -17.139997 0.000000
+                               CHANNELS 3 Zrotation Xrotation Yrotation
+                               End Site
+                               {
+                                       OFFSET 0.000000 -3.750000 0.000000
+                               }
+                       }
+               }
+       }
+}
+MOTION
+Frames: 2
+Frame Time: 0.041667
+8.030000 35.009998 88.360001 -3.410002 14.780000 -164.349993 13.089996 40.299994 -24.599999 7.879999 43.799988 0.000001 -3.610007 -41.450000 5.819997 10.080001 -0.000003 10.209998 97.950001 -23.530003 -2.139997 60.275566 -2.764629 54.273831 0.689997 0.029999 -0.000000 -14.040001 -0.000001 -10.499999 -85.519999 -13.719994 -102.930001 61.910009 -61.179999 65.179997 -1.570001 0.690000 0.020000 43.262770 -10.754126 140.039587 36.023886 12.970090 151.073785 0.000001 -1.140000 -0.000001 -56.152423 -7.908401 12.931058 43.240302 1.483285 -31.399612 0.000000 -23.949997 -0.000000 
+7.810000 35.099998 86.470001 -3.780000 12.940000 -166.969998 12.639998 42.569994 -22.340000 7.669998 43.609993 -0.000001 -4.230008 -41.410003 4.889995 19.099999 0.000004 4.159999 93.119992 -9.689997 -9.429998 132.670182 -81.860029 136.800124 0.699997 0.370000 0.000001 -8.619999 0.000001 -21.819997 -87.310001 -27.569989 -100.089995 56.170000 -61.560003 58.719997 -1.630005 0.950000 0.030000 13.160001 15.440001 -3.560000 7.970004 59.290004 4.969998 0.000000 1.640002 -0.000001 -17.180000 -10.019999 -3.080000 13.560003 53.380005 -18.070000 0.000000 -25.929999 0.000000 
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/pld_mocap_mapping.txt b/src/utc/capi-media-vision-dl/res/inference/models/pld_mocap_mapping.txt
new file mode 100644 (file)
index 0000000..07ce1a2
--- /dev/null
@@ -0,0 +1,15 @@
+Hips,10
+Neck,2
+Head,1
+LeftUpArm,7
+LeftLowArm,8
+LeftHand,9
+RightUpArm,4
+RightLowArm,5
+RightHand,6
+LeftUpLeg,14
+LeftLowLeg,15
+LeftFoot,16
+RightUpLeg,11
+RightLowLeg,12
+RightFoot,13
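The BVH motion capture above and this mapping file are used as a pair: each mapping line ties a BVH joint name to the pose landmark index it corresponds to, which is what the pose-comparison test cases need to build a reference pose. A minimal sketch of that setup follows, assuming the public mv_pose C API; the helper name and path handling are illustrative.

#include <mv_common.h>
#include <mv_inference.h>

/* Sketch: build the reference pose that the pose-comparison UTCs check against. */
static int load_reference_pose(const char *bvh_path, const char *mapping_path,
                               mv_pose_h *out_pose)
{
    mv_pose_h pose = NULL;
    int ret = mv_pose_create(&pose);
    if (ret != MEDIA_VISION_ERROR_NONE)
        return ret;

    /* e.g. bvh_path -> pld_mocap.bvh, mapping_path -> pld_mocap_mapping.txt,
     * both resolved from the test's res/ directory. */
    ret = mv_pose_set_from_file(pose, bvh_path, mapping_path);
    if (ret != MEDIA_VISION_ERROR_NONE) {
        mv_pose_destroy(pose);
        return ret;
    }

    /* The loaded pose can then be scored against a detected landmark result
     * with mv_pose_compare(). */
    *out_pose = pose;
    return MEDIA_VISION_ERROR_NONE;
}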
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/pld_pose_mapping.txt b/src/utc/capi-media-vision-dl/res/inference/models/pld_pose_mapping.txt
new file mode 100644 (file)
index 0000000..0a66dce
--- /dev/null
@@ -0,0 +1,16 @@
+1
+2
+-1
+3
+4
+5
+6
+7
+8
+-1
+9
+10
+11
+12
+13
+14
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/pld_tflite_model.tflite b/src/utc/capi-media-vision-dl/res/inference/models/pld_tflite_model.tflite
new file mode 100644 (file)
index 0000000..ff6044d
Binary files /dev/null and b/src/utc/capi-media-vision-dl/res/inference/models/pld_tflite_model.tflite differ
diff --git a/src/utc/capi-media-vision-dl/res/inference/models/pld_tflite_model_meta.json b/src/utc/capi-media-vision-dl/res/inference/models/pld_tflite_model_meta.json
new file mode 100644 (file)
index 0000000..7b4e49d
--- /dev/null
@@ -0,0 +1,45 @@
+{
+  "input": [
+    {
+      "tensor1": {
+        "name": "image",
+        "shape_type": "NHWC",
+        "shape_dims": [ 1, 192, 192, 3 ],
+        "data_type": "FLOAT32",
+        "color_space": "RGB888",
+        "preprocess": {
+          "normalization": {
+            "mean": [ 0.0, 0.0, 0.0 ],
+            "std": [ 1.0, 1.0, 1.0 ]
+          }
+        }
+      }
+    }
+  ],
+  "output": [
+    {
+      "tensor1": {
+        "name": "Convolutional_Pose_Machine/stage_5_out",
+        "postprocess": {
+          "score": {
+            "index": [ -1, 1, 1, 1 ],
+            "top_number": 1,
+            "threshold": 0.3,
+            "score_type": "NORMAL"
+          },
+          "landmark": {
+            "index": [ -1, 1, 1, 1 ],
+            "landmark_type": "2D_SINGLE",
+            "landmark_coordinate": "PIXEL",
+            "decoding_type": "HEATMAP",
+            "decoding_info": {
+              "heatmap": {
+                "shape_type": "NHWC"
+              }
+            }
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core.c b/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core.c
new file mode 100644 (file)
index 0000000..35db84f
--- /dev/null
@@ -0,0 +1,132 @@
+//
+// Copyright (c) 2014 Samsung Electronics Co., Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the License);
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include <stdio.h>
+#include <string.h>
+#include "tct_common.h"
+
+#ifdef MOBILE
+#include "tct-capi-media-vision-dl-core_mobile.h"
+#endif /* MOBILE */
+#ifdef WEARABLE
+#include "tct-capi-media-vision-dl-core_wearable.h"
+#endif /* WEARABLE */
+#ifdef TV
+#include "tct-capi-media-vision-dl-core_tv.h"
+#endif /* TV */
+#ifdef TIZENIOT
+#include "tct-capi-media-vision-dl-core_tizeniot.h"
+#endif /* TIZENIOT */
+
+#include <malloc.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <glib.h>
+#include <stdbool.h>
+#include <app.h>
+#include <dlog.h>
+
+static bool app_create(void *data)
+{
+       return true;
+}
+
+static void app_control(app_control_h app_control, void *data)
+{
+       char* pszGetTCName = NULL;
+       int i=0, result=0, nRet=0;
+       nRet = app_control_get_extra_data(app_control, "testcase_name", &pszGetTCName);
+       if(nRet != APP_CONTROL_ERROR_NONE)
+       {
+               dlog_print(DLOG_ERROR, "NativeTCT", "[%s:%d] app_control_get_extra_data returns error = %d", __FUNCTION__, __LINE__, nRet);
+               PRINT_UTC_LOG("\\n[%s][Line : %d]Unable to fetch test case name: app_control_get_extra_data API call fails\\n", __FILE__, __LINE__);
+               PRINT_TC_RESULT("%d",1);
+               FREE_MEMORY_TC(pszGetTCName);
+               return;
+       }
+
+       dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Executing TC Name = %s", __FUNCTION__, __LINE__, pszGetTCName);
+       for ( i = 0; tc_array[i].name; i++ )
+       {
+               if ( 0 == strncmp(pszGetTCName, tc_array[i].name, strlen(pszGetTCName)) )
+               {
+                       DUMP_UTC_ERRLOG();
+                       dlog_print(DLOG_INFO, "NativeTCT", "%s : Startup begin", pszGetTCName);
+                       if ( tc_array[i].startup )
+                       {
+                               tc_array[i].startup();
+                       }
+                       dlog_print(DLOG_INFO, "NativeTCT", "%s : Startup end", pszGetTCName);
+
+                       dlog_print(DLOG_INFO, "NativeTCT", "%s : Body begin", pszGetTCName);
+                       result = tc_array[i].function();
+                       dlog_print(DLOG_INFO, "NativeTCT", "%s returns value = %d", pszGetTCName, result);
+                       dlog_print(DLOG_INFO, "NativeTCT", "%s : Body end", pszGetTCName);
+
+                       dlog_print(DLOG_INFO, "NativeTCT", "%s : Cleanup begin", pszGetTCName);
+                       if ( tc_array[i].cleanup )
+                       {
+                               tc_array[i].cleanup();
+                       }
+                       dlog_print(DLOG_INFO, "NativeTCT", "%s : Cleanup end", pszGetTCName);
+
+                       CLOSE_UTC_ERRLOG();
+                       PRINT_TC_RESULT("%d",result);
+                       FREE_MEMORY_TC(pszGetTCName);
+                       return;
+               }
+       }
+
+       dlog_print(DLOG_ERROR, "NativeTCT", "[%s:%d] Unable to execute %s : Unknown Test Case Name", __FUNCTION__, __LINE__, pszGetTCName);
+       PRINT_UTC_LOG("\\n[%s][Line : %d]Unable to execute %s : Unknown Test Case Name\\n", __FILE__, __LINE__, pszGetTCName);
+       PRINT_TC_RESULT("%d",1);
+       FREE_MEMORY_TC(pszGetTCName);
+       return;
+}
+
+static void app_terminate(void *data)
+{
+       dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Application Package is now Terminating", __FUNCTION__, __LINE__);
+}
+
+int main(int argc, char *argv[])
+{
+       int ret = 0;
+       
+
+       ui_app_lifecycle_callback_s event_callback = {0,};
+       event_callback.create = app_create;
+       event_callback.terminate = app_terminate;
+       event_callback.app_control = app_control;
+
+       //setting gcda file location for coverage
+       setenv("GCOV_PREFIX","/tmp",1);
+       dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Coverage *.gcda File location set to /tmp/home/abuild/rpmbuild/BUILD/ ", __FUNCTION__, __LINE__);
+
+       dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Application Main Function is Invoked", __FUNCTION__, __LINE__);
+       ret = ui_app_main(argc, argv, &event_callback, NULL);
+       if (ret != APP_ERROR_NONE)
+       {
+               dlog_print(DLOG_ERROR, "NativeTCT", "Application ui_app_main call gets failed. err = %d", ret);
+               PRINT_UTC_LOG("\\n[%s][Line : %d]Application ui_app_main call gets failed. err = %d\\n", __FILE__, __LINE__, ret);
+               PRINT_TC_RESULT("%d",1);
+               return ret;
+       }
+
+       dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Application Package is Terminated", __FUNCTION__, __LINE__);
+       return ret;
+}
diff --git a/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_mobile.h b/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_mobile.h
new file mode 100755 (executable)
index 0000000..8f4b03b
--- /dev/null
@@ -0,0 +1,161 @@
+//
+// Copyright (c) 2014 Samsung Electronics Co., Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the License);
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef __TCT_CAPI_MEDIA_VISION_DL_NATIVE_H__
+#define __TCT_CAPI_MEDIA_VISION_DL_NATIVE_H__
+
+#include "testcase.h"
+#include "tct_common.h"
+
+extern void utc_capi_media_vision_inference_startup1(void);
+extern void utc_capi_media_vision_inference_cleanup1(void);
+extern void utc_capi_media_vision_inference_startup2(void);
+extern void utc_capi_media_vision_inference_cleanup2(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
+extern void utc_capi_media_vision_face_recognition_startup(void);
+extern void utc_capi_media_vision_face_recognition_cleanup(void);
+
+extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
+extern int utc_mediavision_mv_inference_create_p(void);
+extern int utc_mediavision_mv_inference_create_n(void);
+extern int utc_mediavision_mv_inference_destroy_p(void);
+extern int utc_mediavision_mv_inference_destroy_n(void);
+extern int utc_mediavision_mv_inference_configure_p(void);
+extern int utc_mediavision_mv_inference_configure_n1(void);
+extern int utc_mediavision_mv_inference_configure_n2(void);
+extern int utc_mediavision_mv_inference_prepare_p(void);
+extern int utc_mediavision_mv_inference_prepare_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_p(void);
+extern int utc_mediavision_mv_inference_image_classify_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_n2(void);
+extern int utc_mediavision_mv_inference_object_detect_p(void);
+extern int utc_mediavision_mv_inference_object_detect_n1(void);
+extern int utc_mediavision_mv_inference_object_detect_n2(void);
+extern int utc_mediavision_mv_inference_face_detect_p(void);
+extern int utc_mediavision_mv_inference_face_detect_n1(void);
+extern int utc_mediavision_mv_inference_face_detect_n2(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
+extern int utc_mediavision_mv_inference_get_label_p(void);
+extern int utc_mediavision_mv_inference_get_label_n(void);
+extern int utc_mediavision_mv_inference_get_landmark_p(void);
+extern int utc_mediavision_mv_inference_get_landmark_n(void);
+extern int utc_mediavision_mv_pose_create_p(void);
+extern int utc_mediavision_mv_pose_create_n(void);
+extern int utc_mediavision_mv_pose_destroy_p(void);
+extern int utc_mediavision_mv_pose_destroy_n(void);
+extern int utc_mediavision_mv_pose_set_from_file_p(void);
+extern int utc_mediavision_mv_pose_set_from_file_n1(void);
+extern int utc_mediavision_mv_pose_set_from_file_n2(void);
+extern int utc_mediavision_mv_pose_compare_p(void);
+extern int utc_mediavision_mv_pose_compare_n(void);
+extern int utc_mediavision_mv_face_recognition_create_p(void);
+extern int utc_mediavision_mv_face_recognition_create_n(void);
+extern int utc_mediavision_mv_face_recognition_destroy_p(void);
+extern int utc_mediavision_mv_face_recognition_destroy_n(void);
+extern int utc_mediavision_mv_face_recognition_prepare_p(void);
+extern int utc_mediavision_mv_face_recognition_prepare_n(void);
+extern int utc_mediavision_mv_face_recognition_register_p(void);
+extern int utc_mediavision_mv_face_recognition_register_n1(void);
+extern int utc_mediavision_mv_face_recognition_register_n2(void);
+extern int utc_mediavision_mv_face_recognition_register_n3(void);
+extern int utc_mediavision_mv_face_recognition_inference_p(void);
+extern int utc_mediavision_mv_face_recognition_inference_n1(void);
+extern int utc_mediavision_mv_face_recognition_inference_n2(void);
+extern int utc_mediavision_mv_face_recognition_get_label_p(void);
+extern int utc_mediavision_mv_face_recognition_get_label_n1(void);
+extern int utc_mediavision_mv_face_recognition_get_label_n2(void);
+extern int utc_mediavision_mv_face_recognition_unregister_p(void);
+extern int utc_mediavision_mv_face_recognition_unregister_n1(void);
+extern int utc_mediavision_mv_face_recognition_unregister_n2(void);
+
+testcase tc_array[] = {
+       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_face_recognition_create_p", utc_mediavision_mv_face_recognition_create_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_create_n", utc_mediavision_mv_face_recognition_create_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_destroy_p", utc_mediavision_mv_face_recognition_destroy_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_destroy_n", utc_mediavision_mv_face_recognition_destroy_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_prepare_p", utc_mediavision_mv_face_recognition_prepare_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_prepare_n", utc_mediavision_mv_face_recognition_prepare_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_p", utc_mediavision_mv_face_recognition_register_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n1", utc_mediavision_mv_face_recognition_register_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n2", utc_mediavision_mv_face_recognition_register_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n3", utc_mediavision_mv_face_recognition_register_n3,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_p", utc_mediavision_mv_face_recognition_inference_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_n1", utc_mediavision_mv_face_recognition_inference_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_n2", utc_mediavision_mv_face_recognition_inference_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_p", utc_mediavision_mv_face_recognition_get_label_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_n1", utc_mediavision_mv_face_recognition_get_label_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_n2", utc_mediavision_mv_face_recognition_get_label_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_p", utc_mediavision_mv_face_recognition_unregister_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_n1", utc_mediavision_mv_face_recognition_unregister_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_n2", utc_mediavision_mv_face_recognition_unregister_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {NULL, NULL}
+};
+
+#endif // __TCT_CAPI_MEDIA_VISION_DL_NATIVE_H__
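Each tc_array entry above pairs a test case name with its body function and the startup/cleanup hooks to run around it; the app_control handler in tct-capi-media-vision-dl-core.c matches the incoming "testcase_name" extra against this table and invokes the three in order. The testcase type itself comes from the TCT framework's testcase.h, not from this patch; its assumed shape is sketched below.

/* Assumed layout of the TCT test case descriptor (defined in testcase.h). */
typedef struct testcase_s {
    const char *name;      /* matched against the "testcase_name" app_control extra */
    int (*function)(void); /* test body; 0 means pass */
    void (*startup)(void); /* optional per-test setup, may be NULL */
    void (*cleanup)(void); /* optional per-test teardown, may be NULL */
} testcase;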
diff --git a/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_tizeniot.h b/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_tizeniot.h
new file mode 100755 (executable)
index 0000000..8f4b03b
--- /dev/null
@@ -0,0 +1,161 @@
+//
+// Copyright (c) 2014 Samsung Electronics Co., Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the License);
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef __TCT_CAPI_MEDIA_VISION_DL_NATIVE_H__
+#define __TCT_CAPI_MEDIA_VISION_DL_NATIVE_H__
+
+#include "testcase.h"
+#include "tct_common.h"
+
+extern void utc_capi_media_vision_inference_startup1(void);
+extern void utc_capi_media_vision_inference_cleanup1(void);
+extern void utc_capi_media_vision_inference_startup2(void);
+extern void utc_capi_media_vision_inference_cleanup2(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
+extern void utc_capi_media_vision_face_recognition_startup(void);
+extern void utc_capi_media_vision_face_recognition_cleanup(void);
+
+extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
+extern int utc_mediavision_mv_inference_create_p(void);
+extern int utc_mediavision_mv_inference_create_n(void);
+extern int utc_mediavision_mv_inference_destroy_p(void);
+extern int utc_mediavision_mv_inference_destroy_n(void);
+extern int utc_mediavision_mv_inference_configure_p(void);
+extern int utc_mediavision_mv_inference_configure_n1(void);
+extern int utc_mediavision_mv_inference_configure_n2(void);
+extern int utc_mediavision_mv_inference_prepare_p(void);
+extern int utc_mediavision_mv_inference_prepare_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_p(void);
+extern int utc_mediavision_mv_inference_image_classify_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_n2(void);
+extern int utc_mediavision_mv_inference_object_detect_p(void);
+extern int utc_mediavision_mv_inference_object_detect_n1(void);
+extern int utc_mediavision_mv_inference_object_detect_n2(void);
+extern int utc_mediavision_mv_inference_face_detect_p(void);
+extern int utc_mediavision_mv_inference_face_detect_n1(void);
+extern int utc_mediavision_mv_inference_face_detect_n2(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
+extern int utc_mediavision_mv_inference_get_label_p(void);
+extern int utc_mediavision_mv_inference_get_label_n(void);
+extern int utc_mediavision_mv_inference_get_landmark_p(void);
+extern int utc_mediavision_mv_inference_get_landmark_n(void);
+extern int utc_mediavision_mv_pose_create_p(void);
+extern int utc_mediavision_mv_pose_create_n(void);
+extern int utc_mediavision_mv_pose_destroy_p(void);
+extern int utc_mediavision_mv_pose_destroy_n(void);
+extern int utc_mediavision_mv_pose_set_from_file_p(void);
+extern int utc_mediavision_mv_pose_set_from_file_n1(void);
+extern int utc_mediavision_mv_pose_set_from_file_n2(void);
+extern int utc_mediavision_mv_pose_compare_p(void);
+extern int utc_mediavision_mv_pose_compare_n(void);
+extern int utc_mediavision_mv_face_recognition_create_p(void);
+extern int utc_mediavision_mv_face_recognition_create_n(void);
+extern int utc_mediavision_mv_face_recognition_destroy_p(void);
+extern int utc_mediavision_mv_face_recognition_destroy_n(void);
+extern int utc_mediavision_mv_face_recognition_prepare_p(void);
+extern int utc_mediavision_mv_face_recognition_prepare_n(void);
+extern int utc_mediavision_mv_face_recognition_register_p(void);
+extern int utc_mediavision_mv_face_recognition_register_n1(void);
+extern int utc_mediavision_mv_face_recognition_register_n2(void);
+extern int utc_mediavision_mv_face_recognition_register_n3(void);
+extern int utc_mediavision_mv_face_recognition_inference_p(void);
+extern int utc_mediavision_mv_face_recognition_inference_n1(void);
+extern int utc_mediavision_mv_face_recognition_inference_n2(void);
+extern int utc_mediavision_mv_face_recognition_get_label_p(void);
+extern int utc_mediavision_mv_face_recognition_get_label_n1(void);
+extern int utc_mediavision_mv_face_recognition_get_label_n2(void);
+extern int utc_mediavision_mv_face_recognition_unregister_p(void);
+extern int utc_mediavision_mv_face_recognition_unregister_n1(void);
+extern int utc_mediavision_mv_face_recognition_unregister_n2(void);
+
+testcase tc_array[] = {
+       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_face_recognition_create_p", utc_mediavision_mv_face_recognition_create_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_create_n", utc_mediavision_mv_face_recognition_create_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_destroy_p", utc_mediavision_mv_face_recognition_destroy_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_destroy_n", utc_mediavision_mv_face_recognition_destroy_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_prepare_p", utc_mediavision_mv_face_recognition_prepare_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_prepare_n", utc_mediavision_mv_face_recognition_prepare_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_p", utc_mediavision_mv_face_recognition_register_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n1", utc_mediavision_mv_face_recognition_register_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n2", utc_mediavision_mv_face_recognition_register_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n3", utc_mediavision_mv_face_recognition_register_n3,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_p", utc_mediavision_mv_face_recognition_inference_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_n1", utc_mediavision_mv_face_recognition_inference_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_n2", utc_mediavision_mv_face_recognition_inference_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_p", utc_mediavision_mv_face_recognition_get_label_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_n1", utc_mediavision_mv_face_recognition_get_label_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_n2", utc_mediavision_mv_face_recognition_get_label_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_p", utc_mediavision_mv_face_recognition_unregister_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_n1", utc_mediavision_mv_face_recognition_unregister_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_n2", utc_mediavision_mv_face_recognition_unregister_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {NULL, NULL}
+};
+
+#endif // __TCT_CAPI_MEDIA_VISION_DL_NATIVE_H__
diff --git a/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_tv.h b/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_tv.h
new file mode 100755 (executable)
index 0000000..b7e97b9
--- /dev/null
@@ -0,0 +1,121 @@
+//
+// Copyright (c) 2014 Samsung Electronics Co., Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the License);
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef __TCT_CAPI_MEDIA_VISION_NATIVE_H__
+#define __TCT_CAPI_MEDIA_VISION_NATIVE_H__
+
+#include "testcase.h"
+#include "tct_common.h"
+
+extern void utc_capi_media_vision_inference_startup1(void);
+extern void utc_capi_media_vision_inference_cleanup1(void);
+extern void utc_capi_media_vision_inference_startup2(void);
+extern void utc_capi_media_vision_inference_cleanup2(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
+
+extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
+extern int utc_mediavision_mv_inference_create_p(void);
+extern int utc_mediavision_mv_inference_create_n(void);
+extern int utc_mediavision_mv_inference_destroy_p(void);
+extern int utc_mediavision_mv_inference_destroy_n(void);
+extern int utc_mediavision_mv_inference_configure_p(void);
+extern int utc_mediavision_mv_inference_configure_n1(void);
+extern int utc_mediavision_mv_inference_configure_n2(void);
+extern int utc_mediavision_mv_inference_prepare_p(void);
+extern int utc_mediavision_mv_inference_prepare_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_p(void);
+extern int utc_mediavision_mv_inference_image_classify_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_n2(void);
+extern int utc_mediavision_mv_inference_object_detect_p(void);
+extern int utc_mediavision_mv_inference_object_detect_n1(void);
+extern int utc_mediavision_mv_inference_object_detect_n2(void);
+extern int utc_mediavision_mv_inference_face_detect_p(void);
+extern int utc_mediavision_mv_inference_face_detect_n1(void);
+extern int utc_mediavision_mv_inference_face_detect_n2(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
+extern int utc_mediavision_mv_inference_get_label_p(void);
+extern int utc_mediavision_mv_inference_get_label_n(void);
+extern int utc_mediavision_mv_inference_get_landmark_p(void);
+extern int utc_mediavision_mv_inference_get_landmark_n(void);
+extern int utc_mediavision_mv_pose_create_p(void);
+extern int utc_mediavision_mv_pose_create_n(void);
+extern int utc_mediavision_mv_pose_destroy_p(void);
+extern int utc_mediavision_mv_pose_destroy_n(void);
+extern int utc_mediavision_mv_pose_set_from_file_p(void);
+extern int utc_mediavision_mv_pose_set_from_file_n1(void);
+extern int utc_mediavision_mv_pose_set_from_file_n2(void);
+extern int utc_mediavision_mv_pose_compare_p(void);
+extern int utc_mediavision_mv_pose_compare_n(void);
+
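+/* Test case table consumed by the TCT runner: each entry pairs a test-case
+ * name with its test function and the startup/cleanup fixtures to run around
+ * it. The {NULL, NULL} entry terminates the table. */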
+testcase tc_array[] = {
+       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {NULL, NULL}
+};
+
+#endif // __TCT_CAPI_MEDIA_VISION_NATIVE_H__
diff --git a/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_wearable.h b/src/utc/capi-media-vision-dl/tct-capi-media-vision-dl-core_wearable.h
new file mode 100755 (executable)
index 0000000..8f4b03b
--- /dev/null
@@ -0,0 +1,161 @@
+//
+// Copyright (c) 2014 Samsung Electronics Co., Ltd.
+//
+// Licensed under the Apache License, Version 2.0 (the License);
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef __TCT_CAPI_MEDIA_VISION_NATIVE_H__
+#define __TCT_CAPI_MEDIA_VISION_NATIVE_H__
+
+#include "testcase.h"
+#include "tct_common.h"
+
+extern void utc_capi_media_vision_inference_startup1(void);
+extern void utc_capi_media_vision_inference_cleanup1(void);
+extern void utc_capi_media_vision_inference_startup2(void);
+extern void utc_capi_media_vision_inference_cleanup2(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
+extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
+extern void utc_capi_media_vision_face_recognition_startup(void);
+extern void utc_capi_media_vision_face_recognition_cleanup(void);
+
+extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
+extern int utc_mediavision_mv_inference_create_p(void);
+extern int utc_mediavision_mv_inference_create_n(void);
+extern int utc_mediavision_mv_inference_destroy_p(void);
+extern int utc_mediavision_mv_inference_destroy_n(void);
+extern int utc_mediavision_mv_inference_configure_p(void);
+extern int utc_mediavision_mv_inference_configure_n1(void);
+extern int utc_mediavision_mv_inference_configure_n2(void);
+extern int utc_mediavision_mv_inference_prepare_p(void);
+extern int utc_mediavision_mv_inference_prepare_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_p(void);
+extern int utc_mediavision_mv_inference_image_classify_n1(void);
+extern int utc_mediavision_mv_inference_image_classify_n2(void);
+extern int utc_mediavision_mv_inference_object_detect_p(void);
+extern int utc_mediavision_mv_inference_object_detect_n1(void);
+extern int utc_mediavision_mv_inference_object_detect_n2(void);
+extern int utc_mediavision_mv_inference_face_detect_p(void);
+extern int utc_mediavision_mv_inference_face_detect_n1(void);
+extern int utc_mediavision_mv_inference_face_detect_n2(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
+extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
+extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
+extern int utc_mediavision_mv_inference_get_label_p(void);
+extern int utc_mediavision_mv_inference_get_label_n(void);
+extern int utc_mediavision_mv_inference_get_landmark_p(void);
+extern int utc_mediavision_mv_inference_get_landmark_n(void);
+extern int utc_mediavision_mv_pose_create_p(void);
+extern int utc_mediavision_mv_pose_create_n(void);
+extern int utc_mediavision_mv_pose_destroy_p(void);
+extern int utc_mediavision_mv_pose_destroy_n(void);
+extern int utc_mediavision_mv_pose_set_from_file_p(void);
+extern int utc_mediavision_mv_pose_set_from_file_n1(void);
+extern int utc_mediavision_mv_pose_set_from_file_n2(void);
+extern int utc_mediavision_mv_pose_compare_p(void);
+extern int utc_mediavision_mv_pose_compare_n(void);
+extern int utc_mediavision_mv_face_recognition_create_p(void);
+extern int utc_mediavision_mv_face_recognition_create_n(void);
+extern int utc_mediavision_mv_face_recognition_destroy_p(void);
+extern int utc_mediavision_mv_face_recognition_destroy_n(void);
+extern int utc_mediavision_mv_face_recognition_prepare_p(void);
+extern int utc_mediavision_mv_face_recognition_prepare_n(void);
+extern int utc_mediavision_mv_face_recognition_register_p(void);
+extern int utc_mediavision_mv_face_recognition_register_n1(void);
+extern int utc_mediavision_mv_face_recognition_register_n2(void);
+extern int utc_mediavision_mv_face_recognition_register_n3(void);
+extern int utc_mediavision_mv_face_recognition_inference_p(void);
+extern int utc_mediavision_mv_face_recognition_inference_n1(void);
+extern int utc_mediavision_mv_face_recognition_inference_n2(void);
+extern int utc_mediavision_mv_face_recognition_get_label_p(void);
+extern int utc_mediavision_mv_face_recognition_get_label_n1(void);
+extern int utc_mediavision_mv_face_recognition_get_label_n2(void);
+extern int utc_mediavision_mv_face_recognition_unregister_p(void);
+extern int utc_mediavision_mv_face_recognition_unregister_n1(void);
+extern int utc_mediavision_mv_face_recognition_unregister_n2(void);
+
+testcase tc_array[] = {
+       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
+       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
+       {"utc_mediavision_mv_face_recognition_create_p", utc_mediavision_mv_face_recognition_create_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_create_n", utc_mediavision_mv_face_recognition_create_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_destroy_p", utc_mediavision_mv_face_recognition_destroy_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_destroy_n", utc_mediavision_mv_face_recognition_destroy_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_prepare_p", utc_mediavision_mv_face_recognition_prepare_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_prepare_n", utc_mediavision_mv_face_recognition_prepare_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_p", utc_mediavision_mv_face_recognition_register_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n1", utc_mediavision_mv_face_recognition_register_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n2", utc_mediavision_mv_face_recognition_register_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_register_n3", utc_mediavision_mv_face_recognition_register_n3,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_p", utc_mediavision_mv_face_recognition_inference_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_n1", utc_mediavision_mv_face_recognition_inference_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_inference_n2", utc_mediavision_mv_face_recognition_inference_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_p", utc_mediavision_mv_face_recognition_get_label_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_n1", utc_mediavision_mv_face_recognition_get_label_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_get_label_n2", utc_mediavision_mv_face_recognition_get_label_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_p", utc_mediavision_mv_face_recognition_unregister_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_n1", utc_mediavision_mv_face_recognition_unregister_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {"utc_mediavision_mv_face_recognition_unregister_n2", utc_mediavision_mv_face_recognition_unregister_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
+       {NULL, NULL}
+};
+
+#endif // __TCT_CAPI_MEDIA_VISION_NATIVE_H__
diff --git a/src/utc/capi-media-vision-dl/utc-mv_common.c b/src/utc/capi-media-vision-dl/utc-mv_common.c
new file mode 100755 (executable)
index 0000000..8e30734
--- /dev/null
@@ -0,0 +1,2127 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/mv_barcode.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "assert.h"
+
+#include <media_format.h>
+#include <media_packet.h>
+#include <tbm_surface.h>
+#include <system_info.h>
+#include <json-glib/json-glib.h>
+
+#define DICT_KEY_SIZE 255
+#define DICT_STR_VALUE_SIZE 1024
+#define DICT_ARRAY_STR_SIZE 3
+
+static bool isVisionSupported = false;
+static int gStartupError;
+
+static const char *eng_conf_path = "/usr/share/capi-media-vision/media-vision-config.json";
+static const char *eng_conf_json_attr_section_name = "attributes";
+
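+/* Expected attribute entries parsed from the engine-config JSON, one struct
+ * per supported value type (double, integer, boolean, string, string array). */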
+struct attr_dbl_entry
+{
+    char key[DICT_KEY_SIZE];
+    double value;
+};
+
+struct attr_int_entry
+{
+    char key[DICT_KEY_SIZE];
+    int value;
+};
+
+struct attr_bool_entry
+{
+    char key[DICT_KEY_SIZE];
+    bool value;
+};
+
+struct attr_str_entry
+{
+    char key[DICT_KEY_SIZE];
+    char value[DICT_STR_VALUE_SIZE];
+};
+
+struct attr_array_str_entry
+{
+    char key[DICT_KEY_SIZE];
+    char value[DICT_ARRAY_STR_SIZE][DICT_STR_VALUE_SIZE];
+    int str_n;
+};
+
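+/* Engine configuration handle shared by the common test cases; it is created
+ * in the startup fixture and destroyed in cleanup. _is_broken_config records a
+ * failed mv_create_engine_config() call. */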
+static mv_engine_config_h engine_config = NULL;
+static bool _is_broken_config = false;
+
+static struct attr_dbl_entry  *dict_dbl  = NULL;
+static int dict_dbl_n = 0;
+static struct attr_int_entry  *dict_int  = NULL;
+static int dict_int_n = 0;
+static struct attr_bool_entry *dict_bool = NULL;
+static int dict_bool_n = 0;
+static struct attr_str_entry  *dict_str  = NULL;
+static int dict_str_n = 0;
+static struct attr_array_str_entry *dict_array_str = NULL;
+static int dict_array_n = 0;
+
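+/* Loads the "attributes" array from the media-vision engine configuration file
+ * (eng_conf_path) and fills the per-type dictionaries above. Each JSON entry is
+ * expected to carry "name", "type" and "value" members; entries of unsupported
+ * type are skipped with a message. Returns false on parse errors. */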
+bool _parse_attr_dictionaries(const char *conf_file)
+{
+    JsonParser *parser;
+    GError *error = NULL;
+
+    parser = json_parser_new();
+    json_parser_load_from_file(parser, conf_file, &error);
+    if (error)
+    {
+        g_print("Unable to parse file '%s': %s\n", conf_file, error->message);
+        g_error_free(error);
+        g_object_unref(parser);
+        return false;
+    }
+
+    JsonNode *root = json_parser_get_root(parser);
+    if (JSON_NODE_OBJECT != json_node_get_node_type(root))
+    {
+        printf("Can't parse tests configuration file. Incorrect json markup.\n");
+        g_object_unref(parser);
+        return false;
+    }
+
+    JsonObject *jobj = json_node_get_object(root);
+
+    if (!json_object_has_member(jobj, eng_conf_json_attr_section_name))
+    {
+        printf("Can't parse tests configuration file. "
+               "No '%s' section.\n", eng_conf_json_attr_section_name);
+        g_object_unref(parser);
+        return false;
+    }
+
+    JsonNode *attr_node =
+                  json_object_get_member(jobj, eng_conf_json_attr_section_name);
+
+    if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node))
+    {
+        printf("Can't parse tests configuration file. "
+               "'%s' section isn't array.\n", eng_conf_json_attr_section_name);
+        g_object_unref(parser);
+        return false;
+    }
+
+    JsonArray *attr_array = json_node_get_array(attr_node);
+
+    const guint attr_num = json_array_get_length(attr_array);
+
+    guint attr_ind = 0;
+    for (; attr_ind < attr_num; ++attr_ind)
+    {
+        JsonNode *attr_node = json_array_get_element(attr_array, attr_ind);
+
+        if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node))
+        {
+            printf("Attribute %u wasn't parsed from json file.", attr_ind);
+            continue;
+        }
+
+        JsonObject *attr_obj = json_node_get_object(attr_node);
+
+        if (!json_object_has_member(attr_obj, "name") ||
+            !json_object_has_member(attr_obj, "type") ||
+            !json_object_has_member(attr_obj, "value"))
+        {
+            printf("Attribute %u wasn't parsed from json file.", attr_ind);
+            continue;
+        }
+
+        const char *str_name =
+                       (char*)json_object_get_string_member(attr_obj, "name");
+        assert_geq(DICT_KEY_SIZE, strlen(str_name));
+        const char *str_type =
+                       (char*)json_object_get_string_member(attr_obj, "type");
+
+        if (0 == strcmp("double", str_type))
+        {
+            dict_dbl = (struct attr_dbl_entry*)realloc(dict_dbl, ++dict_dbl_n * sizeof(struct attr_dbl_entry));
+            snprintf(dict_dbl[dict_dbl_n-1].key, DICT_KEY_SIZE, "%s", str_name);
+            dict_dbl[dict_dbl_n-1].value =
+                    (double)json_object_get_double_member(attr_obj, "value");
+        }
+        else if (0 == strcmp("integer", str_type))
+        {
+            dict_int = (struct attr_int_entry*)realloc(dict_int, ++dict_int_n * sizeof(struct attr_int_entry));
+            snprintf(dict_int[dict_int_n-1].key, DICT_KEY_SIZE, "%s", str_name);
+            dict_int[dict_int_n-1].value =
+                    (int)json_object_get_int_member(attr_obj, "value");
+        }
+        else if (0 == strcmp("boolean", str_type))
+        {
+            dict_bool = (struct attr_bool_entry*)realloc(dict_bool, ++dict_bool_n * sizeof(struct attr_bool_entry));
+            snprintf(dict_bool[dict_bool_n-1].key, DICT_KEY_SIZE, "%s", str_name);
+            dict_bool[dict_bool_n-1].value =
+                    (bool)json_object_get_boolean_member(attr_obj, "value");
+        }
+        else if (0 == strcmp("string", str_type))
+        {
+            dict_str = (struct attr_str_entry*)realloc(dict_str, ++dict_str_n * sizeof(struct attr_str_entry));
+            snprintf(dict_str[dict_str_n-1].key, DICT_KEY_SIZE, "%s", str_name);
+            const char *str_value = (const char*)json_object_get_string_member(attr_obj, "value");
+            assert_geq(DICT_STR_VALUE_SIZE, strlen(str_value));
+            snprintf(dict_str[dict_str_n-1].value, DICT_STR_VALUE_SIZE, "%s", str_value);
+        } else if (0 == strcmp("array", str_type)){
+            const char *subTypeStr = (char*)json_object_get_string_member(attr_obj, "subtype");
+            if (0 == strcmp("string", subTypeStr)) {
+                dict_array_str = (struct attr_array_str_entry*)realloc(dict_array_str, ++dict_array_n * sizeof(struct attr_array_str_entry));
+                snprintf(dict_array_str[dict_array_n-1].key, DICT_KEY_SIZE, "%s", str_name);
+
+                JsonArray *attr_array = json_object_get_array_member(attr_obj, "value");
+                dict_array_str[dict_array_n-1].str_n = json_array_get_length(attr_array);
+                assert_geq(DICT_ARRAY_STR_SIZE, dict_array_str[dict_array_n-1].str_n);
+
+                for (unsigned int item = 0; item < dict_array_str[dict_array_n-1].str_n; ++item) {
+                    snprintf(dict_array_str[dict_array_n-1].value[item], DICT_STR_VALUE_SIZE, "%s", json_array_get_string_element(attr_array, item));
+                }
+            }
+        }
+        else
+        {
+            printf("Attribute %s wasn't parsed from json file. Type isn't supported.", str_name);
+            continue;
+        }
+    }
+
+    g_object_unref(parser);
+
+    return true;
+}
+
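+/* Dictionary lookups used by the attribute callback below: each helper returns
+ * true when the key was found in the parsed configuration and, if an output
+ * pointer is given, copies the expected value out. */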
+bool _is_supported_dbl_attr(const char *key, double *value)
+{
+    int ind = 0;
+    for(; ind < dict_dbl_n; ++ind)
+    {
+        if (0 == strcmp(dict_dbl[ind].key, key))
+        {
+            if (value != NULL)
+            {
+                *value = dict_dbl[ind].value;
+            }
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool _is_supported_int_attr(const char *key, int *value)
+{
+    int ind = 0;
+    for(; ind < dict_int_n; ++ind)
+    {
+        if (0 == strcmp(dict_int[ind].key, key))
+        {
+            if (value != NULL)
+            {
+                *value = dict_int[ind].value;
+            }
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool _is_supported_bool_attr(const char *key, bool *value)
+{
+    int ind = 0;
+    for(; ind < dict_bool_n; ++ind)
+    {
+        if (0 == strcmp(dict_bool[ind].key, key))
+        {
+            if (value != NULL)
+            {
+                *value = dict_bool[ind].value;
+            }
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool _is_supported_str_attr(const char *key, char *value)
+{
+    int ind = 0;
+    for(; ind < dict_str_n; ++ind)
+    {
+        if (0 == strcmp(dict_str[ind].key, key))
+        {
+            if (value != NULL)
+            {
+                snprintf(value, 1024, "%s", dict_str[ind].value);
+            }
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool _is_supported_array_str_attr(const char *key, char (*value)[DICT_ARRAY_STR_SIZE][DICT_STR_VALUE_SIZE], int *size)
+{
+    int ind = 0;
+    for(; ind < dict_array_n; ++ind)
+    {
+        if (0 == strcmp(dict_array_str[ind].key, key))
+        {
+            if (value != NULL && size != NULL)
+            {
+                *size = dict_array_str[ind].str_n;
+                for (int item = 0; item < (*size); ++item) {
+                    snprintf((*value)[item], 1024, "%s", dict_array_str[ind].value[item]);
+                }
+            }
+            return true;
+        }
+    }
+
+    return false;
+}
+
+/**
+ * @function           utc_capi_media_vision_common_startup
+ * @description                Called before each test
+ * @parameter          NA
+ * @return                     NA
+ */
+void utc_capi_media_vision_common_startup(void)
+{
+    printf("capi-media-vision tests STARTUP is launched\n");
+
+    bool isFaceRecognitionSupported = false;
+    bool isImageRecognitionSupported = false;
+    bool isBarcodeDetectionSupported = false;
+    bool isBarcodeGenerationSupported = false;
+    bool isVisionInferenceImageSupported = false;
+    bool isVisionInferenceFaceSupported = false;
+
+    system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition", &isFaceRecognitionSupported);
+    system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition", &isImageRecognitionSupported);
+    system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported);
+    system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported);
+    system_info_get_platform_bool("http://tizen.org/feature/vision.inference.image", &isVisionInferenceImageSupported);
+    system_info_get_platform_bool("http://tizen.org/feature/vision.inference.face", &isVisionInferenceFaceSupported);
+
+    if (isFaceRecognitionSupported || isImageRecognitionSupported ||
+        isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
+        isVisionInferenceImageSupported || isVisionInferenceFaceSupported)
+        isVisionSupported = true;
+    else
+        isVisionSupported = false;
+
+    gStartupError = MEDIA_VISION_ERROR_NONE;
+
+    if (!engine_config)
+    {
+        gStartupError = mv_create_engine_config(&engine_config);
+
+        if (MEDIA_VISION_ERROR_NONE != gStartupError)
+        {
+            _is_broken_config = true;
+            engine_config = NULL;
+
+            printf("mv_engine_config_h create is failed\n");
+        }
+
+        if (!_parse_attr_dictionaries(eng_conf_path))
+        {
+            printf("Loading of attributes supported by engine "
+                   "configuration is failed\n");
+        }
+    }
+
+    printf("capi-media-vision tests STARTUP is completed\n");
+}
+
+/**
+ * @function           utc_capi_media_vision_common_cleanup
+ * @description                Called after each test
+ * @parameter          NA
+ * @return                     NA
+ */
+void utc_capi_media_vision_common_cleanup(void)
+{
+    printf("capi-media-vision tests CLEANUP is launched\n");
+
+    if (engine_config)
+    {
+        mv_destroy_engine_config(engine_config);
+        engine_config = NULL;
+    }
+
+    if (NULL != dict_dbl)
+    {
+        free(dict_dbl);
+        dict_dbl = NULL;
+        dict_dbl_n = 0;
+    }
+
+    if (NULL != dict_int)
+    {
+        free(dict_int);
+        dict_int = NULL;
+        dict_int_n = 0;
+    }
+
+    if (NULL != dict_bool)
+    {
+        free(dict_bool);
+        dict_bool = NULL;
+        dict_bool_n = 0;
+    }
+    if (NULL != dict_str)
+    {
+        free(dict_str);
+        dict_str = NULL;
+        dict_str_n = 0;
+    }
+
+    if (NULL != dict_array_str)
+    {
+        free(dict_array_str);
+        dict_array_str = NULL;
+        dict_array_n = 0;
+    }
+
+    printf("capi-media-vision tests CLEANUP is completed\n");
+}
+
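+/* Allocates an 800x600, 4-channel test buffer filled with 0xFF. The caller owns
+ * the memory and must free() it; on allocation failure *buffer stays NULL. */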
+void prepareTestBuffer(unsigned char **buffer)
+{
+    const size_t dataSize = 800 * 600 * 4; // Width * Height * Channels
+    *buffer = (unsigned char *) malloc(dataSize * sizeof(unsigned char));
+    if (*buffer == NULL)
+    {
+        return;
+    }
+
+    size_t i;
+    for (i = 0; i < dataSize; ++i)
+    {
+        (*buffer)[i] = 255; // fill every byte with 0xFF (a plain white image)
+    }
+}
+
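+/* media_packet finalize callback passed to media_packet_create_from_tbm_surface();
+ * returning MEDIA_PACKET_FINALIZE requests normal finalization of the packet. */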
+int _finalize_callback(media_packet_h packet, int err, void *userdata)
+{
+    return MEDIA_PACKET_FINALIZE;
+}
+
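+/* Callback invoked once per attribute supported by the engine configuration.
+ * It reads the attribute's default value from engine_config, compares it with
+ * the value parsed from the JSON dictionaries, and clears the bool pointed to
+ * by user_data on any mismatch or lookup failure. It always returns true so
+ * the iteration continues over the remaining attributes. */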
+bool _attribute_supported_callback(
+        mv_config_attribute_type_e attribute_type,
+        const char *attribute_name, void *user_data)
+{
+    printf("\nCallback call for attribute [%s]\n", attribute_name);
+
+    bool *isCorrect = NULL;
+    if (user_data != NULL)
+    {
+        isCorrect = (bool*)user_data;
+    }
+    else
+    {
+        printf("user_data wasn't passed to the callback. Failed\n");
+        return false;
+    }
+
+    double dbl_value = 0.0;
+    double real_dbl_value = -1.0;
+    int int_value = 0;
+    int real_int_value = -1;
+    bool bool_value = false;
+    bool real_bool_value = true;
+    char str_value[DICT_STR_VALUE_SIZE] = "a";
+    char *real_str_value = NULL;
+    char array_str_value[DICT_ARRAY_STR_SIZE][DICT_STR_VALUE_SIZE] = {"a",};
+    int array_str_size = 0;
+    char **real_array_str_value = NULL;
+    int real_array_str_size = -1;
+    bool is_supported = false;
+    bool are_exp_act_equal = false;
+    int item;
+
+    switch (attribute_type)
+    {
+        case MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE:
+            is_supported = _is_supported_dbl_attr(attribute_name, &dbl_value);
+            if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+                    mv_engine_config_get_double_attribute(
+                            engine_config, attribute_name, &real_dbl_value))
+            {
+                printf("Default double attribute %s wasn't set in engine "
+                        "configuration by default. Failed\n", attribute_name);
+                *isCorrect = false;
+                printf("FAILED\n");
+                return true;
+            }
+            printf("Expected value: [%f] | Real value: [%f]\n", dbl_value, real_dbl_value);
+            are_exp_act_equal = dbl_value == real_dbl_value;
+            break;
+        case MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER:
+            is_supported = _is_supported_int_attr(attribute_name, &int_value);
+            if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+                    mv_engine_config_get_int_attribute(
+                            engine_config, attribute_name, &real_int_value))
+            {
+                printf("Default integer attribute %s wasn't set in engine "
+                        "configuration by default. Failed\n", attribute_name);
+                *isCorrect = false;
+                printf("FAILED\n");
+                return true;
+            }
+            printf("Expected value: [%i] | Real value: [%i]\n", int_value, real_int_value);
+            are_exp_act_equal = int_value == real_int_value;
+            break;
+        case MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN:
+            is_supported = _is_supported_bool_attr(attribute_name, &bool_value);
+            if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+                    mv_engine_config_get_bool_attribute(
+                            engine_config, attribute_name, &real_bool_value))
+            {
+                printf("Default bool attribute %s wasn't set in engine "
+                        "configuration by default. Failed\n", attribute_name);
+                *isCorrect = false;
+                printf("FAILED\n");
+                return true;
+            }
+            printf("Expected value: [%s] | Real value: [%s]\n",
+                    bool_value ?      "TRUE" : "FALSE",
+                    real_bool_value ? "TRUE" : "FALSE");
+            are_exp_act_equal = bool_value == real_bool_value;
+            break;
+        case MV_ENGINE_CONFIG_ATTR_TYPE_STRING:
+            is_supported = _is_supported_str_attr(attribute_name, str_value);
+            if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+                    mv_engine_config_get_string_attribute(
+                            engine_config, attribute_name, &real_str_value))
+            {
+                printf("Default string attribute %s wasn't set in engine "
+                        "configuration by default. Failed\n", attribute_name);
+                *isCorrect = false;
+                free(real_str_value);
+                printf("FAILED\n");
+                return true;
+            }
+            printf("Expected value: [%s] | Real value: [%s]\n", str_value, real_str_value);
+            are_exp_act_equal = 0 == strcmp(str_value, real_str_value);
+            free(real_str_value);
+            break;
+        case MV_ENGINE_CONFIG_ATTR_TYPE_ARRAY_STRING:
+            is_supported = _is_supported_array_str_attr(attribute_name, &array_str_value, &array_str_size);
+            if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+                    mv_engine_config_get_array_string_attribute(
+                            engine_config, attribute_name, &real_array_str_value, &real_array_str_size))
+            {
+                printf("Default string attribute %s wasn't set n engine "
+                        "configuration by default. Failed\n", attribute_name);
+                *isCorrect = false;
+                for(item = 0; item < real_array_str_size; ++item) {
+                    free(real_array_str_value[item]);
+                }
+                free(real_array_str_value);
+                printf("FAILED\n");
+                return true;
+            }
+
+            if (array_str_size != real_array_str_size)
+                are_exp_act_equal = false;
+            else {
+                are_exp_act_equal = true;
+                for(item = 0; item < real_array_str_size; ++item) {
+                    printf("Expected value: [%s] | Real value: [%s]\n",
+                        array_str_value[item], real_array_str_value[item]);
+                    if (0 != strcmp(array_str_value[item], real_array_str_value[item])) {
+                        are_exp_act_equal = false;
+                        break;
+                    }
+                }
+            }
+
+            for(item = 0; item < real_array_str_size; ++item) {
+                free(real_array_str_value[item]);
+            }
+            free(real_array_str_value);
+
+            break;
+        default:
+            printf("Attribute type received in mv_supported_attribute_cb "
+                    "is not supported. Failed");
+            *isCorrect = false;
+            printf("FAILED\n");
+            return true;
+    }
+
+    // Attribute has to be supported and has correct default value:
+    if (is_supported && are_exp_act_equal)
+    {
+        printf("PASSED\n");
+    }
+    else
+    {
+        *isCorrect = false;
+        printf("FAILED\n");
+    }
+
+    return true;
+}
+
+/**
+ * @brief Positive test case of mv_create_source()
+ * @testcase           utc_mediavision_mv_create_source_p
+ * @since_tizen                2.4
+ * @description                Create media source handle
+ */
+int utc_mediavision_mv_create_source_p(void)
+{
+    printf("Inside mv_create_source_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+
+    int ret = mv_create_source(&source);
+    assert_eq(ret, MEDIA_VISION_ERROR_NONE);
+    assert_neq(source, (void*) NULL);
+
+    ret = mv_destroy_source(source);
+    assert_eq(ret, MEDIA_VISION_ERROR_NONE);
+
+    printf("Before return mv_create_source_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_create_source()
+ * @testcase           utc_mediavision_mv_create_source_n
+ * @since_tizen                2.4
+ * @description                Create media source handle,
+ *                                                             but fail because input handle is null
+ */
+int utc_mediavision_mv_create_source_n(void)
+{
+    printf("Inside mv_create_source_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    const int ret = mv_create_source(NULL);
+    assert_eq(ret, MEDIA_VISION_ERROR_INVALID_PARAMETER);
+
+    printf("Before return mv_create_source_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_destroy_source()
+ * @testcase           utc_mediavision_mv_destroy_source_p
+ * @since_tizen                2.4
+ * @description                Destroy media source handle
+ */
+int utc_mediavision_mv_destroy_source_p(void)
+{
+    printf("Inside mv_destroy_source_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+
+    int ret = mv_create_source(&source);
+    assert_eq(ret, MEDIA_VISION_ERROR_NONE);
+
+    ret = mv_destroy_source(source);
+    assert_eq(ret, MEDIA_VISION_ERROR_NONE);
+
+    printf("Before return mv_destroy_source_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_destroy_source()
+ * @testcase           utc_mediavision_mv_destroy_source_n
+ * @since_tizen                2.4
+ * @description                Destroy media source handle
+ *                                                             but fail because input handle is null
+ */
+int utc_mediavision_mv_destroy_source_n(void)
+{
+    printf("Inside mv_destroy_source_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+
+    const int ret = mv_destroy_source(source);
+    assert_eq(ret, MEDIA_VISION_ERROR_INVALID_PARAMETER);
+
+    printf("Before return mv_destroy_source_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_source_fill_by_media_packet()
+ * @testcase           utc_mediavision_mv_source_fill_by_media_packet_p
+ * @since_tizen                2.4
+ * @description                Fill data of media source handle from media packet
+ */
+int utc_mediavision_mv_source_fill_by_media_packet_p(void)
+{
+    printf("Inside mv_source_fill_by_media_packet_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    tbm_format *tbm_formats = NULL;
+    uint32_t format_num = 0;
+
+    int ret = tbm_surface_query_formats(&tbm_formats, &format_num);
+    if (TBM_SURFACE_ERROR_NONE != ret)
+    {
+        printf("Failed to get formats supported by the system. "
+                "Impossible to create media packet.\n"
+                "SKIPPED\n");
+        printf("Before return mv_source_fill_by_media_packet_p\n");
+    }
+
+    assert_eq(TBM_SURFACE_ERROR_NONE, ret);
+
+    const unsigned int width = 800;
+    const unsigned int height = 600;
+
+    tbm_surface_h tbm_surface = NULL;
+    int ind = 0;
+    for (; ind < format_num; ++ind)
+    {
+        if (TBM_FORMAT_YUV420 == tbm_formats[ind])
+        {
+            tbm_surface = tbm_surface_create(width, height, TBM_FORMAT_YUV420);
+            if (NULL == tbm_surface)
+            {
+                printf("Failed to create tbm_surface. "
+                        "Impossible to create media packet.\n"
+                        "SKIPPED\n");
+            }
+            break;
+        }
+    }
+
+    assert_neq(NULL, tbm_surface);
+
+    if (ind == format_num)
+    {
+        printf("Format YUV420 isn't supported. "
+                "Impossible to create media packet.\n"
+                "SKIPPED\n");
+        if (TBM_SURFACE_ERROR_NONE != tbm_surface_destroy(tbm_surface))
+        {
+            printf("Failed to destroy TBM surface. Memory leaked.\n");
+        }
+    }
+
+    assert_lt(ind, format_num);
+
+    tbm_surface_info_s surface_info;
+    ret = tbm_surface_map(tbm_surface,
+            TBM_SURF_OPTION_READ|TBM_SURF_OPTION_WRITE, &surface_info);
+    if (TBM_SURFACE_ERROR_NONE != ret)
+    {
+        printf("Fail to map tbm_surface. "
+                "Impossible to create media packet.\n"
+                "SKIPPED\n");
+        if (TBM_SURFACE_ERROR_NONE != tbm_surface_destroy(tbm_surface))
+        {
+            printf("Failed to destroy TBM surface. Memory leaked.\n");
+        }
+    }
+
+    assert_eq(TBM_SURFACE_ERROR_NONE, ret);
+
+    for (ind = 0; ind < surface_info.num_planes; ++ind)
+    {
+        memset(surface_info.planes[ind].ptr, 0xFF, surface_info.planes[ind].size);
+    }
+
+    media_format_h format = NULL;
+    if (MEDIA_FORMAT_ERROR_NONE !=
+            (ret = media_format_create(&format)) ||
+        MEDIA_FORMAT_ERROR_NONE !=
+            (ret = media_format_set_video_mime(format, MEDIA_FORMAT_RGB888)) ||
+        MEDIA_FORMAT_ERROR_NONE !=
+            (ret = media_format_set_video_width(format, width)) ||
+        MEDIA_FORMAT_ERROR_NONE !=
+            (ret = media_format_set_video_height(format, height)))
+    {
+        printf("Fail to create media_format_h handle. "
+               "Impossible to create media packet.\n"
+               "SKIPPED\n");
+        if (format)
+        {
+            media_format_unref(format);
+        }
+        if (TBM_SURFACE_ERROR_NONE != tbm_surface_destroy(tbm_surface))
+        {
+            printf("Failed to destroy TBM surface. Memory leaked.\n");
+        }
+    }
+
+    assert_eq(MEDIA_FORMAT_ERROR_NONE, ret);
+
+    media_packet_h packet = NULL;
+    ret = media_packet_create_from_tbm_surface(
+            format, tbm_surface, _finalize_callback, NULL, &packet);
+    if (MEDIA_PACKET_ERROR_NONE != ret)
+    {
+        printf("Impossible to create media packet.\n"
+               "SKIPPED\n");
+
+        if (packet && MEDIA_PACKET_ERROR_NONE != media_packet_destroy(packet))
+        {
+            printf("Failed to destroy media packet. Memory leaked.\n");
+        }
+
+        media_format_unref(format);
+    }
+
+    assert_eq(MEDIA_PACKET_ERROR_NONE, ret);
+
+    mv_source_h source = NULL;
+    assert_eq(MEDIA_VISION_ERROR_NONE, mv_create_source(&source));
+    assert_eq(MEDIA_VISION_ERROR_NONE,
+            mv_source_fill_by_media_packet(source, packet));
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, mv_destroy_source(source));
+    assert_eq(MEDIA_PACKET_ERROR_NONE, media_packet_destroy(packet));
+    assert_eq(MEDIA_FORMAT_ERROR_NONE, media_format_unref(format));
+    assert_eq(TBM_SURFACE_ERROR_NONE, tbm_surface_destroy(tbm_surface));
+
+    printf("PASSED\n");
+
+    printf("Before return mv_source_fill_by_media_packet_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_source_fill_by_media_packet()
+ * @testcase           utc_mediavision_mv_source_fill_by_media_packet_n
+ * @since_tizen                2.4
+ * @description                Fill data of media source handle
+ *                                                             but fail because media packet is null
+ */
+int utc_mediavision_mv_source_fill_by_media_packet_n(void)
+{
+    printf("Inside mv_source_fill_by_media_packet_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+
+    int ret = mv_create_source(&source);
+    assert_eq(ret, MEDIA_VISION_ERROR_NONE);
+
+    media_packet_h packet = NULL;
+
+    ret = mv_source_fill_by_media_packet(source, packet);
+    assert_eq(ret, MEDIA_VISION_ERROR_INVALID_PARAMETER);
+
+    ret = mv_destroy_source(source);
+    assert_eq(ret, MEDIA_VISION_ERROR_NONE);
+
+    printf("Before return mv_source_fill_by_media_packet_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_source_fill_by_buffer()
+ * @testcase           utc_mediavision_mv_source_fill_by_buffer_p
+ * @since_tizen                2.4
+ * @description                Fill data of media source handle from buffer
+ */
+int utc_mediavision_mv_source_fill_by_buffer_p(void)
+{
+    printf("Inside mv_source_fill_by_buffer_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+    mv_create_source(&source);
+    const size_t dataSize = 800 * 600 * 4;
+    unsigned char *data = NULL;
+    prepareTestBuffer(&data);
+
+    const int ret = mv_source_fill_by_buffer(source, data, dataSize, 800, 600, 4);
+
+    free(data);
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_neq((void*)NULL, source);
+
+    mv_destroy_source(source);
+
+    printf("Before return mv_source_fill_by_buffer_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_source_fill_by_buffer()
+ * @testcase           utc_mediavision_mv_source_fill_by_buffer_n
+ * @since_tizen                2.4
+ * @description                Fill data of media source handle
+ *                                                             but fail because handle is null
+ */
+int utc_mediavision_mv_source_fill_by_buffer_n(void)
+{
+    printf("Inside mv_source_fill_by_buffer_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h fakeSource = NULL;
+    unsigned char *data = NULL;
+    const size_t dataSize = 800 * 600 * 4;
+    prepareTestBuffer(&data);
+
+    const int ret = mv_source_fill_by_buffer(fakeSource, data, dataSize, 800, 400, 4);
+
+    free(data);
+
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_source_fill_by_buffer_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_source_clear()
+ * @testcase           utc_mediavision_mv_source_clear_p
+ * @since_tizen                2.4
+ * @description                Clear the data of media source handle
+ */
+int utc_mediavision_mv_source_clear_p(void)
+{
+    printf("Inside mv_source_clear_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+    mv_create_source(&source);
+    unsigned char *data = NULL;
+    prepareTestBuffer(&data);
+
+    int ret = mv_source_fill_by_buffer(source, data, 800 * 600 * 4, 800, 600, 4);
+
+    free(data);
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_source_clear(source);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    mv_destroy_source(source);
+
+    printf("Before return mv_source_clear_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_source_clear()
+ * @testcase           utc_mediavision_mv_source_clear_n
+ * @since_tizen                2.4
+ * @description                Clear the data of media source handle
+ *                                                             but fail because handle is null
+ */
+int utc_mediavision_mv_source_clear_n(void)
+{
+    printf("Inside mv_source_clear_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+
+    const int ret = mv_source_clear(source);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_source_clear_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_source_get_buffer()
+ * @testcase           utc_mediavision_mv_source_get_buffer_p
+ * @since_tizen                2.4
+ * @description                Get the data pointer from media source handle
+ */
+int utc_mediavision_mv_source_get_buffer_p(void)
+{
+    printf("Inside mv_source_get_buffer_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+    mv_create_source(&source);
+    unsigned char *expectedData = NULL;
+    size_t dataSize = 0;
+
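+    /* Querying the buffer of a freshly created, still empty source is expected to succeed. */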
+    int ret = mv_source_get_buffer(source, &expectedData, &dataSize);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    prepareTestBuffer(&expectedData);
+
+    ret = mv_source_fill_by_buffer(source, expectedData, 800 * 600 * 4, 800, 600, 4);
+    if (ret != MEDIA_VISION_ERROR_NONE)
+        goto _ERR;
+
+    unsigned char *actualData = NULL;
+    size_t actualDataSize = 0;
+
+    ret = mv_source_get_buffer(source, &actualData, &actualDataSize);
+    if (ret != MEDIA_VISION_ERROR_NONE || actualDataSize != (800 * 600 * 4))
+        goto _ERR;
+
+    int i;
+    bool isEqual = true;
+    for (i = 0; i < actualDataSize; ++i)
+    {
+        isEqual = (expectedData[i] == actualData[i]);
+        if (isEqual == false)
+           break;
+    }
+
+_ERR:
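+    /* Release resources before asserting so a failed check does not leak the buffer or the source. */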
+    free(expectedData);
+    mv_destroy_source(source);
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(actualDataSize, 800 * 600 * 4);
+    assert_eq(true, isEqual);
+
+    printf("Before return mv_source_get_buffer_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_source_get_buffer()
+ * @testcase           utc_mediavision_mv_source_get_buffer_n
+ * @since_tizen                2.4
+ * @description                Get the data pointer from media source handle but fail
+ *                                                             because handle is null
+ */
+int utc_mediavision_mv_source_get_buffer_n(void)
+{
+    printf("Inside mv_source_get_buffer_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h fakeSource = NULL;
+    unsigned char *data = NULL;
+    size_t dataSize = 0;
+
+    const int ret = mv_source_get_buffer(fakeSource, &data, &dataSize);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_source_get_buffer_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_source_get_height()
+ * @testcase           utc_mediavision_mv_source_get_height_p
+ * @since_tizen                2.4
+ * @description                Get the height from media source handle
+ */
+int utc_mediavision_mv_source_get_height_p(void)
+{
+    printf("Inside mv_source_get_height_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+    mv_create_source(&source);
+    size_t height = 0;
+    unsigned char *data = NULL;
+    prepareTestBuffer(&data);
+
+    int ret = mv_source_fill_by_buffer(source, data, 800 * 600 * 4, 800, 600, 4);
+    if (ret == MEDIA_VISION_ERROR_NONE)
+       ret = mv_source_get_height(source, &height);
+
+    free(data);
+    mv_destroy_source(source);
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(600, height);
+
+    printf("Before return mv_source_get_height_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_source_get_height()
+ * @testcase           utc_mediavision_mv_source_get_height_n
+ * @since_tizen                2.4
+ * @description                Get the height from media source handle but fail
+ *                                                             because handle is null
+ */
+int utc_mediavision_mv_source_get_height_n(void)
+{
+    printf("Inside mv_source_get_height_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h fakeSource = NULL;
+    size_t height = 0;
+
+    const int ret = mv_source_get_height(fakeSource, &height);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_source_get_height_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_source_get_width()
+ * @testcase           utc_mediavision_mv_source_get_width_p
+ * @since_tizen                2.4
+ * @description                Get the width from media source handle
+ */
+int utc_mediavision_mv_source_get_width_p(void)
+{
+    printf("Inside mv_source_get_width_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+    mv_create_source(&source);
+    size_t width = 0;
+    unsigned char *data = NULL;
+    prepareTestBuffer(&data);
+
+    int ret = mv_source_fill_by_buffer(source, data, 800 * 600 * 4, 800, 600, 4);
+    if (ret == MEDIA_VISION_ERROR_NONE)
+        ret = mv_source_get_width(source, &width);
+
+    free(data);
+    mv_destroy_source(source);
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(800, width);
+
+    printf("Before return mv_source_get_width_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_source_get_width()
+ * @testcase           utc_mediavision_mv_source_get_width_n
+ * @since_tizen                2.4
+ * @description                Get the width of media source handle but fail
+ *                                                             because handle is null
+ */
+int utc_mediavision_mv_source_get_width_n(void)
+{
+    printf("Inside mv_source_get_width_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h fakeSource = NULL;
+    size_t width = 0;
+
+    const int ret = mv_source_get_width(fakeSource, &width);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_source_get_width_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_source_get_colorspace()
+ * @testcase           utc_mediavision_mv_source_get_colorspace_p
+ * @since_tizen                2.4
+ * @description                Get the colorspace from media source handle
+ */
+int utc_mediavision_mv_source_get_colorspace_p(void)
+{
+    printf("Inside mv_source_get_colorspace_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h source = NULL;
+    mv_create_source(&source);
+    mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+    unsigned char *data = NULL;
+    prepareTestBuffer(&data);
+
+    int ret = mv_source_fill_by_buffer(source, data, 800 * 600 * 4, 800, 600, MEDIA_VISION_COLORSPACE_RGBA);
+    if (ret == MEDIA_VISION_ERROR_NONE)
+        ret = mv_source_get_colorspace(source, &colorspace);
+
+    free(data);
+    mv_destroy_source(source);
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(MEDIA_VISION_COLORSPACE_RGBA, colorspace);
+
+    printf("Before return mv_source_get_colorspace_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_source_get_colorspace()
+ * @testcase           utc_mediavision_mv_source_get_colorspace_n
+ * @since_tizen                2.4
+ * @description                Get the colorspace from media source handle but fail
+ *                                                             because handle is null
+ */
+int utc_mediavision_mv_source_get_colorspace_n(void)
+{
+    printf("Inside mv_source_get_colorspace_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_source_h fakeSource = NULL;
+    mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+
+    const int ret = mv_source_get_colorspace(fakeSource, &colorspace);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_source_get_colorspace_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_create_engine_config()
+ * @testcase           utc_mediavision_mv_create_engine_config_p
+ * @since_tizen                2.4
+ * @description                Create engine configuration handle
+ */
+int utc_mediavision_mv_create_engine_config_p(void)
+{
+    printf("Inside mv_create_engine_config_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_neq((void*)NULL, engHandler);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_create_engine_config_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_create_engine_config()
+ * @testcase           utc_mediavision_mv_create_engine_config_n
+ * @since_tizen                2.4
+ * @description                Create engine configuration handle but fail
+ *                                                             because input handle is null
+ */
+int utc_mediavision_mv_create_engine_config_n(void)
+{
+    printf("Inside mv_create_engine_config_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    const int ret = mv_create_engine_config((void*)NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_create_engine_config_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_destroy_engine_config()
+ * @testcase           utc_mediavision_mv_destroy_engine_config_p
+ * @since_tizen                2.4
+ * @description                Destroy engine configuration handle
+ */
+int utc_mediavision_mv_destroy_engine_config_p(void)
+{
+    printf("Inside mv_destroy_engine_config_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_neq((void*)NULL, engHandler);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_destroy_engine_config_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_destroy_engine_config()
+ * @testcase           utc_mediavision_mv_destroy_engine_config_n
+ * @since_tizen                2.4
+ * @description                Destroy engine configuration handle but fail
+ *                                                             because handle is null
+ */
+int utc_mediavision_mv_destroy_engine_config_n(void)
+{
+    printf("Inside mv_destroy_engine_config_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h actualEngHandler = NULL;
+
+    const int ret = mv_destroy_engine_config(actualEngHandler);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_destroy_engine_config_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_set_double_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_double_attribute_p
+ * @since_tizen                2.4
+ * @description                Set double type attribute
+ */
+int utc_mediavision_mv_engine_config_set_double_attribute_p(void)
+{
+    printf("Inside mv_engine_config_set_double_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
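+    /* MV_IMAGE_TRACKING_EXPECTED_OFFSET is a built-in double attribute, so setting it is expected to succeed. */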
+    const double attributeValue = 2.0;
+    ret = mv_engine_config_set_double_attribute(engHandler, "MV_IMAGE_TRACKING_EXPECTED_OFFSET", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_set_double_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_set_double_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_double_attribute_n
+ * @since_tizen                2.4
+ * @description                Set double type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_set_double_attribute_n(void)
+{
+    printf("Inside mv_engine_config_set_double_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+    const double attributeValue = 0.;
+
+    const int ret = mv_engine_config_set_double_attribute(engHandler, "test", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_engine_config_set_double_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_set_int_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_int_attribute_p
+ * @since_tizen                2.4
+ * @description                Set integer type attribute
+ */
+int utc_mediavision_mv_engine_config_set_int_attribute_p(void)
+{
+    printf("Inside mv_engine_config_set_int_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const int attributeValue = 2;
+    ret = mv_engine_config_set_int_attribute(engHandler, "MV_FACE_RECOGNITION_MODEL_TYPE", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_set_int_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_set_int_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_int_attribute_n
+ * @since_tizen                2.4
+ * @description                Set integer type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_set_int_attribute_n(void)
+{
+    printf("Inside mv_engine_config_set_int_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+    const int attributeValue = 255;
+
+    const int ret = mv_engine_config_set_int_attribute(engHandler, "test", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_engine_config_set_int_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_set_bool_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_bool_attribute_p
+ * @since_tizen                2.4
+ * @description                Set bool type attribute
+ */
+int utc_mediavision_mv_engine_config_set_bool_attribute_p(void)
+{
+    printf("Inside mv_engine_config_set_bool_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const bool attributeValue = true;
+
+    ret = mv_engine_config_set_bool_attribute(engHandler, "MV_IMAGE_TRACKING_USE_STABLIZATION", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_set_bool_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_set_bool_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_bool_attribute_n
+ * @since_tizen                2.4
+ * @description                Set bool type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_set_bool_attribute_n(void)
+{
+    printf("Inside mv_engine_config_set_bool_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+    const bool attributeValue = true;
+
+    const int ret = mv_engine_config_set_bool_attribute(engHandler, "test", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_engine_config_set_bool_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_set_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_string_attribute_p
+ * @since_tizen                2.4
+ * @description                Set string type attribute
+ */
+int utc_mediavision_mv_engine_config_set_string_attribute_p(void)
+{
+    printf("Inside mv_engine_config_set_string_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const char *attributeValue = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
+
+    ret = mv_engine_config_set_string_attribute(engHandler, "MV_FACE_DETECTION_MODEL_FILE_PATH", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_set_string_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_set_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_string_attribute_n
+ * @since_tizen                2.4
+ * @description                Set string type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_set_string_attribute_n(void)
+{
+    printf("Inside mv_engine_config_set_string_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+    const char attributeValue[12] = "test string";
+
+    const int ret = mv_engine_config_set_string_attribute(engHandler, "test", attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_engine_config_set_string_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_set_array_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_array_string_attribute_p
+ * @since_tizen                5.5
+ * @description                Set array string type attribute
+ */
+int utc_mediavision_mv_engine_config_set_array_string_attribute_p(void)
+{
+    printf("Inside mv_engine_config_set_array_string_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
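+    /* MV_INFERENCE_OUTPUT_NODE_NAMES accepts an array of output node names; two dummy entries are used here. */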
+    const char *attributeValue[2] = {"output/node1", "output/node2"};
+
+    ret = mv_engine_config_set_array_string_attribute(engHandler, "MV_INFERENCE_OUTPUT_NODE_NAMES", attributeValue, 2);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_set_array_string_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_set_array_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_set_array_string_attribute_n
+ * @since_tizen                5.5
+ * @description                Set array string type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_set_array_string_attribute_n(void)
+{
+    printf("Inside mv_engine_config_set_array_string_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+    const char *attributeValue[2] = {"output/node1", "output/node2"};
+
+    const int ret = mv_engine_config_set_array_string_attribute(engHandler, "MV_INFERENCE_OUTPUT_NODE_NAMES", attributeValue, 2);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_engine_config_set_string_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_get_double_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_double_attribute_p
+ * @since_tizen                2.4
+ * @description                Get double type attribute
+ */
+int utc_mediavision_mv_engine_config_get_double_attribute_p(void)
+{
+    printf("Inside mv_engine_config_get_double_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const double expectedAttributeValue = 2.0;
+
+    ret = mv_engine_config_set_double_attribute(engHandler, "MV_IMAGE_TRACKING_EXPECTED_OFFSET", expectedAttributeValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    double actualAttributeValue = 0.0;
+
+    ret = mv_engine_config_get_double_attribute(engHandler, "MV_IMAGE_TRACKING_EXPECTED_OFFSET", &actualAttributeValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    assert_eq(expectedAttributeValue, actualAttributeValue);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_get_double_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_get_double_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_double_attribute_n
+ * @since_tizen                2.4
+ * @description                Get double type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_get_double_attribute_n(void)
+{
+    printf("Inside mv_engine_config_get_double_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    double attributeValue = 0.0;
+
+    ret = mv_engine_config_get_double_attribute(engHandler, "test", &attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_get_double_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_get_int_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_int_attribute_p
+ * @since_tizen                2.4
+ * @description                Get integer type attribute
+ */
+int utc_mediavision_mv_engine_config_get_int_attribute_p(void)
+{
+    printf("Inside mv_engine_config_get_int_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const int expectedParameterValue = 2;
+
+    ret = mv_engine_config_set_int_attribute(engHandler, "MV_FACE_RECOGNITION_MODEL_TYPE", expectedParameterValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    int actualParameterValue = 0;
+
+    ret = mv_engine_config_get_int_attribute(engHandler, "MV_FACE_RECOGNITION_MODEL_TYPE", &actualParameterValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    assert_eq(expectedParameterValue, actualParameterValue);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_get_int_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_get_int_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_int_attribute_n
+ * @since_tizen                2.4
+ * @description                Get integer type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_get_int_attribute_n(void)
+{
+    printf("Inside mv_engine_config_get_int_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    int attributeValue = 0;
+
+    ret = mv_engine_config_get_int_attribute(engHandler, "test", &attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_get_int_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_get_bool_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_bool_attribute_p
+ * @since_tizen                2.4
+ * @description                Get bool type attribute
+ */
+int utc_mediavision_mv_engine_config_get_bool_attribute_p(void)
+{
+    printf("Inside mv_engine_config_get_bool_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const bool expectedParameterValue = true;
+
+    ret = mv_engine_config_set_bool_attribute(engHandler, "MV_IMAGE_TRACKING_USE_STABLIZATION", expectedParameterValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    bool actualParameterValue = false;
+
+    ret = mv_engine_config_get_bool_attribute(engHandler, "MV_IMAGE_TRACKING_USE_STABLIZATION", &actualParameterValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    assert_eq(expectedParameterValue, actualParameterValue);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_get_bool_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_get_bool_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_bool_attribute_n
+ * @since_tizen                2.4
+ * @description                Get bool type attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_get_bool_attribute_n(void)
+{
+    printf("Inside mv_engine_config_get_bool_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    bool attributeValue = false;
+
+    ret = mv_engine_config_get_bool_attribute(engHandler, "test", &attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_engine_config_get_bool_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_get_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_string_attribute_p
+ * @since_tizen                2.4
+ * @description                Get string attribute
+ */
+int utc_mediavision_mv_engine_config_get_string_attribute_p(void)
+{
+    printf("Inside mv_engine_config_get_string_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const char *expectedParameterValue = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
+
+    ret = mv_engine_config_set_string_attribute(engHandler, "MV_FACE_DETECTION_MODEL_FILE_PATH", expectedParameterValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    char *actualParameterValue = NULL;
+
+    ret = mv_engine_config_get_string_attribute(engHandler, "MV_FACE_DETECTION_MODEL_FILE_PATH", &actualParameterValue);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    assert_eq(strcmp(expectedParameterValue, actualParameterValue), 0);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    free(actualParameterValue);
+
+    printf("Before return mv_engine_config_get_string_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_get_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_string_attribute_n
+ * @since_tizen                2.4
+ * @description                Get string attribute but fail
+ *                                                             because engine configuration handle is null
+ */
+int utc_mediavision_mv_engine_config_get_string_attribute_n(void)
+{
+    printf("Inside mv_engine_config_get_string_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    char *attributeValue = NULL;
+
+    ret = mv_engine_config_get_string_attribute(engHandler, "test", &attributeValue);
+    assert_eq(MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    free(attributeValue);
+
+    printf("Before return mv_engine_config_get_string_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_get_array_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_array_string_attribute_p
+ * @since_tizen                5.5
+ * @description                Get array string attribute
+ */
+int utc_mediavision_mv_engine_config_get_array_string_attribute_p(void)
+{
+    printf("Inside mv_engine_config_get_array_string_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    const int expectedSize = 2;
+    const char *expectedParameterValue[2] = {"output/node1", "output/node2"};
+
+    ret = mv_engine_config_set_array_string_attribute(engHandler, "MV_INFERENCE_OUTPUT_NODE_NAMES", expectedParameterValue, 2);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    int actualSize = 0;
+    char **actualParameterValue = NULL;
+
+    ret = mv_engine_config_get_array_string_attribute(engHandler, "MV_INFERENCE_OUTPUT_NODE_NAMES", &actualParameterValue, &actualSize);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    assert_eq(expectedSize, actualSize);
+    for (int i = 0; i < expectedSize; ++i) {
+        assert_eq(strcmp(expectedParameterValue[i], actualParameterValue[i]), 0);
+    }
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    for (int i = 0; i < actualSize; ++i) {
+        free(actualParameterValue[i]);
+    }
+    free(actualParameterValue);
+
+    printf("Before return mv_engine_config_get_string_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_get_array_string_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_get_array_string_attribute_n
+ * @since_tizen                5.5
+ * @description                Get array string attribute but fail
+ *                                                             because key is not available
+ */
+int utc_mediavision_mv_engine_config_get_array_string_attribute_n(void)
+{
+    printf("Inside mv_engine_config_get_array_string_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    mv_engine_config_h engHandler = NULL;
+
+    int ret = mv_create_engine_config(&engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    int size = 0;
+    char **attributeValue = NULL;
+
+    ret = mv_engine_config_get_array_string_attribute(engHandler, "test", &attributeValue, &size);
+    assert_eq(MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE, ret);
+
+    ret = mv_destroy_engine_config(engHandler);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(0, size);
+    assert_eq(NULL, attributeValue);
+
+    printf("Before return mv_engine_config_get_array_string_attribute_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_engine_config_foreach_supported_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_foreach_supported_attribute_p
+ * @since_tizen                2.4
+ * @description                Check supported all attributes
+ */
+int utc_mediavision_mv_engine_config_foreach_supported_attribute_p(void)
+{
+    printf("Inside mv_engine_config_foreach_supported_attribute_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(false, _is_broken_config);
+
+    bool isCorrect = true;
+
+    const int ret = mv_engine_config_foreach_supported_attribute(
+            _attribute_supported_callback, &isCorrect);
+
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(true, isCorrect);
+
+    printf("Before mv_engine_config_foreach_supported_attribute_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_engine_config_foreach_supported_attribute()
+ * @testcase           utc_mediavision_mv_engine_config_foreach_supported_attribute_n
+ * @since_tizen                2.4
+ * @description                Check supported all attributes but fail
+ *                                                             because callback is null
+ */
+int utc_mediavision_mv_engine_config_foreach_supported_attribute_n(void)
+{
+    printf("Inside mv_engine_config_foreach_supported_attribute_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    const int ret = mv_engine_config_foreach_supported_attribute(
+            NULL, NULL);
+
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before mv_engine_config_foreach_supported_attribute_n\n");
+
+    return 0;
+}
diff --git a/src/utc/capi-media-vision-dl/utc-mv_face_recognition.c b/src/utc/capi-media-vision-dl/utc-mv_face_recognition.c
new file mode 100755 (executable)
index 0000000..beac991
--- /dev/null
@@ -0,0 +1,940 @@
+/**
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assert.h"
+#include <mv_face_recognition.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <image_util.h>
+#include <system_info.h>
+#include <storage.h>
+#include "tct_common.h"
+
+#define MAX_LABEL_CNT  2
+#define MAX_IMAGE_CNT  10
+#define FILE_PATH_SIZE 1024
+#define API_NAMESPACE "[MediaVision]"
+
+static char *p1_face_examples_dir = NULL;
+static char *p2_face_examples_dir = NULL;
+
+static const char *image_file_names[MAX_IMAGE_CNT] = {
+       "00.jpg", "01.jpg", "02.jpg", "03.jpg", "04.jpg",
+       "05.jpg", "06.jpg", "07.jpg", "08.jpg", "09.jpg"
+};
+
+static const char *label_names[MAX_LABEL_CNT] = {
+       "p1", "p2"
+};
+
+static int load_image_to_media_source(const char *file_path, mv_source_h source)
+{
+       if (NULL == file_path || NULL == source)
+       {
+               printf("File path or source is NULL\n");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
+
+       unsigned long width = 0;
+       unsigned long height = 0;
+       unsigned long long buffer_size = 0;
+       unsigned char *data_buffer = NULL;
+       int ret1 = IMAGE_UTIL_ERROR_NONE;
+       int ret2 = MEDIA_VISION_ERROR_NONE;
+       image_util_decode_h _decoder = NULL;
+
+       ret1 = image_util_decode_create(&_decoder);
+       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+       ret1 = image_util_decode_set_input_path(_decoder, file_path);
+       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+       ret1 = image_util_decode_set_colorspace(_decoder, IMAGE_UTIL_COLORSPACE_RGB888);
+       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+       ret1 = image_util_decode_set_output_buffer(_decoder, &data_buffer);
+       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+       ret1 = image_util_decode_run(_decoder, &width, &height, &buffer_size);
+       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+
+       // Only grayscale and RGB jpegs in test set:
+       mv_colorspace_e source_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
+
+       ret2 = mv_source_clear(source);
+       if (ret2 != MEDIA_VISION_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+
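+       /* mv_source_fill_by_buffer() copies the decoded pixels, so data_buffer is freed below in either case. */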
+       ret2 = mv_source_fill_by_buffer(source, data_buffer, (unsigned long long)buffer_size,
+                                                                       (unsigned int)width, (unsigned int)height, source_colorspace);
+
+_LOAD_IMAGE_FAIL:
+       image_util_decode_destroy(_decoder);
+       if(data_buffer)
+               free(data_buffer);
+
+       assert_eq(IMAGE_UTIL_ERROR_NONE, ret1);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret2);
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+static bool is_face_recognition_feature_supported(void)
+{
+       bool isFaceRecognitionSupported = false;
+
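+       /* When this platform feature is absent, every face recognition API is expected to return MEDIA_VISION_ERROR_NOT_SUPPORTED and the test is skipped. */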
+       system_info_get_platform_bool("http://tizen.org/feature/vision.inference.face", &isFaceRecognitionSupported);
+
+       if (!isFaceRecognitionSupported)
+               printf("Face recognition feature is not supported.\n");
+
+       return isFaceRecognitionSupported;
+}
+
+/**
+ * @function   utc_capi_media_vision_face_recognition_startup
+ * @description        Face recognition module UTC startup code
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_face_recognition_startup(void)
+{
+       printf("capi-media-vision mv_face_recognition tests STARTUP is launched\n");
+
+       char pszValue[CONFIG_VALUE_LEN_MAX] = {0,};
+
+       if (!GetValueForTCTSetting("DEVICE_PHYSICAL_STORAGE_30", pszValue, API_NAMESPACE)) {
+               printf("Fail to get value for TCT setting.\n");
+               return;
+       }
+
+       PRINT_UTC_LOG("[Line : %d][%s] 'DEVICE_PHYSICAL_STORAGE_30' Values Received %s\\n", __LINE__, API_NAMESPACE, pszValue);
+
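+       /* Build absolute paths to the P1 and P2 face image directories under the device storage root. */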
+       p1_face_examples_dir = (char *)calloc(strlen(pszValue) + strlen("/res/face_recognition/images/P1") + 1, sizeof(char));
+       snprintf(p1_face_examples_dir, strlen(pszValue) + strlen("/res/face_recognition/images/P1") + 1, "%s/res/face_recognition/images/P1", pszValue);
+
+       p2_face_examples_dir = (char *)calloc(strlen(pszValue) + strlen("/res/face_recognition/images/P2") + 1, sizeof(char));
+       snprintf(p2_face_examples_dir, strlen(pszValue) + strlen("/res/face_recognition/images/P2") + 1, "%s/res/face_recognition/images/P2", pszValue);
+
+       printf("capi-media-vision mv_face_recognition tests STARTUP is completed\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_face_recognition_cleanup
+ * @description        Face recognition module UTC cleanup code
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_face_recognition_cleanup(void)
+{
+       printf("capi-media-vision mv_face_recognition tests CLEANUP is launched\n");
+
+       free(p1_face_examples_dir);
+       free(p2_face_examples_dir);
+
+       printf("capi-media-vision mv_face_recognition tests CLEANUP is completed\n");
+}
+
+/**
+ * @brief Positive test case of mv_face_recognition_create_p()
+ * @testcase        utc_mediavision_mv_face_recognition_create_p
+ * @since_tizen     7.0
+ * @description     Create face recognition handle
+ */
+int utc_mediavision_mv_face_recognition_create_p(void)
+{
+       printf("Start mv_face_recognition_create_p\n");
+
+       mv_face_recognition_h handle = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_create_p\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_create()
+ * @testcase        utc_mediavision_mv_face_recognition_create_n
+ * @since_tizen     7.0
+ * @description     Create face recognition handle,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_face_recognition_create_n(void)
+{
+       printf("Start mv_face_recognition_create_n\n");
+
+       int ret = mv_face_recognition_create(NULL);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       printf("End mv_face_recognition_create_n\n");
+
+       return 0;
+}
+
+/**
+ * @brief Positive test case of mv_face_recognition_destroy_p()
+ * @testcase        utc_mediavision_mv_face_recognition_destroy_p
+ * @since_tizen     7.0
+ * @description     Destroy face recognition handle
+ */
+int utc_mediavision_mv_face_recognition_destroy_p(void)
+{
+       printf("Start mv_face_recognition_destroy_p\n");
+
+       mv_face_recognition_h handle = NULL;
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_destroy_p\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_destroy_n()
+ * @testcase        utc_mediavision_mv_face_recognition_destroy_n
+ * @since_tizen     7.0
+ * @description     Destroy face recognition handle,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_face_recognition_destroy_n(void)
+{
+       printf("Start mv_face_recognition_destroy_n\n");
+
+       int ret = mv_face_recognition_destroy(NULL);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       printf("End mv_face_recognition_destroy_n\n");
+
+       return 0;
+}
+
+/**
+ * @brief Positive test case of mv_face_recognition_prepare_p()
+ * @testcase        utc_mediavision_mv_face_recognition_prepare_p
+ * @since_tizen     7.0
+ * @description     Prepare face recognition
+ */
+int utc_mediavision_mv_face_recognition_prepare_p(void)
+{
+       printf("Start mv_face_recognition_prepare_p\n");
+
+       mv_face_recognition_h handle = NULL;
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_prepare_p\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_prepare_n()
+ * @testcase        utc_mediavision_mv_face_recognition_prepare_n
+ * @since_tizen     7.0
+ * @description     Prepare face recognition,
+ *                  but fail because handle is NULL
+ */
+int utc_mediavision_mv_face_recognition_prepare_n(void)
+{
+       printf("Start mv_face_recognition_prepare_n\n");
+
+       int ret = mv_face_recognition_prepare(NULL);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       printf("End mv_face_recognition_prepare_n\n");
+
+       return 0;
+}
+
+/**
+ * @brief Positive test case of mv_face_recognition_register_p()
+ * @testcase        utc_mediavision_mv_face_recognition_register_p
+ * @since_tizen     7.0
+ * @description     Register face image and its label
+ */
+int utc_mediavision_mv_face_recognition_register_p(void)
+{
+       printf("Start mv_face_recognition_register_p\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
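+       /* Register the first half of each person's images (MAX_IMAGE_CNT / 2 per label) as training samples. */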
+       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
+               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
+                       char image_path[FILE_PATH_SIZE] = "";
+
+                       ret = mv_create_source(&mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       if (label_idx == 0)
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
+                       else
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
+
+                       ret = load_image_to_media_source(image_path, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_destroy_source(mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+               }
+       }
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_register_p\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_register_n1()
+ * @testcase        utc_mediavision_mv_face_recognition_register_n1
+ * @since_tizen     7.0
+ * @description     Register face image and its label,
+ *                  but fail because handle is NULL
+ */
+int utc_mediavision_mv_face_recognition_register_n1(void)
+{
+       printf("Start mv_face_recognition_register_n1\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+       char image_path[FILE_PATH_SIZE] = "";
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_create_source(&mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
+
+       ret = load_image_to_media_source(image_path, mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_register(NULL, mv_source, "test");
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_destroy_source(mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_register_n1\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_register_n2()
+ * @testcase        utc_mediavision_mv_face_recognition_register_n2
+ * @since_tizen     7.0
+ * @description     Register face image and its label,
+ *                  but fail because source is NULL
+ */
+int utc_mediavision_mv_face_recognition_register_n2(void)
+{
+       printf("Start mv_face_recognition_register_n2\n");
+
+       mv_face_recognition_h handle = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_register(handle, NULL, "test");
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_register_n2\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_register_n3()
+ * @testcase        utc_mediavision_mv_face_recognition_register_n3
+ * @since_tizen     7.0
+ * @description     Register face image and its label,
+ *                  but fail because the label is NULL
+ */
+int utc_mediavision_mv_face_recognition_register_n3(void)
+{
+       printf("Start mv_face_recognition_register_n3\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+       char image_path[FILE_PATH_SIZE] = "";
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_create_source(&mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
+
+       ret = load_image_to_media_source(image_path, mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_register(handle, mv_source, NULL);
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_destroy_source(mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_register_n3\n");
+
+       return 0;
+}
+
+/**
+ * @brief Positive test case of mv_face_recognition_inference_p()
+ * @testcase        utc_mediavision_mv_face_recognition_inference_p
+ * @since_tizen     7.0
+ * @description     Recognize a given face image
+ */
+int utc_mediavision_mv_face_recognition_inference_p(void)
+{
+       printf("Start mv_face_recognition_inference_p\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       // Training
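+       // Register the first half of each person's images; the remaining images are used for inference below.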
+       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
+               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
+                       char image_path[FILE_PATH_SIZE] = "";
+
+                       ret = mv_create_source(&mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       if (label_idx == 0)
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
+                       else
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
+
+                       ret = load_image_to_media_source(image_path, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_destroy_source(mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+               }
+       }
+
+       // Inference
+       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
+               for (unsigned int img_idx = 5; img_idx < MAX_IMAGE_CNT; ++img_idx) {
+                       char image_path[FILE_PATH_SIZE] = "";
+
+                       ret = mv_create_source(&mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       if (label_idx == 0)
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
+                       else
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
+
+                       ret = load_image_to_media_source(image_path, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_face_recognition_inference(handle, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_destroy_source(mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+               }
+       }
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_inference_p\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_inference_n1()
+ * @testcase        utc_mediavision_mv_face_recognition_inference_n1
+ * @since_tizen     7.0
+ * @description     Recognize a given face image,
+ *                  but fail because the handle is NULL
+ */
+int utc_mediavision_mv_face_recognition_inference_n1(void)
+{
+       printf("Start mv_face_recognition_inference_n1\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+       char image_path[FILE_PATH_SIZE] = "";
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_create_source(&mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
+
+       ret = load_image_to_media_source(image_path, mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_inference(NULL, mv_source);
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_destroy_source(mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_inference_n1\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_inference_n2()
+ * @testcase        utc_mediavision_mv_face_recognition_inference_n2
+ * @since_tizen     7.0
+ * @description     Recognize a given face image,
+ *                  but fail because the source is NULL
+ */
+int utc_mediavision_mv_face_recognition_inference_n2(void)
+{
+       printf("Start mv_face_recognition_inference_n2\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+       char image_path[FILE_PATH_SIZE] = "";
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_create_source(&mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
+
+       ret = load_image_to_media_source(image_path, mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_inference(handle, NULL);
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_destroy_source(mv_source);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_inference_n2\n");
+
+       return 0;
+}
+
+/**
+ * @brief Positive test case of mv_face_recognition_get_label_p()
+ * @testcase        utc_mediavision_mv_face_recognition_get_label_p
+ * @since_tizen     7.0
+ * @description     Get a label
+ */
+int utc_mediavision_mv_face_recognition_get_label_p(void)
+{
+       printf("Start mv_face_recognition_get_label_p\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       // Training
+       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
+               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
+                       char image_path[FILE_PATH_SIZE] = "";
+
+                       ret = mv_create_source(&mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       if (label_idx == 0)
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
+                       else
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
+
+                       ret = load_image_to_media_source(image_path, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_destroy_source(mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+               }
+       }
+
+       // Inference
+       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
+               for (unsigned int img_idx = 5; img_idx < MAX_IMAGE_CNT; ++img_idx) {
+                       char image_path[FILE_PATH_SIZE] = "";
+                       const char *out_label = NULL;
+
+                       ret = mv_create_source(&mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       if (label_idx == 0)
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
+                       else
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
+
+                       ret = load_image_to_media_source(image_path, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_face_recognition_inference(handle, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
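+			// Recognition may not yield a label for every test image;
+			// NO_DATA is tolerated and only successful calls are asserted.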
+                       ret = mv_face_recognition_get_label(handle, &out_label);
+                       if (ret != MEDIA_VISION_ERROR_NO_DATA)
+                               assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_destroy_source(mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+               }
+       }
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_get_label_p\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_get_label_n1()
+ * @testcase        utc_mediavision_mv_face_recognition_get_label_n1
+ * @since_tizen     7.0
+ * @description     Get a label,
+ *                  but fail because the handle is NULL
+ */
+int utc_mediavision_mv_face_recognition_get_label_n1(void)
+{
+       printf("Start mv_face_recognition_get_label_n1\n");
+
+       mv_face_recognition_h handle = NULL;
+       const char *out_label = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_get_label(NULL, &out_label);
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_get_label_n1\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_get_label_n2()
+ * @testcase        utc_mediavision_mv_face_recognition_get_label_n2
+ * @since_tizen     7.0
+ * @description     Get a label,
+ *                  but fail because the output parameter is NULL
+ */
+int utc_mediavision_mv_face_recognition_get_label_n2(void)
+{
+       printf("Start mv_face_recognition_get_label_n2\n");
+
+       mv_face_recognition_h handle = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_get_label(handle, NULL);
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_get_label_n2\n");
+
+       return 0;
+}
+
+/**
+ * @brief Positive test case of mv_face_recognition_unregister_p()
+ * @testcase        utc_mediavision_mv_face_recognition_unregister_p
+ * @since_tizen     7.0
+ * @description     Unregister a given label
+ */
+int utc_mediavision_mv_face_recognition_unregister_p(void)
+{
+       printf("Start mv_face_recognition_unregister_p\n");
+
+       mv_face_recognition_h handle = NULL;
+       mv_source_h mv_source = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       // Training
+       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
+               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
+                       char image_path[FILE_PATH_SIZE] = "";
+
+                       ret = mv_create_source(&mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       if (label_idx == 0)
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
+                       else
+                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
+
+                       ret = load_image_to_media_source(image_path, mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+                       ret = mv_destroy_source(mv_source);
+                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+               }
+       }
+
+       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
+               ret = mv_face_recognition_unregister(handle, label_names[label_idx]);
+               assert_eq(ret, MEDIA_VISION_ERROR_NONE);
+       }
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_unregister_p\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_unregister_n1()
+ * @testcase        utc_mediavision_mv_face_recognition_unregister_n1
+ * @since_tizen     7.0
+ * @description     Unregister a given label,
+ *                  but fail because the handle is NULL
+ */
+int utc_mediavision_mv_face_recognition_unregister_n1(void)
+{
+       printf("Start mv_face_recognition_unregister_n1\n");
+
+       mv_face_recognition_h handle = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_unregister(NULL, "p1");
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_unregister_n1\n");
+
+       return 0;
+}
+
+/**
+ * @brief Negative test case of mv_face_recognition_unregister_n2()
+ * @testcase        utc_mediavision_mv_face_recognition_unregister_n2
+ * @since_tizen     7.0
+ * @description     Unregister a given label,
+ *                  but fail because the label is NULL
+ */
+int utc_mediavision_mv_face_recognition_unregister_n2(void)
+{
+       printf("Start mv_face_recognition_unregister_n2\n");
+
+       mv_face_recognition_h handle = NULL;
+
+       int ret = mv_face_recognition_create(&handle);
+       if (!is_face_recognition_feature_supported()) {
+               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+               return 0;
+       }
+
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_prepare(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       ret = mv_face_recognition_unregister(handle, NULL);
+       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+       ret = mv_face_recognition_destroy(handle);
+       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+       printf("End mv_face_recognition_unregister_n2\n");
+
+       return 0;
+}
\ No newline at end of file
diff --git a/src/utc/capi-media-vision-dl/utc-mv_inference.c b/src/utc/capi-media-vision-dl/utc-mv_inference.c
new file mode 100755 (executable)
index 0000000..09baa39
--- /dev/null
@@ -0,0 +1,2519 @@
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assert.h"
+#include <mv_inference.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <image_util.h>
+#include <system_info.h>
+#include <storage.h>
+#include "tct_common.h"
+
+#define FILE_PATH_SIZE 1024
+#define API_NAMESPACE "[MediaVision]"
+
+static bool isVisionSupported = false;
+static int gStartupError;
+
+static mv_inference_h gInferenceHandle = NULL;
+static mv_engine_config_h gEngineConfigHandle = NULL;
+static mv_source_h gSourceHandle = NULL;
+static char *gInferenceExampleDir = NULL;
+static mv_point_s** gPldResultLandmarks = NULL;
+static float** gPldResultScore = NULL;
+static mv_pose_h gPoseHandle = NULL;
+
+static bool gIsForeachSupportedCallBackInvoked = false;
+static bool gIsImageClassifyCallBackInvoked = false;
+static bool gIsObjectDetectCallBackInvoked = false;
+static bool gIsFaceDetectCallBackInvoked = false;
+static bool gIsFacialLandmarkDetectCallBackInvoked = false;
+static bool gIsPoseLandmarkDetectCallBackInvoked = false;
+static bool gIsGetPoseLandmark = false;
+
+static int gPldResultErr;
+static int gPldResultNumberOfPoses;
+static int gPldResultNumberOfLandmarks;
+static int gPldResultLabel;
+static float gPoseCompareScore;
+
+#define IC_MODEL_FILENAME "ic_tflite_model.tflite"
+#define IC_MODEL_META_FILENAME "ic_tflite_model_meta.json"
+#define IC_LABEL_FILENAME "ic_label.txt"
+
+#define OD_MODEL_FILENAME "od_tflite_model.tflite"
+#define OD_MODEL_META_FILENAME "od_tflite_model_meta.json"
+#define OD_LABEL_FILENAME "od_label.txt"
+
+#define FD_MODEL_FILENAME "fd_tflite_model1.tflite"
+#define FD_MODEL_META_FILENAME "fd_tflite_model1_meta.json"
+
+#define FLD_MODEL_FILENAME "fld_tflite_model1.tflite"
+#define FLD_MODEL_META_FILENAME "fld_tflite_model1_meta.json"
+
+#define PLD_MODEL_FILENAME "pld_tflite_model.tflite"
+#define PLD_MODEL_META_FILENAME "pld_tflite_model_meta.json"
+#define PLD_POSE_MAPPING_FILENAME "pld_pose_mapping.txt"
+#define PLD_MOCAP_FILEAME "pld_mocap.bvh"
+#define PLD_MOCAP_MAPPING_FILENAME "pld_mocap_mapping.txt"
+
+static int set_image_classification_engine_config(mv_engine_config_h engineCfg)
+{
+    int ret = MEDIA_VISION_ERROR_NONE;
+
+    char modelFilename[1024];
+    char modelMetaFilename[1024];
+    char labelFilename[1024];
+    char *inputNodeName = "input_2";
+    char *outputNodeName[1] = {"dense_3/Softmax"};
+    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, IC_MODEL_FILENAME);
+    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, IC_MODEL_META_FILENAME);
+    snprintf(labelFilename, 1024, "%s/models/%s", gInferenceExampleDir, IC_LABEL_FILENAME);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                        modelFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_META_FILE_PATH,
+                        modelMetaFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_DATA_TYPE,
+                        MV_INFERENCE_DATA_FLOAT32);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_USER_FILE_PATH,
+                        labelFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_MEAN_VALUE,
+                        127.0);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_STD_VALUE,
+                        127.0);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_CONFIDENCE_THRESHOLD,
+                        0.5);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_BACKEND_TYPE,
+                        MV_INFERENCE_BACKEND_TFLITE);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_TARGET_DEVICE_TYPE,
+                        MV_INFERENCE_TARGET_DEVICE_CPU);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
+                        224);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+                        224);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+                        3);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_NODE_NAME,
+                        inputNodeName);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_array_string_attribute(engineCfg,
+                        MV_INFERENCE_OUTPUT_NODE_NAMES,
+                        outputNodeName,
+                        1);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    return ret;
+}
+
+static int set_object_detection_engine_config(mv_engine_config_h engineCfg)
+{
+    int ret = MEDIA_VISION_ERROR_NONE;
+
+    char modelFilename[1024];
+    char modelMetaFilename[1024];
+    char labelFilename[1024];
+    char *inputNodeName = "normalized_input_image_tensor";
+    char *outputNodeName[4] = {"TFLite_Detection_PostProcess",
+                            "TFLite_Detection_PostProcess:1",
+                            "TFLite_Detection_PostProcess:2",
+                            "TFLite_Detection_PostProcess:3"};
+
+    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, OD_MODEL_FILENAME);
+    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, OD_MODEL_META_FILENAME);
+    snprintf(labelFilename, 1024, "%s/models/%s", gInferenceExampleDir, OD_LABEL_FILENAME);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                        modelFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_META_FILE_PATH,
+                        modelMetaFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_DATA_TYPE,
+                        MV_INFERENCE_DATA_FLOAT32);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_USER_FILE_PATH,
+                        labelFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_MEAN_VALUE,
+                        127.5);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_STD_VALUE,
+                        127.5);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_CONFIDENCE_THRESHOLD,
+                        0.3);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_BACKEND_TYPE,
+                        MV_INFERENCE_BACKEND_TFLITE);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_TARGET_DEVICE_TYPE,
+                        MV_INFERENCE_TARGET_DEVICE_CPU);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
+                        300);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+                        300);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+                        3);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_NODE_NAME,
+                        inputNodeName);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_array_string_attribute(engineCfg,
+                        MV_INFERENCE_OUTPUT_NODE_NAMES,
+                        outputNodeName,
+                        4);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    return ret;
+}
+
+static int set_face_detection_engine_config(mv_engine_config_h engineCfg)
+{
+    int ret = MEDIA_VISION_ERROR_NONE;
+
+    char modelFilename[1024];
+    char modelMetaFilename[1024];
+    char *inputNodeName = "normalized_input_image_tensor";
+    char *outputNodeName[4] = {"TFLite_Detection_PostProcess",
+                                "TFLite_Detection_PostProcess:1",
+                                "TFLite_Detection_PostProcess:2",
+                                "TFLite_Detection_PostProcess:3"};
+    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, FD_MODEL_FILENAME);
+    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, FD_MODEL_META_FILENAME);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                        modelFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_META_FILE_PATH,
+                        modelMetaFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_DATA_TYPE,
+                        MV_INFERENCE_DATA_FLOAT32);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_MEAN_VALUE,
+                        127.5);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_STD_VALUE,
+                        127.5);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_CONFIDENCE_THRESHOLD,
+                        0.3);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_BACKEND_TYPE,
+                        MV_INFERENCE_BACKEND_TFLITE);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_TARGET_DEVICE_TYPE,
+                        MV_INFERENCE_TARGET_DEVICE_CPU);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
+                        300);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+                        300);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+                        3);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_NODE_NAME,
+                        inputNodeName);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_array_string_attribute(engineCfg,
+                        MV_INFERENCE_OUTPUT_NODE_NAMES,
+                        outputNodeName,
+                        4);
+    return ret;
+}
+
+static int set_facial_landmark_detection_engine_config(mv_engine_config_h engineCfg)
+{
+    int ret = MEDIA_VISION_ERROR_NONE;
+
+    char modelFilename[1024];
+    char modelMetaFilename[1024];
+    char *inputNodeName = "Placeholder";
+    char *outputNodeName[1] = {"fanet8ss_inference/fully_connected_1/Sigmoid"};
+    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, FLD_MODEL_FILENAME);
+    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, FLD_MODEL_META_FILENAME);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                        modelFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_META_FILE_PATH,
+                        modelMetaFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_DATA_TYPE,
+                        MV_INFERENCE_DATA_FLOAT32);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_MEAN_VALUE,
+                        0.0);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_STD_VALUE,
+                        1.0);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_BACKEND_TYPE,
+                        MV_INFERENCE_BACKEND_TFLITE);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_TARGET_DEVICE_TYPE,
+                        MV_INFERENCE_TARGET_DEVICE_CPU);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
+                        128);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+                        128);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+                        3);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_NODE_NAME,
+                        inputNodeName);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_array_string_attribute(engineCfg,
+                        MV_INFERENCE_OUTPUT_NODE_NAMES,
+                        outputNodeName,
+                        1);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    return ret;
+}
+
+static int set_pose_landmark_detection_engine_config(mv_engine_config_h engineCfg)
+{
+    int ret = MEDIA_VISION_ERROR_NONE;
+
+    char modelFilename[FILE_PATH_SIZE];
+    char modelMetaFilename[FILE_PATH_SIZE];
+    char poseMappingFilename[FILE_PATH_SIZE];
+    char *inputNodeName = "image";
+    char *outputNodeName[1] = { "Convolutional_Pose_Machine/stage_5_out" };
+    snprintf(modelFilename, FILE_PATH_SIZE, "%s/models/%s",
+                            gInferenceExampleDir, PLD_MODEL_FILENAME);
+    snprintf(modelMetaFilename, FILE_PATH_SIZE, "%s/models/%s",
+                            gInferenceExampleDir, PLD_MODEL_META_FILENAME);
+    snprintf(poseMappingFilename, FILE_PATH_SIZE, "%s/models/%s",
+                            gInferenceExampleDir, PLD_POSE_MAPPING_FILENAME);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                        modelFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_META_FILE_PATH,
+                        modelMetaFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_DATA_TYPE,
+                        MV_INFERENCE_DATA_FLOAT32);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_USER_FILE_PATH,
+                        poseMappingFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_MEAN_VALUE,
+                        0.0);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_double_attribute(engineCfg,
+                        MV_INFERENCE_MODEL_STD_VALUE,
+                        1.0);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_BACKEND_TYPE,
+                        MV_INFERENCE_BACKEND_TFLITE);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_TARGET_DEVICE_TYPE,
+                        MV_INFERENCE_TARGET_DEVICE_CPU);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
+                        192);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+                        192);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_int_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+                        3);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_string_attribute(engineCfg,
+                        MV_INFERENCE_INPUT_NODE_NAME,
+                        inputNodeName);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_engine_config_set_array_string_attribute(engineCfg,
+                        MV_INFERENCE_OUTPUT_NODE_NAMES,
+                        outputNodeName,
+                        1);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    return ret;
+}
+
+static int load_image_to_media_source(
+        const char *file_path,
+        mv_source_h source)
+{
+    if (NULL == file_path || NULL == source)
+    {
+        printf("File path or source is NULL\n");
+        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+    }
+
+    unsigned long width = 0;
+    unsigned long height = 0;
+    unsigned long long buffer_size = 0;
+    unsigned char *data_buffer = NULL;
+    int ret1 = IMAGE_UTIL_ERROR_NONE;
+    int ret2 = MEDIA_VISION_ERROR_NONE;
+    image_util_decode_h _decoder = NULL;
+
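+    // Decode the image file into a raw RGB888 buffer with the image-util decoder.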
+    ret1 = image_util_decode_create(&_decoder);
+    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+    ret1 = image_util_decode_set_input_path(_decoder, file_path);
+    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+    ret1 = image_util_decode_set_colorspace(_decoder, IMAGE_UTIL_COLORSPACE_RGB888);
+    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+    ret1 = image_util_decode_set_output_buffer(_decoder, &data_buffer);
+    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+    ret1 = image_util_decode_run(_decoder, &width, &height, &buffer_size);
+    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+
+    // Only grayscale and RGB jpegs in test set:
+    mv_colorspace_e source_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
+
+    ret2 = mv_source_clear(source);
+    if (ret2 != MEDIA_VISION_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
+
+    ret2 = mv_source_fill_by_buffer(
+                    source, data_buffer, (unsigned int)buffer_size,
+                    (unsigned int)width, (unsigned int)height,
+                    source_colorspace);
+
+_LOAD_IMAGE_FAIL:
+    image_util_decode_destroy(_decoder);
+    if(data_buffer)
+        free(data_buffer);
+
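+    // Assert only after the decoder and buffer are released so a failing decode does not leak.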
+    assert_eq(IMAGE_UTIL_ERROR_NONE, ret1);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret2);
+
+    return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_startup1
+ * @description        Inference module UTC startup code
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_startup1(void)
+{
+    printf("capi-media-vision mv_inference tests STARTUP is launched\n");
+
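+    // This startup only checks whether the vision.inference feature is supported;
+    // no handles or resources are created here.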
+    bool isInferenceSupported = true;
+    system_info_get_platform_bool("http://tizen.org/feature/vision.inference", &isInferenceSupported);
+
+    isVisionSupported = isInferenceSupported;
+
+    printf("capi-media-vision mv_inference tests STARTUP is completed\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_cleanup1
+ * @description        Inference module UTC cleanup code
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_cleanup1(void)
+{
+    printf("capi-media-vision mv_image tests CLEANUP is launched\n");
+    printf("capi-media-vision mv_image tests CLEANUP is completed\n");
+}
+
+
+/**
+ * @function   utc_capi_media_vision_inference_startup2
+ * @description        Inference module UTC startup code
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_startup2(void)
+{
+    printf("capi-media-vision mv_inference tests STARTUP is launched\n");
+
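+    // Besides the feature check, this startup resolves the inference resource directory and
+    // creates the inference, engine config and source handles shared by the test cases below.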
+    bool isInferenceSupported = true;
+    system_info_get_platform_bool("http://tizen.org/feature/vision.inference", &isInferenceSupported);
+
+    isVisionSupported = isInferenceSupported;
+
+
+    char pszValue[CONFIG_VALUE_LEN_MAX] = {0,};
+    if (true == GetValueForTCTSetting("DEVICE_SUITE_TARGET_30", pszValue, API_NAMESPACE)) {
+        PRINT_UTC_LOG("[Line : %d][%s] 'DEVICE_SUITE_TARGET_30' Values Received %s\\n", __LINE__, API_NAMESPACE, pszValue);
+
+        gInferenceExampleDir=(char*)calloc(strlen(pszValue)+strlen("/res/res/inference")+1, sizeof(char));
+        snprintf(gInferenceExampleDir, strlen(pszValue)+strlen("/res/res/inference")+1, "%s/res/res/inference", pszValue);
+
+    } else {
+        PRINT_UTC_LOG("[Line : %d][%s] GetValueForTCTSetting returned error for 'DEVICE_SUITE_TARGET_30'\\n", __LINE__, API_NAMESPACE);
+    }
+
+    gStartupError = mv_inference_create(&gInferenceHandle);
+    if (MEDIA_VISION_ERROR_NONE != gStartupError) {
+        gInferenceHandle = NULL;
+        printf("mv_inference_h create is failed\n");
+        return;
+    }
+
+    gStartupError = mv_create_engine_config(&gEngineConfigHandle);
+    if (MEDIA_VISION_ERROR_NONE != gStartupError) {
+        gEngineConfigHandle = NULL;
+        printf("mv_engine_config_h create is failed\n");
+        return;
+    }
+
+    gStartupError = mv_create_source(&gSourceHandle);
+    if (MEDIA_VISION_ERROR_NONE != gStartupError) {
+        gSourceHandle = NULL;
+        printf("mv_source_h create is failed\n");
+        return;
+    }
+
+    printf("capi-media-vision mv_inference tests STARTUP is completed\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_cleanup2
+ * @description        Inference module UTC cleanup code
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_cleanup2(void)
+{
+    printf("capi-media-vision mv_image tests CLEANUP is launched\n");
+
+    if (gInferenceHandle) {
+        mv_inference_destroy(gInferenceHandle);
+        gInferenceHandle = NULL;
+    }
+
+    if (gEngineConfigHandle) {
+        mv_destroy_engine_config(gEngineConfigHandle);
+        gEngineConfigHandle = NULL;
+    }
+
+    if (gSourceHandle) {
+        mv_destroy_source(gSourceHandle);
+        gSourceHandle = NULL;
+    }
+
+    if (gInferenceExampleDir) {
+        free(gInferenceExampleDir);
+        gInferenceExampleDir = NULL;
+    }
+
+    if (gPldResultLandmarks){
+        for (int pose = 0; pose < gPldResultNumberOfPoses; ++pose)
+            free(gPldResultLandmarks[pose]);
+        free(gPldResultLandmarks);
+        gPldResultLandmarks = NULL;
+    }
+
+    if (gPldResultScore){
+        for (int pose = 0; pose < gPldResultNumberOfPoses; ++pose)
+            free(gPldResultScore[pose]);
+        free(gPldResultScore);
+        gPldResultScore = NULL;
+    }
+
+    if (gPoseHandle) {
+        mv_pose_destroy(gPoseHandle);
+        gPoseHandle = NULL;
+    }
+
+    printf("capi-media-vision mv_image tests CLEANUP is completed\n");
+}
+
+/**
+ * @brief Positive test case of mv_inference_create_p()
+ * @testcase        utc_mediavision_mv_inference_create_p
+ * @since_tizen     5.5
+ * @description     Create inference handle
+ */
+int utc_mediavision_mv_inference_create_p(void)
+{
+    printf("Inside mv_inference_create_p\n");
+
+    mv_inference_h inferenceHandle = NULL;
+    int ret = mv_inference_create(&inferenceHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_destroy(inferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_inference_create_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_create()
+ * @testcase        utc_mediavision_mv_inference_create_n
+ * @since_tizen     5.5
+ * @description     Create inference handle,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_inference_create_n(void)
+{
+    printf("Inside mv_inference_create_n\n");
+
+    int ret = mv_inference_create(NULL);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_create_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_inference_destroy_p()
+ * @testcase        utc_mediavision_mv_inference_destroy_p
+ * @since_tizen     5.5
+ * @description     Destroy inference handle
+ */
+int utc_mediavision_mv_inference_destroy_p(void)
+{
+    printf("Inside mv_inference_destroy_p\n");
+
+    mv_inference_h inferenceHandle = NULL;
+    int ret = mv_inference_create(&inferenceHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_destroy(inferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_inference_destroy_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_destroy_n()
+ * @testcase        utc_mediavision_mv_inference_destroy_n
+ * @since_tizen     5.5
+ * @description     Destroy inference handle,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_inference_destroy_n(void)
+{
+    printf("Inside mv_inference_destroy_n\n");
+
+    int ret = mv_inference_destroy(NULL);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_destroy_n\n");
+
+    return 0;
+}
+
+static bool _supported_inference_engine_cb(
+                        const char *engine,
+                        bool supported,
+                        void *user_data)
+{
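+    // Record the invocation; the engine name and support flag are not checked by this test.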
+    gIsForeachSupportedCallBackInvoked = true;
+    return true;
+}
+/**
+ * @brief Positive test case of mv_inference_foreach_supported_engine_p()
+ * @testcase        utc_mediavision_mv_inference_foreach_supported_engine_p
+ * @since_tizen     5.5
+ * @description     Check supported engine
+ */
+int utc_mediavision_mv_inference_foreach_supported_engine_p(void)
+{
+    printf("Inside mv_inference_foreach_supported_engine_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    int ret = mv_inference_foreach_supported_engine(gInferenceHandle,
+                        _supported_inference_engine_cb,
+                        NULL);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(true, gIsForeachSupportedCallBackInvoked);
+
+    printf("Before return mv_inference_foreach_supported_engine_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_inference_configure_p()
+ * @testcase        utc_mediavision_mv_inference_configure_p
+ * @since_tizen     5.5
+ * @description     Configure inference handle
+ */
+int utc_mediavision_mv_inference_configure_p(void)
+{
+    printf("Inside mv_inference_configure_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    int ret = MEDIA_VISION_ERROR_NONE;
+
+    // set engine configuration for image classification
+    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_inference_configure_p\n");
+    return 0;
+}
+
+
+/**
+ * @brief Negative test case of mv_inference_configure_n1()
+ * @testcase        utc_mediavision_mv_inference_configure_n
+ * @since_tizen     5.5
+ * @description     Configure inference handle,
+ *                  but fail because input parameters are invalid
+ */
+int utc_mediavision_mv_inference_configure_n1(void)
+{
+    printf("Inside mv_inference_configure_n1\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    int ret = mv_inference_configure(gInferenceHandle, NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    ret = mv_inference_configure(NULL, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    ret = mv_inference_configure(NULL, NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_configure_n1\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_configure_n2()
+ * @testcase        utc_mediavision_mv_inference_configure_n
+ * @since_tizen     5.5
+ * @description     Configure inference handle,
+ *                  but fail because the invalid value is set
+ */
+int utc_mediavision_mv_inference_configure_n2(void)
+{
+    printf("Inside mv_inference_configure_n2\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    int ret = mv_engine_config_set_int_attribute(gEngineConfigHandle,
+                        MV_INFERENCE_BACKEND_TYPE,
+                        MV_INFERENCE_BACKEND_MAX);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_configure_n2\n");
+    return 0;
+}
+
+
+/**
+ * @brief Positive test case of mv_inference_prepare_p()
+ * @testcase        utc_mediavision_mv_inference_prepare_p
+ * @since_tizen     5.5
+ * @description     Prepare inference handle
+ */
+int utc_mediavision_mv_inference_prepare_p(void)
+{
+    printf("Inside mv_inference_prepare_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_inference_configure_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_prepare_n1()
+ * @testcase        utc_mediavision_mv_inference_prepare_n
+ * @since_tizen     5.5
+ * @description     Prepare inference handle,
+ *                  but fail because input handle is NULL
+ */
+int utc_mediavision_mv_inference_prepare_n1(void)
+{
+    printf("Inside mv_inference_prepare_n1\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    int ret = mv_inference_prepare(NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_prepare_n1\n");
+    return 0;
+}
+
+static void _classified_cb (mv_source_h source,
+                        int number_of_classes,
+                        const int *indices,
+                        const char **names,
+                        const float *confidences,
+                        void *user_data)
+{
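+    // The classification results are not validated; the test only checks that the callback fired.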
+    gIsImageClassifyCallBackInvoked = true;
+}
+/**
+ * @brief Positive test case of mv_inference_image_classify()
+ * @testcase        utc_mediavision_mv_inference_image_classify_p
+ * @since_tizen     5.5
+ * @description     Classify an image
+ */
+int utc_mediavision_mv_inference_image_classify_p(void)
+{
+    printf("Inside mv_inference_image_classify_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "banana.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_image_classify(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _classified_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(true, gIsImageClassifyCallBackInvoked);
+
+    printf("Before return mv_inference_image_classify_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_image_classify()
+ * @testcase        utc_mediavision_mv_inference_image_classify_n1
+ * @since_tizen     5.5
+ * @description     Classify an image,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_inference_image_classify_n1(void)
+{
+    printf("Inside mv_inference_image_classify_n1\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "banana.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    // source is NULL
+    ret = mv_inference_image_classify(NULL,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _classified_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // inference handle is NULL
+    ret = mv_inference_image_classify(gSourceHandle,
+                                    NULL,
+                                    NULL,
+                                    _classified_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // callback is NULL
+    ret = mv_inference_image_classify(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    NULL,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_image_classify_n1\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_image_classify()
+ * @testcase        utc_mediavision_mv_inference_image_classify_n2
+ * @since_tizen     5.5
+ * @description     Classify an image,
+ *                  but fail because mv_inference_prepare() isn't called before
+ */
+int utc_mediavision_mv_inference_image_classify_n2(void)
+{
+    printf("Inside mv_inference_image_classify_n2\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "banana.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    // skip the mv_inference_prepare()
+    ret = mv_inference_image_classify(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _classified_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
+
+    printf("Before return mv_inference_image_classify_n2\n");
+    return 0;
+}
+
+static void _od_detected_cb (mv_source_h source,
+                        int number_of_object,
+                        const int *indices,
+                        const char **names,
+                        const float *confidences,
+                        const mv_rectangle_s *locations,
+                        void *user_data)
+{
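+    // The result arrays (indices, names, confidences, locations) describe each of
+    // the number_of_object detections; this test only records that the callback fired.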
+    gIsObjectDetectCallBackInvoked = true;
+}
+/**
+ * @brief Positive test case of mv_inference_object_detect()
+ * @testcase        utc_mediavision_mv_inference_object_detect_p
+ * @since_tizen     5.5
+ * @description     Detect objects in an image
+ */
+int utc_mediavision_mv_inference_object_detect_p(void)
+{
+    printf("Inside mv_inference_object_detect_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_object_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "dog2.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_object_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    _od_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(true, gIsObjectDetectCallBackInvoked);
+
+    printf("Before return mv_inference_object_detect_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_object_detect()
+ * @testcase        utc_mediavision_mv_inference_object_detect_n1
+ * @since_tizen     5.5
+ * @description     Detect objects in an image,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_inference_object_detect_n1(void)
+{
+    printf("Inside mv_inference_object_detect_n1\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_object_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "dog2.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    // source is NULL
+    ret = mv_inference_object_detect(NULL,
+                                    gInferenceHandle,
+                                    _od_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // inference handle is NULL
+    ret = mv_inference_object_detect(gSourceHandle,
+                                    NULL,
+                                    _od_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // callback is NULL
+    ret = mv_inference_object_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_image_classify_n1\n");
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_inference_object_detect()
+ * @testcase        utc_mediavision_mv_inference_object_detect_n2
+ * @since_tizen     5.5
+ * @description     Detect objects in an image,
+ *                  but fail because mv_inference_prepare() isn't called before
+ */
+int utc_mediavision_mv_inference_object_detect_n2(void)
+{
+    printf("Inside mv_inference_object_detect_n2\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_object_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "dog2.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_object_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    _od_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
+
+    printf("Before return mv_inference_object_detect_n2\n");
+    return 0;
+}
+
+static void _fd_detected_cb (mv_source_h source,
+                        int number_of_faces,
+                        const float *confidences,
+                        const mv_rectangle_s *locations,
+                        void *user_data)
+{
+    gIsFaceDetectCallBackInvoked = true;
+}
+/**
+ * @brief Positive test case of mv_inference_face_detect()
+ * @testcase        utc_mediavision_mv_inference_face_detect_p
+ * @since_tizen     5.5
+ * @description     Detect faces in an image
+ */
+int utc_mediavision_mv_inference_face_detect_p(void)
+{
+    printf("Inside mv_inference_face_detect_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_face_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceDetection.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_face_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    _fd_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(true, gIsFaceDetectCallBackInvoked);
+
+    printf("Before return mv_inference_face_detect_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_face_detect()
+ * @testcase        utc_mediavision_mv_inference_face_detect_n1
+ * @since_tizen     5.5
+ * @description     Detect faces in an image,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_inference_face_detect_n1(void)
+{
+    printf("Inside mv_inference_face_detect_n1\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_face_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceDetection.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    // source is NULL
+    ret = mv_inference_face_detect(NULL,
+                                    gInferenceHandle,
+                                    _fd_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // inference handle is NULL
+    ret = mv_inference_face_detect(gSourceHandle,
+                                    NULL,
+                                    _fd_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // callback is NULL
+    ret = mv_inference_face_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_object_detect_n1\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_face_detect()
+ * @testcase        utc_mediavision_mv_inference_face_detect_n2
+ * @since_tizen     5.5
+ * @description     Detect faces in an image,
+ *                  but fail because mv_inference_prepare() isn't called before
+ */
+int utc_mediavision_mv_inference_face_detect_n2(void)
+{
+    printf("Inside mv_inference_face_detect_n2\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_face_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceDetection.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_face_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    _fd_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
+
+    printf("Before return mv_inference_face_detect_n2\n");
+    return 0;
+}
+
+static void _fld_detected_cb (mv_source_h source,
+                        int number_of_landmark,
+                        const mv_rectangle_s *locations,
+                        void *user_data)
+{
+    gIsFacialLandmarkDetectCallBackInvoked = true;
+}
+/**
+ * @brief Positive test case of mv_inference_facial_landmark_detect()
+ * @testcase        utc_mediavision_mv_inference_facial_landmark_detect_p
+ * @since_tizen     5.5
+ * @description     Detect landmark on a face which is detected
+ */
+int utc_mediavision_mv_inference_facial_landmark_detect_p(void)
+{
+    printf("Inside mv_inference_facial_landmark_detect_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_facial_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_facial_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _fld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(true, gIsFacialLandmarkDetectCallBackInvoked);
+
+    printf("Before return mv_inference_facial_landmark_detect_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_facial_landmark_detect()
+ * @testcase        utc_mediavision_mv_inference_facial_landmark_detect_n1
+ * @since_tizen     5.5
+ * @description     Detect landmark on a face which is detected,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_inference_facial_landmark_detect_n1(void)
+{
+    printf("Inside mv_inference_facial_landmark_detect_n1\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_facial_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    // source is NULL
+    ret = mv_inference_facial_landmark_detect(NULL,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _fld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // inference handle is NULL
+    ret = mv_inference_facial_landmark_detect(gSourceHandle,
+                                    NULL,
+                                    NULL,
+                                    _fld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // callback is NULL
+    ret = mv_inference_facial_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    NULL,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_facial_landmark_detect_n1\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_facial_landmark_detect()
+ * @testcase        utc_mediavision_mv_inference_facial_landmark_detect_n2
+ * @since_tizen     5.5
+ * @description     Detect landmark on a face which is detected,
+ *                  but fail because mv_inference_prepare() isn't called before
+ */
+int utc_mediavision_mv_inference_facial_landmark_detect_n2(void)
+{
+    printf("Inside mv_inference_facial_landmark_detect_n2\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[1024];
+
+    assert_eq(set_facial_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_facial_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _fld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
+
+    printf("Before return mv_inference_facial_landmark_detect_n2\n");
+    return 0;
+}
+
+static void _pld_detected_cb(mv_source_h source,
+                        mv_inference_pose_result_h locations,
+                        void *user_data)
+{
+    gIsPoseLandmarkDetectCallBackInvoked = true;
+}
+
+/**
+ * @brief Positive test case of mv_inference_pose_landmark_detect()
+ * @testcase        utc_mediavision_mv_inference_pose_landmark_detect_p
+ * @since_tizen     6.0
+ * @description     Detect pose landmark on a human body
+ */
+int utc_mediavision_mv_inference_pose_landmark_detect_p(void)
+{
+    printf("Inside mv_inference_pose_landmark_detect_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[FILE_PATH_SIZE];
+    assert_eq(set_pose_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _pld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
+
+    printf("Before return mv_inference_pose_landmark_detect_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_pose_landmark_detect()
+ * @testcase        utc_mediavision_mv_inference_pose_landmark_detect_n1
+ * @since_tizen     6.0
+ * @description     Detect pose landmark on a human body,
+ *                  but fail because an input parameter is NULL
+ */
+int utc_mediavision_mv_inference_pose_landmark_detect_n1(void)
+{
+    printf("Inside mv_inference_pose_landmark_detect_n1\n");
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[FILE_PATH_SIZE];
+    assert_eq(set_pose_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    // source is NULL
+    ret = mv_inference_pose_landmark_detect(NULL,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _pld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // inference handle is NULL
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    NULL,
+                                    NULL,
+                                    _pld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // callback is NULL
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    NULL,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_pose_landmark_detect_n1\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_pose_landmark_detect()
+ * @testcase        utc_mediavision_mv_inference_pose_landmark_detect_n2
+ * @since_tizen     6.0
+ * @description     Detect pose landmark on a human body,
+ *                  but fail because mv_inference_prepare() isn't called
+ */
+int utc_mediavision_mv_inference_pose_landmark_detect_n2(void)
+{
+    printf("Inside mv_inference_pose_landmark_detect_n2\n");
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    char imageFilename[FILE_PATH_SIZE];
+    assert_eq(set_pose_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
+
+    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    _pld_detected_cb,
+                                    NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
+
+    printf("Before return mv_inference_pose_landmark_detect_n2\n");
+    return 0;
+}
+
+static void get_pose_landmark_detection_result_cb1(mv_source_h source,
+                                    mv_inference_pose_result_h result,
+                                    void *user_data)
+{
+    printf("Inside get_pose_landmark_detection_result_cb1\n");
+    gIsPoseLandmarkDetectCallBackInvoked = true;
+
+    gPldResultErr = mv_inference_pose_get_number_of_poses(result, &gPldResultNumberOfPoses);
+    printf("Before retrun get_pose_landmark_detection_result_cb1\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup
+ * @description        Inference module UTC startup code for mv_inference_pose_get_number_of_poses()
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void)
+{
+    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb1_startup\n");
+    utc_capi_media_vision_inference_startup2();
+
+    if (gStartupError != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    char imageFilename[FILE_PATH_SIZE];
+    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("set_pose_landmark_detection_engine_config is failed\n");
+        return;
+    }
+
+    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_configure is failed\n");
+        return;
+    }
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_prepare is failed\n");
+        return;
+    }
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("load_image_to_media_source is failed\n");
+        return;
+    }
+
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    get_pose_landmark_detection_result_cb1,
+                                    NULL);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_pose_landmark_detect is failed\n");
+        return;
+    }
+
+    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb1_startup\n");
+}
+
+/**
+ * @brief Positive test case of mv_inference_pose_get_number_of_poses()
+ * @testcase        utc_mediavision_mv_inference_get_number_of_poses_p
+ * @since_tizen     6.0
+ * @description     Get the number of poses from a detected result
+ */
+int utc_mediavision_mv_inference_get_number_of_poses_p(void)
+{
+    printf("Inside mv_inference_get_number_of_poses_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
+    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
+    assert_gt(gPldResultNumberOfPoses, 0);
+
+    printf("Before return mv_inference_get_number_of_poses_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_pose_get_number_of_poses()
+ * @testcase        utc_mediavision_mv_inference_get_number_of_poses_n
+ * @since_tizen     6.0
+ * @description     Get the number of poses from a detected result,
+ *                  but fail because handle is NULL
+ */
+int utc_mediavision_mv_inference_get_number_of_poses_n(void)
+{
+    printf("Inside mv_inference_get_number_of_poses_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    // handle is NULL
+    int ret = mv_inference_pose_get_number_of_poses(NULL, &gPldResultNumberOfPoses);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_get_number_of_poses_n\n");
+    return 0;
+}
+
+static void get_pose_landmark_detection_result_cb2(mv_source_h source,
+                                    mv_inference_pose_result_h result,
+                                    void *user_data)
+{
+    printf("Inside get_pose_landmark_detection_result_cb2\n");
+    gIsPoseLandmarkDetectCallBackInvoked = true;
+
+    gPldResultErr = mv_inference_pose_get_number_of_landmarks(result, &gPldResultNumberOfLandmarks);
+    printf("Before retrun get_pose_landmark_detection_result_cb2\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup
+ * @description        Inference module UTC startup code for mv_inference_pose_get_number_of_landmarks()
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void)
+{
+    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb2_startup\n");
+    utc_capi_media_vision_inference_startup2();
+
+    if (gStartupError != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    char imageFilename[FILE_PATH_SIZE];
+    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("set_pose_landmark_detection_engine_config is failed\n");
+        return;
+    }
+
+    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_configure is failed\n");
+        return;
+    }
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_prepare is failed\n");
+        return;
+    }
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("load_image_to_media_source is failed\n");
+        return;
+    }
+
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    get_pose_landmark_detection_result_cb2,
+                                    NULL);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_pose_landmark_detect is failed\n");
+        return;
+    }
+
+    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb2_startup\n");
+}
+
+/**
+ * @brief Positive test case of mv_inference_pose_get_number_of_landmarks()
+ * @testcase        utc_mediavision_mv_inference_get_number_of_landmarks_p
+ * @since_tizen     6.0
+ * @description     Get the number of landmarks from a detected result
+ */
+int utc_mediavision_mv_inference_get_number_of_landmarks_p(void)
+{
+    printf("Inside mv_inference_get_number_of_landmarks_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
+    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
+    assert_gt(gPldResultNumberOfLandmarks, 0);
+
+    printf("Before return mv_inference_get_number_of_landmarks_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_pose_get_number_of_landmarks()
+ * @testcase        utc_mediavision_mv_inference_get_number_of_landmarks_n
+ * @since_tizen     6.0
+ * @description     Get the number of landmarks from a detected result,
+ *                  but fail because handle is NULL
+ */
+int utc_mediavision_mv_inference_get_number_of_landmarks_n(void)
+{
+    printf("Inside mv_inference_get_number_of_landmarks_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    // handle is NULL
+    int ret = mv_inference_pose_get_number_of_landmarks(NULL, &gPldResultNumberOfLandmarks);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_get_number_of_landmarks_n\n");
+    return 0;
+}
+
+static void get_pose_landmark_detection_result_cb3(mv_source_h source,
+                                    mv_inference_pose_result_h result,
+                                    void *user_data)
+{
+    printf("Inside get_pose_landmark_detection_result_cb3\n");
+    gIsPoseLandmarkDetectCallBackInvoked = true;
+
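+    // Cache every pose's landmark coordinates and scores so that
+    // utc_mediavision_mv_inference_get_landmark_p() can validate them afterwards.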
+    // get the number of poses
+    gPldResultErr = mv_inference_pose_get_number_of_poses(result, &gPldResultNumberOfPoses);
+    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    // get the number of landmarks
+    gPldResultErr = mv_inference_pose_get_number_of_landmarks(result, &gPldResultNumberOfLandmarks);
+    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    // allocate memory with the number of poses
+    gPldResultLandmarks = (mv_point_s **)malloc(gPldResultNumberOfPoses * sizeof(mv_point_s *));
+    gPldResultScore = (float **)malloc(gPldResultNumberOfPoses * sizeof(float *));
+    for (int pose = 0; pose < gPldResultNumberOfPoses; ++pose) {
+        // allocate memory with the number of landmarks
+        gPldResultLandmarks[pose] = (mv_point_s *)malloc(gPldResultNumberOfLandmarks * sizeof(mv_point_s));
+        gPldResultScore[pose] = (float *)malloc(gPldResultNumberOfLandmarks * sizeof(float));
+        for (int part = 0; part < gPldResultNumberOfLandmarks; ++part) {
+            gPldResultErr = mv_inference_pose_get_landmark(result, pose, part,
+                                                            &(gPldResultLandmarks[pose][part]),
+                                                            &(gPldResultScore[pose][part]));
+            if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
+                return;
+        }
+    }
+
+    gIsGetPoseLandmark = true;
+    printf("Before retrun get_pose_landmark_detection_result_cb3\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup
+ * @description        Inference module UTC startup code for mv_inference_pose_get_landmark()
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void)
+{
+    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb3_startup\n");
+    utc_capi_media_vision_inference_startup2();
+
+    if (gStartupError != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    char imageFilename[FILE_PATH_SIZE];
+    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("set_pose_landmark_detection_engine_config is failed\n");
+        return;
+    }
+
+    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_configure is failed\n");
+        return;
+    }
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_prepare is failed\n");
+        return;
+    }
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("load_image_to_media_source is failed\n");
+        return;
+    }
+
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    get_pose_landmark_detection_result_cb3,
+                                    NULL);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_pose_landmark_detect is failed\n");
+        return;
+    }
+
+    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb3_startup\n");
+}
+
+/**
+ * @brief Positive test case of mv_inference_pose_get_landmark()
+ * @testcase        utc_mediavision_mv_inference_get_landmark_p
+ * @since_tizen     6.0
+ * @description     Get landmarks from a detected result
+ */
+int utc_mediavision_mv_inference_get_landmark_p(void)
+{
+    printf("Inside mv_inference_get_landmark_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
+    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
+    assert_gt(gPldResultNumberOfPoses, 0);
+    assert_gt(gPldResultNumberOfLandmarks, 0);
+    assert_eq(true, gIsGetPoseLandmark);
+
+    printf("Before return mv_inference_get_landmark_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_pose_get_landmark()
+ * @testcase        utc_mediavision_mv_inference_get_landmark_n
+ * @since_tizen     6.0
+ * @description     Get landmarks from a detected result,
+ *                  but fail because handle is NULL
+ */
+int utc_mediavision_mv_inference_get_landmark_n(void)
+{
+    printf("Inside mv_inference_get_landmark_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    // handle is NULL
+    mv_point_s landmark;
+    float score;
+    int ret = mv_inference_pose_get_landmark(NULL, 0, 0, &landmark, &score);
+
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_get_landmark_n\n");
+    return 0;
+}
+
+static void get_pose_landmark_detection_result_cb5(mv_source_h source,
+                                    mv_inference_pose_result_h result,
+                                    void *user_data)
+{
+    printf("Inside get_pose_landmark_detection_result_cb5\n");
+    gIsPoseLandmarkDetectCallBackInvoked = true;
+
+    gPldResultErr = mv_inference_pose_get_number_of_poses(result, &gPldResultNumberOfPoses);
+    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    if (gPldResultNumberOfPoses <= 0)
+        return;
+
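+    // Query the label of the first detected pose (index 0) for the get_label tests.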
+    gPldResultErr = mv_inference_pose_get_label(result, 0, &gPldResultLabel);
+    printf("Before retrun get_pose_landmark_detection_result_cb5\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup
+ * @description        Inference module UTC startup code for mv_inference_pose_get_label()
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void)
+{
+    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb5_startup\n");
+    utc_capi_media_vision_inference_startup2();
+
+    if (gStartupError != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    char imageFilename[FILE_PATH_SIZE];
+    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("set_pose_landmark_detection_engine_config is failed\n");
+        return;
+    }
+
+    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_configure is failed\n");
+        return;
+    }
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_prepare is failed\n");
+        return;
+    }
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("load_image_to_media_source is failed\n");
+        return;
+    }
+
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    get_pose_landmark_detection_result_cb5,
+                                    NULL);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_pose_landmark_detect is failed\n");
+        return;
+    }
+
+    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb5_startup\n");
+}
+
+/**
+ * @brief Positive test case of mv_inference_pose_get_label()
+ * @testcase        utc_mediavision_mv_inference_get_label_p
+ * @since_tizen     6.0
+ * @description     Get the label from a detected result
+ */
+int utc_mediavision_mv_inference_get_label_p(void)
+{
+    printf("Inside mv_inference_pose_get_label_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
+    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
+    assert_gt(gPldResultNumberOfPoses, 0);
+    assert_leq(gPldResultLabel, 0);
+
+    printf("Before return mv_inference_pose_get_label_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_inference_pose_get_label()
+ * @testcase        utc_mediavision_mv_inference_get_label_n
+ * @since_tizen     6.0
+ * @description     Get the label from a detected result,
+ *                  but fail because handle is NULL
+ */
+int utc_mediavision_mv_inference_get_label_n(void)
+{
+    printf("Inside mv_inference_pose_get_label_n\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    // handle is NULL
+    int ret = mv_inference_pose_get_label(NULL, 0, &gPldResultLabel);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_inference_pose_get_label_nn");
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_pose_create()
+ * @testcase        utc_mediavision_mv_pose_create_p
+ * @since_tizen     6.0
+ * @description     Create pose handle
+ */
+int utc_mediavision_mv_pose_create_p(void)
+{
+    printf("Inside mv_pose_create_p\n");
+
+    mv_pose_h poseHandle = NULL;
+    int ret = mv_pose_create(&poseHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_pose_destroy(poseHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_pose_create_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_pose_create()
+ * @testcase        utc_mediavision_mv_pose_create_n
+ * @since_tizen     6.0
+ * @description     Create pose handle,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_pose_create_n(void)
+{
+    printf("Inside mv_pose_create_n\n");
+
+    int ret = mv_pose_create(NULL);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_pose_create_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_pose_destroy()
+ * @testcase        utc_mediavision_mv_pose_destroy_p
+ * @since_tizen     6.0
+ * @description     Destroy pose handle
+ */
+int utc_mediavision_mv_pose_destroy_p(void)
+{
+    printf("Inside mv_pose_destroy_p\n");
+
+    mv_pose_h poseHandle = NULL;
+    int ret = mv_pose_create(&poseHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_pose_destroy(poseHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_pose_destroy_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_pose_destroy()
+ * @testcase        utc_mediavision_mv_pose_destroy_n
+ * @since_tizen     6.0
+ * @description     Destroy pose handle,
+ *                  but fail because input parameter is NULL
+ */
+int utc_mediavision_mv_pose_destroy_n(void)
+{
+    printf("Inside mv_pose_destroy_n\n");
+
+    int ret = mv_pose_destroy(NULL);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_pose_destroy_n\n");
+
+    return 0;
+}
+
+/**
+ * @brief Positive test case of mv_pose_set_from_file()
+ * @testcase        utc_mediavision_mv_pose_set_from_file_p
+ * @since_tizen     6.0
+ * @description     Set pose mocap file and its mapping file
+ */
+int utc_mediavision_mv_pose_set_from_file_p(void)
+{
+    printf("Inside mv_pose_set_from_file_p\n");
+
+    mv_pose_h poseHandle = NULL;
+    int ret = mv_pose_create(&poseHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    char poseMocapFilename[FILE_PATH_SIZE];
+    char poseMocapMappingFilename[FILE_PATH_SIZE];
+    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
+    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
+
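+    // pld_mocap.bvh is the reference motion-capture file and pld_mocap_mapping.txt
+    // its motion mapping file (the same terms used in the parameter checks of _n1).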
+    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, poseMocapMappingFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    ret = mv_pose_destroy(poseHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_pose_set_from_file_p\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_pose_set_from_file()
+ * @testcase        utc_mediavision_mv_pose_set_from_file_n1
+ * @since_tizen     6.0
+ * @description     Set pose mocap file and its mapping file,
+ *                  but fail because file paths are NULL
+ */
+int utc_mediavision_mv_pose_set_from_file_n1(void)
+{
+    printf("Inside mv_pose_set_from_file_n1\n");
+
+    mv_pose_h poseHandle = NULL;
+    int ret = mv_pose_create(&poseHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    char poseMocapFilename[FILE_PATH_SIZE];
+    char poseMocapMappingFilename[FILE_PATH_SIZE];
+    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
+    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
+
+    // pose handle is NULL
+    ret = mv_pose_set_from_file(NULL, poseMocapFilename, poseMocapMappingFilename);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // motion capture file path is NULL
+    ret = mv_pose_set_from_file(poseHandle, NULL, poseMocapMappingFilename);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    // motion mapping file path is NULL
+    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, NULL);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    ret = mv_pose_destroy(poseHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_pose_set_from_file_n1\n");
+
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_pose_set_from_file()
+ * @testcase        utc_mediavision_mv_pose_set_from_file_n2
+ * @since_tizen     6.0
+ * @description     Set pose mocap file and its mapping file,
+ *                  but fail because file paths are fake (invalid)
+ */
+int utc_mediavision_mv_pose_set_from_file_n2(void)
+{
+    printf("Inside mv_pose_set_from_file_n2\n");
+
+    mv_pose_h poseHandle = NULL;
+    int ret = mv_pose_create(&poseHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    char poseMocapFilename[FILE_PATH_SIZE];
+    char poseMocapMappingFilename[FILE_PATH_SIZE];
+    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
+    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
+
+    // fakefile doesn't exist
+    char fakeFilename[FILE_PATH_SIZE];
+    snprintf(fakeFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "fakefile");
+
+    // motion capture file path is invalid
+    ret = mv_pose_set_from_file(poseHandle, fakeFilename, poseMocapMappingFilename);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PATH, ret);
+
+    // motion mapping file path is invalid
+    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, fakeFilename);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PATH, ret);
+
+    ret = mv_pose_destroy(poseHandle);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    printf("Before return mv_pose_set_from_file_n2\n");
+
+    return 0;
+}
+
+static void get_pose_landmark_detection_result_cb4(mv_source_h source,
+                                    mv_inference_pose_result_h result,
+                                    void *user_data)
+{
+    printf("Inside get_pose_landmark_detection_result_cb4\n");
+    gIsPoseLandmarkDetectCallBackInvoked = true;
+
+    mv_pose_h *pose = (mv_pose_h *)user_data;
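+    // part is a bitmask of MV_INFERENCE_HUMAN_BODY_PART_* flags selecting which
+    // body parts mv_pose_compare() scores against the reference pose.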
+    int part = MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT | MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT;
+    gPldResultErr = mv_pose_compare(*pose, result, part, &gPoseCompareScore);
+    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    printf("Before retrun get_pose_landmark_detection_result_cb4\n");
+}
+
+/**
+ * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup
+ * @description        Inference module UTC startup code for mv_pose_compare()
+ * @parameter  NA
+ * @return             NA
+ */
+void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void)
+{
+    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb4_startup\n");
+    utc_capi_media_vision_inference_startup2();
+
+    if (gStartupError != MEDIA_VISION_ERROR_NONE)
+        return;
+
+    char imageFilename[FILE_PATH_SIZE];
+    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("set_pose_landmark_detection_engine_config is failed\n");
+        return;
+    }
+
+    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_configure is failed\n");
+        return;
+    }
+
+    ret = mv_inference_prepare(gInferenceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_prepare is failed\n");
+        return;
+    }
+
+    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
+    ret = load_image_to_media_source(imageFilename, gSourceHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("load_image_to_media_source is failed\n");
+        return;
+    }
+
+    ret = mv_pose_create(&gPoseHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_pose_create is failed\n");
+        return;
+    }
+
+    char poseMocapFilename[FILE_PATH_SIZE];
+    char poseMocapMappingFilename[FILE_PATH_SIZE];
+    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
+    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
+
+    ret = mv_pose_set_from_file(gPoseHandle, poseMocapFilename, poseMocapMappingFilename);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_pose_set_from_file is failed\n");
+        return;
+    }
+
+    ret = mv_inference_pose_landmark_detect(gSourceHandle,
+                                    gInferenceHandle,
+                                    NULL,
+                                    get_pose_landmark_detection_result_cb4,
+                                    &gPoseHandle);
+    if (ret != MEDIA_VISION_ERROR_NONE) {
+        printf("mv_inference_pose_landmark_detect is failed\n");
+        return;
+    }
+
+    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb4_startup\n");
+}
+
+/**
+ * @brief Positive test case of mv_pose_compare()
+ * @testcase        utc_mediavision_mv_pose_compare_p
+ * @since_tizen     6.0
+ * @description     Compare a result which is detected by mv_inference_pose_landmark_detect()
+ *                  with a pose which is set by mv_pose_set_from_file()
+ */
+int utc_mediavision_mv_pose_compare_p(void)
+{
+    printf("Inside mv_inference_get_landmark_p\n");
+
+    if (!isVisionSupported) {
+        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+
+    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
+    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
+    assert_gt(gPoseCompareScore, 0.5);
+
+    printf("Before return mv_inference_get_landmark_p\n");
+    return 0;
+}
+
+/**
+ * @brief Negative test case of mv_pose_compare()
+ * @testcase        utc_mediavision_mv_pose_compare_n
+ * @since_tizen     6.0
+ * @description     Compare a result which is detected by mv_inference_pose_landmark_detect()
+ *                  with a pose which is set by mv_pose_set_from_file(),
+ *                  but fail because handle is NULL
+ */
+int utc_mediavision_mv_pose_compare_n(void)
+{
+    printf("Inside mv_pose_compare_n\n");
+
+    mv_pose_h poseHandle = NULL;
+    int ret = mv_pose_create(&poseHandle);
+    if (!isVisionSupported) {
+        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
+        return 0;
+    }
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    char poseMocapFilename[FILE_PATH_SIZE];
+    char poseMocapMappingFilename[FILE_PATH_SIZE];
+    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
+    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
+
+    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, poseMocapMappingFilename);
+    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
+
+    int part = MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT | MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT;
+    // handle is NULL
+    ret = mv_pose_compare(poseHandle, NULL, part, &gPoseCompareScore);
+    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
+
+    printf("Before return mv_pose_compare_n\n");
+    return 0;
+}
index 013fa7f321657f7116c98849879c566fd2855122..a384d71d225534b2008b8f599869c790b7fbda56 100755 (executable)
@@ -10,14 +10,12 @@ SET(TC_SOURCES
     utc-mv_face.c
     utc-mv_image.c
     utc-mv_surveillance.c
-    utc-mv_inference.c
        utc-mv_3d.c
 )
 
 IF( DEFINED TIZENIOT || DEFINED MOBILE || DEFINED WEARABLE)
 SET(TC_SOURCES
     utc-mv_roi_tracker.c
-    utc-mv_face_recognition.c
 )
 ENDIF()
 
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/00.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/00.jpg
deleted file mode 100755 (executable)
index 3fa77e1..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/00.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/01.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/01.jpg
deleted file mode 100755 (executable)
index f2b27ba..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/01.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/02.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/02.jpg
deleted file mode 100755 (executable)
index 27bf5f0..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/02.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/03.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/03.jpg
deleted file mode 100755 (executable)
index 3a317ad..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/03.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/04.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/04.jpg
deleted file mode 100755 (executable)
index 5b3f8c6..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/04.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/05.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/05.jpg
deleted file mode 100755 (executable)
index bedc5f1..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/05.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/06.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/06.jpg
deleted file mode 100755 (executable)
index 180173a..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/06.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/07.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/07.jpg
deleted file mode 100755 (executable)
index 70a0f9d..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/07.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/08.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/08.jpg
deleted file mode 100755 (executable)
index c6556a4..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/08.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P1/09.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P1/09.jpg
deleted file mode 100755 (executable)
index 4b510db..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P1/09.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/00.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/00.jpg
deleted file mode 100755 (executable)
index 54be030..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/00.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/01.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/01.jpg
deleted file mode 100755 (executable)
index aa86935..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/01.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/02.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/02.jpg
deleted file mode 100755 (executable)
index 8650349..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/02.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/03.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/03.jpg
deleted file mode 100755 (executable)
index 1f8f469..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/03.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/04.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/04.jpg
deleted file mode 100755 (executable)
index 5a68372..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/04.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/05.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/05.jpg
deleted file mode 100755 (executable)
index 8e1300f..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/05.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/06.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/06.jpg
deleted file mode 100755 (executable)
index b507918..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/06.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/07.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/07.jpg
deleted file mode 100755 (executable)
index 78d1e06..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/07.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/08.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/08.jpg
deleted file mode 100755 (executable)
index 1989d27..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/08.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/images/P2/09.jpg b/src/utc/capi-media-vision/res/face_recognition/images/P2/09.jpg
deleted file mode 100755 (executable)
index 5d8288c..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/images/P2/09.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/face_recognition/models/facenet.tflite b/src/utc/capi-media-vision/res/face_recognition/models/facenet.tflite
deleted file mode 100644 (file)
index 4c19477..0000000
Binary files a/src/utc/capi-media-vision/res/face_recognition/models/facenet.tflite and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/images/banana.jpg b/src/utc/capi-media-vision/res/inference/images/banana.jpg
deleted file mode 100644 (file)
index 74a3527..0000000
Binary files a/src/utc/capi-media-vision/res/inference/images/banana.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/images/dog2.jpg b/src/utc/capi-media-vision/res/inference/images/dog2.jpg
deleted file mode 100644 (file)
index 1d9e77c..0000000
Binary files a/src/utc/capi-media-vision/res/inference/images/dog2.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/images/faceDetection.jpg b/src/utc/capi-media-vision/res/inference/images/faceDetection.jpg
deleted file mode 100644 (file)
index faa36fc..0000000
Binary files a/src/utc/capi-media-vision/res/inference/images/faceDetection.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/images/faceLandmark.jpg b/src/utc/capi-media-vision/res/inference/images/faceLandmark.jpg
deleted file mode 100644 (file)
index 375fb0b..0000000
Binary files a/src/utc/capi-media-vision/res/inference/images/faceLandmark.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/images/poseLandmark.jpg b/src/utc/capi-media-vision/res/inference/images/poseLandmark.jpg
deleted file mode 100644 (file)
index 199db8f..0000000
Binary files a/src/utc/capi-media-vision/res/inference/images/poseLandmark.jpg and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/models/fd_tflite_model1.tflite b/src/utc/capi-media-vision/res/inference/models/fd_tflite_model1.tflite
deleted file mode 100644 (file)
index 5425fcf..0000000
Binary files a/src/utc/capi-media-vision/res/inference/models/fd_tflite_model1.tflite and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/models/fd_tflite_model1_meta.json b/src/utc/capi-media-vision/res/inference/models/fd_tflite_model1_meta.json
deleted file mode 100644 (file)
index 86b7b3c..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-    "inputmetadata" :
-    {
-        "tensor_info" : [
-            {
-                "name" : "normalized_input_image_tensor",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 300, 300, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [127.5, 127.5, 127.5],
-                        "std" : [127.5, 127.5, 127.5]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
-    {
-        "score" :
-            {
-                "name" : ["TFLite_Detection_PostProcess:2"],
-                "index" : [-1, 1],
-                "top_number" : 5,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            },
-        "box" :
-            {
-               "name" : ["TFLite_Detection_PostProcess"],
-               "index" : [-1, -1, 1],
-               "box_type" : "ORIGIN_LEFTTOP",
-               "box_order" : [1, 0, 3, 2],
-               "box_coordinate" : "RATIO",
-               "decoding_type": "BYPASS"
-            },
-        "label" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:1",
-                "index" : [-1, 1]
-            }
-        ],
-        "number" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:3",
-                "index" : [1]
-            }
-        ]
-    }
-}
diff --git a/src/utc/capi-media-vision/res/inference/models/fld_tflite_model1.tflite b/src/utc/capi-media-vision/res/inference/models/fld_tflite_model1.tflite
deleted file mode 100644 (file)
index 2449c5e..0000000
Binary files a/src/utc/capi-media-vision/res/inference/models/fld_tflite_model1.tflite and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/models/fld_tflite_model1_meta.json b/src/utc/capi-media-vision/res/inference/models/fld_tflite_model1_meta.json
deleted file mode 100644 (file)
index f0b4c90..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-{
-    "inputmetadata" :
-    {
-        "tensor_info" : [
-            {
-                "name" : "Placeholder",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 128, 128, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [0.0, 0.0, 0.0],
-                        "std" : [1.0, 1.0, 1.0]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
-    {
-        "score" :
-            {
-                "name" : ["fanet8ss_inference/fully_connected_1/Sigmoid"],
-                "index" : [-1, -1, -1, -1],
-                "top_number" : 1,
-                "threshold" : 0.0,
-                "score_type" : "NORMAL"
-            }
-        ,
-        "landmark" : [
-            {
-                "name" : "fanet8ss_inference/fully_connected_1/Sigmoid",
-                "index" : [-1, 1],
-                "landmark_type" : "2D_SINGLE",
-                "landmark_coordinate" : "RATIO",
-                "landmark_offset" : 2,
-                "decoding_type" : "BYPASS"
-            }
-        ]
-    }
-}
diff --git a/src/utc/capi-media-vision/res/inference/models/ic_label.txt b/src/utc/capi-media-vision/res/inference/models/ic_label.txt
deleted file mode 100644 (file)
index fe81123..0000000
+++ /dev/null
@@ -1,1001 +0,0 @@
-background
-tench
-goldfish
-great white shark
-tiger shark
-hammerhead
-electric ray
-stingray
-cock
-hen
-ostrich
-brambling
-goldfinch
-house finch
-junco
-indigo bunting
-robin
-bulbul
-jay
-magpie
-chickadee
-water ouzel
-kite
-bald eagle
-vulture
-great grey owl
-European fire salamander
-common newt
-eft
-spotted salamander
-axolotl
-bullfrog
-tree frog
-tailed frog
-loggerhead
-leatherback turtle
-mud turtle
-terrapin
-box turtle
-banded gecko
-common iguana
-American chameleon
-whiptail
-agama
-frilled lizard
-alligator lizard
-Gila monster
-green lizard
-African chameleon
-Komodo dragon
-African crocodile
-American alligator
-triceratops
-thunder snake
-ringneck snake
-hognose snake
-green snake
-king snake
-garter snake
-water snake
-vine snake
-night snake
-boa constrictor
-rock python
-Indian cobra
-green mamba
-sea snake
-horned viper
-diamondback
-sidewinder
-trilobite
-harvestman
-scorpion
-black and gold garden spider
-barn spider
-garden spider
-black widow
-tarantula
-wolf spider
-tick
-centipede
-black grouse
-ptarmigan
-ruffed grouse
-prairie chicken
-peacock
-quail
-partridge
-African grey
-macaw
-sulphur-crested cockatoo
-lorikeet
-coucal
-bee eater
-hornbill
-hummingbird
-jacamar
-toucan
-drake
-red-breasted merganser
-goose
-black swan
-tusker
-echidna
-platypus
-wallaby
-koala
-wombat
-jellyfish
-sea anemone
-brain coral
-flatworm
-nematode
-conch
-snail
-slug
-sea slug
-chiton
-chambered nautilus
-Dungeness crab
-rock crab
-fiddler crab
-king crab
-American lobster
-spiny lobster
-crayfish
-hermit crab
-isopod
-white stork
-black stork
-spoonbill
-flamingo
-little blue heron
-American egret
-bittern
-crane
-limpkin
-European gallinule
-American coot
-bustard
-ruddy turnstone
-red-backed sandpiper
-redshank
-dowitcher
-oystercatcher
-pelican
-king penguin
-albatross
-grey whale
-killer whale
-dugong
-sea lion
-Chihuahua
-Japanese spaniel
-Maltese dog
-Pekinese
-Shih-Tzu
-Blenheim spaniel
-papillon
-toy terrier
-Rhodesian ridgeback
-Afghan hound
-basset
-beagle
-bloodhound
-bluetick
-black-and-tan coonhound
-Walker hound
-English foxhound
-redbone
-borzoi
-Irish wolfhound
-Italian greyhound
-whippet
-Ibizan hound
-Norwegian elkhound
-otterhound
-Saluki
-Scottish deerhound
-Weimaraner
-Staffordshire bullterrier
-American Staffordshire terrier
-Bedlington terrier
-Border terrier
-Kerry blue terrier
-Irish terrier
-Norfolk terrier
-Norwich terrier
-Yorkshire terrier
-wire-haired fox terrier
-Lakeland terrier
-Sealyham terrier
-Airedale
-cairn
-Australian terrier
-Dandie Dinmont
-Boston bull
-miniature schnauzer
-giant schnauzer
-standard schnauzer
-Scotch terrier
-Tibetan terrier
-silky terrier
-soft-coated wheaten terrier
-West Highland white terrier
-Lhasa
-flat-coated retriever
-curly-coated retriever
-golden retriever
-Labrador retriever
-Chesapeake Bay retriever
-German short-haired pointer
-vizsla
-English setter
-Irish setter
-Gordon setter
-Brittany spaniel
-clumber
-English springer
-Welsh springer spaniel
-cocker spaniel
-Sussex spaniel
-Irish water spaniel
-kuvasz
-schipperke
-groenendael
-malinois
-briard
-kelpie
-komondor
-Old English sheepdog
-Shetland sheepdog
-collie
-Border collie
-Bouvier des Flandres
-Rottweiler
-German shepherd
-Doberman
-miniature pinscher
-Greater Swiss Mountain dog
-Bernese mountain dog
-Appenzeller
-EntleBucher
-boxer
-bull mastiff
-Tibetan mastiff
-French bulldog
-Great Dane
-Saint Bernard
-Eskimo dog
-malamute
-Siberian husky
-dalmatian
-affenpinscher
-basenji
-pug
-Leonberg
-Newfoundland
-Great Pyrenees
-Samoyed
-Pomeranian
-chow
-keeshond
-Brabancon griffon
-Pembroke
-Cardigan
-toy poodle
-miniature poodle
-standard poodle
-Mexican hairless
-timber wolf
-white wolf
-red wolf
-coyote
-dingo
-dhole
-African hunting dog
-hyena
-red fox
-kit fox
-Arctic fox
-grey fox
-tabby
-tiger cat
-Persian cat
-Siamese cat
-Egyptian cat
-cougar
-lynx
-leopard
-snow leopard
-jaguar
-lion
-tiger
-cheetah
-brown bear
-American black bear
-ice bear
-sloth bear
-mongoose
-meerkat
-tiger beetle
-ladybug
-ground beetle
-long-horned beetle
-leaf beetle
-dung beetle
-rhinoceros beetle
-weevil
-fly
-bee
-ant
-grasshopper
-cricket
-walking stick
-cockroach
-mantis
-cicada
-leafhopper
-lacewing
-dragonfly
-damselfly
-admiral
-ringlet
-monarch
-cabbage butterfly
-sulphur butterfly
-lycaenid
-starfish
-sea urchin
-sea cucumber
-wood rabbit
-hare
-Angora
-hamster
-porcupine
-fox squirrel
-marmot
-beaver
-guinea pig
-sorrel
-zebra
-hog
-wild boar
-warthog
-hippopotamus
-ox
-water buffalo
-bison
-ram
-bighorn
-ibex
-hartebeest
-impala
-gazelle
-Arabian camel
-llama
-weasel
-mink
-polecat
-black-footed ferret
-otter
-skunk
-badger
-armadillo
-three-toed sloth
-orangutan
-gorilla
-chimpanzee
-gibbon
-siamang
-guenon
-patas
-baboon
-macaque
-langur
-colobus
-proboscis monkey
-marmoset
-capuchin
-howler monkey
-titi
-spider monkey
-squirrel monkey
-Madagascar cat
-indri
-Indian elephant
-African elephant
-lesser panda
-giant panda
-barracouta
-eel
-coho
-rock beauty
-anemone fish
-sturgeon
-gar
-lionfish
-puffer
-abacus
-abaya
-academic gown
-accordion
-acoustic guitar
-aircraft carrier
-airliner
-airship
-altar
-ambulance
-amphibian
-analog clock
-apiary
-apron
-ashcan
-assault rifle
-backpack
-bakery
-balance beam
-balloon
-ballpoint
-Band Aid
-banjo
-bannister
-barbell
-barber chair
-barbershop
-barn
-barometer
-barrel
-barrow
-baseball
-basketball
-bassinet
-bassoon
-bathing cap
-bath towel
-bathtub
-beach wagon
-beacon
-beaker
-bearskin
-beer bottle
-beer glass
-bell cote
-bib
-bicycle-built-for-two
-bikini
-binder
-binoculars
-birdhouse
-boathouse
-bobsled
-bolo tie
-bonnet
-bookcase
-bookshop
-bottlecap
-bow
-bow tie
-brass
-brassiere
-breakwater
-breastplate
-broom
-bucket
-buckle
-bulletproof vest
-bullet train
-butcher shop
-cab
-caldron
-candle
-cannon
-canoe
-can opener
-cardigan
-car mirror
-carousel
-carpenter's kit
-carton
-car wheel
-cash machine
-cassette
-cassette player
-castle
-catamaran
-CD player
-cello
-cellular telephone
-chain
-chainlink fence
-chain mail
-chain saw
-chest
-chiffonier
-chime
-china cabinet
-Christmas stocking
-church
-cinema
-cleaver
-cliff dwelling
-cloak
-clog
-cocktail shaker
-coffee mug
-coffeepot
-coil
-combination lock
-computer keyboard
-confectionery
-container ship
-convertible
-corkscrew
-cornet
-cowboy boot
-cowboy hat
-cradle
-crane
-crash helmet
-crate
-crib
-Crock Pot
-croquet ball
-crutch
-cuirass
-dam
-desk
-desktop computer
-dial telephone
-diaper
-digital clock
-digital watch
-dining table
-dishrag
-dishwasher
-disk brake
-dock
-dogsled
-dome
-doormat
-drilling platform
-drum
-drumstick
-dumbbell
-Dutch oven
-electric fan
-electric guitar
-electric locomotive
-entertainment center
-envelope
-espresso maker
-face powder
-feather boa
-file
-fireboat
-fire engine
-fire screen
-flagpole
-flute
-folding chair
-football helmet
-forklift
-fountain
-fountain pen
-four-poster
-freight car
-French horn
-frying pan
-fur coat
-garbage truck
-gasmask
-gas pump
-goblet
-go-kart
-golf ball
-golfcart
-gondola
-gong
-gown
-grand piano
-greenhouse
-grille
-grocery store
-guillotine
-hair slide
-hair spray
-half track
-hammer
-hamper
-hand blower
-hand-held computer
-handkerchief
-hard disc
-harmonica
-harp
-harvester
-hatchet
-holster
-home theater
-honeycomb
-hook
-hoopskirt
-horizontal bar
-horse cart
-hourglass
-iPod
-iron
-jack-o'-lantern
-jean
-jeep
-jersey
-jigsaw puzzle
-jinrikisha
-joystick
-kimono
-knee pad
-knot
-lab coat
-ladle
-lampshade
-laptop
-lawn mower
-lens cap
-letter opener
-library
-lifeboat
-lighter
-limousine
-liner
-lipstick
-Loafer
-lotion
-loudspeaker
-loupe
-lumbermill
-magnetic compass
-mailbag
-mailbox
-maillot
-maillot
-manhole cover
-maraca
-marimba
-mask
-matchstick
-maypole
-maze
-measuring cup
-medicine chest
-megalith
-microphone
-microwave
-military uniform
-milk can
-minibus
-miniskirt
-minivan
-missile
-mitten
-mixing bowl
-mobile home
-Model T
-modem
-monastery
-monitor
-moped
-mortar
-mortarboard
-mosque
-mosquito net
-motor scooter
-mountain bike
-mountain tent
-mouse
-mousetrap
-moving van
-muzzle
-nail
-neck brace
-necklace
-nipple
-notebook
-obelisk
-oboe
-ocarina
-odometer
-oil filter
-organ
-oscilloscope
-overskirt
-oxcart
-oxygen mask
-packet
-paddle
-paddlewheel
-padlock
-paintbrush
-pajama
-palace
-panpipe
-paper towel
-parachute
-parallel bars
-park bench
-parking meter
-passenger car
-patio
-pay-phone
-pedestal
-pencil box
-pencil sharpener
-perfume
-Petri dish
-photocopier
-pick
-pickelhaube
-picket fence
-pickup
-pier
-piggy bank
-pill bottle
-pillow
-ping-pong ball
-pinwheel
-pirate
-pitcher
-plane
-planetarium
-plastic bag
-plate rack
-plow
-plunger
-Polaroid camera
-pole
-police van
-poncho
-pool table
-pop bottle
-pot
-potter's wheel
-power drill
-prayer rug
-printer
-prison
-projectile
-projector
-puck
-punching bag
-purse
-quill
-quilt
-racer
-racket
-radiator
-radio
-radio telescope
-rain barrel
-recreational vehicle
-reel
-reflex camera
-refrigerator
-remote control
-restaurant
-revolver
-rifle
-rocking chair
-rotisserie
-rubber eraser
-rugby ball
-rule
-running shoe
-safe
-safety pin
-saltshaker
-sandal
-sarong
-sax
-scabbard
-scale
-school bus
-schooner
-scoreboard
-screen
-screw
-screwdriver
-seat belt
-sewing machine
-shield
-shoe shop
-shoji
-shopping basket
-shopping cart
-shovel
-shower cap
-shower curtain
-ski
-ski mask
-sleeping bag
-slide rule
-sliding door
-slot
-snorkel
-snowmobile
-snowplow
-soap dispenser
-soccer ball
-sock
-solar dish
-sombrero
-soup bowl
-space bar
-space heater
-space shuttle
-spatula
-speedboat
-spider web
-spindle
-sports car
-spotlight
-stage
-steam locomotive
-steel arch bridge
-steel drum
-stethoscope
-stole
-stone wall
-stopwatch
-stove
-strainer
-streetcar
-stretcher
-studio couch
-stupa
-submarine
-suit
-sundial
-sunglass
-sunglasses
-sunscreen
-suspension bridge
-swab
-sweatshirt
-swimming trunks
-swing
-switch
-syringe
-table lamp
-tank
-tape player
-teapot
-teddy
-television
-tennis ball
-thatch
-theater curtain
-thimble
-thresher
-throne
-tile roof
-toaster
-tobacco shop
-toilet seat
-torch
-totem pole
-tow truck
-toyshop
-tractor
-trailer truck
-tray
-trench coat
-tricycle
-trimaran
-tripod
-triumphal arch
-trolleybus
-trombone
-tub
-turnstile
-typewriter keyboard
-umbrella
-unicycle
-upright
-vacuum
-vase
-vault
-velvet
-vending machine
-vestment
-viaduct
-violin
-volleyball
-waffle iron
-wall clock
-wallet
-wardrobe
-warplane
-washbasin
-washer
-water bottle
-water jug
-water tower
-whiskey jug
-whistle
-wig
-window screen
-window shade
-Windsor tie
-wine bottle
-wing
-wok
-wooden spoon
-wool
-worm fence
-wreck
-yawl
-yurt
-web site
-comic book
-crossword puzzle
-street sign
-traffic light
-book jacket
-menu
-plate
-guacamole
-consomme
-hot pot
-trifle
-ice cream
-ice lolly
-French loaf
-bagel
-pretzel
-cheeseburger
-hotdog
-mashed potato
-head cabbage
-broccoli
-cauliflower
-zucchini
-spaghetti squash
-acorn squash
-butternut squash
-cucumber
-artichoke
-bell pepper
-cardoon
-mushroom
-Granny Smith
-strawberry
-orange
-lemon
-fig
-pineapple
-banana
-jackfruit
-custard apple
-pomegranate
-hay
-carbonara
-chocolate sauce
-dough
-meat loaf
-pizza
-potpie
-burrito
-red wine
-espresso
-cup
-eggnog
-alp
-bubble
-cliff
-coral reef
-geyser
-lakeside
-promontory
-sandbar
-seashore
-valley
-volcano
-ballplayer
-groom
-scuba diver
-rapeseed
-daisy
-yellow lady's slipper
-corn
-acorn
-hip
-buckeye
-coral fungus
-agaric
-gyromitra
-stinkhorn
-earthstar
-hen-of-the-woods
-bolete
-ear
-toilet tissue
diff --git a/src/utc/capi-media-vision/res/inference/models/ic_tflite_model.tflite b/src/utc/capi-media-vision/res/inference/models/ic_tflite_model.tflite
deleted file mode 100644 (file)
index db26630..0000000
Binary files a/src/utc/capi-media-vision/res/inference/models/ic_tflite_model.tflite and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/models/ic_tflite_model_meta.json b/src/utc/capi-media-vision/res/inference/models/ic_tflite_model_meta.json
deleted file mode 100644 (file)
index 5b1782d..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-    "inputmetadata" :
-    {
-        "tensor_info" : [
-            {
-                "name" : "input_2",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 224, 224, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [127.5, 127.5, 127.5],
-                        "std" : [127.5, 127.5, 127.5]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
-    {
-        "score" :
-            {
-                "name" : ["dense_3/Softmax"],
-                "index" : [-1, 1],
-                               "top_number" : 5,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            }
-    }
-}
diff --git a/src/utc/capi-media-vision/res/inference/models/od_label.txt b/src/utc/capi-media-vision/res/inference/models/od_label.txt
deleted file mode 100644 (file)
index 029bab8..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-street sign
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-hat
-backpack
-umbrella
-shoe
-eye glasses
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-plate
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-mirror
-dining table
-window
-desk
-toilet
-door
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-blender
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
-hair brush
diff --git a/src/utc/capi-media-vision/res/inference/models/od_tflite_model.tflite b/src/utc/capi-media-vision/res/inference/models/od_tflite_model.tflite
deleted file mode 100644 (file)
index aee1d31..0000000
Binary files a/src/utc/capi-media-vision/res/inference/models/od_tflite_model.tflite and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/models/od_tflite_model_meta.json b/src/utc/capi-media-vision/res/inference/models/od_tflite_model_meta.json
deleted file mode 100644 (file)
index 86b7b3c..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-    "inputmetadata" :
-    {
-        "tensor_info" : [
-            {
-                "name" : "normalized_input_image_tensor",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 300, 300, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [127.5, 127.5, 127.5],
-                        "std" : [127.5, 127.5, 127.5]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
-    {
-        "score" :
-            {
-                "name" : ["TFLite_Detection_PostProcess:2"],
-                "index" : [-1, 1],
-                "top_number" : 5,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            },
-        "box" :
-            {
-               "name" : ["TFLite_Detection_PostProcess"],
-               "index" : [-1, -1, 1],
-               "box_type" : "ORIGIN_LEFTTOP",
-               "box_order" : [1, 0, 3, 2],
-               "box_coordinate" : "RATIO",
-               "decoding_type": "BYPASS"
-            },
-        "label" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:1",
-                "index" : [-1, 1]
-            }
-        ],
-        "number" : [
-            {
-                "name" : "TFLite_Detection_PostProcess:3",
-                "index" : [1]
-            }
-        ]
-    }
-}
diff --git a/src/utc/capi-media-vision/res/inference/models/pld_mocap.bvh b/src/utc/capi-media-vision/res/inference/models/pld_mocap.bvh
deleted file mode 100644 (file)
index f93d086..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-HIERARCHY
-ROOT Hips
-{
-       OFFSET 0.000000 0.000000 0.000000
-       CHANNELS 6 Xposition Yposition Zposition Zrotation Xrotation Yrotation
-       JOINT Chest
-       {
-               OFFSET 0.000000 5.210000 0.000000
-               CHANNELS 3 Zrotation Xrotation Yrotation
-               JOINT Neck
-               {
-                       OFFSET 0.000000 18.650002 0.000000
-                       CHANNELS 3 Zrotation Xrotation Yrotation
-                       JOINT Head
-                       {
-                               OFFSET 0.000000 5.450001 0.000000
-                               CHANNELS 3 Zrotation Xrotation Yrotation
-                               End Site
-                               {
-                                       OFFSET 0.000000 3.869999 0.000000
-                               }
-                       }
-               }
-               JOINT RightCollar
-               {
-                       OFFSET 1.120000 16.230000 1.870000
-                       CHANNELS 3 Zrotation Xrotation Yrotation
-                       JOINT RightUpArm
-                       {
-                               OFFSET 5.540000 0.000000 0.000000
-                               CHANNELS 3 Zrotation Xrotation Yrotation
-                               JOINT RightLowArm
-                               {
-                                       OFFSET 0.000000 -11.960000 0.000000
-                                       CHANNELS 3 Zrotation Xrotation Yrotation
-                                       JOINT RightHand
-                                       {
-                                               OFFSET 0.000000 -9.930000 0.000000
-                                               CHANNELS 3 Zrotation Xrotation Yrotation
-                                               End Site
-                                               {
-                                                       OFFSET 0.000000 -7.000000 0.000000
-                                               }
-                                       }
-                               }
-                       }
-               }
-               JOINT LeftCollar
-               {
-                       OFFSET -1.120000 16.230000 1.870000
-                       CHANNELS 3 Zrotation Xrotation Yrotation
-                       JOINT LeftUpArm
-                       {
-                               OFFSET -6.070000 0.000000 0.000000
-                               CHANNELS 3 Zrotation Xrotation Yrotation
-                               JOINT LeftLowArm
-                               {
-                                       OFFSET 0.000000 -11.820000 0.000000
-                                       CHANNELS 3 Zrotation Xrotation Yrotation
-                                       JOINT LeftHand
-                                       {
-                                               OFFSET 0.000000 -10.650000 0.000000
-                                               CHANNELS 3 Zrotation Xrotation Yrotation
-                                               End Site
-                                               {
-                                                       OFFSET 0.000000 -7.000000 0.000000
-                                               }
-                                       }
-                               }
-                       }
-               }
-       }
-       JOINT RightUpLeg
-       {
-               OFFSET 3.910000 0.000000 0.000000
-               CHANNELS 3 Zrotation Xrotation Yrotation
-               JOINT RightLowLeg
-               {
-                       OFFSET 0.000000 -18.340000 0.000000
-                       CHANNELS 3 Zrotation Xrotation Yrotation
-                       JOINT RightFoot
-                       {
-                               OFFSET 0.000000 -17.369999 0.000000
-                               CHANNELS 3 Zrotation Xrotation Yrotation
-                               End Site
-                               {
-                                       OFFSET 0.000000 -3.459999 0.000000
-                               }
-                       }
-               }
-       }
-       JOINT LeftUpLeg
-       {
-               OFFSET -3.910000 0.000000 0.000000
-               CHANNELS 3 Zrotation Xrotation Yrotation
-               JOINT LeftLowLeg
-               {
-                       OFFSET 0.000000 -17.629999 0.000000
-                       CHANNELS 3 Zrotation Xrotation Yrotation
-                       JOINT LeftFoot
-                       {
-                               OFFSET 0.000000 -17.139997 0.000000
-                               CHANNELS 3 Zrotation Xrotation Yrotation
-                               End Site
-                               {
-                                       OFFSET 0.000000 -3.750000 0.000000
-                               }
-                       }
-               }
-       }
-}
-MOTION
-Frames: 2
-Frame Time: 0.041667
-8.030000 35.009998 88.360001 -3.410002 14.780000 -164.349993 13.089996 40.299994 -24.599999 7.879999 43.799988 0.000001 -3.610007 -41.450000 5.819997 10.080001 -0.000003 10.209998 97.950001 -23.530003 -2.139997 60.275566 -2.764629 54.273831 0.689997 0.029999 -0.000000 -14.040001 -0.000001 -10.499999 -85.519999 -13.719994 -102.930001 61.910009 -61.179999 65.179997 -1.570001 0.690000 0.020000 43.262770 -10.754126 140.039587 36.023886 12.970090 151.073785 0.000001 -1.140000 -0.000001 -56.152423 -7.908401 12.931058 43.240302 1.483285 -31.399612 0.000000 -23.949997 -0.000000 
-7.810000 35.099998 86.470001 -3.780000 12.940000 -166.969998 12.639998 42.569994 -22.340000 7.669998 43.609993 -0.000001 -4.230008 -41.410003 4.889995 19.099999 0.000004 4.159999 93.119992 -9.689997 -9.429998 132.670182 -81.860029 136.800124 0.699997 0.370000 0.000001 -8.619999 0.000001 -21.819997 -87.310001 -27.569989 -100.089995 56.170000 -61.560003 58.719997 -1.630005 0.950000 0.030000 13.160001 15.440001 -3.560000 7.970004 59.290004 4.969998 0.000000 1.640002 -0.000001 -17.180000 -10.019999 -3.080000 13.560003 53.380005 -18.070000 0.000000 -25.929999 0.000000 
diff --git a/src/utc/capi-media-vision/res/inference/models/pld_mocap_mapping.txt b/src/utc/capi-media-vision/res/inference/models/pld_mocap_mapping.txt
deleted file mode 100644 (file)
index 07ce1a2..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-Hips,10
-Neck,2
-Head,1
-LeftUpArm,7
-LeftLowArm,8
-LeftHand,9
-RightUpArm,4
-RightLowArm,5
-RightHand,6
-LeftUpLeg,14
-LeftLowLeg,15
-LeftFoot,16
-RightUpLeg,11
-RightLowLeg,12
-RightFoot,13
diff --git a/src/utc/capi-media-vision/res/inference/models/pld_pose_mapping.txt b/src/utc/capi-media-vision/res/inference/models/pld_pose_mapping.txt
deleted file mode 100644 (file)
index 0a66dce..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-1
-2
--1
-3
-4
-5
-6
-7
-8
--1
-9
-10
-11
-12
-13
-14
diff --git a/src/utc/capi-media-vision/res/inference/models/pld_tflite_model.tflite b/src/utc/capi-media-vision/res/inference/models/pld_tflite_model.tflite
deleted file mode 100644 (file)
index ff6044d..0000000
Binary files a/src/utc/capi-media-vision/res/inference/models/pld_tflite_model.tflite and /dev/null differ
diff --git a/src/utc/capi-media-vision/res/inference/models/pld_tflite_model_meta.json b/src/utc/capi-media-vision/res/inference/models/pld_tflite_model_meta.json
deleted file mode 100644 (file)
index a6718d5..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-{
-    "inputmetadata" :
-    {
-        "tensor_info" : [
-            {
-                "name" : "image",
-                "shape_type" : "NHWC",
-                "shape_dims" : [ 1, 192, 192, 3],
-                "data_type" : "FLOAT32",
-                "color_space" : "RGB888"
-            }
-        ],
-        "preprocess" : [
-            {
-                "normalization" : [
-                    {
-                        "mean" : [0.0, 0.0, 0.0],
-                        "std" : [1.0, 1.0, 1.0]
-                    }
-                ]
-            }
-        ]
-    },
-    "outputmetadata" :
-    {
-        "score" :
-            {
-                "name" : ["Convolutional_Pose_Machine/stage_5_out"],
-                "index" : [-1, 1, 1, 1],
-                "top_number" : 1 ,
-                "threshold" : 0.3,
-                "score_type" : "NORMAL"
-            }
-        ,
-        "landmark" : [
-            {
-                "name" : "Convolutional_Pose_Machine/stage_5_out",
-                "index" : [-1, 1, 1, 1],
-                "landmark_type" : "2D_SINGLE",
-                "landmark_coordinate" : "PIXEL",
-                "decoding_type" : "HEATMAP",
-                "decoding_info" :
-                {
-                    "heatmap" :
-                     {
-                         "shape_type": "NHWC"
-                     }
-                }
-            }
-        ]
-    }
-}
index 47bbee4d1bf8fa153e28bd2659e684b874aa4f18..3f220177f0195c865c7b2dffbb30e6e012b097ec 100644 (file)
 #include <app.h>
 #include <dlog.h>
 
-
-
-
-       
-       
-       
-
-
 static bool app_create(void *data)
 {
        return true;
index 56e4a16a5d3a95b3536fbb483e78a0c26d665877..c146fe9644b202ead2f5c330c658ec72563be533 100755 (executable)
@@ -32,19 +32,8 @@ extern void utc_capi_media_vision_surveillance_cleanup(void);
 extern void utc_capi_media_vision_surveillance_cb1_startup(void);
 extern void utc_capi_media_vision_surveillance_cb2_startup(void);
 extern void utc_capi_media_vision_surveillance_cb3_startup(void);
-extern void utc_capi_media_vision_inference_startup1(void);
-extern void utc_capi_media_vision_inference_cleanup1(void);
-extern void utc_capi_media_vision_inference_startup2(void);
-extern void utc_capi_media_vision_inference_cleanup2(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
 extern void utc_capi_media_vision_roi_tracker_startup(void);
 extern void utc_capi_media_vision_roi_tracker_cleanup(void);
-extern void utc_capi_media_vision_face_recognition_startup(void);
-extern void utc_capi_media_vision_face_recognition_cleanup(void);
 extern void utc_capi_media_vision_3d_startup(void);
 extern void utc_capi_media_vision_3d_cleanup(void);
 
@@ -210,48 +199,6 @@ extern int utc_mediavision_mv_surveillance_get_result_value_n(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p1(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p2(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p3(void);
-extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
-extern int utc_mediavision_mv_inference_create_p(void);
-extern int utc_mediavision_mv_inference_create_n(void);
-extern int utc_mediavision_mv_inference_destroy_p(void);
-extern int utc_mediavision_mv_inference_destroy_n(void);
-extern int utc_mediavision_mv_inference_configure_p(void);
-extern int utc_mediavision_mv_inference_configure_n1(void);
-extern int utc_mediavision_mv_inference_configure_n2(void);
-extern int utc_mediavision_mv_inference_prepare_p(void);
-extern int utc_mediavision_mv_inference_prepare_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_p(void);
-extern int utc_mediavision_mv_inference_image_classify_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_n2(void);
-extern int utc_mediavision_mv_inference_object_detect_p(void);
-extern int utc_mediavision_mv_inference_object_detect_n1(void);
-extern int utc_mediavision_mv_inference_object_detect_n2(void);
-extern int utc_mediavision_mv_inference_face_detect_p(void);
-extern int utc_mediavision_mv_inference_face_detect_n1(void);
-extern int utc_mediavision_mv_inference_face_detect_n2(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
-extern int utc_mediavision_mv_inference_get_label_p(void);
-extern int utc_mediavision_mv_inference_get_label_n(void);
-extern int utc_mediavision_mv_inference_get_landmark_p(void);
-extern int utc_mediavision_mv_inference_get_landmark_n(void);
-extern int utc_mediavision_mv_pose_create_p(void);
-extern int utc_mediavision_mv_pose_create_n(void);
-extern int utc_mediavision_mv_pose_destroy_p(void);
-extern int utc_mediavision_mv_pose_destroy_n(void);
-extern int utc_mediavision_mv_pose_set_from_file_p(void);
-extern int utc_mediavision_mv_pose_set_from_file_n1(void);
-extern int utc_mediavision_mv_pose_set_from_file_n2(void);
-extern int utc_mediavision_mv_pose_compare_p(void);
-extern int utc_mediavision_mv_pose_compare_n(void);
 extern int utc_mediavision_mv_roi_tracker_create_p(void);
 extern int utc_mediavision_mv_roi_tracker_create_n(void);
 extern int utc_mediavision_mv_roi_tracker_destroy_p(void);
@@ -262,25 +209,6 @@ extern int utc_mediavision_mv_roi_tracker_prepare_p(void);
 extern int utc_mediavision_mv_roi_tracker_prepare_n(void);
 extern int utc_mediavision_mv_roi_tracker_perform_p(void);
 extern int utc_mediavision_mv_roi_tracker_perform_n(void);
-extern int utc_mediavision_mv_face_recognition_create_p(void);
-extern int utc_mediavision_mv_face_recognition_create_n(void);
-extern int utc_mediavision_mv_face_recognition_destroy_p(void);
-extern int utc_mediavision_mv_face_recognition_destroy_n(void);
-extern int utc_mediavision_mv_face_recognition_prepare_p(void);
-extern int utc_mediavision_mv_face_recognition_prepare_n(void);
-extern int utc_mediavision_mv_face_recognition_register_p(void);
-extern int utc_mediavision_mv_face_recognition_register_n1(void);
-extern int utc_mediavision_mv_face_recognition_register_n2(void);
-extern int utc_mediavision_mv_face_recognition_register_n3(void);
-extern int utc_mediavision_mv_face_recognition_inference_p(void);
-extern int utc_mediavision_mv_face_recognition_inference_n1(void);
-extern int utc_mediavision_mv_face_recognition_inference_n2(void);
-extern int utc_mediavision_mv_face_recognition_get_label_p(void);
-extern int utc_mediavision_mv_face_recognition_get_label_n1(void);
-extern int utc_mediavision_mv_face_recognition_get_label_n2(void);
-extern int utc_mediavision_mv_face_recognition_unregister_p(void);
-extern int utc_mediavision_mv_face_recognition_unregister_n1(void);
-extern int utc_mediavision_mv_face_recognition_unregister_n2(void);
 extern int utc_mediavision_mv_3d_create_p(void);
 extern int utc_mediavision_mv_3d_create_n(void);
 extern int utc_mediavision_mv_3d_destroy_p(void);
@@ -464,48 +392,6 @@ testcase tc_array[] = {
        {"utc_mediavision_mv_surveillance_get_result_value_p1",utc_mediavision_mv_surveillance_get_result_value_p1,utc_capi_media_vision_surveillance_cb1_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p2",utc_mediavision_mv_surveillance_get_result_value_p2,utc_capi_media_vision_surveillance_cb2_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p3",utc_mediavision_mv_surveillance_get_result_value_p3,utc_capi_media_vision_surveillance_cb3_startup,utc_capi_media_vision_surveillance_cleanup},
-       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
        {"utc_mediavision_mv_roi_tracker_create_p", utc_mediavision_mv_roi_tracker_create_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_create_n", utc_mediavision_mv_roi_tracker_create_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_destroy_p", utc_mediavision_mv_roi_tracker_destroy_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
@@ -516,25 +402,6 @@ testcase tc_array[] = {
        {"utc_mediavision_mv_roi_tracker_prepare_n", utc_mediavision_mv_roi_tracker_prepare_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_perform_p", utc_mediavision_mv_roi_tracker_perform_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_perform_n", utc_mediavision_mv_roi_tracker_perform_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
-       {"utc_mediavision_mv_face_recognition_create_p", utc_mediavision_mv_face_recognition_create_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_create_n", utc_mediavision_mv_face_recognition_create_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_destroy_p", utc_mediavision_mv_face_recognition_destroy_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_destroy_n", utc_mediavision_mv_face_recognition_destroy_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_prepare_p", utc_mediavision_mv_face_recognition_prepare_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_prepare_n", utc_mediavision_mv_face_recognition_prepare_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_p", utc_mediavision_mv_face_recognition_register_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n1", utc_mediavision_mv_face_recognition_register_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n2", utc_mediavision_mv_face_recognition_register_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n3", utc_mediavision_mv_face_recognition_register_n3,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_p", utc_mediavision_mv_face_recognition_inference_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_n1", utc_mediavision_mv_face_recognition_inference_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_n2", utc_mediavision_mv_face_recognition_inference_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_p", utc_mediavision_mv_face_recognition_get_label_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_n1", utc_mediavision_mv_face_recognition_get_label_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_n2", utc_mediavision_mv_face_recognition_get_label_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_p", utc_mediavision_mv_face_recognition_unregister_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_n1", utc_mediavision_mv_face_recognition_unregister_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_n2", utc_mediavision_mv_face_recognition_unregister_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
        {"utc_mediavision_mv_3d_create_p", utc_mediavision_mv_3d_create_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_create_n", utc_mediavision_mv_3d_create_n,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_destroy_p", utc_mediavision_mv_3d_destroy_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
index 882812f96a25285eafb894af3cf9441a177dd9ec..e2b5762fccaece703f9b8a983a05ff2e75bd289f 100755 (executable)
@@ -32,64 +32,11 @@ extern void utc_capi_media_vision_surveillance_cleanup(void);
 extern void utc_capi_media_vision_surveillance_cb1_startup(void);
 extern void utc_capi_media_vision_surveillance_cb2_startup(void);
 extern void utc_capi_media_vision_surveillance_cb3_startup(void);
-extern void utc_capi_media_vision_inference_startup1(void);
-extern void utc_capi_media_vision_inference_cleanup1(void);
-extern void utc_capi_media_vision_inference_startup2(void);
-extern void utc_capi_media_vision_inference_cleanup2(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
 extern void utc_capi_media_vision_roi_tracker_startup(void);
 extern void utc_capi_media_vision_roi_tracker_cleanup(void);
-extern void utc_capi_media_vision_face_recognition_startup(void);
-extern void utc_capi_media_vision_face_recognition_cleanup(void);
 extern void utc_capi_media_vision_3d_startup(void);
 extern void utc_capi_media_vision_3d_cleanup(void);
 
-extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
-extern int utc_mediavision_mv_inference_create_p(void);
-extern int utc_mediavision_mv_inference_create_n(void);
-extern int utc_mediavision_mv_inference_destroy_p(void);
-extern int utc_mediavision_mv_inference_destroy_n(void);
-extern int utc_mediavision_mv_inference_configure_p(void);
-extern int utc_mediavision_mv_inference_configure_n1(void);
-extern int utc_mediavision_mv_inference_configure_n2(void);
-extern int utc_mediavision_mv_inference_prepare_p(void);
-extern int utc_mediavision_mv_inference_prepare_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_p(void);
-extern int utc_mediavision_mv_inference_image_classify_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_n2(void);
-extern int utc_mediavision_mv_inference_object_detect_p(void);
-extern int utc_mediavision_mv_inference_object_detect_n1(void);
-extern int utc_mediavision_mv_inference_object_detect_n2(void);
-extern int utc_mediavision_mv_inference_face_detect_p(void);
-extern int utc_mediavision_mv_inference_face_detect_n1(void);
-extern int utc_mediavision_mv_inference_face_detect_n2(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
-extern int utc_mediavision_mv_inference_get_label_p(void);
-extern int utc_mediavision_mv_inference_get_label_n(void);
-extern int utc_mediavision_mv_inference_get_landmark_p(void);
-extern int utc_mediavision_mv_inference_get_landmark_n(void);
-extern int utc_mediavision_mv_pose_create_p(void);
-extern int utc_mediavision_mv_pose_create_n(void);
-extern int utc_mediavision_mv_pose_destroy_p(void);
-extern int utc_mediavision_mv_pose_destroy_n(void);
-extern int utc_mediavision_mv_pose_set_from_file_p(void);
-extern int utc_mediavision_mv_pose_set_from_file_n1(void);
-extern int utc_mediavision_mv_pose_set_from_file_n2(void);
-extern int utc_mediavision_mv_pose_compare_p(void);
-extern int utc_mediavision_mv_pose_compare_n(void);
 extern int utc_mediavision_mv_create_source_p(void);
 extern int utc_mediavision_mv_create_source_n(void);
 extern int utc_mediavision_mv_destroy_source_p(void);
@@ -262,25 +209,6 @@ extern int utc_mediavision_mv_roi_tracker_prepare_p(void);
 extern int utc_mediavision_mv_roi_tracker_prepare_n(void);
 extern int utc_mediavision_mv_roi_tracker_perform_p(void);
 extern int utc_mediavision_mv_roi_tracker_perform_n(void);
-extern int utc_mediavision_mv_face_recognition_create_p(void);
-extern int utc_mediavision_mv_face_recognition_create_n(void);
-extern int utc_mediavision_mv_face_recognition_destroy_p(void);
-extern int utc_mediavision_mv_face_recognition_destroy_n(void);
-extern int utc_mediavision_mv_face_recognition_prepare_p(void);
-extern int utc_mediavision_mv_face_recognition_prepare_n(void);
-extern int utc_mediavision_mv_face_recognition_register_p(void);
-extern int utc_mediavision_mv_face_recognition_register_n1(void);
-extern int utc_mediavision_mv_face_recognition_register_n2(void);
-extern int utc_mediavision_mv_face_recognition_register_n3(void);
-extern int utc_mediavision_mv_face_recognition_inference_p(void);
-extern int utc_mediavision_mv_face_recognition_inference_n1(void);
-extern int utc_mediavision_mv_face_recognition_inference_n2(void);
-extern int utc_mediavision_mv_face_recognition_get_label_p(void);
-extern int utc_mediavision_mv_face_recognition_get_label_n1(void);
-extern int utc_mediavision_mv_face_recognition_get_label_n2(void);
-extern int utc_mediavision_mv_face_recognition_unregister_p(void);
-extern int utc_mediavision_mv_face_recognition_unregister_n1(void);
-extern int utc_mediavision_mv_face_recognition_unregister_n2(void);
 extern int utc_mediavision_mv_3d_create_p(void);
 extern int utc_mediavision_mv_3d_create_n(void);
 extern int utc_mediavision_mv_3d_destroy_p(void);
@@ -464,48 +392,6 @@ testcase tc_array[] = {
        {"utc_mediavision_mv_surveillance_get_result_value_p1",utc_mediavision_mv_surveillance_get_result_value_p1,utc_capi_media_vision_surveillance_cb1_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p2",utc_mediavision_mv_surveillance_get_result_value_p2,utc_capi_media_vision_surveillance_cb2_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p3",utc_mediavision_mv_surveillance_get_result_value_p3,utc_capi_media_vision_surveillance_cb3_startup,utc_capi_media_vision_surveillance_cleanup},
-       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
        {"utc_mediavision_mv_roi_tracker_create_p", utc_mediavision_mv_roi_tracker_create_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_create_n", utc_mediavision_mv_roi_tracker_create_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_destroy_p", utc_mediavision_mv_roi_tracker_destroy_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
@@ -516,25 +402,6 @@ testcase tc_array[] = {
        {"utc_mediavision_mv_roi_tracker_prepare_n", utc_mediavision_mv_roi_tracker_prepare_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_perform_p", utc_mediavision_mv_roi_tracker_perform_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_perform_n", utc_mediavision_mv_roi_tracker_perform_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
-       {"utc_mediavision_mv_face_recognition_create_p", utc_mediavision_mv_face_recognition_create_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_create_n", utc_mediavision_mv_face_recognition_create_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_destroy_p", utc_mediavision_mv_face_recognition_destroy_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_destroy_n", utc_mediavision_mv_face_recognition_destroy_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_prepare_p", utc_mediavision_mv_face_recognition_prepare_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_prepare_n", utc_mediavision_mv_face_recognition_prepare_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_p", utc_mediavision_mv_face_recognition_register_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n1", utc_mediavision_mv_face_recognition_register_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n2", utc_mediavision_mv_face_recognition_register_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n3", utc_mediavision_mv_face_recognition_register_n3,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_p", utc_mediavision_mv_face_recognition_inference_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_n1", utc_mediavision_mv_face_recognition_inference_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_n2", utc_mediavision_mv_face_recognition_inference_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_p", utc_mediavision_mv_face_recognition_get_label_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_n1", utc_mediavision_mv_face_recognition_get_label_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_n2", utc_mediavision_mv_face_recognition_get_label_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_p", utc_mediavision_mv_face_recognition_unregister_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_n1", utc_mediavision_mv_face_recognition_unregister_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_n2", utc_mediavision_mv_face_recognition_unregister_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
        {"utc_mediavision_mv_3d_create_p", utc_mediavision_mv_3d_create_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_create_n", utc_mediavision_mv_3d_create_n,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_destroy_p", utc_mediavision_mv_3d_destroy_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
@@ -557,5 +424,4 @@ testcase tc_array[] = {
        {NULL, NULL}
 };
 
-
 #endif // __TCT_CAPI-MEDIA-VISION-NATIVE_H__
index 8c271fbe09aaa64243b7d3b4330ca94656fe169d..9fe8ca66c10e91075886543e60bbd4c249402bf3 100755 (executable)
@@ -32,15 +32,6 @@ extern void utc_capi_media_vision_surveillance_cleanup(void);
 extern void utc_capi_media_vision_surveillance_cb1_startup(void);
 extern void utc_capi_media_vision_surveillance_cb2_startup(void);
 extern void utc_capi_media_vision_surveillance_cb3_startup(void);
-extern void utc_capi_media_vision_inference_startup1(void);
-extern void utc_capi_media_vision_inference_cleanup1(void);
-extern void utc_capi_media_vision_inference_startup2(void);
-extern void utc_capi_media_vision_inference_cleanup2(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
 extern void utc_capi_media_vision_3d_startup(void);
 extern void utc_capi_media_vision_3d_cleanup(void);
 
@@ -206,48 +197,6 @@ extern int utc_mediavision_mv_surveillance_get_result_value_n(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p1(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p2(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p3(void);
-extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
-extern int utc_mediavision_mv_inference_create_p(void);
-extern int utc_mediavision_mv_inference_create_n(void);
-extern int utc_mediavision_mv_inference_destroy_p(void);
-extern int utc_mediavision_mv_inference_destroy_n(void);
-extern int utc_mediavision_mv_inference_configure_p(void);
-extern int utc_mediavision_mv_inference_configure_n1(void);
-extern int utc_mediavision_mv_inference_configure_n2(void);
-extern int utc_mediavision_mv_inference_prepare_p(void);
-extern int utc_mediavision_mv_inference_prepare_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_p(void);
-extern int utc_mediavision_mv_inference_image_classify_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_n2(void);
-extern int utc_mediavision_mv_inference_object_detect_p(void);
-extern int utc_mediavision_mv_inference_object_detect_n1(void);
-extern int utc_mediavision_mv_inference_object_detect_n2(void);
-extern int utc_mediavision_mv_inference_face_detect_p(void);
-extern int utc_mediavision_mv_inference_face_detect_n1(void);
-extern int utc_mediavision_mv_inference_face_detect_n2(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
-extern int utc_mediavision_mv_inference_get_label_p(void);
-extern int utc_mediavision_mv_inference_get_label_n(void);
-extern int utc_mediavision_mv_inference_get_landmark_p(void);
-extern int utc_mediavision_mv_inference_get_landmark_n(void);
-extern int utc_mediavision_mv_pose_create_p(void);
-extern int utc_mediavision_mv_pose_create_n(void);
-extern int utc_mediavision_mv_pose_destroy_p(void);
-extern int utc_mediavision_mv_pose_destroy_n(void);
-extern int utc_mediavision_mv_pose_set_from_file_p(void);
-extern int utc_mediavision_mv_pose_set_from_file_n1(void);
-extern int utc_mediavision_mv_pose_set_from_file_n2(void);
-extern int utc_mediavision_mv_pose_compare_p(void);
-extern int utc_mediavision_mv_pose_compare_n(void);
 extern int utc_mediavision_mv_3d_create_p(void);
 extern int utc_mediavision_mv_3d_create_n(void);
 extern int utc_mediavision_mv_3d_destroy_p(void);
@@ -431,48 +380,6 @@ testcase tc_array[] = {
        {"utc_mediavision_mv_surveillance_get_result_value_p1",utc_mediavision_mv_surveillance_get_result_value_p1,utc_capi_media_vision_surveillance_cb1_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p2",utc_mediavision_mv_surveillance_get_result_value_p2,utc_capi_media_vision_surveillance_cb2_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p3",utc_mediavision_mv_surveillance_get_result_value_p3,utc_capi_media_vision_surveillance_cb3_startup,utc_capi_media_vision_surveillance_cleanup},
-       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
        {"utc_mediavision_mv_3d_create_p", utc_mediavision_mv_3d_create_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_create_n", utc_mediavision_mv_3d_create_n,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_destroy_p", utc_mediavision_mv_3d_destroy_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
index 4b641eb5f6c6770530c4a20e48620a5a0c244d1e..c146fe9644b202ead2f5c330c658ec72563be533 100755 (executable)
@@ -32,19 +32,8 @@ extern void utc_capi_media_vision_surveillance_cleanup(void);
 extern void utc_capi_media_vision_surveillance_cb1_startup(void);
 extern void utc_capi_media_vision_surveillance_cb2_startup(void);
 extern void utc_capi_media_vision_surveillance_cb3_startup(void);
-extern void utc_capi_media_vision_inference_startup1(void);
-extern void utc_capi_media_vision_inference_cleanup1(void);
-extern void utc_capi_media_vision_inference_startup2(void);
-extern void utc_capi_media_vision_inference_cleanup2(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void);
-extern void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void);
 extern void utc_capi_media_vision_roi_tracker_startup(void);
 extern void utc_capi_media_vision_roi_tracker_cleanup(void);
-extern void utc_capi_media_vision_face_recognition_startup(void);
-extern void utc_capi_media_vision_face_recognition_cleanup(void);
 extern void utc_capi_media_vision_3d_startup(void);
 extern void utc_capi_media_vision_3d_cleanup(void);
 
@@ -210,48 +199,6 @@ extern int utc_mediavision_mv_surveillance_get_result_value_n(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p1(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p2(void);
 extern int utc_mediavision_mv_surveillance_get_result_value_p3(void);
-extern int utc_mediavision_mv_inference_foreach_supported_engine(void);
-extern int utc_mediavision_mv_inference_create_p(void);
-extern int utc_mediavision_mv_inference_create_n(void);
-extern int utc_mediavision_mv_inference_destroy_p(void);
-extern int utc_mediavision_mv_inference_destroy_n(void);
-extern int utc_mediavision_mv_inference_configure_p(void);
-extern int utc_mediavision_mv_inference_configure_n1(void);
-extern int utc_mediavision_mv_inference_configure_n2(void);
-extern int utc_mediavision_mv_inference_prepare_p(void);
-extern int utc_mediavision_mv_inference_prepare_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_p(void);
-extern int utc_mediavision_mv_inference_image_classify_n1(void);
-extern int utc_mediavision_mv_inference_image_classify_n2(void);
-extern int utc_mediavision_mv_inference_object_detect_p(void);
-extern int utc_mediavision_mv_inference_object_detect_n1(void);
-extern int utc_mediavision_mv_inference_object_detect_n2(void);
-extern int utc_mediavision_mv_inference_face_detect_p(void);
-extern int utc_mediavision_mv_inference_face_detect_n1(void);
-extern int utc_mediavision_mv_inference_face_detect_n2(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_facial_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_p(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n1(void);
-extern int utc_mediavision_mv_inference_pose_landmark_detect_n2(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_poses_n(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_p(void);
-extern int utc_mediavision_mv_inference_get_number_of_landmarks_n(void);
-extern int utc_mediavision_mv_inference_get_label_p(void);
-extern int utc_mediavision_mv_inference_get_label_n(void);
-extern int utc_mediavision_mv_inference_get_landmark_p(void);
-extern int utc_mediavision_mv_inference_get_landmark_n(void);
-extern int utc_mediavision_mv_pose_create_p(void);
-extern int utc_mediavision_mv_pose_create_n(void);
-extern int utc_mediavision_mv_pose_destroy_p(void);
-extern int utc_mediavision_mv_pose_destroy_n(void);
-extern int utc_mediavision_mv_pose_set_from_file_p(void);
-extern int utc_mediavision_mv_pose_set_from_file_n1(void);
-extern int utc_mediavision_mv_pose_set_from_file_n2(void);
-extern int utc_mediavision_mv_pose_compare_p(void);
-extern int utc_mediavision_mv_pose_compare_n(void);
 extern int utc_mediavision_mv_roi_tracker_create_p(void);
 extern int utc_mediavision_mv_roi_tracker_create_n(void);
 extern int utc_mediavision_mv_roi_tracker_destroy_p(void);
@@ -262,25 +209,6 @@ extern int utc_mediavision_mv_roi_tracker_prepare_p(void);
 extern int utc_mediavision_mv_roi_tracker_prepare_n(void);
 extern int utc_mediavision_mv_roi_tracker_perform_p(void);
 extern int utc_mediavision_mv_roi_tracker_perform_n(void);
-extern int utc_mediavision_mv_face_recognition_create_p(void);
-extern int utc_mediavision_mv_face_recognition_create_n(void);
-extern int utc_mediavision_mv_face_recognition_destroy_p(void);
-extern int utc_mediavision_mv_face_recognition_destroy_n(void);
-extern int utc_mediavision_mv_face_recognition_prepare_p(void);
-extern int utc_mediavision_mv_face_recognition_prepare_n(void);
-extern int utc_mediavision_mv_face_recognition_register_p(void);
-extern int utc_mediavision_mv_face_recognition_register_n1(void);
-extern int utc_mediavision_mv_face_recognition_register_n2(void);
-extern int utc_mediavision_mv_face_recognition_register_n3(void);
-extern int utc_mediavision_mv_face_recognition_inference_p(void);
-extern int utc_mediavision_mv_face_recognition_inference_n1(void);
-extern int utc_mediavision_mv_face_recognition_inference_n2(void);
-extern int utc_mediavision_mv_face_recognition_get_label_p(void);
-extern int utc_mediavision_mv_face_recognition_get_label_n1(void);
-extern int utc_mediavision_mv_face_recognition_get_label_n2(void);
-extern int utc_mediavision_mv_face_recognition_unregister_p(void);
-extern int utc_mediavision_mv_face_recognition_unregister_n1(void);
-extern int utc_mediavision_mv_face_recognition_unregister_n2(void);
 extern int utc_mediavision_mv_3d_create_p(void);
 extern int utc_mediavision_mv_3d_create_n(void);
 extern int utc_mediavision_mv_3d_destroy_p(void);
@@ -464,48 +392,6 @@ testcase tc_array[] = {
        {"utc_mediavision_mv_surveillance_get_result_value_p1",utc_mediavision_mv_surveillance_get_result_value_p1,utc_capi_media_vision_surveillance_cb1_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p2",utc_mediavision_mv_surveillance_get_result_value_p2,utc_capi_media_vision_surveillance_cb2_startup,utc_capi_media_vision_surveillance_cleanup},
        {"utc_mediavision_mv_surveillance_get_result_value_p3",utc_mediavision_mv_surveillance_get_result_value_p3,utc_capi_media_vision_surveillance_cb3_startup,utc_capi_media_vision_surveillance_cleanup},
-       {"utc_mediavision_mv_inference_foreach_supported_engine",utc_mediavision_mv_inference_foreach_supported_engine,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_create_p",utc_mediavision_mv_inference_create_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_create_n",utc_mediavision_mv_inference_create_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_p",utc_mediavision_mv_inference_destroy_p,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_destroy_n",utc_mediavision_mv_inference_destroy_n,utc_capi_media_vision_inference_startup1,utc_capi_media_vision_inference_cleanup1},
-       {"utc_mediavision_mv_inference_configure_p",utc_mediavision_mv_inference_configure_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n1",utc_mediavision_mv_inference_configure_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_configure_n2",utc_mediavision_mv_inference_configure_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_p",utc_mediavision_mv_inference_prepare_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_prepare_n1",utc_mediavision_mv_inference_prepare_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_p",utc_mediavision_mv_inference_image_classify_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n1",utc_mediavision_mv_inference_image_classify_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_image_classify_n2",utc_mediavision_mv_inference_image_classify_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_p",utc_mediavision_mv_inference_object_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n1",utc_mediavision_mv_inference_object_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_object_detect_n2",utc_mediavision_mv_inference_object_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_p",utc_mediavision_mv_inference_face_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n1",utc_mediavision_mv_inference_face_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_face_detect_n2",utc_mediavision_mv_inference_face_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_p",utc_mediavision_mv_inference_facial_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n1",utc_mediavision_mv_inference_facial_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_facial_landmark_detect_n2",utc_mediavision_mv_inference_facial_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_p", utc_mediavision_mv_inference_pose_landmark_detect_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n1", utc_mediavision_mv_inference_pose_landmark_detect_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_pose_landmark_detect_n2", utc_mediavision_mv_inference_pose_landmark_detect_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_p", utc_mediavision_mv_inference_get_number_of_poses_p,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_poses_n", utc_mediavision_mv_inference_get_number_of_poses_n,utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_p", utc_mediavision_mv_inference_get_label_p,utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_label_n", utc_mediavision_mv_inference_get_label_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_p", utc_mediavision_mv_inference_get_number_of_landmarks_p,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_number_of_landmarks_n", utc_mediavision_mv_inference_get_number_of_landmarks_n,utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_p", utc_mediavision_mv_inference_get_landmark_p,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_inference_get_landmark_n", utc_mediavision_mv_inference_get_landmark_n,utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_p", utc_mediavision_mv_pose_create_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_create_n", utc_mediavision_mv_pose_create_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_p", utc_mediavision_mv_pose_destroy_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_destroy_n", utc_mediavision_mv_pose_destroy_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_p", utc_mediavision_mv_pose_set_from_file_p,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n1", utc_mediavision_mv_pose_set_from_file_n1,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_set_from_file_n2", utc_mediavision_mv_pose_set_from_file_n2,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_p", utc_mediavision_mv_pose_compare_p,utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup,utc_capi_media_vision_inference_cleanup2},
-       {"utc_mediavision_mv_pose_compare_n", utc_mediavision_mv_pose_compare_n,utc_capi_media_vision_inference_startup2,utc_capi_media_vision_inference_cleanup2},
        {"utc_mediavision_mv_roi_tracker_create_p", utc_mediavision_mv_roi_tracker_create_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_create_n", utc_mediavision_mv_roi_tracker_create_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_destroy_p", utc_mediavision_mv_roi_tracker_destroy_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
@@ -516,25 +402,6 @@ testcase tc_array[] = {
        {"utc_mediavision_mv_roi_tracker_prepare_n", utc_mediavision_mv_roi_tracker_prepare_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_perform_p", utc_mediavision_mv_roi_tracker_perform_p,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
        {"utc_mediavision_mv_roi_tracker_perform_n", utc_mediavision_mv_roi_tracker_perform_n,utc_capi_media_vision_roi_tracker_startup,utc_capi_media_vision_roi_tracker_cleanup},
-       {"utc_mediavision_mv_face_recognition_create_p", utc_mediavision_mv_face_recognition_create_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_create_n", utc_mediavision_mv_face_recognition_create_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_destroy_p", utc_mediavision_mv_face_recognition_destroy_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_destroy_n", utc_mediavision_mv_face_recognition_destroy_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_prepare_p", utc_mediavision_mv_face_recognition_prepare_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_prepare_n", utc_mediavision_mv_face_recognition_prepare_n,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_p", utc_mediavision_mv_face_recognition_register_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n1", utc_mediavision_mv_face_recognition_register_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n2", utc_mediavision_mv_face_recognition_register_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_register_n3", utc_mediavision_mv_face_recognition_register_n3,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_p", utc_mediavision_mv_face_recognition_inference_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_n1", utc_mediavision_mv_face_recognition_inference_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_inference_n2", utc_mediavision_mv_face_recognition_inference_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_p", utc_mediavision_mv_face_recognition_get_label_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_n1", utc_mediavision_mv_face_recognition_get_label_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_get_label_n2", utc_mediavision_mv_face_recognition_get_label_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_p", utc_mediavision_mv_face_recognition_unregister_p,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_n1", utc_mediavision_mv_face_recognition_unregister_n1,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
-       {"utc_mediavision_mv_face_recognition_unregister_n2", utc_mediavision_mv_face_recognition_unregister_n2,utc_capi_media_vision_face_recognition_startup,utc_capi_media_vision_face_recognition_cleanup},
        {"utc_mediavision_mv_3d_create_p", utc_mediavision_mv_3d_create_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_create_n", utc_mediavision_mv_3d_create_n,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
        {"utc_mediavision_mv_3d_destroy_p", utc_mediavision_mv_3d_destroy_p,utc_capi_media_vision_3d_startup,utc_capi_media_vision_3d_cleanup},
@@ -557,5 +424,4 @@ testcase tc_array[] = {
        {NULL, NULL}
 };
 
-
 #endif // __TCT_CAPI-MEDIA-VISION-NATIVE_H__
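Note (editorial, not part of the patch): every tc_array entry removed above follows the TCT testcase registration convention of {name, test function, per-case startup, per-case cleanup}, terminated by a {NULL, NULL} sentinel. The sketch below illustrates that convention only; the struct layout and the example names are assumptions for illustration and are not taken from the actual tct_common headers.

	/* Hypothetical sketch of a TCT testcase table (assumed struct layout). */
	typedef struct testcase_s {
		const char *name;        /* test case name reported by the runner    */
		int (*function)(void);   /* test body; returns 0 on success          */
		void (*startup)(void);   /* per-case fixture setup (may be NULL)     */
		void (*cleanup)(void);   /* per-case fixture teardown (may be NULL)  */
	} testcase;

	/* Placeholder entries; names are illustrative, not real test cases. */
	static int  utc_example_case_p(void) { return 0; }
	static void utc_example_startup(void) {}
	static void utc_example_cleanup(void) {}

	static testcase example_tc_array[] = {
		{ "utc_example_case_p", utc_example_case_p, utc_example_startup, utc_example_cleanup },
		{ NULL, NULL }
	};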
diff --git a/src/utc/capi-media-vision/utc-mv_face_recognition.c b/src/utc/capi-media-vision/utc-mv_face_recognition.c
deleted file mode 100755 (executable)
index 304f253..0000000
+++ /dev/null
@@ -1,940 +0,0 @@
-/**
- * Copyright (c) 2022 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assert.h"
-#include <mv_face_recognition.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <image_util.h>
-#include <system_info.h>
-#include <storage.h>
-#include "tct_common.h"
-
-#define MAX_LABEL_CNT  2
-#define MAX_IMAGE_CNT  10
-#define FILE_PATH_SIZE 1024
-#define API_NAMESPACE "[MediaVision]"
-
-static char *p1_face_examples_dir = NULL;
-static char *p2_face_examples_dir = NULL;
-
-static const char *image_file_names[MAX_IMAGE_CNT] = {
-       "00.jpg", "01.jpg", "02.jpg", "03.jpg", "04.jpg",
-       "05.jpg", "06.jpg", "07.jpg", "08.jpg", "09.jpg"
-};
-
-static const char *label_names[MAX_LABEL_CNT] = {
-       "p1", "p2"
-};
-
-static int load_image_to_media_source(const char *file_path, mv_source_h source)
-{
-       if (NULL == file_path || NULL == source)
-       {
-               printf("File path or source is NULL\n");
-               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-       }
-
-       unsigned long width = 0;
-       unsigned long height = 0;
-       unsigned long long buffer_size = 0;
-       unsigned char *data_buffer = NULL;
-       int ret1 = IMAGE_UTIL_ERROR_NONE;
-       int ret2 = MEDIA_VISION_ERROR_NONE;
-       image_util_decode_h _decoder = NULL;
-
-       ret1 = image_util_decode_create(&_decoder);
-       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-       ret1 = image_util_decode_set_input_path(_decoder, file_path);
-       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-       ret1 = image_util_decode_set_colorspace(_decoder, IMAGE_UTIL_COLORSPACE_RGB888);
-       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-       ret1 = image_util_decode_set_output_buffer(_decoder, &data_buffer);
-       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-       ret1 = image_util_decode_run(_decoder, &width, &height, &buffer_size);
-       if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-
-       // Only grayscale and RGB jpegs in test set:
-       mv_colorspace_e source_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
-
-       ret2 = mv_source_clear(source);
-       if (ret2 != MEDIA_VISION_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-
-       ret2 = mv_source_fill_by_buffer(source, data_buffer, (unsigned long long)buffer_size,
-                                                                       (unsigned int)width, (unsigned int)height, source_colorspace);
-
-_LOAD_IMAGE_FAIL:
-       image_util_decode_destroy(_decoder);
-       if(data_buffer)
-               free(data_buffer);
-
-       assert_eq(IMAGE_UTIL_ERROR_NONE, ret1);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret2);
-       return MEDIA_VISION_ERROR_NONE;
-}
-
-static bool is_face_recognition_feature_supported(void)
-{
-       bool isFaceRecognitionSupported = false;
-
-       system_info_get_platform_bool("http://tizen.org/feature/vision.inference.face", &isFaceRecognitionSupported);
-
-       if (!isFaceRecognitionSupported)
-               printf("Face recognition feature is not supported.\n");
-
-       return isFaceRecognitionSupported;
-}
-
-/**
- * @function   utc_capi_media_vision_face_recognition_startup
- * @description        Face recognition module UTC startup code
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_face_recognition_startup(void)
-{
-       printf("capi-media-vision mv_face_recognition tests STARTUP is launched\n");
-
-       char pszValue[CONFIG_VALUE_LEN_MAX] = {0,};
-
-       if (!GetValueForTCTSetting("DEVICE_PHYSICAL_STORAGE_30", pszValue, API_NAMESPACE)) {
-               printf("Failed to get value for TCT setting.\n");
-               return;
-       }
-
-       PRINT_UTC_LOG("[Line : %d][%s] 'DEVICE_PHYSICAL_STORAGE_30' Values Received %s\\n", __LINE__, API_NAMESPACE, pszValue);
-
-       p1_face_examples_dir=(char*)calloc(strlen(pszValue)+strlen("/res/face_recognition/images/P1")+1, sizeof(char));
-       snprintf(p1_face_examples_dir, strlen(pszValue)+strlen("/res/face_recognition/images/P1")+1, "%s/res/face_recognition/images/P1", pszValue);
-
-       p2_face_examples_dir=(char*)calloc(strlen(pszValue)+strlen("/res/face_recognition/images/P2")+1, sizeof(char));
-       snprintf(p2_face_examples_dir, strlen(pszValue)+strlen("/res/face_recognition/images/P2")+1, "%s/res/face_recognition/images/P2", pszValue);
-
-       printf("capi-media-vision mv_face_recognition tests STARTUP is completed\n");
-}
-
-/**
- * @function   utc_capi_media_vision_face_recognition_cleanup
- * @description        Face recognition module UTC cleanup code
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_face_recognition_cleanup(void)
-{
-       printf("capi-media-vision mv_face_recognition tests CLEANUP is launched\n");
-
-       free(p1_face_examples_dir);
-       free(p2_face_examples_dir);
-
-       printf("capi-media-vision mv_face_recognition tests CLEANUP is completed\n");
-}
-
-/**
- * @brief Positive test case of mv_face_recognition_create_p()
- * @testcase        utc_mediavision_mv_face_recognition_create_p
- * @since_tizen     7.0
- * @description     Create face recognition handle
- */
-int utc_mediavision_mv_face_recognition_create_p(void)
-{
-       printf("Start mv_face_recognition_create_p\n");
-
-       mv_face_recognition_h handle = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_create_p\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_create()
- * @testcase        utc_mediavision_mv_face_recognition_create_n
- * @since_tizen     7.0
- * @description     Create face recognition handle,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_face_recognition_create_n(void)
-{
-       printf("Start mv_face_recognition_create_n\n");
-
-       int ret = mv_face_recognition_create(NULL);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       printf("End mv_face_recognition_create_n\n");
-
-       return 0;
-}
-
-/**
- * @brief Positive test case of mv_face_recognition_destroy_p()
- * @testcase        utc_mediavision_mv_face_recognition_destroy_p
- * @since_tizen     7.0
- * @description     Destroy face recognition handle
- */
-int utc_mediavision_mv_face_recognition_destroy_p(void)
-{
-       printf("Start mv_face_recognition_destroy_p\n");
-
-       mv_face_recognition_h handle = NULL;
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_destroy_p\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_destroy_n()
- * @testcase        utc_mediavision_mv_face_recognition_destroy_n
- * @since_tizen     7.0
- * @description     Destroy face recognition handle,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_face_recognition_destroy_n(void)
-{
-       printf("Start mv_face_recognition_destroy_n\n");
-
-       int ret = mv_face_recognition_destroy(NULL);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       printf("End mv_face_recognition_destroy_n\n");
-
-       return 0;
-}
-
-/**
- * @brief Positive test case of mv_face_recognition_prepare_p()
- * @testcase        utc_mediavision_mv_face_recognition_prepare_p
- * @since_tizen     7.0
- * @description     Prepare face recognition
- */
-int utc_mediavision_mv_face_recognition_prepare_p(void)
-{
-       printf("Start mv_face_recognition_prepare_p\n");
-
-       mv_face_recognition_h handle = NULL;
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_prepare_p\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_prepare_n()
- * @testcase        utc_mediavision_mv_face_recognition_prepare_n
- * @since_tizen     7.0
- * @description     Prepare face recognition,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_face_recognition_prepare_n(void)
-{
-       printf("Start mv_face_recognition_prepare_n\n");
-
-       int ret = mv_face_recognition_prepare(NULL);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       printf("End mv_face_recognition_prepare_n\n");
-
-       return 0;
-}
-
-/**
- * @brief Positive test case of mv_face_recognition_register_p()
- * @testcase        utc_mediavision_mv_face_recognition_register_p
- * @since_tizen     7.0
- * @description     Register face image and its label
- */
-int utc_mediavision_mv_face_recognition_register_p(void)
-{
-       printf("Start mv_face_recognition_register_p\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
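-       // Register the first half of each person's images under that person's label.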
-       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
-               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
-                       char image_path[FILE_PATH_SIZE] = "";
-
-                       ret = mv_create_source(&mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       if (label_idx == 0)
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
-                       else
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
-
-                       ret = load_image_to_media_source(image_path, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_destroy_source(mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-               }
-       }
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_register_p\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_register_n1()
- * @testcase        utc_mediavision_mv_face_recognition_register_n1
- * @since_tizen     7.0
- * @description     Register face image and its label,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_face_recognition_register_n1(void)
-{
-       printf("Start mv_face_recognition_register_n1\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-       char image_path[FILE_PATH_SIZE] = "";
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_create_source(&mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
-
-       ret = load_image_to_media_source(image_path, mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_register(NULL, mv_source, "test");
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_destroy_source(mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_register_n1\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_register_n2()
- * @testcase        utc_mediavision_mv_face_recognition_register_n2
- * @since_tizen     7.0
- * @description     Register face image and its label,
- *                  but fail because source is NULL
- */
-int utc_mediavision_mv_face_recognition_register_n2(void)
-{
-       printf("Start mv_face_recognition_register_n2\n");
-
-       mv_face_recognition_h handle = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_register(handle, NULL, "test");
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_register_n2\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_register_n3()
- * @testcase        utc_mediavision_mv_face_recognition_register_n3
- * @since_tizen     7.0
- * @description     Register face image and its label,
- *                  but fail because label is NULL
- */
-int utc_mediavision_mv_face_recognition_register_n3(void)
-{
-       printf("Start mv_face_recognition_register_n3\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-       char image_path[FILE_PATH_SIZE] = "";
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_create_source(&mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
-
-       ret = load_image_to_media_source(image_path, mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_register(handle, mv_source, NULL);
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_destroy_source(mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_register_n3\n");
-
-       return 0;
-}
-
-/**
- * @brief Positive test case of mv_face_recognition_inference_p()
- * @testcase        utc_mediavision_mv_face_recognition_inference_p
- * @since_tizen     7.0
- * @description     Register face images and then recognize a given face image
- */
-int utc_mediavision_mv_face_recognition_inference_p(void)
-{
-       printf("Start mv_face_recognition_inference_p\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       // Training
-       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
-               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
-                       char image_path[FILE_PATH_SIZE] = "";
-
-                       ret = mv_create_source(&mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       if (label_idx == 0)
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
-                       else
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
-
-                       ret = load_image_to_media_source(image_path, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_destroy_source(mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-               }
-       }
-
-       // Inference
-       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
-               for (unsigned int img_idx = 5; img_idx < MAX_IMAGE_CNT; ++img_idx) {
-                       char image_path[FILE_PATH_SIZE] = "";
-
-                       ret = mv_create_source(&mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       if (label_idx == 0)
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
-                       else
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
-
-                       ret = load_image_to_media_source(image_path, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_face_recognition_inference(handle, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_destroy_source(mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-               }
-       }
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_inference_p\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_inference_n1()
- * @testcase        utc_mediavision_mv_face_recognition_inference_n1
- * @since_tizen     7.0
- * @description     Recognize a given face image,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_face_recognition_inference_n1(void)
-{
-       printf("Start mv_face_recognition_inference_n1\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-       char image_path[FILE_PATH_SIZE] = "";
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_create_source(&mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
-
-       ret = load_image_to_media_source(image_path, mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_inference(NULL, mv_source);
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_destroy_source(mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_inference_n1\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_inference_n2()
- * @testcase        utc_mediavision_mv_face_recognition_inference_n2
- * @since_tizen     7.0
- * @description     Recognize a given face image,
- *                  but fail because source is NULL
- */
-int utc_mediavision_mv_face_recognition_inference_n2(void)
-{
-       printf("Start mv_face_recognition_inference_n2\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-       char image_path[FILE_PATH_SIZE] = "";
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_create_source(&mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, "00.jpg");
-
-       ret = load_image_to_media_source(image_path, mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_inference(handle, NULL);
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_destroy_source(mv_source);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_inference_n2\n");
-
-       return 0;
-}
-
-/**
- * @brief Positive test case of mv_face_recognition_get_label_p()
- * @testcase        utc_mediavision_mv_face_recognition_get_label_p
- * @since_tizen     7.0
- * @description     Get a label
- */
-int utc_mediavision_mv_face_recognition_get_label_p(void)
-{
-       printf("Start mv_face_recognition_get_label_p\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       // Training
-       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
-               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
-                       char image_path[FILE_PATH_SIZE] = "";
-
-                       ret = mv_create_source(&mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       if (label_idx == 0)
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
-                       else
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
-
-                       ret = load_image_to_media_source(image_path, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_destroy_source(mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-               }
-       }
-
-       // Inference
-       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
-               for (unsigned int img_idx = 5; img_idx < MAX_IMAGE_CNT; ++img_idx) {
-                       char image_path[FILE_PATH_SIZE] = "";
-                       const char *out_label = NULL;
-
-                       ret = mv_create_source(&mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       if (label_idx == 0)
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
-                       else
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
-
-                       ret = load_image_to_media_source(image_path, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_face_recognition_inference(handle, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_face_recognition_get_label(handle, &out_label);
-                       if (ret != MEDIA_VISION_ERROR_NO_DATA)
-                               assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_destroy_source(mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-               }
-       }
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_get_label_p\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_get_label_n1()
- * @testcase        utc_mediavision_mv_face_recognition_get_label_n1
- * @since_tizen     7.0
- * @description     Get a label,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_face_recognition_get_label_n1(void)
-{
-       printf("Start mv_face_recognition_get_label_n1\n");
-
-       mv_face_recognition_h handle = NULL;
-       const char *out_label = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_get_label(NULL, &out_label);
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_get_label_n1\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_get_label_n2()
- * @testcase        utc_mediavision_mv_face_recognition_get_label_n2
- * @since_tizen     7.0
- * @description     Get a label,
- *                  but fail because output parameter is NULL
- */
-int utc_mediavision_mv_face_recognition_get_label_n2(void)
-{
-       printf("Start mv_face_recognition_get_label_n2\n");
-
-       mv_face_recognition_h handle = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_get_label(handle, NULL);
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_get_label_n2\n");
-
-       return 0;
-}
-
-/**
- * @brief Positive test case of mv_face_recognition_unregister_p()
- * @testcase        utc_mediavision_mv_face_recognition_unregister_p
- * @since_tizen     7.0
- * @description     Unregister a given label
- */
-int utc_mediavision_mv_face_recognition_unregister_p(void)
-{
-       printf("Start mv_face_recognition_unregister_p\n");
-
-       mv_face_recognition_h handle = NULL;
-       mv_source_h mv_source = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       // Training
-       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
-               for (unsigned int img_idx = 0; img_idx < MAX_IMAGE_CNT / 2; ++img_idx) {
-                       char image_path[FILE_PATH_SIZE] = "";
-
-                       ret = mv_create_source(&mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       if (label_idx == 0)
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p1_face_examples_dir, image_file_names[img_idx]);
-                       else
-                               snprintf(image_path, FILE_PATH_SIZE, "%s/%s", p2_face_examples_dir, image_file_names[img_idx]);
-
-                       ret = load_image_to_media_source(image_path, mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_face_recognition_register(handle, mv_source, label_names[label_idx]);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-                       ret = mv_destroy_source(mv_source);
-                       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-               }
-       }
-
-       for (unsigned int label_idx = 0; label_idx < MAX_LABEL_CNT; ++label_idx) {
-               ret = mv_face_recognition_unregister(handle, label_names[label_idx]);
-               assert_eq(ret, MEDIA_VISION_ERROR_NONE);
-       }
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_unregister_p\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_unregister_n1()
- * @testcase        utc_mediavision_mv_face_recognition_unregister_n1
- * @since_tizen     7.0
- * @description     Unregister a given label,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_face_recognition_unregister_n1(void)
-{
-       printf("Start mv_face_recognition_unregister_n1\n");
-
-       mv_face_recognition_h handle = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_unregister(NULL, "p1");
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_unregister_n1\n");
-
-       return 0;
-}
-
-/**
- * @brief Negative test case of mv_face_recognition_unregister_n2()
- * @testcase        utc_mediavision_mv_face_recognition_unregister_n2
- * @since_tizen     7.0
- * @description     Unregister a given label,
- *                  but fail because label is NULL
- */
-int utc_mediavision_mv_face_recognition_unregister_n2(void)
-{
-       printf("Start mv_face_recognition_unregister_n2\n");
-
-       mv_face_recognition_h handle = NULL;
-
-       int ret = mv_face_recognition_create(&handle);
-       if (!is_face_recognition_feature_supported()) {
-               assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-               return 0;
-       }
-
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_prepare(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       ret = mv_face_recognition_unregister(handle, NULL);
-       assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-       ret = mv_face_recognition_destroy(handle);
-       assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-       printf("End mv_face_recognition_unregister_n2\n");
-
-       return 0;
-}
\ No newline at end of file
diff --git a/src/utc/capi-media-vision/utc-mv_inference.c b/src/utc/capi-media-vision/utc-mv_inference.c
deleted file mode 100755 (executable)
index 09baa39..0000000
+++ /dev/null
@@ -1,2519 +0,0 @@
-/**
- * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assert.h"
-#include <mv_inference.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <image_util.h>
-#include <system_info.h>
-#include <storage.h>
-#include "tct_common.h"
-
-#define FILE_PATH_SIZE 1024
-#define API_NAMESPACE "[MediaVision]"
-
-static bool isVisionSupported = false;
-static int gStartupError;
-
-static mv_inference_h gInferenceHandle = NULL;
-static mv_engine_config_h gEngineConfigHandle = NULL;
-static mv_source_h gSourceHandle = NULL;
-char *gInferenceExampleDir = NULL;
-static mv_point_s** gPldResultLandmarks = NULL;
-static float** gPldResultScore = NULL;
-static mv_pose_h gPoseHandle = NULL;
-
-static bool gIsForeachSupportedCallBackInvoked = false;
-static bool gIsImageClassifyCallBackInvoked = false;
-static bool gIsObjectDetectCallBackInvoked = false;
-static bool gIsFaceDetectCallBackInvoked = false;
-static bool gIsFacialLandmarkDetectCallBackInvoked = false;
-static bool gIsPoseLandmarkDetectCallBackInvoked = false;
-static bool gIsGetPoseLandmark = false;
-
-static int gPldResultErr;
-static int gPldResultNumberOfPoses;
-static int gPldResultNumberOfLandmarks;
-static int gPldResultLabel;
-static float gPoseCompareScore;
-
-#define IC_MODEL_FILENAME "ic_tflite_model.tflite"
-#define IC_MODEL_META_FILENAME "ic_tflite_model_meta.json"
-#define IC_LABEL_FILENAME "ic_label.txt"
-
-#define OD_MODEL_FILENAME "od_tflite_model.tflite"
-#define OD_MODEL_META_FILENAME "od_tflite_model_meta.json"
-#define OD_LABEL_FILENAME "od_label.txt"
-
-#define FD_MODEL_FILENAME "fd_tflite_model1.tflite"
-#define FD_MODEL_META_FILENAME "fd_tflite_model1_meta.json"
-
-#define FLD_MODEL_FILENAME "fld_tflite_model1.tflite"
-#define FLD_MODEL_META_FILENAME "fld_tflite_model1_meta.json"
-
-#define PLD_MODEL_FILENAME "pld_tflite_model.tflite"
-#define PLD_MODEL_META_FILENAME "pld_tflite_model_meta.json"
-#define PLD_POSE_MAPPING_FILENAME "pld_pose_mapping.txt"
-#define PLD_MOCAP_FILEAME "pld_mocap.bvh"
-#define PLD_MOCAP_MAPPING_FILENAME "pld_mocap_mapping.txt"
-
-static int set_image_classification_engine_config(mv_engine_config_h engineCfg)
-{
-    int ret = MEDIA_VISION_ERROR_NONE;
-
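-    // Configure model, meta and label file paths plus preprocessing and input/output tensor attributes for the TFLite image classification model.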
-    char modelFilename[1024];
-    char modelMetaFilename[1024];
-    char labelFilename[1024];
-    char *inputNodeName = "input_2";
-    char *outputNodeName[1] = {"dense_3/Softmax"};
-    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, IC_MODEL_FILENAME);
-    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, IC_MODEL_META_FILENAME);
-    snprintf(labelFilename, 1024, "%s/models/%s", gInferenceExampleDir, IC_LABEL_FILENAME);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        modelFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_META_FILE_PATH,
-                        modelMetaFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_USER_FILE_PATH,
-                        labelFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        127.0);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        127.0);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_CONFIDENCE_THRESHOLD,
-                        0.5);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_TFLITE);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_CPU);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        224);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        224);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_array_string_attribute(engineCfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        1);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    return ret;
-}
-
-static int set_object_detection_engine_config(mv_engine_config_h engineCfg)
-{
-    int ret = MEDIA_VISION_ERROR_NONE;
-
-    char modelFilename[1024];
-    char modelMetaFilename[1024];
-    char labelFilename[1024];
-    char *inputNodeName = "normalized_input_image_tensor";
-    char *outputNodeName[4] = {"TFLite_Detection_PostProcess",
-                            "TFLite_Detection_PostProcess:1",
-                            "TFLite_Detection_PostProcess:2",
-                            "TFLite_Detection_PostProcess:3"};
-
-    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, OD_MODEL_FILENAME);
-    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, OD_MODEL_META_FILENAME);
-    snprintf(labelFilename, 1024, "%s/models/%s", gInferenceExampleDir, OD_LABEL_FILENAME);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        modelFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_META_FILE_PATH,
-                        modelMetaFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_USER_FILE_PATH,
-                        labelFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        127.5);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        127.5);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_CONFIDENCE_THRESHOLD,
-                        0.3);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_TFLITE);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_CPU);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        300);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        300);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_array_string_attribute(engineCfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        4);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    return ret;
-}
-
-static int set_face_detection_engine_config(mv_engine_config_h engineCfg)
-{
-    int ret = MEDIA_VISION_ERROR_NONE;
-
-    char modelFilename[1024];
-    char modelMetaFilename[1024];
-    char *inputNodeName = "normalized_input_image_tensor";
-    char *outputNodeName[4] = {"TFLite_Detection_PostProcess",
-                                "TFLite_Detection_PostProcess:1",
-                                "TFLite_Detection_PostProcess:2",
-                                "TFLite_Detection_PostProcess:3"};
-    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, FD_MODEL_FILENAME);
-    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, FD_MODEL_META_FILENAME);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        modelFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_META_FILE_PATH,
-                        modelMetaFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        127.5);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        127.5);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_CONFIDENCE_THRESHOLD,
-                        0.3);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_TFLITE);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_CPU);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        300);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        300);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_array_string_attribute(engineCfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        4);
-    return ret;
-}
-
-static int set_facial_landmark_detection_engine_config(mv_engine_config_h engineCfg)
-{
-    int ret = MEDIA_VISION_ERROR_NONE;
-
-    char modelFilename[1024];
-    char modelMetaFilename[1024];
-    char *inputNodeName = "Placeholder";
-    char *outputNodeName[1] = {"fanet8ss_inference/fully_connected_1/Sigmoid"};
-    snprintf(modelFilename, 1024, "%s/models/%s", gInferenceExampleDir, FLD_MODEL_FILENAME);
-    snprintf(modelMetaFilename, 1024, "%s/models/%s", gInferenceExampleDir, FLD_MODEL_META_FILENAME);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        modelFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_META_FILE_PATH,
-                        modelMetaFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_TFLITE);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_CPU);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        128);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        128);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_array_string_attribute(engineCfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        1);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    return ret;
-}
-
-static int set_pose_landmark_detection_engine_config(mv_engine_config_h engineCfg)
-{
-    int ret = MEDIA_VISION_ERROR_NONE;
-
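-    // Same pattern as the other configs; pose landmark detection additionally passes the pose mapping file via MV_INFERENCE_MODEL_USER_FILE_PATH.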
-    char modelFilename[FILE_PATH_SIZE];
-    char modelMetaFilename[FILE_PATH_SIZE];
-    char poseMappingFilename[FILE_PATH_SIZE];
-    char *inputNodeName = "image";
-    char *outputNodeName[1] = { "Convolutional_Pose_Machine/stage_5_out" };
-    snprintf(modelFilename, FILE_PATH_SIZE, "%s/models/%s",
-                            gInferenceExampleDir, PLD_MODEL_FILENAME);
-    snprintf(modelMetaFilename, FILE_PATH_SIZE, "%s/models/%s",
-                            gInferenceExampleDir, PLD_MODEL_META_FILENAME);
-    snprintf(poseMappingFilename, FILE_PATH_SIZE, "%s/models/%s",
-                            gInferenceExampleDir, PLD_POSE_MAPPING_FILENAME);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                        modelFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_META_FILE_PATH,
-                        modelMetaFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_DATA_TYPE,
-                        MV_INFERENCE_DATA_FLOAT32);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_USER_FILE_PATH,
-                        poseMappingFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_MEAN_VALUE,
-                        0.0);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_double_attribute(engineCfg,
-                        MV_INFERENCE_MODEL_STD_VALUE,
-                        1.0);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_TFLITE);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_TARGET_DEVICE_TYPE,
-                        MV_INFERENCE_TARGET_DEVICE_CPU);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_WIDTH,
-                        192);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_HEIGHT,
-                        192);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_int_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_TENSOR_CHANNELS,
-                        3);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_string_attribute(engineCfg,
-                        MV_INFERENCE_INPUT_NODE_NAME,
-                        inputNodeName);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_engine_config_set_array_string_attribute(engineCfg,
-                        MV_INFERENCE_OUTPUT_NODE_NAMES,
-                        outputNodeName,
-                        1);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    return ret;
-}
-
-static int load_image_to_media_source(
-        const char *file_path,
-        mv_source_h source)
-{
-    if (NULL == file_path || NULL == source)
-    {
-        printf("File path or source is NULL\n");
-        return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-    }
-
-    unsigned long width = 0;
-    unsigned long height = 0;
-    unsigned long long buffer_size = 0;
-    unsigned char *data_buffer = NULL;
-    int ret1 = IMAGE_UTIL_ERROR_NONE;
-    int ret2 = MEDIA_VISION_ERROR_NONE;
-    image_util_decode_h _decoder = NULL;
-
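-    // Decode the image file into an RGB888 buffer and fill the media vision source with the raw pixels.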
-    ret1 = image_util_decode_create(&_decoder);
-    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-    ret1 = image_util_decode_set_input_path(_decoder, file_path);
-    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-    ret1 = image_util_decode_set_colorspace(_decoder, IMAGE_UTIL_COLORSPACE_RGB888);
-    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-    ret1 = image_util_decode_set_output_buffer(_decoder, &data_buffer);
-    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-    ret1 = image_util_decode_run(_decoder, &width, &height, &buffer_size);
-    if (ret1 != IMAGE_UTIL_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-
-    // Only grayscale and RGB jpegs in test set:
-    mv_colorspace_e source_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
-
-    ret2 = mv_source_clear(source);
-    if (ret2 != MEDIA_VISION_ERROR_NONE) goto _LOAD_IMAGE_FAIL;
-
-    ret2 = mv_source_fill_by_buffer(
-                    source, data_buffer, (unsigned int)buffer_size,
-                    (unsigned int)width, (unsigned int)height,
-                    source_colorspace);
-
-_LOAD_IMAGE_FAIL:
-    image_util_decode_destroy(_decoder);
-    if(data_buffer)
-        free(data_buffer);
-
-    assert_eq(IMAGE_UTIL_ERROR_NONE, ret1);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret2);
-
-    return MEDIA_VISION_ERROR_NONE;
-}
-
-/**
- * @function   utc_capi_media_vision_inference_startup1
- * @description        Inference module UTC startup code
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_startup1(void)
-{
-    printf("capi-media-vision mv_inference tests STARTUP is launched\n");
-
-    bool isInferenceSupported = true;
-    system_info_get_platform_bool("http://tizen.org/feature/vision.inference", &isInferenceSupported);
-
-    isVisionSupported = isInferenceSupported;
-
-    printf("capi-media-vision mv_inference tests STARTUP is completed\n");
-}
-
-/**
- * @function   utc_capi_media_vision_inference_cleanup1
- * @description        Inference module UTC cleanup code
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_cleanup1(void)
-{
-    printf("capi-media-vision mv_image tests CLEANUP is launched\n");
-    printf("capi-media-vision mv_image tests CLEANUP is completed\n");
-}
-
-
-/**
- * @function   utc_capi_media_vision_inference_startup2
- * @description        Inference module UTC startup code
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_startup2(void)
-{
-    printf("capi-media-vision mv_inference tests STARTUP is launched\n");
-
-    bool isInferenceSupported = true;
-    system_info_get_platform_bool("http://tizen.org/feature/vision.inference", &isInferenceSupported);
-
-    isVisionSupported = isInferenceSupported;
-
-
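-    // Build the inference resource directory path from the TCT target directory setting.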
-    char pszValue[CONFIG_VALUE_LEN_MAX] = {0,};
-    if (true == GetValueForTCTSetting("DEVICE_SUITE_TARGET_30", pszValue, API_NAMESPACE)) {
-        PRINT_UTC_LOG("[Line : %d][%s] 'DEVICE_SUITE_TARGET_30' Values Received %s\\n", __LINE__, API_NAMESPACE, pszValue);
-
-        gInferenceExampleDir=(char*)calloc(strlen(pszValue)+strlen("/res/res/inference")+1, sizeof(char));
-        snprintf(gInferenceExampleDir, strlen(pszValue)+strlen("/res/res/inference")+1, "%s/res/res/inference", pszValue);
-
-    } else {
-        PRINT_UTC_LOG("[Line : %d][%s] GetValueForTCTSetting returned error for 'DEVICE_SUITE_TARGET_30'\\n", __LINE__, API_NAMESPACE);
-    }
-
-    gStartupError = mv_inference_create(&gInferenceHandle);
-    if (MEDIA_VISION_ERROR_NONE != gStartupError) {
-        gInferenceHandle = NULL;
-        printf("mv_inference_h create is failed\n");
-        return;
-    }
-
-    gStartupError = mv_create_engine_config(&gEngineConfigHandle);
-    if (MEDIA_VISION_ERROR_NONE != gStartupError) {
-        gEngineConfigHandle = NULL;
-        printf("mv_engine_config_h create is failed\n");
-        return;
-    }
-
-    gStartupError = mv_create_source(&gSourceHandle);
-    if (MEDIA_VISION_ERROR_NONE != gStartupError) {
-        gSourceHandle = NULL;
-        printf("mv_source_h create is failed\n");
-        return;
-    }
-
-    printf("capi-media-vision mv_inference tests STARTUP is completed\n");
-}
-
-/**
- * @function   utc_capi_media_vision_inference_cleanup2
- * @description        Inference module UTC cleanup code
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_cleanup2(void)
-{
-    printf("capi-media-vision mv_image tests CLEANUP is launched\n");
-
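-    // Release the handles, result buffers and resource path allocated by startup and the tests.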
-    if (gInferenceHandle) {
-        mv_inference_destroy(gInferenceHandle);
-        gInferenceHandle = NULL;
-    }
-
-    if (gEngineConfigHandle) {
-        mv_destroy_engine_config(gEngineConfigHandle);
-        gEngineConfigHandle = NULL;
-    }
-
-    if (gSourceHandle) {
-        mv_destroy_source(gSourceHandle);
-        gSourceHandle = NULL;
-    }
-
-    if (gInferenceExampleDir) {
-        free(gInferenceExampleDir);
-        gInferenceExampleDir = NULL;
-    }
-
-    if (gPldResultLandmarks){
-        for (int pose = 0; pose < gPldResultNumberOfPoses; ++pose)
-            free(gPldResultLandmarks[pose]);
-        free(gPldResultLandmarks);
-        gPldResultLandmarks = NULL;
-    }
-
-    if (gPldResultScore){
-        for (int pose = 0; pose < gPldResultNumberOfPoses; ++pose)
-            free(gPldResultScore[pose]);
-        free(gPldResultScore);
-        gPldResultScore = NULL;
-    }
-
-    if (gPoseHandle) {
-        mv_pose_destroy(gPoseHandle);
-        gPoseHandle = NULL;
-    }
-
-    printf("capi-media-vision mv_image tests CLEANUP is completed\n");
-}
-
-/**
- * @brief Positive test case of mv_inference_create_p()
- * @testcase        utc_mediavision_mv_inference_create_p
- * @since_tizen     5.5
- * @description     Create inference handle
- */
-int utc_mediavision_mv_inference_create_p(void)
-{
-    printf("Inside mv_inference_create_p\n");
-
-    mv_inference_h inferenceHandle = NULL;
-    int ret = mv_inference_create(&inferenceHandle);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_destroy(inferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_inference_create_p\n");
-
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_create()
- * @testcase        utc_mediavision_mv_inference_create_n
- * @since_tizen     5.5
- * @description     Create inference handle,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_inference_create_n(void)
-{
-    printf("Inside mv_inference_create_n\n");
-
-    int ret = mv_inference_create(NULL);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_create_n\n");
-
-    return 0;
-}
-
-/**
- * @brief Positive test case of mv_inference_destroy_p()
- * @testcase        utc_mediavision_mv_inference_destroy_p
- * @since_tizen     5.5
- * @description     Destroy inference handle
- */
-int utc_mediavision_mv_inference_destroy_p(void)
-{
-    printf("Inside mv_inference_destroy_p\n");
-
-    mv_inference_h inferenceHandle = NULL;
-    int ret = mv_inference_create(&inferenceHandle);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_destroy(inferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_inference_destroy_p\n");
-
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_destroy_n()
- * @testcase        utc_mediavision_mv_inference_destroy_n
- * @since_tizen     5.5
- * @description     Destroy inference handle,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_inference_destroy_n(void)
-{
-    printf("Inside mv_inference_destroy_n\n");
-
-    int ret = mv_inference_destroy(NULL);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_destroy_n\n");
-
-    return 0;
-}
-
-static bool _supported_inference_engine_cb(
-                        const char *engine,
-                        bool supported,
-                        void *user_data)
-{
-    gIsForeachSupportedCallBackInvoked = true;
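-    // returning true continues the iteration over the remaining supported engines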
-    return true;
-}
-/**
- * @brief Positive test case of mv_inference_foreach_supported_engine_p()
- * @testcase        utc_mediavision_mv_inference_foreach_supported_engine_p
- * @since_tizen     5.5
- * @description     Check supported engine
- */
-int utc_mediavision_mv_inference_foreach_supported_engine(void)
-{
-    printf("Inside mv_inference_foreach_supported_engine_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    int ret = mv_inference_foreach_supported_engine(gInferenceHandle,
-                        _supported_inference_engine_cb,
-                        NULL);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-    assert_eq(true, gIsForeachSupportedCallBackInvoked);
-
-    printf("Before return mv_inference_foreach_supported_engine_p\n");
-
-    return 0;
-}
-
-/**
- * @brief Positive test case of mv_inference_configure_p()
- * @testcase        utc_mediavision_mv_inference_configure_p
- * @since_tizen     5.5
- * @description     Configure inference handle
- */
-int utc_mediavision_mv_inference_configure_p(void)
-{
-    printf("Inside mv_inference_configure_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    int ret = MEDIA_VISION_ERROR_NONE;
-
-    // create handle
-    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_inference_configure_p\n");
-    return 0;
-}
-
-
-/**
- * @brief Negative test case of mv_inference_configure_n1()
- * @testcase        utc_mediavision_mv_inference_configure_n1
- * @since_tizen     5.5
- * @description     Configure inference handle,
- *                  but fail because input parameters are invalid
- */
-int utc_mediavision_mv_inference_configure_n1(void)
-{
-    printf("Inside mv_inference_configure_n1\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    int ret = mv_inference_configure(gInferenceHandle, NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    ret = mv_inference_configure(NULL, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    ret = mv_inference_configure(NULL, NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_configure_n1\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_configure_n2()
- * @testcase        utc_mediavision_mv_inference_configure_n2
- * @since_tizen     5.5
- * @description     Configure inference handle,
- *                  but fail because the invalid value is set
- */
-int utc_mediavision_mv_inference_configure_n2(void)
-{
-    printf("Inside mv_inference_configure_n2\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
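-    // MV_INFERENCE_BACKEND_MAX is not a usable backend, so configuring with it must be rejected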
-    int ret = mv_engine_config_set_int_attribute(gEngineConfigHandle,
-                        MV_INFERENCE_BACKEND_TYPE,
-                        MV_INFERENCE_BACKEND_MAX);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_configure_n2\n");
-    return 0;
-}
-
-
-/**
- * @brief Positive test case of mv_inference_prepare_p()
- * @testcase        utc_mediavision_mv_inference_prepare_p
- * @since_tizen     5.5
- * @description     Prepare inference handle
- */
-int utc_mediavision_mv_inference_prepare_p(void)
-{
-    printf("Inside mv_inference_prepare_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_inference_configure_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_prepare_n1()
- * @testcase        utc_mediavision_mv_inference_prepare_n1
- * @since_tizen     5.5
- * @description     Prepare inference handle,
- *                  but fail because input handle is NULL
- */
-int utc_mediavision_mv_inference_prepare_n1(void)
-{
-    printf("Inside mv_inference_prepare_n1\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    int ret = mv_inference_prepare(NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_prepare_n1\n");
-    return 0;
-}
-
-static void _classified_cb (mv_source_h source,
-                        int number_of_classes,
-                        const int *indices,
-                        const char **names,
-                        const float *confidences,
-                        void *user_data)
-{
-    gIsImageClassifyCallBackInvoked = true;
-}
-/**
- * @brief Positive test case of mv_inference_image_classify()
- * @testcase        utc_mediavision_mv_inference_image_classify_p
- * @since_tizen     5.5
- * @description     Classify an image
- */
-int utc_mediavision_mv_inference_image_classify_p(void)
-{
-    printf("Inside mv_inference_image_classify_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "banana.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_image_classify(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _classified_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-    assert_eq(true, gIsImageClassifyCallBackInvoked);
-
-    printf("Before return mv_inference_image_classify_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_image_classify()
- * @testcase        utc_mediavision_mv_inference_image_classify_n1
- * @since_tizen     5.5
- * @description     Classify an image,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_inference_image_classify_n1(void)
-{
-    printf("Inside mv_inference_image_classify_n1\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "banana.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    // source is NULL
-    ret = mv_inference_image_classify(NULL,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _classified_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // inference handle is NULL
-    ret = mv_inference_image_classify(gSourceHandle,
-                                    NULL,
-                                    NULL,
-                                    _classified_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // callback is NULL
-    ret = mv_inference_image_classify(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    NULL,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_image_classify_n1\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_image_classify()
- * @testcase        utc_mediavision_mv_inference_image_classify_n2
- * @since_tizen     5.5
- * @description     Classify an image,
- *                  but fail because mv_inference_prepare() isn't called before
- */
-int utc_mediavision_mv_inference_image_classify_n2(void)
-{
-    printf("Inside mv_inference_image_classify_n2\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_image_classification_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "banana.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    // skip the mv_inference_prepare()
-    ret = mv_inference_image_classify(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _classified_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
-
-    printf("Before return mv_inference_image_classify_n2\n");
-    return 0;
-}
-
-static void _od_detected_cb (mv_source_h source,
-                        int number_of_object,
-                        const int *indices,
-                        const char **names,
-                        const float *confidences,
-                        const mv_rectangle_s *locations,
-                        void *user_data)
-{
-    gIsObjectDetectCallBackInvoked = true;
-}
-/**
- * @brief Positive test case of mv_inference_object_detect()
- * @testcase        utc_mediavision_mv_inference_object_detect_p
- * @since_tizen     5.5
- * @description     Detect objects in an image
- */
-int utc_mediavision_mv_inference_object_detect_p(void)
-{
-    printf("Inside mv_inference_object_detect_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_object_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "dog2.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_object_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    _od_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-    assert_eq(true, gIsObjectDetectCallBackInvoked);
-
-    printf("Before return mv_inference_object_detect_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_object_detect()
- * @testcase        utc_mediavision_mv_inference_object_detect_n1
- * @since_tizen     5.5
- * @description     Detect objects in an image,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_inference_object_detect_n1(void)
-{
-    printf("Inside mv_inference_object_detect_n1\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_object_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "dog2.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    // source is NULL
-    ret = mv_inference_object_detect(NULL,
-                                    gInferenceHandle,
-                                    _od_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // inference handle is NULL
-    ret = mv_inference_object_detect(gSourceHandle,
-                                    NULL,
-                                    _od_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // callback is NULL
-    ret = mv_inference_object_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_image_classify_n1\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_object_detect()
- * @testcase        utc_mediavision_mv_inference_object_detect_n2
- * @since_tizen     5.5
- * @description     Detect objects in an image,
- *                  but fail because mv_inference_prepare() isn't called before
- */
-int utc_mediavision_mv_inference_object_detect_n2(void)
-{
-    printf("Inside mv_inference_object_detect_n2\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_object_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "dog2.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
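-    // mv_inference_prepare() is intentionally skipped, so the detect call must fail with INVALID_OPERATION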
-    ret = mv_inference_object_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    _od_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
-
-    printf("Before return mv_inference_object_detect_n2\n");
-    return 0;
-}
-
-static void _fd_detected_cb (mv_source_h source,
-                        int number_of_faces,
-                        const float *confidences,
-                        const mv_rectangle_s *locations,
-                        void *user_data)
-{
-    gIsFaceDetectCallBackInvoked = true;
-}
-/**
- * @brief Positive test case of mv_inference_face_detect()
- * @testcase        utc_mediavision_mv_inference_face_detect_p
- * @since_tizen     5.5
- * @description     Detect faces in an image
- */
-int utc_mediavision_mv_inference_face_detect_p(void)
-{
-    printf("Inside mv_inference_face_detect_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_face_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceDetection.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_face_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    _fd_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-    assert_eq(true, gIsFaceDetectCallBackInvoked);
-
-    printf("Before return mv_inference_face_detect_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_face_detect()
- * @testcase        utc_mediavision_mv_inference_face_detect_n1
- * @since_tizen     5.5
- * @description     Detect faces in an image,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_inference_face_detect_n1(void)
-{
-    printf("Inside mv_inference_face_detect_n1\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_face_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceDetection.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    // source is NULL
-    ret = mv_inference_face_detect(NULL,
-                                    gInferenceHandle,
-                                    _fd_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // inference handle is NULL
-    ret = mv_inference_face_detect(gSourceHandle,
-                                    NULL,
-                                    _fd_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // callback is NULL
-    ret = mv_inference_face_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_object_detect_n1\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_face_detect()
- * @testcase        utc_mediavision_mv_inference_face_detect_n2
- * @since_tizen     5.5
- * @description     Detect faces in an image,
- *                  but fail because mv_inference_prepare() isn't called before
- */
-int utc_mediavision_mv_inference_face_detect_n2(void)
-{
-    printf("Inside mv_inference_face_detect_n2\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_face_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceDetection.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_face_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    _fd_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
-
-    printf("Before return mv_inference_face_detect_n2\n");
-    return 0;
-}
-
-static void _fld_detected_cb (mv_source_h source,
-                        int number_of_landmark,
-                        const mv_rectangle_s *locations,
-                        void *user_data)
-{
-    gIsFacialLandmarkDetectCallBackInvoked = true;
-}
-/**
- * @brief Positive test case of mv_inference_facial_landmark_detect()
- * @testcase        utc_mediavision_mv_inference_facial_landmark_detect_p
- * @since_tizen     5.5
- * @description     Detect landmarks on a detected face
- */
-int utc_mediavision_mv_inference_facial_landmark_detect_p(void)
-{
-    printf("Inside mv_inference_facial_landmark_detect_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_facial_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_facial_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _fld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-    assert_eq(true, gIsFacialLandmarkDetectCallBackInvoked);
-
-    printf("Before return mv_inference_facial_landmark_detect_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_facial_landmark_detect()
- * @testcase        utc_mediavision_mv_inference_facial_landmark_detect_n1
- * @since_tizen     5.5
- * @description     Detect landmarks on a detected face,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_inference_facial_landmark_detect_n1(void)
-{
-    printf("Inside mv_inference_facial_landmark_detect_n1\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_facial_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    // source is NULL
-    ret = mv_inference_facial_landmark_detect(NULL,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _fld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // inference handle is NULL
-    ret = mv_inference_facial_landmark_detect(gSourceHandle,
-                                    NULL,
-                                    NULL,
-                                    _fld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // callback is NULL
-    ret = mv_inference_facial_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    NULL,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_facial_landmark_detect_n1\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_facial_landmark_detect()
- * @testcase        utc_mediavision_mv_inference_facial_landmark_detect_n2
- * @since_tizen     5.5
- * @description     Detect landmarks on a detected face,
- *                  but fail because mv_inference_prepare() isn't called before
- */
-int utc_mediavision_mv_inference_facial_landmark_detect_n2(void)
-{
-    printf("Inside mv_inference_facial_landmark_detect_n2\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[1024];
-
-    assert_eq(set_facial_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, 1024, "%s/images/%s", gInferenceExampleDir, "faceLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_facial_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _fld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
-
-    printf("Before return mv_inference_facial_landmark_detect_n2\n");
-    return 0;
-}
-
-static void _pld_detected_cb(mv_source_h source,
-                        mv_inference_pose_result_h locations,
-                        void *user_data)
-{
-    gIsPoseLandmarkDetectCallBackInvoked = true;
-}
-
-/**
- * @brief Positive test case of mv_inference_pose_landmark_detect()
- * @testcase        utc_mediavision_mv_inference_pose_landmark_detect_p
- * @since_tizen     6.0
- * @description     Detect pose landmark on a human body
- */
-int utc_mediavision_mv_inference_pose_landmark_detect_p(void)
-{
-    printf("Inside mv_inference_pose_landmark_detect_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[FILE_PATH_SIZE];
-    assert_eq(set_pose_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _pld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
-
-    printf("Before return mv_inference_pose_landmark_detect_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_pose_landmark_detect()
- * @testcase        utc_mediavision_mv_inference_pose_landmark_detect_n1
- * @since_tizen     6.0
- * @description     Detect pose landmark on a human body,
- *                  but fail because an input parameter is NULL
- */
-int utc_mediavision_mv_inference_pose_landmark_detect_n1(void)
-{
-    printf("Inside mv_inference_pose_landmark_detect_n1\n");
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[FILE_PATH_SIZE];
-    assert_eq(set_pose_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    // source is NULL
-    ret = mv_inference_pose_landmark_detect(NULL,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _pld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // inference handle is NULL
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    NULL,
-                                    NULL,
-                                    _pld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // callback is NULL
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    NULL,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_pose_landmark_detect_n1\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_pose_landmark_detect()
- * @testcase        utc_mediavision_mv_inference_pose_landmark_detect_n2
- * @since_tizen     6.0
- * @description     Detect pose landmark on a human body,
- *                  but fail because mv_inference_prepare() isn't called
- */
-int utc_mediavision_mv_inference_pose_landmark_detect_n2(void)
-{
-    printf("Inside mv_inference_pose_landmark_detect_n2\n");
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    char imageFilename[FILE_PATH_SIZE];
-    assert_eq(set_pose_landmark_detection_engine_config(gEngineConfigHandle), MEDIA_VISION_ERROR_NONE);
-
-    int ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    _pld_detected_cb,
-                                    NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_OPERATION, ret);
-
-    printf("Before return mv_inference_pose_landmark_detect_n2\n");
-    return 0;
-}
-
-static void get_pose_landmark_detection_result_cb1(mv_source_h source,
-                                    mv_inference_pose_result_h result,
-                                    void *user_data)
-{
-    printf("Inside get_pose_landmark_detection_result_cb1\n");
-    gIsPoseLandmarkDetectCallBackInvoked = true;
-
-    gPldResultErr = mv_inference_pose_get_number_of_poses(result, &gPldResultNumberOfPoses);
-    printf("Before retrun get_pose_landmark_detection_result_cb1\n");
-}
-
-/**
- * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup
- * @description        Inference module UTC startup code for mv_inference_pose_get_number_of_poses()
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_pose_landmark_detect_cb1_startup(void)
-{
-    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb1_startup\n");
-    utc_capi_media_vision_inference_startup2();
-
-    if (gStartupError != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    char imageFilename[FILE_PATH_SIZE];
-    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("set_pose_landmark_detection_engine_config is failed\n");
-        return;
-    }
-
-    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_configure is failed\n");
-        return;
-    }
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_prepare is failed\n");
-        return;
-    }
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("load_image_to_media_source is failed\n");
-        return;
-    }
-
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    get_pose_landmark_detection_result_cb1,
-                                    NULL);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_pose_landmark_detect is failed\n");
-        return;
-    }
-
-    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb1_startup\n");
-}
-
-/**
- * @brief Positive test case of mv_inference_pose_get_number_of_poses()
- * @testcase        utc_mediavision_mv_inference_get_number_of_poses_p
- * @since_tizen     6.0
- * @description     Get the number of poses from a detected result
- */
-int utc_mediavision_mv_inference_get_number_of_poses_p(void)
-{
-    printf("Inside mv_inference_get_number_of_poses_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
-    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
-    assert_gt(gPldResultNumberOfPoses, 0);
-
-    printf("Before return mv_inference_get_number_of_poses_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_pose_get_number_of_poses()
- * @testcase        utc_mediavision_mv_inference_get_number_of_poses_n
- * @since_tizen     6.0
- * @description     Get the number of poses from a detected result,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_inference_get_number_of_poses_n(void)
-{
-    printf("Inside mv_inference_get_number_of_poses_n\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    // handle is NULL
-    int ret = mv_inference_pose_get_number_of_poses(NULL, &gPldResultNumberOfPoses);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_get_number_of_poses_n\n");
-    return 0;
-}
-
-static void get_pose_landmark_detection_result_cb2(mv_source_h source,
-                                    mv_inference_pose_result_h result,
-                                    void *user_data)
-{
-    printf("Inside get_pose_landmark_detection_result_cb2\n");
-    gIsPoseLandmarkDetectCallBackInvoked = true;
-
-    gPldResultErr = mv_inference_pose_get_number_of_landmarks(result, &gPldResultNumberOfLandmarks);
-    printf("Before retrun get_pose_landmark_detection_result_cb2\n");
-}
-
-/**
- * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup
- * @description        Inference module UTC startup code for mv_inference_pose_get_number_of_landmarks()
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_pose_landmark_detect_cb2_startup(void)
-{
-    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb2_startup\n");
-    utc_capi_media_vision_inference_startup2();
-
-    if (gStartupError != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    char imageFilename[FILE_PATH_SIZE];
-    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("set_pose_landmark_detection_engine_config is failed\n");
-        return;
-    }
-
-    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_configure is failed\n");
-        return;
-    }
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_prepare is failed\n");
-        return;
-    }
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("load_image_to_media_source is failed\n");
-        return;
-    }
-
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    get_pose_landmark_detection_result_cb2,
-                                    NULL);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_pose_landmark_detect is failed\n");
-        return;
-    }
-
-    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb2_startup\n");
-}
-
-/**
- * @brief Positive test case of mv_inference_pose_get_number_of_landmarks()
- * @testcase        utc_mediavision_mv_inference_get_number_of_landmarks_p
- * @since_tizen     6.0
- * @description     Get the number of landmarks from a detected result
- */
-int utc_mediavision_mv_inference_get_number_of_landmarks_p(void)
-{
-    printf("Inside mv_inference_get_number_of_landmarks_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
-    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
-    assert_gt(gPldResultNumberOfLandmarks, 0);
-
-    printf("Before return mv_inference_get_number_of_landmarks_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_pose_get_number_of_landmarks()
- * @testcase        utc_mediavision_mv_inference_get_number_of_landmarks_n
- * @since_tizen     6.0
- * @description     Get the number of landmarks from a detected result,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_inference_get_number_of_landmarks_n(void)
-{
-    printf("Inside mv_inference_get_number_of_landmarks_n\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    // handle is NULL
-    int ret = mv_inference_pose_get_number_of_landmarks(NULL, &gPldResultNumberOfLandmarks);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_get_number_of_landmarks_n\n");
-    return 0;
-}
-
-static void get_pose_landmark_detection_result_cb3(mv_source_h source,
-                                    mv_inference_pose_result_h result,
-                                    void *user_data)
-{
-    printf("Inside get_pose_landmark_detection_result_cb3\n");
-    gIsPoseLandmarkDetectCallBackInvoked = true;
-
-    // get the number of poses
-    gPldResultErr = mv_inference_pose_get_number_of_poses(result, &gPldResultNumberOfPoses);
-    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    // get the number of landmarks
-    gPldResultErr = mv_inference_pose_get_number_of_landmarks(result, &gPldResultNumberOfLandmarks);
-    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    // allocate memory with the number of poses
-    gPldResultLandmarks = (mv_point_s **)malloc(gPldResultNumberOfPoses * sizeof(mv_point_s *));
-    gPldResultScore = (float **)malloc(gPldResultNumberOfPoses * sizeof(float *));
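-    // both per-pose arrays are released later in the test cleanup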
-    for (int pose = 0; pose < gPldResultNumberOfPoses; ++pose) {
-        // allocate memory with the number of landmarks
-        gPldResultLandmarks[pose] = (mv_point_s *)malloc(gPldResultNumberOfLandmarks * sizeof(mv_point_s));
-        gPldResultScore[pose] = (float *)malloc(gPldResultNumberOfLandmarks * sizeof(float));
-        for (int part = 0; part < gPldResultNumberOfLandmarks; ++part) {
-            gPldResultErr = mv_inference_pose_get_landmark(result, pose, part,
-                                                            &(gPldResultLandmarks[pose][part]),
-                                                            &(gPldResultScore[pose][part]));
-            if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
-                return;
-        }
-    }
-
-    gIsGetPoseLandmark = true;
-    printf("Before retrun get_pose_landmark_detection_result_cb3\n");
-}
-
-/**
- * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup
- * @description        Inference module UTC startup code for mv_inference_pose_get_landmark()
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_pose_landmark_detect_cb3_startup(void)
-{
-    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb3_startup\n");
-    utc_capi_media_vision_inference_startup2();
-
-    if (gStartupError != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    char imageFilename[FILE_PATH_SIZE];
-    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("set_pose_landmark_detection_engine_config is failed\n");
-        return;
-    }
-
-    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_configure is failed\n");
-        return;
-    }
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_prepare is failed\n");
-        return;
-    }
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("load_image_to_media_source is failed\n");
-        return;
-    }
-
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    get_pose_landmark_detection_result_cb3,
-                                    NULL);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_pose_landmark_detect is failed\n");
-        return;
-    }
-
-    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb3_startup\n");
-}
-
-/**
- * @brief Positive test case of mv_inference_pose_get_landmark()
- * @testcase        utc_mediavision_mv_inference_get_landmark_p
- * @since_tizen     6.0
- * @description     Get landmarks from a detected result
- */
-int utc_mediavision_mv_inference_get_landmark_p(void)
-{
-    printf("Inside mv_inference_get_landmark_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
-    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
-    assert_gt(gPldResultNumberOfPoses, 0);
-    assert_gt(gPldResultNumberOfLandmarks, 0);
-    assert_eq(true, gIsGetPoseLandmark);
-
-    printf("Before return mv_inference_get_landmark_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_pose_get_landmark()
- * @testcase        utc_mediavision_mv_inference_get_landmark_n
- * @since_tizen     6.0
- * @description     Get landmarks from a detected result,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_inference_get_landmark_n(void)
-{
-    printf("Inside mv_inference_get_landmark_n\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    // handle is NULL
-    mv_point_s landmark;
-    float score;
-    int ret = mv_inference_pose_get_landmark(NULL, 0, 0, &landmark, &score);
-
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_get_landmark_n\n");
-    return 0;
-}
-
-static void get_pose_landmark_detection_result_cb5(mv_source_h source,
-                                    mv_inference_pose_result_h result,
-                                    void *user_data)
-{
-    printf("Inside get_pose_landmark_detection_result_cb5\n");
-    gIsPoseLandmarkDetectCallBackInvoked = true;
-
-    gPldResultErr = mv_inference_pose_get_number_of_poses(result, &gPldResultNumberOfPoses);
-    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    if (gPldResultNumberOfPoses <= 0)
-        return;
-
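-    // query the label of the first detected pose (index 0)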
-    gPldResultErr = mv_inference_pose_get_label(result, 0, &gPldResultLabel);
-    printf("Before retrun get_pose_landmark_detection_result_cb5\n");
-}
-
-/**
- * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup
- * @description        Inference module UTC startup code for mv_inference_pose_get_label()
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_pose_landmark_detect_cb5_startup(void)
-{
-    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb5_startup\n");
-    utc_capi_media_vision_inference_startup2();
-
-    if (gStartupError != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    char imageFilename[FILE_PATH_SIZE];
-    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("set_pose_landmark_detection_engine_config is failed\n");
-        return;
-    }
-
-    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_configure is failed\n");
-        return;
-    }
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_prepare is failed\n");
-        return;
-    }
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("load_image_to_media_source is failed\n");
-        return;
-    }
-
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    get_pose_landmark_detection_result_cb5,
-                                    NULL);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_pose_landmark_detect is failed\n");
-        return;
-    }
-
-    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb5_startup\n");
-}
-
-/**
- * @brief Positive test case of mv_inference_pose_get_label()
- * @testcase        utc_mediavision_mv_inference_pose_get_label_p
- * @since_tizen     6.0
- * @description     Get the label from a detected result
- */
-int utc_mediavision_mv_inference_get_label_p(void)
-{
-    printf("Inside mv_inference_pose_get_label_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
-    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
-    assert_gt(gPldResultNumberOfPoses, 0);
-    assert_leq(gPldResultLabel, 0);
-
-    printf("Before return mv_inference_pose_get_label_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_inference_pose_get_label()
- * @testcase        utc_mediavision_mv_inference_pose_get_label_n
- * @since_tizen     6.0
- * @description     Get the label from a detected result,
- *                  but fail because handle is NULL
- */
-int utc_mediavision_mv_inference_get_label_n(void)
-{
-    printf("Inside mv_inference_pose_get_label_n\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    // handle is NULL
-    int ret = mv_inference_pose_get_label(NULL, 0, &gPldResultLabel);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_inference_pose_get_label_nn");
-    return 0;
-}
-
-/**
- * @brief Positive test case of mv_pose_create_p()
- * @testcase        utc_mediavision_mv_pose_create_p
- * @since_tizen     6.0
- * @description     Create pose handle
- */
-int utc_mediavision_mv_pose_create_p(void)
-{
-    printf("Inside mv_pose_create_p\n");
-
-    mv_pose_h poseHandle = NULL;
-    int ret = mv_pose_create(&poseHandle);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_pose_destroy(poseHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_pose_create_p\n");
-
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_pose_create()
- * @testcase        utc_mediavision_mv_pose_create_n
- * @since_tizen     6.0
- * @description     Create pose handle,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_pose_create_n(void)
-{
-    printf("Inside mv_pose_create_n\n");
-
-    int ret = mv_pose_create(NULL);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_pose_create_n\n");
-
-    return 0;
-}
-
-/**
- * @brief Positive test case of mv_pose_destroy_p()
- * @testcase        utc_mediavision_mv_pose_destroy_p
- * @since_tizen     6.0
- * @description     Destroy pose handle
- */
-int utc_mediavision_mv_pose_destroy_p(void)
-{
-    printf("Inside mv_pose_destroy_p\n");
-
-    mv_pose_h poseHandle = NULL;
-    int ret = mv_pose_create(&poseHandle);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_pose_destroy(poseHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_pose_destroy_p\n");
-
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_pose_destroy_n()
- * @testcase        utc_mediavision_mv_pose_destroy_n
- * @since_tizen     6.0
- * @description     Destroy pose handle,
- *                  but fail because input parameter is NULL
- */
-int utc_mediavision_mv_pose_destroy_n(void)
-{
-    printf("Inside mv_pose_destroy_n\n");
-
-    int ret = mv_pose_destroy(NULL);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    printf("Before return mv_pose_destroy_n\n");
-
-    return 0;
-}
-
-/**
- * @brief Positive test case of mv_pose_set_from_file()
- * @testcase        utc_mediavision_mv_pose_set_from_file_p
- * @since_tizen     6.0
- * @description     Set pose mocap file and its mapping file
- */
-int utc_mediavision_mv_pose_set_from_file_p(void)
-{
-    printf("Inside mv_pose_set_from_file_p\n");
-
-    mv_pose_h poseHandle = NULL;
-    int ret = mv_pose_create(&poseHandle);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    char poseMocapFilename[FILE_PATH_SIZE];
-    char poseMocapMappingFilename[FILE_PATH_SIZE];
-    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
-    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
-
-    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, poseMocapMappingFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    ret = mv_pose_destroy(poseHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_pose_set_from_file_p\n");
-
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_pose_set_from_file()
- * @testcase        utc_mediavision_mv_pose_set_from_file_n1
- * @since_tizen     6.0
- * @description     Set pose mocap file and its mapping file,
- *                  but fail because file paths are NULL
- */
-int utc_mediavision_mv_pose_set_from_file_n1(void)
-{
-    printf("Inside mv_pose_set_from_file_n1\n");
-
-    mv_pose_h poseHandle = NULL;
-    int ret = mv_pose_create(&poseHandle);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    char poseMocapFilename[FILE_PATH_SIZE];
-    char poseMocapMappingFilename[FILE_PATH_SIZE];
-    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
-    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
-
-    // handle is NULL
-    ret = mv_pose_set_from_file(NULL, poseMocapFilename, poseMocapMappingFilename);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // motion capture file path is NULL
-    ret = mv_pose_set_from_file(poseHandle, NULL, poseMocapMappingFilename);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    // motion mapping file path is NULL
-    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, NULL);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    ret = mv_pose_destroy(poseHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_pose_set_from_file_n1\n");
-
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_pose_set_from_file()
- * @testcase        utc_mediavision_mv_pose_set_from_file_n2
- * @since_tizen     6.0
- * @description     Set pose mocap file and its mapping file,
- *                  but fail because file paths are fake (invalid)
- */
-int utc_mediavision_mv_pose_set_from_file_n2(void)
-{
-    printf("Inside mv_pose_set_from_file_n2\n");
-
-    mv_pose_h poseHandle = NULL;
-    int ret = mv_pose_create(&poseHandle);
-    if (!isVisionSupported) {
-        assert_eq(ret, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    char poseMocapFilename[FILE_PATH_SIZE];
-    char poseMocapMappingFilename[FILE_PATH_SIZE];
-    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
-    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
-
-    // fakefile doesn't exist
-    char fakeFilename[FILE_PATH_SIZE];
-    snprintf(fakeFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "fakefile");
-
-    // motion capture file path is invalid
-    ret = mv_pose_set_from_file(poseHandle, fakeFilename, poseMocapMappingFilename);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PATH, ret);
-
-    // motion mapping file path is invalid
-    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, fakeFilename);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PATH, ret);
-
-    ret = mv_pose_destroy(poseHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_pose_set_from_file_n2\n");
-
-    return 0;
-}
-
-static void get_pose_landmark_detection_result_cb4(mv_source_h source,
-                                    mv_inference_pose_result_h result,
-                                    void *user_data)
-{
-    printf("Inside get_pose_landmark_detection_result_cb4\n");
-    gIsPoseLandmarkDetectCallBackInvoked = true;
-
-    mv_pose_h *pose = (mv_pose_h *)user_data;
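-    // OR the body-part flags so both legs are compared in a single mv_pose_compare() call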
-    int part = MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT | MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT;
-    gPldResultErr = mv_pose_compare(*pose, result, part, &gPoseCompareScore);
-    if (gPldResultErr != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    printf("Before retrun get_pose_landmark_detection_result_cb4\n");
-}
-
-/**
- * @function   utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup
- * @description        Inference module UTC startup code for mv_pose_compare()
- * @parameter  NA
- * @return             NA
- */
-void utc_capi_media_vision_inference_pose_landmark_detect_cb4_startup(void)
-{
-    printf("Inside utc_mediavision_mv_inference_pose_landmark_detect_cb4_startup\n");
-    utc_capi_media_vision_inference_startup2();
-
-    if (gStartupError != MEDIA_VISION_ERROR_NONE)
-        return;
-
-    char imageFilename[FILE_PATH_SIZE];
-    int ret = set_pose_landmark_detection_engine_config(gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("set_pose_landmark_detection_engine_config is failed\n");
-        return;
-    }
-
-    ret = mv_inference_configure(gInferenceHandle, gEngineConfigHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_configure is failed\n");
-        return;
-    }
-
-    ret = mv_inference_prepare(gInferenceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_prepare is failed\n");
-        return;
-    }
-
-    snprintf(imageFilename, FILE_PATH_SIZE, "%s/images/%s", gInferenceExampleDir, "poseLandmark.jpg");
-    ret = load_image_to_media_source(imageFilename, gSourceHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("load_image_to_media_source is failed\n");
-        return;
-    }
-
-    ret = mv_pose_create(&gPoseHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_pose_create is failed\n");
-        return;
-    }
-
-    char poseMocapFilename[FILE_PATH_SIZE];
-    char poseMocapMappingFilename[FILE_PATH_SIZE];
-    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
-    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
-
-    ret = mv_pose_set_from_file(gPoseHandle, poseMocapFilename, poseMocapMappingFilename);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_pose_set_from_file is failed\n");
-        return;
-    }
-
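-    // Pass the reference pose handle as user_data so the callback can run
-    // mv_pose_compare() on the landmarks detected from poseLandmark.jpg.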
-    ret = mv_inference_pose_landmark_detect(gSourceHandle,
-                                    gInferenceHandle,
-                                    NULL,
-                                    get_pose_landmark_detection_result_cb4,
-                                    &gPoseHandle);
-    if (ret != MEDIA_VISION_ERROR_NONE) {
-        printf("mv_inference_pose_landmark_detect is failed\n");
-        return;
-    }
-
-    printf("Before return utc_mediavision_mv_inference_pose_landmark_detect_cb4_startup\n");
-}
-
-/**
- * @brief Positive test case of mv_pose_compare()
- * @testcase        utc_mediavision_mv_pose_compare_p
- * @since_tizen     6.0
- * @description     Compare the result detected by mv_inference_pose_landmark_detect()
- *                  with the pose set by mv_pose_set_from_file()
- */
-int utc_mediavision_mv_pose_compare_p(void)
-{
-    printf("Inside mv_inference_get_landmark_p\n");
-
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-
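-    // These globals were filled by get_pose_landmark_detection_result_cb4 in
-    // the startup; the detected pose must score above 0.5 against the
-    // reference mocap pose.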
-    assert_eq(true, gIsPoseLandmarkDetectCallBackInvoked);
-    assert_eq(MEDIA_VISION_ERROR_NONE, gPldResultErr);
-    assert_gt(gPoseCompareScore, 0.5);
-
-    printf("Before return mv_inference_get_landmark_p\n");
-    return 0;
-}
-
-/**
- * @brief Negative test case of mv_pose_compare()
- * @testcase        utc_mediavision_mv_pose_compare_n
- * @since_tizen     6.0
- * @description     Compare the result detected by mv_inference_pose_landmark_detect()
- *                  with the pose set by mv_pose_set_from_file(),
- *                  but fail because the landmark detection result handle is NULL
- */
-int utc_mediavision_mv_pose_compare_n(void)
-{
-    printf("Inside mv_pose_compare_n\n");
-
-    mv_pose_h poseHandle = NULL;
-    int ret = mv_pose_create(&poseHandle);
-    if (!isVisionSupported) {
-        assert_eq(gStartupError, MEDIA_VISION_ERROR_NOT_SUPPORTED);
-        return 0;
-    }
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    char poseMocapFilename[FILE_PATH_SIZE];
-    char poseMocapMappingFilename[FILE_PATH_SIZE];
-    snprintf(poseMocapFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap.bvh");
-    snprintf(poseMocapMappingFilename, FILE_PATH_SIZE, "%s/models/%s", gInferenceExampleDir, "pld_mocap_mapping.txt");
-
-    ret = mv_pose_set_from_file(poseHandle, poseMocapFilename, poseMocapMappingFilename);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
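-    // A valid reference pose is configured above, so the NULL result handle
-    // is the only invalid argument in the mv_pose_compare() call below.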
-    int part = MV_INFERENCE_HUMAN_BODY_PART_LEG_RIGHT | MV_INFERENCE_HUMAN_BODY_PART_LEG_LEFT;
-    // pose landmark detection result is NULL
-    ret = mv_pose_compare(poseHandle, NULL, part, &gPoseCompareScore);
-    assert_eq(MEDIA_VISION_ERROR_INVALID_PARAMETER, ret);
-
-    ret = mv_pose_destroy(poseHandle);
-    assert_eq(MEDIA_VISION_ERROR_NONE, ret);
-
-    printf("Before return mv_pose_compare_n\n");
-    return 0;
-}