From: HyoungJoo Ahn Date: Fri, 6 Sep 2019 05:46:28 +0000 (+0900) Subject: [UTC][nnstreamer][ACR-1382][Add new test cases] X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8a0b663f3702e001ed560f07b7d9ebc1debf90de;p=test%2Ftct%2Fnative%2Fapi.git [UTC][nnstreamer][ACR-1382][Add new test cases] Change-Id: I929afcb99be6030b0584b066429daeb671806857 Signed-off-by: HyoungJoo Ahn --- diff --git a/packaging/utc/core-nnstreamer-tests.spec b/packaging/utc/core-nnstreamer-tests.spec new file mode 100755 index 000000000..6cff2b53f --- /dev/null +++ b/packaging/utc/core-nnstreamer-tests.spec @@ -0,0 +1,73 @@ +%define MODULE_NAME nnstreamer +%define MODULE_LIBNAME nnstreamer +Name: core-%{MODULE_NAME}-tests +Summary: Core API unit TC (%{name}) +Version: 0.1 +Release: 0 +Group: Development/Tools +License: Apache License, Version 2.0 +Source0: %{name}-%{version}.tar.gz +BuildRequires: pkgconfig(%{MODULE_LIBNAME}) +BuildRequires: pkgconfig(glib-2.0) +BuildRequires: cmake +BuildRequires: pkgconfig(nnstreamer-capi) +BuildRequires: pkgconfig(dlog) +%description +Core API unit TC (%{name}) + +%prep +%setup -q + +%build + +%define PREFIX "%{_libdir}/%{name}" + +export LDFLAGS+="-Wl,--rpath=%{PREFIX} -Wl,--as-needed" + +%if %{?ASAN_BUILD:1}0 + %if %{?DEVICE_BUILD_TYPE_MOBILE:1}0 + cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="mobile" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif + %if %{?DEVICE_BUILD_TYPE_WEARABLE:1}0 + cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="wearable" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif + %if %{?DEVICE_BUILD_TYPE_TV:1}0 + cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tv" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif + %if %{?DEVICE_BUILD_TYPE_TIZENIOT:1}0 + cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tizeniot" -DASANBUILD="true" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif +%else + %if %{?DEVICE_BUILD_TYPE_MOBILE:1}0 + cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="mobile" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif + %if %{?DEVICE_BUILD_TYPE_WEARABLE:1}0 + cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="wearable" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif + %if %{?DEVICE_BUILD_TYPE_TV:1}0 + cmake . -DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tv" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif + %if %{?DEVICE_BUILD_TYPE_TIZENIOT:1}0 + cmake . 
-DMODULE="%{MODULE_NAME}" -DBUILDTCTYPE="utc" -DDEVICE_BUILD_TYPE="tizeniot" -DASANBUILD="false" -DCMAKE_INSTALL_PREFIX=%{_prefix} + %endif +%endif + +make %{?jobs:-j%jobs} + +%install +rm -rf %{buildroot} +%make_install +mkdir -p %{buildroot}/usr/share/license +cp LICENSE %{buildroot}/usr/share/license/%{name} +mkdir -p %{buildroot}/usr/share/packages/ +cp packaging/utc/%{name}.xml %{buildroot}/usr/share/packages/ +mkdir -p %{buildroot}/usr/apps/%{name}/bin +%post + +%postun + + +%files +/usr/apps/%{name}/* +/usr/share/packages/%{name}.xml +/usr/share/license/%{name} diff --git a/packaging/utc/core-nnstreamer-tests.xml b/packaging/utc/core-nnstreamer-tests.xml new file mode 100755 index 000000000..8f9897653 --- /dev/null +++ b/packaging/utc/core-nnstreamer-tests.xml @@ -0,0 +1,14 @@ + + + + test + Core API test Application + + + + + + + + + diff --git a/src/utc/nnstreamer/CMakeLists.txt b/src/utc/nnstreamer/CMakeLists.txt new file mode 100644 index 000000000..c1497fa38 --- /dev/null +++ b/src/utc/nnstreamer/CMakeLists.txt @@ -0,0 +1,47 @@ +SET(PKG_NAME "nnstreamer") + +SET(EXEC_NAME "tct-${PKG_NAME}-core") +SET(RPM_NAME "core-${PKG_NAME}-tests") + +SET(CAPI_LIB "nnstreamer") +SET(TC_SOURCES + utc-nnstreamer-pipeline-sink.c + utc-nnstreamer-pipeline-src.c + utc-nnstreamer-pipeline-switch.c + utc-nnstreamer-pipeline-tensor-data.c + utc-nnstreamer-pipeline-tensor-info.c + utc-nnstreamer-pipeline-valve.c + utc-nnstreamer-pipeline.c + utc-nnstreamer-single.c + utc-nnstreamer-scenario.c +) + +PKG_CHECK_MODULES(${CAPI_LIB} REQUIRED + ${CAPI_LIB} + capi-appfw-application + capi-system-info + glib-2.0 + nnstreamer +) + +INCLUDE_DIRECTORIES( + ${${CAPI_LIB}_INCLUDE_DIRS} +) + +ADD_EXECUTABLE(${EXEC_NAME} ${EXEC_NAME}.c ${TC_SOURCES} ${COMMON_FILE}) +TARGET_LINK_LIBRARIES(${EXEC_NAME} + ${${CAPI_LIB}_LIBRARIES} +) + +INSTALL(PROGRAMS ${EXEC_NAME} + DESTINATION ${BIN_DIR}/${RPM_NAME}/bin +) + +IF( DEFINED ASAN ) +SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC -Wall -pie -g -fsanitize=address -fsanitize-recover=address -U_FORTIFY_SOURCE -fno-omit-frame-pointer") +SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=/usr/lib -Wl,-fsanitize=address") +ELSE() +SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g -fPIE -Wall") +SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=/usr/lib -pie") +ENDIF() + diff --git a/src/utc/nnstreamer/public.list b/src/utc/nnstreamer/public.list new file mode 100755 index 000000000..e69de29bb diff --git a/src/utc/nnstreamer/res/mobilenet_v1_1.0_224_quant.tflite b/src/utc/nnstreamer/res/mobilenet_v1_1.0_224_quant.tflite new file mode 100644 index 000000000..9a81d7c81 Binary files /dev/null and b/src/utc/nnstreamer/res/mobilenet_v1_1.0_224_quant.tflite differ diff --git a/src/utc/nnstreamer/tct-nnstreamer-core.c b/src/utc/nnstreamer/tct-nnstreamer-core.c new file mode 100644 index 000000000..e9b7f5d3b --- /dev/null +++ b/src/utc/nnstreamer/tct-nnstreamer-core.c @@ -0,0 +1,137 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tct_common.h" +#include +#include + +#ifdef MOBILE +#include "tct-nnstreamer-core_mobile.h" +#endif + +#ifdef WEARABLE +#include "tct-nnstreamer-core_wearable.h" +#endif + +#ifdef TV +#include "tct-nnstreamer-core_tv.h" +#endif + +#ifdef TIZENIOT +#include "tct-nnstreamer-core_tizeniot.h" +#endif //TIZENIOT + +static bool app_create(void *data) +{ + return true; +} + +static void app_control(app_control_h app_control, void *data) +{ + char* pszGetTCName = NULL; + int i=0, result=0, nRet=0; + nRet = app_control_get_extra_data(app_control, "testcase_name", &pszGetTCName); + if(nRet != APP_CONTROL_ERROR_NONE) + { + dlog_print(DLOG_ERROR, "NativeTCT", "[%s:%d] app_control_get_extra_data returns error = %d", __FUNCTION__, __LINE__, nRet); + PRINT_UTC_LOG("\\n[%s][Line : %d]Unable to fetch test case name: app_control_get_extra_data API call fails\\n", __FILE__, __LINE__); + PRINT_TC_RESULT("%d",1); + FREE_MEMORY_TC(pszGetTCName); + return; + } + + dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Executing TC Name = %s", __FUNCTION__, __LINE__, pszGetTCName); + for ( i = 0; tc_array[i].name; i++ ) + { + if ( 0 == strncmp(pszGetTCName, tc_array[i].name, strlen(pszGetTCName)) ) + { + DUMP_UTC_ERRLOG(); + dlog_print(DLOG_INFO, "NativeTCT", "%s : Startup begin", pszGetTCName); + if ( tc_array[i].startup ) + { + tc_array[i].startup(); + } + dlog_print(DLOG_INFO, "NativeTCT", "%s : Startup end", pszGetTCName); + + dlog_print(DLOG_INFO, "NativeTCT", "%s : Body begin", pszGetTCName); + result = tc_array[i].function(); + dlog_print(DLOG_INFO, "NativeTCT", "%s returns value = %d", pszGetTCName, result); + dlog_print(DLOG_INFO, "NativeTCT", "%s : Body end", pszGetTCName); + + dlog_print(DLOG_INFO, "NativeTCT", "%s : Cleanup begin", pszGetTCName); + if ( tc_array[i].cleanup ) + { + tc_array[i].cleanup(); + } + dlog_print(DLOG_INFO, "NativeTCT", "%s : Cleanup end", pszGetTCName); + + CLOSE_UTC_ERRLOG(); + PRINT_TC_RESULT("%d",result); + FREE_MEMORY_TC(pszGetTCName); + return; + } + } + + dlog_print(DLOG_ERROR, "NativeTCT", "[%s:%d] Unable to execute %s : Unknown Test Case Name", __FUNCTION__, __LINE__, pszGetTCName); + PRINT_UTC_LOG("\\n[%s][Line : %d]Unable to execute %s : Unknown Test Case Name\\n", __FILE__, __LINE__, pszGetTCName); + PRINT_TC_RESULT("%d",1); + FREE_MEMORY_TC(pszGetTCName); + return; +} + +static void app_terminate(void *data) +{ + dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Application Package is now Terminating", __FUNCTION__, __LINE__); +} + +int main(int argc, char *argv[]) +{ + int ret = 0; + + ui_app_lifecycle_callback_s event_callback = {0,}; + event_callback.create = app_create; + event_callback.terminate = app_terminate; + event_callback.app_control = app_control; + +#ifdef MOBILE + + //setting gcda file location for coverage + setenv("GCOV_PREFIX","/tmp",1); + dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Coverage *.gcda File location set to /tmp/home/abuild/rpmbuild/BUILD/ ", __FUNCTION__, __LINE__); + +#endif + + dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Application Main Function is Invoked", __FUNCTION__, __LINE__); + ret = ui_app_main(argc, argv, &event_callback, NULL); + if (ret != APP_ERROR_NONE) + { + dlog_print(DLOG_ERROR, "NativeTCT", "Application ui_app_main call gets failed. 
err = %d", ret); + PRINT_UTC_LOG("\\n[%s][Line : %d]Application ui_app_main call gets failed. err = %d\\n", __FILE__, __LINE__, ret); + PRINT_TC_RESULT("%d",1); + return ret; + } + + dlog_print(DLOG_INFO, "NativeTCT", "[%s:%d] Application Package is Terminated", __FUNCTION__, __LINE__); + return ret; +} diff --git a/src/utc/nnstreamer/tct-nnstreamer-core_mobile.h b/src/utc/nnstreamer/tct-nnstreamer-core_mobile.h new file mode 100755 index 000000000..cfe4a63d3 --- /dev/null +++ b/src/utc/nnstreamer/tct-nnstreamer-core_mobile.h @@ -0,0 +1,292 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef __TCT_NNSTREAMER-NATIVE_H__ +#define __TCT_NNSTREAMER-NATIVE_H__ + +#include "testcase.h" +#include "tct_common.h" + +extern void utc_nnstreamer_single_startup (void); +extern void utc_nnstreamer_single_cleanup (void); +extern void utc_nnstreamer_pipeline_sink_startup (void); +extern void utc_nnstreamer_pipeline_sink_cleanup (void); +extern void utc_nnstreamer_pipeline_src_startup (void); +extern void utc_nnstreamer_pipeline_src_cleanup (void); +extern void utc_nnstreamer_pipeline_switch_startup (void); +extern void utc_nnstreamer_pipeline_switch_cleanup (void); +extern void utc_nnstreamer_pipeline_valve_startup (void); +extern void utc_nnstreamer_pipeline_valve_cleanup (void); +extern void utc_nnstreamer_pipeline_tensor_info_startup (void); +extern void utc_nnstreamer_pipeline_tensor_info_cleanup (void); +extern void utc_nnstreamer_pipeline_tensor_data_startup (void); +extern void utc_nnstreamer_pipeline_tensor_data_cleanup (void); +extern void utc_nnstreamer_scenario_startup (void); +extern void utc_nnstreamer_scenario_cleanup (void); + +extern int utc_ml_pipeline_construct_p (void); +extern int utc_ml_pipeline_construct_n (void); +extern int utc_ml_pipeline_destroy_p (void); +extern int utc_ml_pipeline_destroy_n (void); +extern int utc_ml_pipeline_get_state_p (void); + +extern int utc_ml_pipeline_get_state_n (void); +extern int utc_ml_pipeline_start_p (void); +extern int utc_ml_pipeline_start_n (void); +extern int utc_ml_pipeline_stop_p (void); +extern int utc_ml_pipeline_stop_n (void); + +extern int utc_ml_pipeline_sink_register_p (void); +extern int utc_ml_pipeline_sink_register_n (void); +extern int utc_ml_pipeline_sink_unregister_p (void); +extern int utc_ml_pipeline_sink_unregister_n (void); +extern int utc_ml_pipeline_src_get_handle_p (void); + +extern int utc_ml_pipeline_src_get_handle_n (void); +extern int utc_ml_pipeline_src_release_handle_p (void); +extern int utc_ml_pipeline_src_release_handle_n (void); +extern int utc_ml_pipeline_src_input_data_p (void); +extern int utc_ml_pipeline_src_input_data_n (void); + +extern int utc_ml_pipeline_src_get_tensors_info_p (void); +extern int utc_ml_pipeline_src_get_tensors_info_n (void); +extern int utc_ml_pipeline_switch_get_handle_p (void); +extern int utc_ml_pipeline_switch_get_handle_n (void); +extern int utc_ml_pipeline_switch_release_handle_p (void); + +extern int 
utc_ml_pipeline_switch_release_handle_n (void); +extern int utc_ml_pipeline_switch_select_p (void); +extern int utc_ml_pipeline_switch_select_n (void); +extern int utc_ml_pipeline_switch_get_pad_list_p (void); +extern int utc_ml_pipeline_switch_get_pad_list_n (void); + +extern int utc_ml_pipeline_valve_get_handle_p (void); +extern int utc_ml_pipeline_valve_get_handle_n (void); +extern int utc_ml_pipeline_valve_release_handle_p (void); +extern int utc_ml_pipeline_valve_release_handle_n (void); +extern int utc_ml_pipeline_valve_set_open_p (void); + +extern int utc_ml_pipeline_valve_set_open_n (void); +extern int utc_ml_tensors_info_create_p (void); +extern int utc_ml_tensors_info_create_n (void); +extern int utc_ml_tensors_info_destroy_p (void); +extern int utc_ml_tensors_info_destroy_n (void); + +extern int utc_ml_tensors_info_validate_p (void); +extern int utc_ml_tensors_info_validate_n (void); +extern int utc_ml_tensors_info_clone_p (void); +extern int utc_ml_tensors_info_clone_n (void); +extern int utc_ml_tensors_info_set_count_p (void); + +extern int utc_ml_tensors_info_set_count_n (void); +extern int utc_ml_tensors_info_get_count_p (void); +extern int utc_ml_tensors_info_get_count_n (void); +extern int utc_ml_tensors_info_set_tensor_name_p (void); +extern int utc_ml_tensors_info_set_tensor_name_n (void); + +extern int utc_ml_tensors_info_get_tensor_name_p (void); +extern int utc_ml_tensors_info_get_tensor_name_n (void); +extern int utc_ml_tensors_info_set_tensor_type_p (void); +extern int utc_ml_tensors_info_set_tensor_type_n (void); +extern int utc_ml_tensors_info_get_tensor_type_p (void); + +extern int utc_ml_tensors_info_get_tensor_type_n (void); +extern int utc_ml_tensors_info_set_tensor_dimension_p (void); +extern int utc_ml_tensors_info_set_tensor_dimension_n (void); +extern int utc_ml_tensors_info_get_tensor_dimension_p (void); +extern int utc_ml_tensors_info_get_tensor_dimension_n (void); + +extern int utc_ml_tensors_data_create_p (void); +extern int utc_ml_tensors_data_create_n (void); +extern int utc_ml_tensors_data_destroy_p (void); +extern int utc_ml_tensors_data_destroy_n (void); +extern int utc_ml_tensors_data_get_tensor_data_p (void); + +extern int utc_ml_tensors_data_get_tensor_data_n (void); +extern int utc_ml_tensors_data_set_tensor_data_p (void); +extern int utc_ml_tensors_data_set_tensor_data_n (void); +extern int utc_ml_check_nnfw_availability_p (void); +extern int utc_ml_check_nnfw_availability_n (void); + +extern int utc_ml_single_open_p (void); +extern int utc_ml_single_open_n (void); +extern int utc_ml_single_close_p (void); +extern int utc_ml_single_close_n (void); +extern int utc_ml_single_invoke_p (void); + +extern int utc_ml_single_invoke_n (void); +extern int utc_ml_single_get_input_info_p (void); +extern int utc_ml_single_get_input_info_n (void); +extern int utc_ml_single_get_output_info_p (void); +extern int utc_ml_single_get_output_info_n (void); + +extern int utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p (void); +extern int utc_nnstreamer_scenario_construct_destruct_pipeline_p1 (void); +extern int utc_nnstreamer_scenario_construct_destruct_pipeline_p2 (void); +extern int utc_nnstreamer_scenario_construct_pipeline_error_case_n1 (void); +extern int utc_nnstreamer_scenario_construct_pipeline_error_case_n2 (void); + +extern int utc_nnstreamer_scenario_pipeline_state_test_p1 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p2 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p3 (void); +extern int 
utc_nnstreamer_scenario_valve_error_cases_n (void); +extern int utc_nnstreamer_scenario_sink_p1 (void); + +extern int utc_nnstreamer_scenario_sink_p2 (void); +extern int utc_nnstreamer_scenario_sink_error_cases_n (void); +extern int utc_nnstreamer_scenario_src_p (void); +extern int utc_nnstreamer_scenario_src_error_case_n1 (void); +extern int utc_nnstreamer_scenario_src_error_case_n2 (void); + +extern int utc_nnstreamer_scenario_src_error_case_n3 (void); +extern int utc_nnstreamer_scenario_switch_pipeline_p1 (void); +extern int utc_nnstreamer_scenario_switch_pipeline_p2 (void); +extern int utc_nnstreamer_scenario_switch_pipeline_error_cases_n (void); +extern int utc_nnstreamer_scenario_check_nnfw_availability_p (void); + +extern int utc_nnstreamer_scenario_check_tensor_info_utilities_p (void); +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_p1 (void); +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_p2 (void); +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_n (void); + +testcase tc_array[] = { + {"utc_ml_pipeline_construct_p", utc_ml_pipeline_construct_p, NULL, NULL}, + {"utc_ml_pipeline_construct_n", utc_ml_pipeline_construct_n, NULL, NULL}, + {"utc_ml_pipeline_destroy_p", utc_ml_pipeline_destroy_p, NULL, NULL}, + {"utc_ml_pipeline_destroy_n", utc_ml_pipeline_destroy_n, NULL, NULL}, + {"utc_ml_pipeline_get_state_p", utc_ml_pipeline_get_state_p, NULL, NULL}, + + {"utc_ml_pipeline_get_state_n", utc_ml_pipeline_get_state_n, NULL, NULL}, + {"utc_ml_pipeline_start_p", utc_ml_pipeline_start_p, NULL, NULL}, + {"utc_ml_pipeline_start_n", utc_ml_pipeline_start_n, NULL, NULL}, + {"utc_ml_pipeline_stop_p", utc_ml_pipeline_stop_p, NULL, NULL}, + {"utc_ml_pipeline_stop_n", utc_ml_pipeline_stop_n, NULL, NULL}, + + {"utc_ml_pipeline_sink_register_p", utc_ml_pipeline_sink_register_p, utc_nnstreamer_pipeline_sink_startup, utc_nnstreamer_pipeline_sink_cleanup}, + {"utc_ml_pipeline_sink_register_n", utc_ml_pipeline_sink_register_n, NULL, NULL}, + {"utc_ml_pipeline_sink_unregister_p", utc_ml_pipeline_sink_unregister_p, utc_nnstreamer_pipeline_sink_startup, utc_nnstreamer_pipeline_sink_cleanup}, + {"utc_ml_pipeline_sink_unregister_n", utc_ml_pipeline_sink_unregister_n, NULL, NULL}, + {"utc_ml_pipeline_src_get_handle_p", utc_ml_pipeline_src_get_handle_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + + {"utc_ml_pipeline_src_get_handle_n", utc_ml_pipeline_src_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_src_release_handle_p", utc_ml_pipeline_src_release_handle_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_release_handle_n", utc_ml_pipeline_src_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_src_input_data_p", utc_ml_pipeline_src_input_data_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_input_data_n", utc_ml_pipeline_src_input_data_n, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + + {"utc_ml_pipeline_src_get_tensors_info_p", utc_ml_pipeline_src_get_tensors_info_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_get_tensors_info_n", utc_ml_pipeline_src_get_tensors_info_n, NULL, NULL}, + {"utc_ml_pipeline_switch_get_handle_p", utc_ml_pipeline_switch_get_handle_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_get_handle_n", utc_ml_pipeline_switch_get_handle_n, NULL, NULL}, + 
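
Each row of tc_array pairs a test-case name with its body function and optional per-case startup/cleanup hooks; tct-nnstreamer-core.c looks the row up by the "testcase_name" extra data and runs startup, body, and cleanup in that order. testcase.h is not part of this commit, so the layout below is only a sketch of the assumed structure, not the actual definition:

/* Sketch of the assumed testcase layout (the real one lives in testcase.h). */
typedef struct {
  const char *name;         /* matched against the "testcase_name" extra data */
  int  (*function) (void);  /* test body; 0 is treated as pass                */
  void (*startup)  (void);  /* optional fixture setup, may be NULL            */
  void (*cleanup)  (void);  /* optional fixture teardown, may be NULL         */
} testcase;

/* Simplified form of the dispatch performed in app_control() above. */
static int run_named_tc (const testcase *tc)
{
  int result;
  if (tc->startup)
    tc->startup ();
  result = tc->function ();
  if (tc->cleanup)
    tc->cleanup ();
  return result;
}
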
{"utc_ml_pipeline_switch_release_handle_p", utc_ml_pipeline_switch_release_handle_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + + {"utc_ml_pipeline_switch_release_handle_n", utc_ml_pipeline_switch_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_switch_select_p", utc_ml_pipeline_switch_select_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_select_n", utc_ml_pipeline_switch_select_n, NULL, NULL}, + {"utc_ml_pipeline_switch_get_pad_list_p", utc_ml_pipeline_switch_get_pad_list_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_get_pad_list_n", utc_ml_pipeline_switch_get_pad_list_n, NULL, NULL}, + + {"utc_ml_pipeline_valve_get_handle_p", utc_ml_pipeline_valve_get_handle_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + {"utc_ml_pipeline_valve_get_handle_n", utc_ml_pipeline_valve_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_valve_release_handle_p", utc_ml_pipeline_valve_release_handle_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + {"utc_ml_pipeline_valve_release_handle_n", utc_ml_pipeline_valve_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_valve_set_open_p", utc_ml_pipeline_valve_set_open_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + + {"utc_ml_pipeline_valve_set_open_n", utc_ml_pipeline_valve_set_open_n, NULL, NULL}, + {"utc_ml_tensors_info_create_p", utc_ml_tensors_info_create_p, NULL, NULL}, + {"utc_ml_tensors_info_create_n", utc_ml_tensors_info_create_n, NULL, NULL}, + {"utc_ml_tensors_info_destroy_p", utc_ml_tensors_info_destroy_p, NULL, NULL}, + {"utc_ml_tensors_info_destroy_n", utc_ml_tensors_info_destroy_n, NULL, NULL}, + + {"utc_ml_tensors_info_validate_p", utc_ml_tensors_info_validate_p, NULL, NULL}, + {"utc_ml_tensors_info_validate_n", utc_ml_tensors_info_validate_n, NULL, NULL}, + {"utc_ml_tensors_info_clone_p", utc_ml_tensors_info_clone_p, NULL, NULL}, + {"utc_ml_tensors_info_clone_n", utc_ml_tensors_info_clone_n, NULL, NULL}, + {"utc_ml_tensors_info_set_count_p", utc_ml_tensors_info_set_count_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + + {"utc_ml_tensors_info_set_count_n", utc_ml_tensors_info_set_count_n, NULL, NULL}, + {"utc_ml_tensors_info_get_count_p", utc_ml_tensors_info_get_count_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_count_n", utc_ml_tensors_info_get_count_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_name_p", utc_ml_tensors_info_set_tensor_name_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_name_n", utc_ml_tensors_info_set_tensor_name_n, NULL, NULL}, + + {"utc_ml_tensors_info_get_tensor_name_p", utc_ml_tensors_info_get_tensor_name_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_tensor_name_n", utc_ml_tensors_info_get_tensor_name_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_type_p", utc_ml_tensors_info_set_tensor_type_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_type_n", utc_ml_tensors_info_set_tensor_type_n, NULL, NULL}, + {"utc_ml_tensors_info_get_tensor_type_p", utc_ml_tensors_info_get_tensor_type_p, 
utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + + {"utc_ml_tensors_info_get_tensor_type_n", utc_ml_tensors_info_get_tensor_type_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_dimension_p", utc_ml_tensors_info_set_tensor_dimension_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_dimension_n", utc_ml_tensors_info_set_tensor_dimension_n, NULL, NULL}, + {"utc_ml_tensors_info_get_tensor_dimension_p", utc_ml_tensors_info_get_tensor_dimension_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_tensor_dimension_n", utc_ml_tensors_info_get_tensor_dimension_n, NULL, NULL}, + + {"utc_ml_tensors_data_create_p", utc_ml_tensors_data_create_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + {"utc_ml_tensors_data_create_n", utc_ml_tensors_data_create_n, NULL, NULL}, + {"utc_ml_tensors_data_destroy_p", utc_ml_tensors_data_destroy_p, utc_nnstreamer_pipeline_tensor_data_startup, NULL}, + {"utc_ml_tensors_data_destroy_n", utc_ml_tensors_data_destroy_n, NULL, NULL}, + {"utc_ml_tensors_data_get_tensor_data_p", utc_ml_tensors_data_get_tensor_data_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + + {"utc_ml_tensors_data_get_tensor_data_n", utc_ml_tensors_data_get_tensor_data_n, NULL, NULL}, + {"utc_ml_tensors_data_set_tensor_data_p", utc_ml_tensors_data_set_tensor_data_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + {"utc_ml_tensors_data_set_tensor_data_n", utc_ml_tensors_data_set_tensor_data_n, NULL, NULL}, + {"utc_ml_check_nnfw_availability_p", utc_ml_check_nnfw_availability_p, NULL, NULL}, + {"utc_ml_check_nnfw_availability_n", utc_ml_check_nnfw_availability_n, NULL, NULL}, + + {"utc_ml_single_open_p", utc_ml_single_open_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_open_n", utc_ml_single_open_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_close_p", utc_ml_single_close_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_close_n", utc_ml_single_close_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_invoke_p", utc_ml_single_invoke_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + + {"utc_ml_single_invoke_n", utc_ml_single_invoke_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_input_info_p", utc_ml_single_get_input_info_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_input_info_n", utc_ml_single_get_input_info_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_output_info_p", utc_ml_single_get_output_info_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_output_info_n", utc_ml_single_get_output_info_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + + {"utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p", utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_destruct_pipeline_p1", utc_nnstreamer_scenario_construct_destruct_pipeline_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_destruct_pipeline_p2", utc_nnstreamer_scenario_construct_destruct_pipeline_p2, NULL, NULL}, + 
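
The utc-nnstreamer-pipeline*.c sources referenced by these rows are not included in this excerpt. As a rough sketch of the positive/negative pattern they follow, assuming the Tizen 5.5 ml_* pipeline API from nnstreamer-capi (the nnstreamer.h header name, the videotestsrc description, and the helper names are illustrative, not taken from this diff):

/* Sketch only; not the actual utc_ml_pipeline_construct_p/_n bodies. */
#include <nnstreamer.h>

static int sketch_pipeline_construct_p (void)
{
  ml_pipeline_h pipe = NULL;
  int ret = ml_pipeline_construct ("videotestsrc num-buffers=2 ! fakesink",
      NULL, NULL, &pipe);
  if (ret != ML_ERROR_NONE)
    return 1;                                  /* fail */
  return (ml_pipeline_destroy (pipe) == ML_ERROR_NONE) ? 0 : 1;
}

static int sketch_pipeline_construct_n (void)
{
  /* A NULL handle out-parameter is expected to be rejected. */
  int ret = ml_pipeline_construct ("videotestsrc ! fakesink", NULL, NULL, NULL);
  return (ret == ML_ERROR_INVALID_PARAMETER) ? 0 : 1;
}
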
{"utc_nnstreamer_scenario_construct_pipeline_error_case_n1", utc_nnstreamer_scenario_construct_pipeline_error_case_n1, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_pipeline_error_case_n2", utc_nnstreamer_scenario_construct_pipeline_error_case_n2, NULL, NULL}, + + {"utc_nnstreamer_scenario_pipeline_state_test_p1", utc_nnstreamer_scenario_pipeline_state_test_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p2", utc_nnstreamer_scenario_pipeline_state_test_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p3", utc_nnstreamer_scenario_pipeline_state_test_p3, NULL, NULL}, + {"utc_nnstreamer_scenario_valve_error_cases_n", utc_nnstreamer_scenario_valve_error_cases_n, NULL, NULL}, + {"utc_nnstreamer_scenario_sink_p1", utc_nnstreamer_scenario_sink_p1, NULL, NULL}, + + {"utc_nnstreamer_scenario_sink_p2", utc_nnstreamer_scenario_sink_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_sink_error_cases_n", utc_nnstreamer_scenario_sink_error_cases_n, NULL, NULL}, + {"utc_nnstreamer_scenario_src_p", utc_nnstreamer_scenario_src_p, utc_nnstreamer_scenario_startup, utc_nnstreamer_scenario_cleanup}, + {"utc_nnstreamer_scenario_src_error_case_n1", utc_nnstreamer_scenario_src_error_case_n1, NULL, NULL}, + {"utc_nnstreamer_scenario_src_error_case_n2", utc_nnstreamer_scenario_src_error_case_n2, NULL, NULL}, + + {"utc_nnstreamer_scenario_src_error_case_n3", utc_nnstreamer_scenario_src_error_case_n3, NULL, NULL}, + {"utc_nnstreamer_scenario_switch_pipeline_p1", utc_nnstreamer_scenario_switch_pipeline_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_switch_pipeline_p2", utc_nnstreamer_scenario_switch_pipeline_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_switch_pipeline_error_cases_n", utc_nnstreamer_scenario_switch_pipeline_error_cases_n, NULL, NULL}, + {"utc_nnstreamer_scenario_check_nnfw_availability_p", utc_nnstreamer_scenario_check_nnfw_availability_p, NULL, NULL}, + + {"utc_nnstreamer_scenario_check_tensor_info_utilities_p", utc_nnstreamer_scenario_check_tensor_info_utilities_p, NULL, NULL}, + {"utc_nnstreamer_scenario_filter_tensorflow_lite_p1", utc_nnstreamer_scenario_filter_tensorflow_lite_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_filter_tensorflow_lite_p2", utc_nnstreamer_scenario_filter_tensorflow_lite_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_filter_tensorflow_lite_n", utc_nnstreamer_scenario_filter_tensorflow_lite_n, NULL, NULL}, + {NULL, NULL} +}; + +#endif // __TCT_NNSTREAMER-NATIVE_H__ diff --git a/src/utc/nnstreamer/tct-nnstreamer-core_tizeniot.h b/src/utc/nnstreamer/tct-nnstreamer-core_tizeniot.h new file mode 100755 index 000000000..5c517b6e5 --- /dev/null +++ b/src/utc/nnstreamer/tct-nnstreamer-core_tizeniot.h @@ -0,0 +1,26 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef __TCT_NNSTREAMER-NATIVE_H__ +#define __TCT_NNSTREAMER-NATIVE_H__ + +#include "testcase.h" +#include "tct_common.h" + +testcase tc_array[] = { + {NULL, NULL} +}; + +#endif // __TCT_NNSTREAMER-NATIVE_H__ diff --git a/src/utc/nnstreamer/tct-nnstreamer-core_tv.h b/src/utc/nnstreamer/tct-nnstreamer-core_tv.h new file mode 100755 index 000000000..3cfd3f6d1 --- /dev/null +++ b/src/utc/nnstreamer/tct-nnstreamer-core_tv.h @@ -0,0 +1,270 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +#ifndef __TCT_NNSTREAMER-NATIVE_H__ +#define __TCT_NNSTREAMER-NATIVE_H__ + +#include "testcase.h" +#include "tct_common.h" + +extern void utc_nnstreamer_single_startup(void); +extern void utc_nnstreamer_single_cleanup(void); +extern void utc_nnstreamer_pipeline_sink_startup(void); +extern void utc_nnstreamer_pipeline_sink_cleanup(void); +extern void utc_nnstreamer_pipeline_src_startup(void); +extern void utc_nnstreamer_pipeline_src_cleanup(void); +extern void utc_nnstreamer_pipeline_switch_startup(void); +extern void utc_nnstreamer_pipeline_switch_cleanup(void); +extern void utc_nnstreamer_pipeline_valve_startup(void); +extern void utc_nnstreamer_pipeline_valve_cleanup(void); +extern void utc_nnstreamer_pipeline_tensor_info_startup(void); +extern void utc_nnstreamer_pipeline_tensor_info_cleanup(void); +extern void utc_nnstreamer_pipeline_tensor_data_startup(void); +extern void utc_nnstreamer_pipeline_tensor_data_cleanup(void); + +extern int utc_ml_pipeline_construct_p (void); +extern int utc_ml_pipeline_construct_n (void); +extern int utc_ml_pipeline_destroy_p (void); +extern int utc_ml_pipeline_destroy_n (void); +extern int utc_ml_pipeline_get_state_p (void); + +extern int utc_ml_pipeline_get_state_n (void); +extern int utc_ml_pipeline_start_p (void); +extern int utc_ml_pipeline_start_n (void); +extern int utc_ml_pipeline_stop_p (void); +extern int utc_ml_pipeline_stop_n (void); + +extern int utc_ml_pipeline_sink_register_p (void); +extern int utc_ml_pipeline_sink_register_n (void); +extern int utc_ml_pipeline_sink_unregister_p (void); +extern int utc_ml_pipeline_sink_unregister_n (void); +extern int utc_ml_pipeline_src_get_handle_p (void); + +extern int utc_ml_pipeline_src_get_handle_n (void); +extern int utc_ml_pipeline_src_release_handle_p (void); +extern int utc_ml_pipeline_src_release_handle_n (void); +extern int utc_ml_pipeline_src_input_data_p (void); +extern int utc_ml_pipeline_src_input_data_n (void); + +extern int utc_ml_pipeline_src_get_tensors_info_p (void); +extern int utc_ml_pipeline_src_get_tensors_info_n (void); +extern int utc_ml_pipeline_switch_get_handle_p (void); +extern int utc_ml_pipeline_switch_get_handle_n (void); +extern int utc_ml_pipeline_switch_release_handle_p (void); + +extern int utc_ml_pipeline_switch_release_handle_n (void); +extern int utc_ml_pipeline_switch_select_p (void); +extern int utc_ml_pipeline_switch_select_n (void); +extern int utc_ml_pipeline_switch_get_pad_list_p (void); +extern int 
utc_ml_pipeline_switch_get_pad_list_n (void); + +extern int utc_ml_pipeline_valve_get_handle_p (void); +extern int utc_ml_pipeline_valve_get_handle_n (void); +extern int utc_ml_pipeline_valve_release_handle_p (void); +extern int utc_ml_pipeline_valve_release_handle_n (void); +extern int utc_ml_pipeline_valve_set_open_p (void); + +extern int utc_ml_pipeline_valve_set_open_n (void); +extern int utc_ml_tensors_info_create_p (void); +extern int utc_ml_tensors_info_create_n (void); +extern int utc_ml_tensors_info_destroy_p (void); +extern int utc_ml_tensors_info_destroy_n (void); + +extern int utc_ml_tensors_info_validate_p (void); +extern int utc_ml_tensors_info_validate_n (void); +extern int utc_ml_tensors_info_clone_p (void); +extern int utc_ml_tensors_info_clone_n (void); +extern int utc_ml_tensors_info_set_count_p (void); + +extern int utc_ml_tensors_info_set_count_n (void); +extern int utc_ml_tensors_info_get_count_p (void); +extern int utc_ml_tensors_info_get_count_n (void); +extern int utc_ml_tensors_info_set_tensor_name_p (void); +extern int utc_ml_tensors_info_set_tensor_name_n (void); + +extern int utc_ml_tensors_info_get_tensor_name_p (void); +extern int utc_ml_tensors_info_get_tensor_name_n (void); +extern int utc_ml_tensors_info_set_tensor_type_p (void); +extern int utc_ml_tensors_info_set_tensor_type_n (void); +extern int utc_ml_tensors_info_get_tensor_type_p (void); + +extern int utc_ml_tensors_info_get_tensor_type_n (void); +extern int utc_ml_tensors_info_set_tensor_dimension_p (void); +extern int utc_ml_tensors_info_set_tensor_dimension_n (void); +extern int utc_ml_tensors_info_get_tensor_dimension_p (void); +extern int utc_ml_tensors_info_get_tensor_dimension_n (void); + +extern int utc_ml_tensors_data_create_p (void); +extern int utc_ml_tensors_data_create_n (void); +extern int utc_ml_tensors_data_destroy_p (void); +extern int utc_ml_tensors_data_destroy_n (void); +extern int utc_ml_tensors_data_get_tensor_data_p (void); + +extern int utc_ml_tensors_data_get_tensor_data_n (void); +extern int utc_ml_tensors_data_set_tensor_data_p (void); +extern int utc_ml_tensors_data_set_tensor_data_n (void); +extern int utc_ml_check_nnfw_availability_p (void); +extern int utc_ml_check_nnfw_availability_n (void); + +extern int utc_ml_single_open_p (void); +extern int utc_ml_single_open_n (void); +extern int utc_ml_single_close_p (void); +extern int utc_ml_single_close_n (void); +extern int utc_ml_single_invoke_p (void); + +extern int utc_ml_single_invoke_n (void); +extern int utc_ml_single_get_input_info_p (void); +extern int utc_ml_single_get_input_info_n (void); +extern int utc_ml_single_get_output_info_p (void); +extern int utc_ml_single_get_output_info_n (void); +extern int utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p (void); +extern int utc_nnstreamer_scenario_construct_destruct_pipeline_p1 (void); +extern int utc_nnstreamer_scenario_construct_destruct_pipeline_p2 (void); + +extern int utc_nnstreamer_scenario_construct_pipeline_error_case_n1 (void); +extern int utc_nnstreamer_scenario_construct_pipeline_error_case_n2 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p1 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p2 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p3 (void); + +extern int utc_nnstreamer_scenario_valve_error_cases_n (void); +extern int utc_nnstreamer_scenario_switch_pipeline_p2 (void); +extern int utc_nnstreamer_scenario_switch_pipeline_error_cases_n (void); +extern int 
utc_nnstreamer_scenario_check_nnfw_availability_p (void); +extern int utc_nnstreamer_scenario_check_tensor_info_utilities_p (void); + +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_p1 (void); +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_p2 (void); +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_n (void); + +testcase tc_array[] = { + {"utc_ml_pipeline_construct_p", utc_ml_pipeline_construct_p, NULL, NULL}, + {"utc_ml_pipeline_construct_n", utc_ml_pipeline_construct_n, NULL, NULL}, + {"utc_ml_pipeline_destroy_p", utc_ml_pipeline_destroy_p, NULL, NULL}, + {"utc_ml_pipeline_destroy_n", utc_ml_pipeline_destroy_n, NULL, NULL}, + {"utc_ml_pipeline_get_state_p", utc_ml_pipeline_get_state_p, NULL, NULL}, + + {"utc_ml_pipeline_get_state_n", utc_ml_pipeline_get_state_n, NULL, NULL}, + {"utc_ml_pipeline_start_p", utc_ml_pipeline_start_p, NULL, NULL}, + {"utc_ml_pipeline_start_n", utc_ml_pipeline_start_n, NULL, NULL}, + {"utc_ml_pipeline_stop_p", utc_ml_pipeline_stop_p, NULL, NULL}, + {"utc_ml_pipeline_stop_n", utc_ml_pipeline_stop_n, NULL, NULL}, + + {"utc_ml_pipeline_sink_register_p", utc_ml_pipeline_sink_register_p, utc_nnstreamer_pipeline_sink_startup, utc_nnstreamer_pipeline_sink_cleanup}, + {"utc_ml_pipeline_sink_register_n", utc_ml_pipeline_sink_register_n, NULL, NULL}, + {"utc_ml_pipeline_sink_unregister_p", utc_ml_pipeline_sink_unregister_p, utc_nnstreamer_pipeline_sink_startup, utc_nnstreamer_pipeline_sink_cleanup}, + {"utc_ml_pipeline_sink_unregister_n", utc_ml_pipeline_sink_unregister_n, NULL, NULL}, + {"utc_ml_pipeline_src_get_handle_p", utc_ml_pipeline_src_get_handle_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + + {"utc_ml_pipeline_src_get_handle_n", utc_ml_pipeline_src_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_src_release_handle_p", utc_ml_pipeline_src_release_handle_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_release_handle_n", utc_ml_pipeline_src_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_src_input_data_p", utc_ml_pipeline_src_input_data_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_input_data_n", utc_ml_pipeline_src_input_data_n, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + + {"utc_ml_pipeline_src_get_tensors_info_p", utc_ml_pipeline_src_get_tensors_info_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_get_tensors_info_n", utc_ml_pipeline_src_get_tensors_info_n, NULL, NULL}, + {"utc_ml_pipeline_switch_get_handle_p", utc_ml_pipeline_switch_get_handle_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_get_handle_n", utc_ml_pipeline_switch_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_switch_release_handle_p", utc_ml_pipeline_switch_release_handle_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + + {"utc_ml_pipeline_switch_release_handle_n", utc_ml_pipeline_switch_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_switch_select_p", utc_ml_pipeline_switch_select_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_select_n", utc_ml_pipeline_switch_select_n, NULL, NULL}, + {"utc_ml_pipeline_switch_get_pad_list_p", utc_ml_pipeline_switch_get_pad_list_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + 
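
The switch rows here and the valve rows below drive pad selection and data gating through pipeline-control handles. A condensed sketch of that flow, again assuming the Tizen 5.5 ml_* API; the element names "outs" and "valve1" are illustrative and not taken from this diff:

/* Sketch: select a switch pad, then close and reopen a valve. */
#include <stdbool.h>
#include <nnstreamer.h>

static int sketch_switch_and_valve (ml_pipeline_h pipe)
{
  ml_pipeline_switch_h sw = NULL;
  ml_pipeline_valve_h valve = NULL;
  ml_pipeline_switch_e type;

  if (ml_pipeline_switch_get_handle (pipe, "outs", &type, &sw) != ML_ERROR_NONE)
    return 1;
  if (ml_pipeline_switch_select (sw, "src_1") != ML_ERROR_NONE) {
    ml_pipeline_switch_release_handle (sw);
    return 1;
  }
  ml_pipeline_switch_release_handle (sw);

  if (ml_pipeline_valve_get_handle (pipe, "valve1", &valve) != ML_ERROR_NONE)
    return 1;
  ml_pipeline_valve_set_open (valve, false);   /* drop buffers       */
  ml_pipeline_valve_set_open (valve, true);    /* pass buffers again */
  ml_pipeline_valve_release_handle (valve);
  return 0;
}
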
{"utc_ml_pipeline_switch_get_pad_list_n", utc_ml_pipeline_switch_get_pad_list_n, NULL, NULL}, + + {"utc_ml_pipeline_valve_get_handle_p", utc_ml_pipeline_valve_get_handle_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + {"utc_ml_pipeline_valve_get_handle_n", utc_ml_pipeline_valve_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_valve_release_handle_p", utc_ml_pipeline_valve_release_handle_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + {"utc_ml_pipeline_valve_release_handle_n", utc_ml_pipeline_valve_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_valve_set_open_p", utc_ml_pipeline_valve_set_open_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + + {"utc_ml_pipeline_valve_set_open_n", utc_ml_pipeline_valve_set_open_n, NULL, NULL}, + {"utc_ml_tensors_info_create_p", utc_ml_tensors_info_create_p, NULL, NULL}, + {"utc_ml_tensors_info_create_n", utc_ml_tensors_info_create_n, NULL, NULL}, + {"utc_ml_tensors_info_destroy_p", utc_ml_tensors_info_destroy_p, NULL, NULL}, + {"utc_ml_tensors_info_destroy_n", utc_ml_tensors_info_destroy_n, NULL, NULL}, + + {"utc_ml_tensors_info_validate_p", utc_ml_tensors_info_validate_p, NULL, NULL}, + {"utc_ml_tensors_info_validate_n", utc_ml_tensors_info_validate_n, NULL, NULL}, + {"utc_ml_tensors_info_clone_p", utc_ml_tensors_info_clone_p, NULL, NULL}, + {"utc_ml_tensors_info_clone_n", utc_ml_tensors_info_clone_n, NULL, NULL}, + {"utc_ml_tensors_info_set_count_p", utc_ml_tensors_info_set_count_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + + {"utc_ml_tensors_info_set_count_n", utc_ml_tensors_info_set_count_n, NULL, NULL}, + {"utc_ml_tensors_info_get_count_p", utc_ml_tensors_info_get_count_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_count_n", utc_ml_tensors_info_get_count_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_name_p", utc_ml_tensors_info_set_tensor_name_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_name_n", utc_ml_tensors_info_set_tensor_name_n, NULL, NULL}, + + {"utc_ml_tensors_info_get_tensor_name_p", utc_ml_tensors_info_get_tensor_name_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_tensor_name_n", utc_ml_tensors_info_get_tensor_name_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_type_p", utc_ml_tensors_info_set_tensor_type_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_type_n", utc_ml_tensors_info_set_tensor_type_n, NULL, NULL}, + {"utc_ml_tensors_info_get_tensor_type_p", utc_ml_tensors_info_get_tensor_type_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + + {"utc_ml_tensors_info_get_tensor_type_n", utc_ml_tensors_info_get_tensor_type_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_dimension_p", utc_ml_tensors_info_set_tensor_dimension_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_dimension_n", utc_ml_tensors_info_set_tensor_dimension_n, NULL, NULL}, + {"utc_ml_tensors_info_get_tensor_dimension_p", utc_ml_tensors_info_get_tensor_dimension_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + 
{"utc_ml_tensors_info_get_tensor_dimension_n", utc_ml_tensors_info_get_tensor_dimension_n, NULL, NULL}, + + {"utc_ml_tensors_data_create_p", utc_ml_tensors_data_create_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + {"utc_ml_tensors_data_create_n", utc_ml_tensors_data_create_n, NULL, NULL}, + {"utc_ml_tensors_data_destroy_p", utc_ml_tensors_data_destroy_p, utc_nnstreamer_pipeline_tensor_data_startup, NULL}, + {"utc_ml_tensors_data_destroy_n", utc_ml_tensors_data_destroy_n, NULL, NULL}, + {"utc_ml_tensors_data_get_tensor_data_p", utc_ml_tensors_data_get_tensor_data_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + + {"utc_ml_tensors_data_get_tensor_data_n", utc_ml_tensors_data_get_tensor_data_n, NULL, NULL}, + {"utc_ml_tensors_data_set_tensor_data_p", utc_ml_tensors_data_set_tensor_data_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + {"utc_ml_tensors_data_set_tensor_data_n", utc_ml_tensors_data_set_tensor_data_n, NULL, NULL}, + {"utc_ml_check_nnfw_availability_p", utc_ml_check_nnfw_availability_p, NULL, NULL}, + {"utc_ml_check_nnfw_availability_n", utc_ml_check_nnfw_availability_n, NULL, NULL}, + + {"utc_ml_single_open_p", utc_ml_single_open_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_open_n", utc_ml_single_open_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_close_p", utc_ml_single_close_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_close_n", utc_ml_single_close_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_invoke_p", utc_ml_single_invoke_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + + {"utc_ml_single_invoke_n", utc_ml_single_invoke_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_input_info_p", utc_ml_single_get_input_info_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_input_info_n", utc_ml_single_get_input_info_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_output_info_p", utc_ml_single_get_output_info_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_output_info_n", utc_ml_single_get_output_info_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p", utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_destruct_pipeline_p1", utc_nnstreamer_scenario_construct_destruct_pipeline_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_destruct_pipeline_p2", utc_nnstreamer_scenario_construct_destruct_pipeline_p2, NULL, NULL}, + + {"utc_nnstreamer_scenario_construct_pipeline_error_case_n1", utc_nnstreamer_scenario_construct_pipeline_error_case_n1, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_pipeline_error_case_n2", utc_nnstreamer_scenario_construct_pipeline_error_case_n2, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p1", utc_nnstreamer_scenario_pipeline_state_test_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p2", utc_nnstreamer_scenario_pipeline_state_test_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p3", utc_nnstreamer_scenario_pipeline_state_test_p3, NULL, NULL}, + + {"utc_nnstreamer_scenario_valve_error_cases_n", 
utc_nnstreamer_scenario_valve_error_cases_n, NULL, NULL}, + {"utc_nnstreamer_scenario_switch_pipeline_p2", utc_nnstreamer_scenario_switch_pipeline_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_switch_pipeline_error_cases_n", utc_nnstreamer_scenario_switch_pipeline_error_cases_n, NULL, NULL}, + {"utc_nnstreamer_scenario_check_nnfw_availability_p", utc_nnstreamer_scenario_check_nnfw_availability_p, NULL, NULL}, + {"utc_nnstreamer_scenario_check_tensor_info_utilities_p", utc_nnstreamer_scenario_check_tensor_info_utilities_p, NULL, NULL}, + + {"utc_nnstreamer_scenario_filter_tensorflow_lite_p1", utc_nnstreamer_scenario_filter_tensorflow_lite_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_filter_tensorflow_lite_p2", utc_nnstreamer_scenario_filter_tensorflow_lite_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_filter_tensorflow_lite_n", utc_nnstreamer_scenario_filter_tensorflow_lite_n, NULL, NULL}, + {NULL, NULL} +}; + +#endif // __TCT_NNSTREAMER-NATIVE_H__ diff --git a/src/utc/nnstreamer/tct-nnstreamer-core_wearable.h b/src/utc/nnstreamer/tct-nnstreamer-core_wearable.h new file mode 100755 index 000000000..3cfd3f6d1 --- /dev/null +++ b/src/utc/nnstreamer/tct-nnstreamer-core_wearable.h @@ -0,0 +1,270 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +#ifndef __TCT_NNSTREAMER-NATIVE_H__ +#define __TCT_NNSTREAMER-NATIVE_H__ + +#include "testcase.h" +#include "tct_common.h" + +extern void utc_nnstreamer_single_startup(void); +extern void utc_nnstreamer_single_cleanup(void); +extern void utc_nnstreamer_pipeline_sink_startup(void); +extern void utc_nnstreamer_pipeline_sink_cleanup(void); +extern void utc_nnstreamer_pipeline_src_startup(void); +extern void utc_nnstreamer_pipeline_src_cleanup(void); +extern void utc_nnstreamer_pipeline_switch_startup(void); +extern void utc_nnstreamer_pipeline_switch_cleanup(void); +extern void utc_nnstreamer_pipeline_valve_startup(void); +extern void utc_nnstreamer_pipeline_valve_cleanup(void); +extern void utc_nnstreamer_pipeline_tensor_info_startup(void); +extern void utc_nnstreamer_pipeline_tensor_info_cleanup(void); +extern void utc_nnstreamer_pipeline_tensor_data_startup(void); +extern void utc_nnstreamer_pipeline_tensor_data_cleanup(void); + +extern int utc_ml_pipeline_construct_p (void); +extern int utc_ml_pipeline_construct_n (void); +extern int utc_ml_pipeline_destroy_p (void); +extern int utc_ml_pipeline_destroy_n (void); +extern int utc_ml_pipeline_get_state_p (void); + +extern int utc_ml_pipeline_get_state_n (void); +extern int utc_ml_pipeline_start_p (void); +extern int utc_ml_pipeline_start_n (void); +extern int utc_ml_pipeline_stop_p (void); +extern int utc_ml_pipeline_stop_n (void); + +extern int utc_ml_pipeline_sink_register_p (void); +extern int utc_ml_pipeline_sink_register_n (void); +extern int utc_ml_pipeline_sink_unregister_p (void); +extern int utc_ml_pipeline_sink_unregister_n (void); +extern int utc_ml_pipeline_src_get_handle_p (void); + +extern int utc_ml_pipeline_src_get_handle_n (void); +extern int utc_ml_pipeline_src_release_handle_p (void); +extern int utc_ml_pipeline_src_release_handle_n (void); +extern int utc_ml_pipeline_src_input_data_p (void); +extern int utc_ml_pipeline_src_input_data_n (void); + +extern int utc_ml_pipeline_src_get_tensors_info_p (void); +extern int utc_ml_pipeline_src_get_tensors_info_n (void); +extern int utc_ml_pipeline_switch_get_handle_p (void); +extern int utc_ml_pipeline_switch_get_handle_n (void); +extern int utc_ml_pipeline_switch_release_handle_p (void); + +extern int utc_ml_pipeline_switch_release_handle_n (void); +extern int utc_ml_pipeline_switch_select_p (void); +extern int utc_ml_pipeline_switch_select_n (void); +extern int utc_ml_pipeline_switch_get_pad_list_p (void); +extern int utc_ml_pipeline_switch_get_pad_list_n (void); + +extern int utc_ml_pipeline_valve_get_handle_p (void); +extern int utc_ml_pipeline_valve_get_handle_n (void); +extern int utc_ml_pipeline_valve_release_handle_p (void); +extern int utc_ml_pipeline_valve_release_handle_n (void); +extern int utc_ml_pipeline_valve_set_open_p (void); + +extern int utc_ml_pipeline_valve_set_open_n (void); +extern int utc_ml_tensors_info_create_p (void); +extern int utc_ml_tensors_info_create_n (void); +extern int utc_ml_tensors_info_destroy_p (void); +extern int utc_ml_tensors_info_destroy_n (void); + +extern int utc_ml_tensors_info_validate_p (void); +extern int utc_ml_tensors_info_validate_n (void); +extern int utc_ml_tensors_info_clone_p (void); +extern int utc_ml_tensors_info_clone_n (void); +extern int utc_ml_tensors_info_set_count_p (void); + +extern int utc_ml_tensors_info_set_count_n (void); +extern int utc_ml_tensors_info_get_count_p (void); +extern int utc_ml_tensors_info_get_count_n (void); +extern int utc_ml_tensors_info_set_tensor_name_p (void); +extern int 
utc_ml_tensors_info_set_tensor_name_n (void); + +extern int utc_ml_tensors_info_get_tensor_name_p (void); +extern int utc_ml_tensors_info_get_tensor_name_n (void); +extern int utc_ml_tensors_info_set_tensor_type_p (void); +extern int utc_ml_tensors_info_set_tensor_type_n (void); +extern int utc_ml_tensors_info_get_tensor_type_p (void); + +extern int utc_ml_tensors_info_get_tensor_type_n (void); +extern int utc_ml_tensors_info_set_tensor_dimension_p (void); +extern int utc_ml_tensors_info_set_tensor_dimension_n (void); +extern int utc_ml_tensors_info_get_tensor_dimension_p (void); +extern int utc_ml_tensors_info_get_tensor_dimension_n (void); + +extern int utc_ml_tensors_data_create_p (void); +extern int utc_ml_tensors_data_create_n (void); +extern int utc_ml_tensors_data_destroy_p (void); +extern int utc_ml_tensors_data_destroy_n (void); +extern int utc_ml_tensors_data_get_tensor_data_p (void); + +extern int utc_ml_tensors_data_get_tensor_data_n (void); +extern int utc_ml_tensors_data_set_tensor_data_p (void); +extern int utc_ml_tensors_data_set_tensor_data_n (void); +extern int utc_ml_check_nnfw_availability_p (void); +extern int utc_ml_check_nnfw_availability_n (void); + +extern int utc_ml_single_open_p (void); +extern int utc_ml_single_open_n (void); +extern int utc_ml_single_close_p (void); +extern int utc_ml_single_close_n (void); +extern int utc_ml_single_invoke_p (void); + +extern int utc_ml_single_invoke_n (void); +extern int utc_ml_single_get_input_info_p (void); +extern int utc_ml_single_get_input_info_n (void); +extern int utc_ml_single_get_output_info_p (void); +extern int utc_ml_single_get_output_info_n (void); +extern int utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p (void); +extern int utc_nnstreamer_scenario_construct_destruct_pipeline_p1 (void); +extern int utc_nnstreamer_scenario_construct_destruct_pipeline_p2 (void); + +extern int utc_nnstreamer_scenario_construct_pipeline_error_case_n1 (void); +extern int utc_nnstreamer_scenario_construct_pipeline_error_case_n2 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p1 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p2 (void); +extern int utc_nnstreamer_scenario_pipeline_state_test_p3 (void); + +extern int utc_nnstreamer_scenario_valve_error_cases_n (void); +extern int utc_nnstreamer_scenario_switch_pipeline_p2 (void); +extern int utc_nnstreamer_scenario_switch_pipeline_error_cases_n (void); +extern int utc_nnstreamer_scenario_check_nnfw_availability_p (void); +extern int utc_nnstreamer_scenario_check_tensor_info_utilities_p (void); + +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_p1 (void); +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_p2 (void); +extern int utc_nnstreamer_scenario_filter_tensorflow_lite_n (void); + +testcase tc_array[] = { + {"utc_ml_pipeline_construct_p", utc_ml_pipeline_construct_p, NULL, NULL}, + {"utc_ml_pipeline_construct_n", utc_ml_pipeline_construct_n, NULL, NULL}, + {"utc_ml_pipeline_destroy_p", utc_ml_pipeline_destroy_p, NULL, NULL}, + {"utc_ml_pipeline_destroy_n", utc_ml_pipeline_destroy_n, NULL, NULL}, + {"utc_ml_pipeline_get_state_p", utc_ml_pipeline_get_state_p, NULL, NULL}, + + {"utc_ml_pipeline_get_state_n", utc_ml_pipeline_get_state_n, NULL, NULL}, + {"utc_ml_pipeline_start_p", utc_ml_pipeline_start_p, NULL, NULL}, + {"utc_ml_pipeline_start_n", utc_ml_pipeline_start_n, NULL, NULL}, + {"utc_ml_pipeline_stop_p", utc_ml_pipeline_stop_p, NULL, NULL}, + {"utc_ml_pipeline_stop_n", utc_ml_pipeline_stop_n, NULL, NULL}, + + 
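
The sink rows that follow register a data callback on a named tensor_sink and verify that buffers arrive. A sketch of that registration, assuming the Tizen 5.5 ml_* API and glib for the wait; the pipeline description and the "sinkx" element name are illustrative:

/* Sketch: count buffers delivered to a named tensor_sink. */
#include <glib.h>
#include <nnstreamer.h>

static guint sink_calls = 0;

static void sketch_sink_cb (const ml_tensors_data_h data,
    const ml_tensors_info_h info, void *user_data)
{
  sink_calls++;
}

static int sketch_sink_register (void)
{
  ml_pipeline_h pipe = NULL;
  ml_pipeline_sink_h sink = NULL;
  const char *desc =
      "videotestsrc num-buffers=3 ! tensor_converter ! tensor_sink name=sinkx";

  if (ml_pipeline_construct (desc, NULL, NULL, &pipe) != ML_ERROR_NONE)
    return 1;
  if (ml_pipeline_sink_register (pipe, "sinkx", sketch_sink_cb, NULL, &sink)
      != ML_ERROR_NONE) {
    ml_pipeline_destroy (pipe);
    return 1;
  }
  ml_pipeline_start (pipe);
  g_usleep (100000);                           /* let a few buffers flow */
  ml_pipeline_stop (pipe);
  ml_pipeline_sink_unregister (sink);
  ml_pipeline_destroy (pipe);
  return (sink_calls > 0) ? 0 : 1;
}
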
{"utc_ml_pipeline_sink_register_p", utc_ml_pipeline_sink_register_p, utc_nnstreamer_pipeline_sink_startup, utc_nnstreamer_pipeline_sink_cleanup}, + {"utc_ml_pipeline_sink_register_n", utc_ml_pipeline_sink_register_n, NULL, NULL}, + {"utc_ml_pipeline_sink_unregister_p", utc_ml_pipeline_sink_unregister_p, utc_nnstreamer_pipeline_sink_startup, utc_nnstreamer_pipeline_sink_cleanup}, + {"utc_ml_pipeline_sink_unregister_n", utc_ml_pipeline_sink_unregister_n, NULL, NULL}, + {"utc_ml_pipeline_src_get_handle_p", utc_ml_pipeline_src_get_handle_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + + {"utc_ml_pipeline_src_get_handle_n", utc_ml_pipeline_src_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_src_release_handle_p", utc_ml_pipeline_src_release_handle_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_release_handle_n", utc_ml_pipeline_src_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_src_input_data_p", utc_ml_pipeline_src_input_data_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_input_data_n", utc_ml_pipeline_src_input_data_n, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + + {"utc_ml_pipeline_src_get_tensors_info_p", utc_ml_pipeline_src_get_tensors_info_p, utc_nnstreamer_pipeline_src_startup, utc_nnstreamer_pipeline_src_cleanup}, + {"utc_ml_pipeline_src_get_tensors_info_n", utc_ml_pipeline_src_get_tensors_info_n, NULL, NULL}, + {"utc_ml_pipeline_switch_get_handle_p", utc_ml_pipeline_switch_get_handle_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_get_handle_n", utc_ml_pipeline_switch_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_switch_release_handle_p", utc_ml_pipeline_switch_release_handle_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + + {"utc_ml_pipeline_switch_release_handle_n", utc_ml_pipeline_switch_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_switch_select_p", utc_ml_pipeline_switch_select_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_select_n", utc_ml_pipeline_switch_select_n, NULL, NULL}, + {"utc_ml_pipeline_switch_get_pad_list_p", utc_ml_pipeline_switch_get_pad_list_p, utc_nnstreamer_pipeline_switch_startup, utc_nnstreamer_pipeline_switch_cleanup}, + {"utc_ml_pipeline_switch_get_pad_list_n", utc_ml_pipeline_switch_get_pad_list_n, NULL, NULL}, + + {"utc_ml_pipeline_valve_get_handle_p", utc_ml_pipeline_valve_get_handle_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + {"utc_ml_pipeline_valve_get_handle_n", utc_ml_pipeline_valve_get_handle_n, NULL, NULL}, + {"utc_ml_pipeline_valve_release_handle_p", utc_ml_pipeline_valve_release_handle_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + {"utc_ml_pipeline_valve_release_handle_n", utc_ml_pipeline_valve_release_handle_n, NULL, NULL}, + {"utc_ml_pipeline_valve_set_open_p", utc_ml_pipeline_valve_set_open_p, utc_nnstreamer_pipeline_valve_startup, utc_nnstreamer_pipeline_valve_cleanup}, + + {"utc_ml_pipeline_valve_set_open_n", utc_ml_pipeline_valve_set_open_n, NULL, NULL}, + {"utc_ml_tensors_info_create_p", utc_ml_tensors_info_create_p, NULL, NULL}, + {"utc_ml_tensors_info_create_n", utc_ml_tensors_info_create_n, NULL, NULL}, + {"utc_ml_tensors_info_destroy_p", utc_ml_tensors_info_destroy_p, NULL, NULL}, + {"utc_ml_tensors_info_destroy_n", 
utc_ml_tensors_info_destroy_n, NULL, NULL}, + + {"utc_ml_tensors_info_validate_p", utc_ml_tensors_info_validate_p, NULL, NULL}, + {"utc_ml_tensors_info_validate_n", utc_ml_tensors_info_validate_n, NULL, NULL}, + {"utc_ml_tensors_info_clone_p", utc_ml_tensors_info_clone_p, NULL, NULL}, + {"utc_ml_tensors_info_clone_n", utc_ml_tensors_info_clone_n, NULL, NULL}, + {"utc_ml_tensors_info_set_count_p", utc_ml_tensors_info_set_count_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + + {"utc_ml_tensors_info_set_count_n", utc_ml_tensors_info_set_count_n, NULL, NULL}, + {"utc_ml_tensors_info_get_count_p", utc_ml_tensors_info_get_count_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_count_n", utc_ml_tensors_info_get_count_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_name_p", utc_ml_tensors_info_set_tensor_name_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_name_n", utc_ml_tensors_info_set_tensor_name_n, NULL, NULL}, + + {"utc_ml_tensors_info_get_tensor_name_p", utc_ml_tensors_info_get_tensor_name_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_tensor_name_n", utc_ml_tensors_info_get_tensor_name_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_type_p", utc_ml_tensors_info_set_tensor_type_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_type_n", utc_ml_tensors_info_set_tensor_type_n, NULL, NULL}, + {"utc_ml_tensors_info_get_tensor_type_p", utc_ml_tensors_info_get_tensor_type_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + + {"utc_ml_tensors_info_get_tensor_type_n", utc_ml_tensors_info_get_tensor_type_n, NULL, NULL}, + {"utc_ml_tensors_info_set_tensor_dimension_p", utc_ml_tensors_info_set_tensor_dimension_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_set_tensor_dimension_n", utc_ml_tensors_info_set_tensor_dimension_n, NULL, NULL}, + {"utc_ml_tensors_info_get_tensor_dimension_p", utc_ml_tensors_info_get_tensor_dimension_p, utc_nnstreamer_pipeline_tensor_info_startup, utc_nnstreamer_pipeline_tensor_info_cleanup}, + {"utc_ml_tensors_info_get_tensor_dimension_n", utc_ml_tensors_info_get_tensor_dimension_n, NULL, NULL}, + + {"utc_ml_tensors_data_create_p", utc_ml_tensors_data_create_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + {"utc_ml_tensors_data_create_n", utc_ml_tensors_data_create_n, NULL, NULL}, + {"utc_ml_tensors_data_destroy_p", utc_ml_tensors_data_destroy_p, utc_nnstreamer_pipeline_tensor_data_startup, NULL}, + {"utc_ml_tensors_data_destroy_n", utc_ml_tensors_data_destroy_n, NULL, NULL}, + {"utc_ml_tensors_data_get_tensor_data_p", utc_ml_tensors_data_get_tensor_data_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + + {"utc_ml_tensors_data_get_tensor_data_n", utc_ml_tensors_data_get_tensor_data_n, NULL, NULL}, + {"utc_ml_tensors_data_set_tensor_data_p", utc_ml_tensors_data_set_tensor_data_p, utc_nnstreamer_pipeline_tensor_data_startup, utc_nnstreamer_pipeline_tensor_data_cleanup}, + {"utc_ml_tensors_data_set_tensor_data_n", utc_ml_tensors_data_set_tensor_data_n, NULL, NULL}, + {"utc_ml_check_nnfw_availability_p", 
utc_ml_check_nnfw_availability_p, NULL, NULL}, + {"utc_ml_check_nnfw_availability_n", utc_ml_check_nnfw_availability_n, NULL, NULL}, + + {"utc_ml_single_open_p", utc_ml_single_open_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_open_n", utc_ml_single_open_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_close_p", utc_ml_single_close_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_close_n", utc_ml_single_close_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_invoke_p", utc_ml_single_invoke_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + + {"utc_ml_single_invoke_n", utc_ml_single_invoke_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_input_info_p", utc_ml_single_get_input_info_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_input_info_n", utc_ml_single_get_input_info_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_output_info_p", utc_ml_single_get_output_info_p, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_ml_single_get_output_info_n", utc_ml_single_get_output_info_n, utc_nnstreamer_single_startup, utc_nnstreamer_single_cleanup}, + {"utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p", utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_destruct_pipeline_p1", utc_nnstreamer_scenario_construct_destruct_pipeline_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_destruct_pipeline_p2", utc_nnstreamer_scenario_construct_destruct_pipeline_p2, NULL, NULL}, + + {"utc_nnstreamer_scenario_construct_pipeline_error_case_n1", utc_nnstreamer_scenario_construct_pipeline_error_case_n1, NULL, NULL}, + {"utc_nnstreamer_scenario_construct_pipeline_error_case_n2", utc_nnstreamer_scenario_construct_pipeline_error_case_n2, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p1", utc_nnstreamer_scenario_pipeline_state_test_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p2", utc_nnstreamer_scenario_pipeline_state_test_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_pipeline_state_test_p3", utc_nnstreamer_scenario_pipeline_state_test_p3, NULL, NULL}, + + {"utc_nnstreamer_scenario_valve_error_cases_n", utc_nnstreamer_scenario_valve_error_cases_n, NULL, NULL}, + {"utc_nnstreamer_scenario_switch_pipeline_p2", utc_nnstreamer_scenario_switch_pipeline_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_switch_pipeline_error_cases_n", utc_nnstreamer_scenario_switch_pipeline_error_cases_n, NULL, NULL}, + {"utc_nnstreamer_scenario_check_nnfw_availability_p", utc_nnstreamer_scenario_check_nnfw_availability_p, NULL, NULL}, + {"utc_nnstreamer_scenario_check_tensor_info_utilities_p", utc_nnstreamer_scenario_check_tensor_info_utilities_p, NULL, NULL}, + + {"utc_nnstreamer_scenario_filter_tensorflow_lite_p1", utc_nnstreamer_scenario_filter_tensorflow_lite_p1, NULL, NULL}, + {"utc_nnstreamer_scenario_filter_tensorflow_lite_p2", utc_nnstreamer_scenario_filter_tensorflow_lite_p2, NULL, NULL}, + {"utc_nnstreamer_scenario_filter_tensorflow_lite_n", utc_nnstreamer_scenario_filter_tensorflow_lite_n, NULL, NULL}, + {NULL, NULL} +}; + +#endif // __TCT_NNSTREAMER-NATIVE_H__ diff --git a/src/utc/nnstreamer/utc-nnstreamer-pipeline-sink.c b/src/utc/nnstreamer/utc-nnstreamer-pipeline-sink.c new file mode 100644 index 000000000..fb8036cf9 --- /dev/null 
+++ b/src/utc/nnstreamer/utc-nnstreamer-pipeline-sink.c @@ -0,0 +1,166 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +/** + * @brief Struct to check the pipeline state changes. + */ +typedef struct +{ + gboolean paused; + gboolean playing; +} TestPipeState; + +static int status; +static gchar * pipeline; +static guint * count_sink; +static ml_pipeline_h handle; +static ml_pipeline_sink_h sinkhandle; +static TestPipeState * pipe_state; + +/** + * @function test_pipe_state_callback + * @description Pipeline state changed callback + */ +static void +test_pipe_state_callback (ml_pipeline_state_e state, void *user_data) +{ + TestPipeState *pipe_state; + + pipe_state = (TestPipeState *) user_data; + + switch (state) { + case ML_PIPELINE_STATE_PAUSED: + pipe_state->paused = TRUE; + break; + case ML_PIPELINE_STATE_PLAYING: + pipe_state->playing = TRUE; + break; + default: + break; + } +} + +/** + * @function test_sink_callback_count + * @description A tensor-sink callback for sink handle in a pipeline + */ +static void +test_sink_callback_count (const ml_tensors_data_h data, + const ml_tensors_info_h info, void *user_data) +{ + guint *count = (guint *) user_data; + + *count = *count + 1; +} + +/** + * @function utc_nnstreamer_pipeline_sink_startup + * @since_tizen 5.5 + * @description called before the specific test cases + */ +void utc_nnstreamer_pipeline_sink_startup(void) +{ + /* pipeline with appsink */ + pipeline = g_strdup ("videotestsrc num-buffers=3 ! videoconvert ! tensor_converter ! 
appsink name=sinkx"); + + count_sink = (guint *) g_malloc (sizeof (guint)); + *count_sink = 0; + + pipe_state = (TestPipeState *) g_new0 (TestPipeState, 1); + status = ml_pipeline_construct (pipeline, test_pipe_state_callback, pipe_state, &handle); +} + +/** + * @function utc_nnstreamer_pipeline_sink_cleanup + * @since_tizen 5.5 + * @description called after the specific test cases + */ +void utc_nnstreamer_pipeline_sink_cleanup(void) +{ + status = ml_pipeline_destroy (handle); + + g_free (pipeline); + g_free (count_sink); + g_free (pipe_state); +} + +/** + * @testcase utc_ml_pipeline_sink_register_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline register sink + */ +int utc_ml_pipeline_sink_register_p (void) +{ + status = ml_pipeline_sink_register (handle, "sinkx", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_unregister (sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_sink_register_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline register sink, Failure case + */ +int utc_ml_pipeline_sink_register_n (void) +{ + status = ml_pipeline_sink_register (NULL, "sinkx", test_sink_callback_count, NULL, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_sink_unregister_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline unregister sink + */ +int utc_ml_pipeline_sink_unregister_p (void) +{ + status = ml_pipeline_sink_register (handle, "sinkx", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_unregister (sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_sink_unregister_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline register sink, Failure case + */ +int utc_ml_pipeline_sink_unregister_n (void) +{ + status = ml_pipeline_sink_unregister (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-pipeline-src.c b/src/utc/nnstreamer/utc-nnstreamer-pipeline-src.c new file mode 100644 index 000000000..745c68ada --- /dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-pipeline-src.c @@ -0,0 +1,225 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +static int status; + +static ml_pipeline_h handle; +static ml_pipeline_src_h srchandle; + +static ml_tensors_data_h data; +static ml_tensors_info_h info; + +static gchar * pipeline; + +/** + * @function utc_nnstreamer_pipeline_src_startup + * @since_tizen 5.5 + * @description called before the specific test cases + */ +void utc_nnstreamer_pipeline_src_startup(void) +{ + pipeline = g_strdup ("appsrc name=srcx ! 
other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! tensor_sink"); + + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + status = ml_pipeline_start (handle); +} + +/** + * @function utc_nnstreamer_pipeline_src_cleanup + * @since_tizen 5.5 + * @description called after the specific test cases + */ +void utc_nnstreamer_pipeline_src_cleanup(void) +{ + status = ml_pipeline_stop (handle); + status = ml_pipeline_destroy (handle); + + g_free (pipeline); +} + +/** + * @testcase utc_ml_pipeline_src_get_handle_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get handle of src + */ +int utc_ml_pipeline_src_get_handle_p (void) +{ + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_src_get_handle_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get handle of src, Failure case + */ +int utc_ml_pipeline_src_get_handle_n (void) +{ + status = ml_pipeline_src_get_handle (NULL, "dummy", &srchandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_src_release_handle_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline release handle of src + */ +int utc_ml_pipeline_src_release_handle_p (void) +{ + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_release_handle (srchandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_src_release_handle_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline release handle of src, Failure case + */ +int utc_ml_pipeline_src_release_handle_n (void) +{ + status = ml_pipeline_src_release_handle (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_src_input_data_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get input data + */ +int utc_ml_pipeline_src_input_data_p (void) +{ + int i, ret = 0; + uint8_t *uintarray1[10]; + + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_get_tensors_info (srchandle, &info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_data_create (info, &data); + assert_eq (status, ML_ERROR_NONE); + + for (i = 0; i < 10; i++) { + uintarray1[i] = (uint8_t *) g_malloc (4); + uintarray1[i][0] = i + 4; + uintarray1[i][1] = i + 1; + uintarray1[i][2] = i + 3; + uintarray1[i][3] = i + 2; + } + + status = ml_tensors_data_set_tensor_data (data, 0, uintarray1[0], 4); + if (status != ML_ERROR_NONE){ + ret = 1; + goto error_return; + } + + status = ml_pipeline_src_input_data (srchandle, data, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + if (status != ML_ERROR_NONE){ + ret = 1; + goto error_return; + } + + status = ml_pipeline_src_release_handle (srchandle); + if (status != ML_ERROR_NONE){ + ret = 1; + goto error_return; + } + +error_return: + for (i = 0; i < 10; i++) { + g_free (uintarray1[i]); + } + status = ml_tensors_data_destroy (data); + assert_eq (status, ML_ERROR_NONE); + + return ret; +} + +/** + * @testcase utc_ml_pipeline_src_input_data_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get input data, Failure case + */ +int utc_ml_pipeline_src_input_data_n (void) +{ + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = 
ml_pipeline_src_get_tensors_info (srchandle, &info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_pipeline_src_release_handle (srchandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_src_get_tensors_info_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get tensor info + */ +int utc_ml_pipeline_src_get_tensors_info_p (void) +{ + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_get_tensors_info (srchandle, &info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_release_handle (srchandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_src_get_tensors_info_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get tensor info, Failure case + */ +int utc_ml_pipeline_src_get_tensors_info_n (void) +{ + status = ml_pipeline_src_get_tensors_info (NULL, &info); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-pipeline-switch.c b/src/utc/nnstreamer/utc-nnstreamer-pipeline-switch.c new file mode 100644 index 000000000..f51811b26 --- /dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-pipeline-switch.c @@ -0,0 +1,227 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +/** + * @brief Struct to check the pipeline state changes. + */ +typedef struct +{ + gboolean paused; + gboolean playing; +} TestPipeState; + +static int status; +static gchar * pipeline; +static guint * count_sink; +static gchar ** node_list; + +static TestPipeState * pipe_state; +static ml_pipeline_h handle; +static ml_pipeline_switch_h switchhandle; +static ml_pipeline_switch_e type; + +/** + * @function test_pipe_state_callback + * @description Pipeline state changed callback + */ +static void +test_pipe_state_callback (ml_pipeline_state_e state, void *user_data) +{ + TestPipeState *pipe_state; + + pipe_state = (TestPipeState *) user_data; + + switch (state) { + case ML_PIPELINE_STATE_PAUSED: + pipe_state->paused = TRUE; + break; + case ML_PIPELINE_STATE_PLAYING: + pipe_state->playing = TRUE; + break; + default: + break; + } +} + +/** + * @function utc_nnstreamer_pipeline_switch_startup + * @since_tizen 5.5 + * @description called before the specific test cases + */ +void utc_nnstreamer_pipeline_switch_startup(void) +{ + node_list = NULL; + + pipeline = g_strdup ("input-selector name=ins ! tensor_converter ! tensor_sink name=sinkx " + "videotestsrc is-live=true ! videoconvert ! ins.sink_0 " + "videotestsrc num-buffers=3 is-live=true ! videoconvert ! 
ins.sink_1"); + + count_sink = (guint *) g_malloc (sizeof (guint)); + *count_sink = 0; + + pipe_state = (TestPipeState *) g_new0 (TestPipeState, 1); + status = ml_pipeline_construct (pipeline, test_pipe_state_callback, pipe_state, &handle); +} + +/** + * @function utc_nnstreamer_pipeline_switch_cleanup + * @since_tizen 5.5 + * @description called after the specific test cases + */ +void utc_nnstreamer_pipeline_switch_cleanup(void) +{ + status = ml_pipeline_destroy (handle); + + g_free (pipeline); + g_free (count_sink); + g_free (pipe_state); +} + +/** + * @testcase utc_ml_pipeline_switch_get_handle_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get switch handle + */ +int utc_ml_pipeline_switch_get_handle_p (void) +{ + status = ml_pipeline_switch_get_handle (handle, "ins", &type, &switchhandle); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_PIPELINE_SWITCH_INPUT_SELECTOR); + + status = ml_pipeline_switch_release_handle (switchhandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_switch_get_handle_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get switch handle, Failure case + */ +int utc_ml_pipeline_switch_get_handle_n (void) +{ + status = ml_pipeline_switch_get_handle (NULL, "ins", &type, &switchhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_switch_release_handle_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline release switch handle + */ +int utc_ml_pipeline_switch_release_handle_p (void) +{ + status = ml_pipeline_switch_get_handle (handle, "ins", &type, &switchhandle); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_PIPELINE_SWITCH_INPUT_SELECTOR); + + status = ml_pipeline_switch_release_handle (switchhandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_switch_release_handle_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline release switch handle, Failure case + */ +int utc_ml_pipeline_switch_release_handle_n (void) +{ + status = ml_pipeline_switch_release_handle (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_switch_select_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline select switch + */ +int utc_ml_pipeline_switch_select_p (void) +{ + status = ml_pipeline_switch_get_handle (handle, "ins", &type, &switchhandle); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_PIPELINE_SWITCH_INPUT_SELECTOR); + + status = ml_pipeline_switch_select (switchhandle, "sink_1"); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_release_handle (switchhandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_switch_select_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline select switch, Failure case + */ +int utc_ml_pipeline_switch_select_n (void) +{ + status = ml_pipeline_switch_select (NULL, "sink_1"); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_switch_get_pad_list_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get pad list + */ +int utc_ml_pipeline_switch_get_pad_list_p (void) +{ + status = ml_pipeline_switch_get_handle (handle, "ins", &type, &switchhandle); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_PIPELINE_SWITCH_INPUT_SELECTOR); + + status = ml_pipeline_switch_get_pad_list (switchhandle, &node_list); + 
assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_release_handle (switchhandle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_switch_get_pad_list_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get pad list, Failure case + */ +int utc_ml_pipeline_switch_get_pad_list_n (void) +{ + status = ml_pipeline_switch_get_pad_list (NULL, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-pipeline-tensor-data.c b/src/utc/nnstreamer/utc-nnstreamer-pipeline-tensor-data.c new file mode 100644 index 000000000..4748af986 --- /dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-pipeline-tensor-data.c @@ -0,0 +1,176 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +static int status; +static gchar * pipeline; +static ml_pipeline_h handle; +static ml_pipeline_src_h srchandle; +static ml_tensors_data_h data; +static ml_tensors_info_h info; + +/** + * @function utc_nnstreamer_pipeline_tensor_data_startup + * @since_tizen 5.5 + * @description called before the specific test cases + */ +void utc_nnstreamer_pipeline_tensor_data_startup(void) +{ + pipeline = "appsrc name=srcx ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! 
tensor_sink"; + + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + status = ml_pipeline_start (handle); + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + status = ml_pipeline_src_get_tensors_info (srchandle, &info); +} + +/** + * @function utc_nnstreamer_pipeline_tensor_data_cleanup + * @since_tizen 5.5 + * @description called after the specific test cases + */ +void utc_nnstreamer_pipeline_tensor_data_cleanup(void) +{ + status = ml_pipeline_src_release_handle (srchandle); + status = ml_pipeline_stop (handle); + status = ml_pipeline_destroy (handle); + status = ml_tensors_data_destroy (data); +} + +/** + * @testcase utc_ml_tensors_data_create_p + * @since_tizen 5.5 + * @description Test NNStreamer create tensor data + */ +int utc_ml_tensors_data_create_p (void) +{ + status = ml_tensors_data_create (info, &data); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_data_create_n + * @since_tizen 5.5 + * @description Test NNStreamer create tensor data, Failure case + */ +int utc_ml_tensors_data_create_n (void) +{ + status = ml_tensors_data_create (NULL, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_data_destroy_p + * @since_tizen 5.5 + * @description Test NNStreamer destroy tensor data + */ +int utc_ml_tensors_data_destroy_p (void) +{ + status = ml_tensors_data_create (info, &data); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_release_handle (srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_data_destroy (data); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_data_destroy_n + * @since_tizen 5.5 + * @description Test NNStreamer destroy tensor data, Failure case + */ +int utc_ml_tensors_data_destroy_n (void) +{ + status = ml_pipeline_destroy (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_data_set_tensor_data_p + * @since_tizen 5.5 + * @description Test NNStreamer set tensor data + */ +int utc_ml_tensors_data_set_tensor_data_p (void) +{ + status = ml_tensors_data_create (info, &data); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_data_set_tensor_data_n + * @since_tizen 5.5 + * @description Test NNStreamer set tensor data, Failure case + */ +int utc_ml_tensors_data_set_tensor_data_n (void) +{ + status = ml_tensors_data_set_tensor_data (NULL, 0, NULL, 0); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_data_get_tensor_data_p + * @since_tizen 5.5 + * @description Test NNStreamer get tensor data + */ +int utc_ml_tensors_data_get_tensor_data_p (void) +{ + status = ml_tensors_data_create (info, &data); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_data_get_tensor_data_n + * @since_tizen 5.5 + * @description Test NNStreamer get tensor data, Failure case + */ +int utc_ml_tensors_data_get_tensor_data_n (void) +{ + status = ml_tensors_data_get_tensor_data (NULL, 0, NULL, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-pipeline-tensor-info.c b/src/utc/nnstreamer/utc-nnstreamer-pipeline-tensor-info.c new file mode 100644 index 000000000..6ec88cbe8 --- 
/dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-pipeline-tensor-info.c @@ -0,0 +1,456 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +static int status; +static ml_tensors_info_h info; + +/** + * @function utc_nnstreamer_pipeline_tensor_info_startup + * @since_tizen 5.5 + * @description called before the specific test cases + */ +void utc_nnstreamer_pipeline_tensor_info_startup(void) +{ + status = ml_tensors_info_create (&info); +} + +/** + * @function utc_nnstreamer_pipeline_tensor_info_cleanup + * @since_tizen 5.5 + * @description called after the specific test cases + */ +void utc_nnstreamer_pipeline_tensor_info_cleanup(void) +{ + status = ml_tensors_info_destroy (info); +} + +/** + * @testcase utc_ml_tensors_info_create_p + * @since_tizen 5.5 + * @description Test NNStreamer create tensor info + */ +int utc_ml_tensors_info_create_p (void) +{ + status = ml_tensors_info_create (&info); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_create_n + * @since_tizen 5.5 + * @description Test NNStreamer create tensor info, Failure case + */ +int utc_ml_tensors_info_create_n (void) +{ + status = ml_tensors_info_create (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_destroy_p + * @since_tizen 5.5 + * @description Test NNStreamer destroy tensor info + */ +int utc_ml_tensors_info_destroy_p (void) +{ + status = ml_tensors_info_create (&info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_destroy (info); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_destroy_n + * @since_tizen 5.5 + * @description Test NNStreamer destroy tensor info, Failure case + */ +int utc_ml_tensors_info_destroy_n (void) +{ + status = ml_tensors_info_destroy (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_validate_p + * @since_tizen 5.5 + * @description Test NNStreamer validate tensor info + */ +int utc_ml_tensors_info_validate_p (void) +{ + ml_tensors_info_h t_info; + ml_tensor_dimension t_dim; + bool valid = false; + + status = ml_tensors_info_create (&t_info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_count (t_info, 1); + assert_eq (status, ML_ERROR_NONE); + + t_dim[0] = 10; + t_dim[1] = 1; + t_dim[2] = 1; + t_dim[3] = 1; + + status = ml_tensors_info_set_tensor_type (t_info, 0, ML_TENSOR_TYPE_INT16); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_dimension (t_info, 0, t_dim); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_validate (t_info, &valid); + assert_eq (status, ML_ERROR_NONE); + assert (valid); + + status = ml_tensors_info_destroy (t_info); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + 
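/*
 * Illustrative sketch only (not part of the TC sources): the tensors-info
 * handle validated above is normally paired with the tensors-data calls
 * exercised elsewhere in this patch. The helper name and the UINT8 10:1:1:1
 * layout below are assumptions made for the example.
 */
static int
example_fill_uint8_tensor (void)
{
  ml_tensors_info_h ex_info = NULL;
  ml_tensors_data_h ex_data = NULL;
  ml_tensor_dimension ex_dim;
  uint8_t raw[10] = { 0 };
  int ret;

  ex_dim[0] = 10;
  ex_dim[1] = 1;
  ex_dim[2] = 1;
  ex_dim[3] = 1;

  /* Describe a single UINT8 tensor of 10 elements. */
  ret = ml_tensors_info_create (&ex_info);
  if (ret != ML_ERROR_NONE)
    return ret;
  ml_tensors_info_set_count (ex_info, 1);
  ml_tensors_info_set_tensor_type (ex_info, 0, ML_TENSOR_TYPE_UINT8);
  ml_tensors_info_set_tensor_dimension (ex_info, 0, ex_dim);

  /* Allocate a buffer sized from the info handle and copy the payload in;
   * sizeof (raw) must match the described size (10 * sizeof (uint8_t)). */
  ret = ml_tensors_data_create (ex_info, &ex_data);
  if (ret == ML_ERROR_NONE)
    ret = ml_tensors_data_set_tensor_data (ex_data, 0, raw, sizeof (raw));

  if (ex_data)
    ml_tensors_data_destroy (ex_data);
  ml_tensors_info_destroy (ex_info);
  return ret;
}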
+/** + * @testcase utc_ml_tensors_info_validate_n + * @since_tizen 5.5 + * @description Test NNStreamer validate tensor info, Failure case + */ +int utc_ml_tensors_info_validate_n (void) +{ + status = ml_tensors_info_validate (NULL, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_clone_p + * @since_tizen 5.5 + * @description Test NNStreamer clone tensor info + */ +int utc_ml_tensors_info_clone_p (void) +{ + ml_tensors_info_h in_info, out_info; + + status = ml_tensors_info_create (&in_info); + assert_eq (status, ML_ERROR_NONE); + status = ml_tensors_info_create (&out_info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_clone (out_info, in_info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_destroy (in_info); + assert_eq (status, ML_ERROR_NONE); + status = ml_tensors_info_destroy (out_info); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_clone_n + * @since_tizen 5.5 + * @description Test NNStreamer clone tensor info, Failure case + */ +int utc_ml_tensors_info_clone_n (void) +{ + status = ml_tensors_info_clone (NULL, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_count_p + * @since_tizen 5.5 + * @description Test NNStreamer set tensor info count + */ +int utc_ml_tensors_info_set_count_p (void) +{ + status = ml_tensors_info_set_count (info, 2); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_count_n + * @since_tizen 5.5 + * @description Test NNStreamer set tensor info count, Failure case + */ +int utc_ml_tensors_info_set_count_n (void) +{ + status = ml_tensors_info_set_count (NULL, 2); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_count_p + * @since_tizen 5.5 + * @description Test NNStreamer get tensor info count + */ +int utc_ml_tensors_info_get_count_p (void) +{ + unsigned int num = 0; + + status = ml_tensors_info_set_count (info, 2); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_get_count (info, &num); + assert_eq (status, ML_ERROR_NONE); + assert_eq (num, 2); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_count_n + * @since_tizen 5.5 + * @description Test NNStreamer get tensor info count, Failure case + */ +int utc_ml_tensors_info_get_count_n (void) +{ + status = ml_tensors_info_get_count (NULL, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_tensor_name_p + * @since_tizen 5.5 + * @description Test NNStreamer set tensor name + */ +int utc_ml_tensors_info_set_tensor_name_p (void) +{ + status = ml_tensors_info_set_count (info, 1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_name (info, 0, "tensor-name-test"); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_tensor_name_n + * @since_tizen 5.5 + * @description Test NNStreamer set tensor name, Failure case + */ +int utc_ml_tensors_info_set_tensor_name_n (void) +{ + status = ml_tensors_info_set_tensor_name (NULL, 1, "tensor-name-test"); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_tensor_name_p + * @since_tizen 5.5 + * @description Test NNStreamer get tensor name + */ +int utc_ml_tensors_info_get_tensor_name_p (void) +{ + gchar * out_name = NULL; + + 
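  /* On success, ml_tensors_info_get_tensor_name below should hand back the
   * "tensor-name-test" string set for index 0 via out_name. */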
status = ml_tensors_info_set_count (info, 1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_name (info, 0, "tensor-name-test"); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_get_tensor_name (info, 0, &out_name); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_tensor_name_n + * @since_tizen 5.5 + * @description Test NNStreamer get tensor name, Failure case + */ +int utc_ml_tensors_info_get_tensor_name_n (void) +{ + status = ml_tensors_info_get_tensor_name (NULL, 0, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_tensor_type_p + * @since_tizen 5.5 + * @description Test NNStreamer set tensor type + */ +int utc_ml_tensors_info_set_tensor_type_p (void) +{ + status = ml_tensors_info_set_count (info, 1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_type (info, 0, ML_TENSOR_TYPE_UINT8); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_tensor_type_n + * @since_tizen 5.5 + * @description Test NNStreamer set tensor type, Failure case + */ +int utc_ml_tensors_info_set_tensor_type_n (void) +{ + status = ml_tensors_info_set_tensor_type (NULL, 0, ML_TENSOR_TYPE_UINT8); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_tensor_type_p + * @since_tizen 5.5 + * @description Test NNStreamer get tensor type + */ +int utc_ml_tensors_info_get_tensor_type_p (void) +{ + ml_tensor_type_e out_type; + + status = ml_tensors_info_set_count (info, 1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_type (info, 0, ML_TENSOR_TYPE_UINT8); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_get_tensor_type (info, 0, &out_type); + assert_eq (status, ML_ERROR_NONE); + assert_eq (out_type, ML_TENSOR_TYPE_UINT8); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_tensor_type_n + * @since_tizen 5.5 + * @description Test NNStreamer get tensor type, Failure case + */ +int utc_ml_tensors_info_get_tensor_type_n (void) +{ + status = ml_tensors_info_get_tensor_type (NULL, 0, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_tensor_dimension_p + * @since_tizen 5.5 + * @description Test NNStreamer set tensor dimension + */ +int utc_ml_tensors_info_set_tensor_dimension_p (void) +{ + ml_tensor_dimension in_dim; + + in_dim[0] = 3; + in_dim[1] = 300; + in_dim[2] = 300; + in_dim[3] = 1; + + status = ml_tensors_info_set_count (info, 1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_dimension (info, 0, in_dim); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_set_tensor_dimension_n + * @since_tizen 5.5 + * @description Test NNStreamer set tensor dimension, Failure case + */ +int utc_ml_tensors_info_set_tensor_dimension_n (void) +{ + status = ml_tensors_info_set_tensor_dimension (NULL, 1, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_tensor_dimension_p + * @since_tizen 5.5 + * @description Test NNStreamer get tensor dimension, Failure case + */ +int utc_ml_tensors_info_get_tensor_dimension_p (void) +{ + ml_tensor_dimension in_dim, out_dim; + + in_dim[0] = 3; + in_dim[1] = 300; + in_dim[2] = 300; + in_dim[3] = 1; + + status = ml_tensors_info_set_count (info, 1); + 
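  /* ml_tensor_dimension is a plain fixed-size array type, so out_dim above
   * needs no separate allocation; the 3:300:300:1 shape filled into in_dim is
   * applied and read back element by element below. */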
assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_dimension (info, 0, in_dim); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_get_tensor_dimension (info, 0, out_dim); + assert_eq (status, ML_ERROR_NONE); + assert_eq (out_dim[0], 3U); + assert_eq (out_dim[1], 300U); + assert_eq (out_dim[2], 300U); + assert_eq (out_dim[3], 1U); + + return 0; +} + +/** + * @testcase utc_ml_tensors_info_get_tensor_dimension_n + * @since_tizen 5.5 + * @description Test NNStreamer get tensor dimension, Failure case + */ +int utc_ml_tensors_info_get_tensor_dimension_n (void) +{ + status = ml_tensors_info_get_tensor_dimension (NULL, 1, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-pipeline-valve.c b/src/utc/nnstreamer/utc-nnstreamer-pipeline-valve.c new file mode 100644 index 000000000..d27cd25d5 --- /dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-pipeline-valve.c @@ -0,0 +1,141 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +static int status; +static gchar * pipeline; +static ml_pipeline_h handle; +static ml_pipeline_valve_h valve1; + +/** + * @function utc_nnstreamer_pipeline_valve_startup + * @since_tizen 5.5 + * @description called before the specific test cases + */ +void utc_nnstreamer_pipeline_valve_startup(void) +{ + pipeline = g_strdup ("videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=16,height=16,framerate=10/1 ! tensor_converter ! queue ! 
valve name=valve1"); + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); +} + +/** + * @function utc_nnstreamer_pipeline_valve_cleanup + * @since_tizen 5.5 + * @description called after the specific test cases + */ +void utc_nnstreamer_pipeline_valve_cleanup(void) +{ + status = ml_pipeline_destroy (handle); + g_free (pipeline); +} + +/** + * @testcase utc_ml_pipeline_valve_get_handle_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get valve handle + */ +int utc_ml_pipeline_valve_get_handle_p (void) +{ + status = ml_pipeline_valve_get_handle (handle, "valve1", &valve1); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_valve_get_handle_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline get valve handle, Failure case + */ +int utc_ml_pipeline_valve_get_handle_n (void) +{ + status = ml_pipeline_valve_get_handle (NULL, "valve1", &valve1); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_valve_release_handle_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline release valve handle + */ +int utc_ml_pipeline_valve_release_handle_p (void) +{ + status = ml_pipeline_valve_get_handle (handle, "valve1", &valve1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_valve_set_open (valve1, true); /* close */ + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_valve_release_handle (valve1); /* release valve handle */ + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_valve_release_handle_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline release valve handle, Failure case + */ +int utc_ml_pipeline_valve_release_handle_n (void) +{ + status = ml_pipeline_valve_release_handle (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_valve_set_open_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline set valve open + */ +int utc_ml_pipeline_valve_set_open_p (void) +{ + status = ml_pipeline_valve_get_handle (handle, "valve1", &valve1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_valve_set_open (valve1, false); /* close */ + assert_eq (status, ML_ERROR_NONE); + return 0; +} + +/** + * @testcase utc_ml_pipeline_valve_set_open_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline set valve open, Failure case + */ +int utc_ml_pipeline_valve_set_open_n (void) +{ + status = ml_pipeline_valve_set_open (NULL, true); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-pipeline.c b/src/utc/nnstreamer/utc-nnstreamer-pipeline.c new file mode 100644 index 000000000..6b2d772f0 --- /dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-pipeline.c @@ -0,0 +1,274 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +#define wait_for_start(handle, state, status) \ +do {\ + int counter = 0;\ + while ((state == ML_PIPELINE_STATE_PAUSED || \ + state == ML_PIPELINE_STATE_READY) && counter < 20) {\ + g_usleep (50000);\ + counter ++;\ + status = ml_pipeline_get_state (handle, &state);\ + assert_eq (status, ML_ERROR_NONE);\ + }\ +} while (0)\ + +static int status; +static gchar * pipeline; +static ml_pipeline_h handle; +static ml_pipeline_state_e state; + +/** + * @testcase utc_ml_pipeline_construct_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline construct + */ +int utc_ml_pipeline_construct_p (void) +{ + pipeline = "videotestsrc num_buffers=2 ! fakesink"; + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_construct_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline construct, failure case + */ +int utc_ml_pipeline_construct_n (void) +{ + pipeline = "nonexistsrc ! fakesink"; + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_STREAMS_PIPE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_destroy_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline destroy + */ +int utc_ml_pipeline_destroy_p (void) +{ + pipeline = "videotestsrc num_buffers=2 ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224 ! tensor_converter ! fakesink"; + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_destroy_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline destroy, failure case + */ +int utc_ml_pipeline_destroy_n (void) +{ + status = ml_pipeline_destroy (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_get_state_p + * @since_tizen 5.5 + * @description Test NNStreamer get pipeline state + */ +int utc_ml_pipeline_get_state_p (void) +{ + pipeline = "videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224,framerate=60/1 ! tensor_converter ! valve name=valvex ! valve name=valvey ! input-selector name=is01 ! tensor_sink name=sinkx"; + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_get_state_n + * @since_tizen 5.5 + * @description Test NNStreamer get pipeline state, failure case + */ +int utc_ml_pipeline_get_state_n (void) +{ + pipeline = "videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224,framerate=60/1 ! tensor_converter ! valve name=valvex ! valve name=valvey ! input-selector name=is01 ! 
tensor_sink name=sinkx"; + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_get_state (handle, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_start_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline construct + */ +int utc_ml_pipeline_start_p (void) +{ + pipeline = "videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224,framerate=60/1 ! tensor_converter ! valve name=valvex ! valve name=valvey ! input-selector name=is01 ! tensor_sink name=sinkx"; + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_eq (state, ML_PIPELINE_STATE_PAUSED); + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_start_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline construct, failure case + */ +int utc_ml_pipeline_start_n (void) +{ + status = ml_pipeline_start (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_stop_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline stop + */ +int utc_ml_pipeline_stop_p (void) +{ + pipeline = "videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224,framerate=60/1 ! tensor_converter ! valve name=valvex ! valve name=valvey ! input-selector name=is01 ! tensor_sink name=sinkx"; + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + g_usleep (50000); /* 50ms is good for general systems, but not enough for emulators to start gst pipeline. Let a few frames flow. */ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + wait_for_start (handle, state, status); + assert_eq (state, ML_PIPELINE_STATE_PLAYING); + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + g_usleep (50000); /* 50ms. Let a few frames flow. 
*/ + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PAUSED); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_pipeline_stop_n + * @since_tizen 5.5 + * @description Test NNStreamer pipeline construct, Failure case + */ +int utc_ml_pipeline_stop_n (void) +{ + status = ml_pipeline_stop (NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_check_nnfw_availability_p + * @since_tizen 5.5 + * @description Test NNStreamer check nnfw availabiliry + */ +int utc_ml_check_nnfw_availability_p (void) +{ + bool result; + int status = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_ANY, &result); + assert_eq (status, ML_ERROR_NONE); + assert_eq (result, false); + + status = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_AUTO, &result); + assert_eq (status, ML_ERROR_NONE); + assert_eq (result, false); + + status = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_NPU, &result); + assert_eq (status, ML_ERROR_NONE); + assert_eq (result, false); + + return 0; +} + +/** + * @testcase utc_ml_check_nnfw_availability_n + * @since_tizen 5.5 + * @description Test NNStreamer check nnfw availabiliry, Failure case. + */ +int utc_ml_check_nnfw_availability_n (void) +{ + status = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_NPU, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-scenario.c b/src/utc/nnstreamer/utc-nnstreamer-scenario.c new file mode 100644 index 000000000..0d2cd89fd --- /dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-scenario.c @@ -0,0 +1,1615 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +/** + * @brief Struct to check the pipeline state changes. 
+ */ +typedef struct +{ + gboolean paused; + gboolean playing; +} TestPipeState; + +#define wait_for_start(handle, state, status) \ +do {\ + int counter = 0;\ + while ((state == ML_PIPELINE_STATE_PAUSED || \ + state == ML_PIPELINE_STATE_READY) && counter < 20) {\ + g_usleep (50000);\ + counter ++;\ + status = ml_pipeline_get_state (handle, &state);\ + assert_eq (status, ML_ERROR_NONE);\ + }\ +} while (0)\ + +/** + * @brief A tensor-sink callback for sink handle in a pipeline + */ +static void +test_sink_callback_dm01 (const ml_tensors_data_h data, + const ml_tensors_info_h info, void *user_data) +{ + gchar *filepath = (gchar *) user_data; + unsigned int i, num = 0; + void *data_ptr; + size_t data_size; + int status; + FILE *fp = g_fopen (filepath, "a"); + + if (fp == NULL) + return; + + ml_tensors_info_get_count (info, &num); + + for (i = 0; i < num; i++) { + status = ml_tensors_data_get_tensor_data (data, i, &data_ptr, &data_size); + if (status == ML_ERROR_NONE) + fwrite (data_ptr, data_size, 1, fp); + } + + fclose (fp); +} + +/** + * @brief A tensor-sink callback for sink handle in a pipeline + */ +static void +test_sink_callback_count (const ml_tensors_data_h data, + const ml_tensors_info_h info, void *user_data) +{ + guint *count = (guint *) user_data; + + *count = *count + 1; +} + +/** + * @brief Pipeline state changed callback + */ +static void +test_pipe_state_callback (ml_pipeline_state_e state, void *user_data) +{ + TestPipeState *pipe_state; + + pipe_state = (TestPipeState *) user_data; + + switch (state) { + case ML_PIPELINE_STATE_PAUSED: + pipe_state->paused = TRUE; + break; + case ML_PIPELINE_STATE_PLAYING: + pipe_state->playing = TRUE; + break; + default: + break; + } +} + +/** + * @brief compare the two files. + */ +static int +file_cmp (const gchar * f1, const gchar * f2) +{ + gboolean r; + gchar *content1, *content2; + gsize len1, len2; + int cmp = 0; + + r = g_file_get_contents (f1, &content1, &len1, NULL); + if (r != TRUE) + return -1; + + r = g_file_get_contents (f2, &content2, &len2, NULL); + if (r != TRUE) { + g_free (content1); + return -2; + } + + if (len1 == len2) { + cmp = memcmp (content1, content2, len1); + } else { + cmp = 1; + } + + g_free (content1); + g_free (content2); + + return cmp; +} + +static gchar * pipeline; +static uint8_t * content; +static uint8_t * uintarray1[10]; +static uint8_t * uintarray2[10]; + +/** + * @function utc_nnstreamer_scenario_startup + * @since_tizen 5.5 + * @description called before the specific test cases + */ +void utc_nnstreamer_scenario_startup(void) +{ + int i; + for (i = 0; i < 10; i++) { + uintarray1[i] = (uint8_t *) g_malloc (4); + uintarray1[i][0] = i + 4; + uintarray1[i][1] = i + 1; + uintarray1[i][2] = i + 3; + uintarray1[i][3] = i + 2; + + uintarray2[i] = (uint8_t *) g_malloc (4); + uintarray2[i][0] = i + 3; + uintarray2[i][1] = i + 2; + uintarray2[i][2] = i + 1; + uintarray2[i][3] = i + 4; + /* These will be free'ed by gstreamer (ML_PIPELINE_BUF_POLICY_AUTO_FREE) */ + /** @todo Check whether gstreamer really deallocates this */ + } +} + +/** + * @function utc_nnstreamer_scenario_cleanup + * @since_tizen 5.5 + * @description called after the specific test cases + */ +void utc_nnstreamer_scenario_cleanup(void) +{ + int i; + for (i = 0; i < 10; i++) { + g_free (uintarray1[i]); + g_free (uintarray2[i]); + } + g_free (pipeline); + g_free (content); +} + +/** + * @testcase utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p + * @since_tizen 5.5 + * @description Positive test case of pipeline construct & destruct + 
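* The description used here, "videotestsrc num_buffers=2 ! fakesink", contains no NNStreamer elements; only the construct/destroy round trip is verified.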
*/ +int utc_nnstreamer_scenario_construct_destruct_empty_pipeline_p (void) +{ + const char *pipeline = "videotestsrc num_buffers=2 ! fakesink"; + ml_pipeline_h handle; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq(status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq(status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_construct_destruct_pipeline_p1 + * @since_tizen 5.5 + * @description Positive test case of pipeline construct & destruct + */ +int utc_nnstreamer_scenario_construct_destruct_pipeline_p1 (void) +{ + const char *pipeline = "videotestsrc num_buffers=2 ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224 ! tensor_converter ! fakesink"; + ml_pipeline_h handle; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_construct_destruct_pipeline_p2 + * @since_tizen 5.5 + * @description Positive test case of pipeline construct & destruct + */ +int utc_nnstreamer_scenario_construct_destruct_pipeline_p2 (void) +{ + const char *pipeline = "videotestsrc num_buffers=2 ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224 ! tensor_converter ! valve name=valvex ! tensor_sink name=sinkx"; + ml_pipeline_h handle; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_construct_pipeline_error_case_n1 + * @since_tizen 5.5 + * @description Negative test case of pipeline construct & destruct + */ +int utc_nnstreamer_scenario_construct_pipeline_error_case_n1 (void) +{ + const char *pipeline = "nonexistsrc ! fakesink"; + ml_pipeline_h handle; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_STREAMS_PIPE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_construct_pipeline_error_case_n2 + * @since_tizen 5.5 + * @description Negative test case of pipeline construct & destruct + */ +int utc_nnstreamer_scenario_construct_pipeline_error_case_n2 (void) +{ + const char *pipeline = "videotestsrc num_buffers=2 ! audioconvert ! fakesink"; + ml_pipeline_h handle; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_STREAMS_PIPE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_pipeline_state_test_p1 + * @since_tizen 5.5 + * @description Positive test case of pipeline construct & destruct + */ +int utc_nnstreamer_scenario_pipeline_state_test_p1 (void) +{ + const char *pipeline = "videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224,framerate=60/1 ! tensor_converter ! valve name=valvex ! valve name=valvey ! input-selector name=is01 ! 
tensor_sink name=sinkx"; + ml_pipeline_h handle; + ml_pipeline_state_e state; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + g_usleep (50000); /* 50ms is good for general systems, but not enough for emulators to start gst pipeline. Let a few frames flow. */ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + wait_for_start (handle, state, status); + assert_eq (state, ML_PIPELINE_STATE_PLAYING); + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + g_usleep (50000); /* 50ms. Let a few frames flow. */ + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PAUSED); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_pipeline_state_test_p2 + * @since_tizen 5.5 + * @description Positive test case of pipeline construct & destruct + */ +int utc_nnstreamer_scenario_pipeline_state_test_p2 (void) +{ + const char *pipeline = "videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=224,height=224,framerate=60/1 ! tensor_converter ! valve name=valvex ! valve name=valvey ! input-selector name=is01 ! tensor_sink name=sinkx"; + ml_pipeline_h handle; + ml_pipeline_state_e state; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + g_usleep (50000); /* 50ms is good for general systems, but not enough for emulators to start gst pipeline. Let a few frames flow. */ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + wait_for_start (handle, state, status); + assert_eq (state, ML_PIPELINE_STATE_PLAYING); + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + g_usleep (50000); /* 50ms. Let a few frames flow. */ + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PAUSED); + + /* Resume playing */ + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + g_usleep (50000); /* 50ms. 
Enough to empty the queue */ + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PAUSED); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_pipeline_state_test_p3 + * @since_tizen 5.5 + * @description Positive test case of pipeline construct & destruct + */ +int utc_nnstreamer_scenario_pipeline_state_test_p3 (void) +{ + const gchar *_tmpdir = g_get_tmp_dir (); + const gchar *_dirname = "nns-tizen-XXXXXX"; + gchar *fullpath = g_build_path ("/", _tmpdir, _dirname, NULL); + gchar *dir = g_mkdtemp ((gchar *) fullpath); + gchar *file1 = g_build_path ("/", dir, "valve1", NULL); + gchar *pipeline = + g_strdup_printf + ("videotestsrc is-live=true ! videoconvert ! videoscale ! video/x-raw,format=RGBx,width=16,height=16,framerate=10/1 ! tensor_converter ! queue ! valve name=valve1 ! filesink location=\"%s\"", + file1); + GStatBuf buf; + + ml_pipeline_h handle; + ml_pipeline_state_e state; + ml_pipeline_valve_h valve1; + + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + assert (dir != NULL); + + status = ml_pipeline_valve_get_handle (handle, "valve1", &valve1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_valve_set_open (valve1, false); /* close */ + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + g_usleep (50000); /* 50ms. Wait for the pipeline start. */ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + wait_for_start (handle, state, status); + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = g_lstat (file1, &buf); + assert_eq (status, 0); + assert_eq (buf.st_size, 0); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_valve_set_open (valve1, true); /* open */ + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_valve_release_handle (valve1); /* release valve handle */ + assert_eq (status, ML_ERROR_NONE); + + g_usleep (500000); /* 500ms. Let a few frames flow. (10Hz x 0.5s --> 5)*/ + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + status = g_lstat (file1, &buf); + assert_eq (status, 0); + assert_geq (buf.st_size, 2048); /* At least two frames during 500ms */ + assert_leq (buf.st_size, 6144); /* At most six frames during 500ms */ + assert_eq (buf.st_size % 1024, 0); /* It should be divisible by 1024 */ + + g_free (fullpath); + g_free (file1); + g_free (pipeline); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_valve_error_cases_n + * @since_tizen 5.5 + * @description Failure case to handle valve element with invalid param. + */ +int utc_nnstreamer_scenario_valve_error_cases_n (void) +{ + ml_pipeline_h handle; + ml_pipeline_valve_h valve_h; + gchar *pipeline; + int status; + + pipeline = g_strdup ("videotestsrc num-buffers=3 ! videoconvert ! valve name=valvex ! tensor_converter ! 
tensor_sink name=sinkx"); + + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + /* invalid param : pipe */ + status = ml_pipeline_valve_get_handle (NULL, "valvex", &valve_h); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : name */ + status = ml_pipeline_valve_get_handle (handle, NULL, &valve_h); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : wrong name */ + status = ml_pipeline_valve_get_handle (handle, "wrongname", &valve_h); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : invalid type */ + status = ml_pipeline_valve_get_handle (handle, "sinkx", &valve_h); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : handle */ + status = ml_pipeline_valve_get_handle (handle, "valvex", NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + g_free (pipeline); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_sink_p1 + * @since_tizen 5.5 + * @description Test NNStreamer pipeline sink + */ +int utc_nnstreamer_scenario_sink_p1 (void) +{ + const gchar *_tmpdir = g_get_tmp_dir (); + const gchar *_dirname = "nns-tizen-XXXXXX"; + gchar *fullpath = g_build_path ("/", _tmpdir, _dirname, NULL); + gchar *dir = g_mkdtemp ((gchar *) fullpath); + + assert_neq (dir, (gchar *) NULL); + + gchar *file1 = g_build_path ("/", dir, "original", NULL); + gchar *file2 = g_build_path ("/", dir, "sink", NULL); + gchar *pipeline = + g_strdup_printf + ("videotestsrc num-buffers=3 ! videoconvert ! video/x-raw,format=BGRx,width=64,height=48,famerate=60/1 ! tee name=t t. ! queue ! filesink location=\"%s\" t. ! queue ! tensor_converter ! tensor_sink name=sinkx", + file1); + ml_pipeline_h handle; + ml_pipeline_state_e state; + ml_pipeline_sink_h sinkhandle; + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_register (handle, "sinkx", test_sink_callback_dm01, file2, &sinkhandle); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + g_usleep (10000); /* 10ms. Wait a bit. */ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + g_usleep (100000); /* 100ms. Let a few frames flow. */ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PLAYING); + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + g_usleep (10000); /* 10ms. Wait a bit. 
*/ + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PAUSED); + + status = ml_pipeline_sink_unregister (sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + g_free (pipeline); + + /* File Comparison to check the integrity */ + assert_eq (file_cmp (file1, file2), 0); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_sink_p2 + * @since_tizen 5.5 + * @description Test NNStreamer pipeline sink + */ +int utc_nnstreamer_scenario_sink_p2 (void) +{ + ml_pipeline_h handle; + ml_pipeline_state_e state; + ml_pipeline_sink_h sinkhandle; + gchar *pipeline; + int status; + guint *count_sink; + TestPipeState *pipe_state; + + /* pipeline with appsink */ + pipeline = g_strdup ("videotestsrc num-buffers=3 ! videoconvert ! tensor_converter ! appsink name=sinkx"); + + count_sink = (guint *) g_malloc (sizeof (guint)); + *count_sink = 0; + + pipe_state = (TestPipeState *) g_new0 (TestPipeState, 1); + + status = ml_pipeline_construct (pipeline, test_pipe_state_callback, pipe_state, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_register (handle, "sinkx", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + g_usleep (100000); /* 100ms. Let a few frames flow. */ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PLAYING); + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + g_usleep (10000); /* 10ms. Wait a bit. */ + + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + assert_eq (state, ML_PIPELINE_STATE_PAUSED); + + status = ml_pipeline_sink_unregister (sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + assert (*count_sink > 0U); + assert (pipe_state->paused); + assert (pipe_state->playing); + + g_free (pipeline); + g_free (count_sink); + g_free (pipe_state); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_sink_error_cases_n + * @since_tizen 5.5 + * @description Failure case to register callback with invalid param. + */ +int utc_nnstreamer_scenario_sink_error_cases_n (void) +{ + ml_pipeline_h handle; + ml_pipeline_sink_h sinkhandle; + gchar *pipeline; + int status; + guint *count_sink; + + pipeline = g_strdup ("videotestsrc num-buffers=3 ! videoconvert ! valve name=valvex ! tensor_converter ! 
tensor_sink name=sinkx"); + + count_sink = (guint *) g_malloc (sizeof (guint)); + *count_sink = 0; + + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + /* invalid param : pipe */ + status = ml_pipeline_sink_register (NULL, "sinkx", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : name */ + status = ml_pipeline_sink_register (handle, NULL, test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : wrong name */ + status = ml_pipeline_sink_register (handle, "wrongname", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : invalid type */ + status = ml_pipeline_sink_register (handle, "valvex", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : callback */ + status = ml_pipeline_sink_register (handle, "sinkx", NULL, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : handle */ + status = ml_pipeline_sink_register (handle, "sinkx", test_sink_callback_count, count_sink, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_pipeline_sink_register (handle, "sinkx", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + g_usleep (100000); /* 100ms. Let a few frames flow. */ + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_unregister (sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + assert (*count_sink > 0U); + + g_free (pipeline); + g_free (count_sink); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_src_p + * @since_tizen 5.5 + * @description Test NNStreamer pipeline src + */ +int utc_nnstreamer_scenario_src_p (void) +{ + const gchar *_tmpdir = g_get_tmp_dir (); + const gchar *_dirname = "nns-tizen-XXXXXX"; + gchar *fullpath = g_build_path ("/", _tmpdir, _dirname, NULL); + gchar *dir = g_mkdtemp ((gchar *) fullpath); + gchar *file1 = g_build_path ("/", dir, "output", NULL); + ml_pipeline_h handle; + ml_pipeline_state_e state; + ml_pipeline_src_h srchandle; + int status; + ml_tensors_info_h info; + ml_tensors_data_h data1, data2; + unsigned int count = 0; + ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; + ml_tensor_dimension dim = { 0, }; + + int i; + gsize len; + + pipeline = + g_strdup_printf ("appsrc name=srcx ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! filesink location=\"%s\"", + file1); + + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + assert (dir != NULL); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + g_usleep (10000); /* 10ms. Wait a bit. 
*/ + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); /* At this moment, it can be READY, PAUSED, or PLAYING */ + assert_neq (state, ML_PIPELINE_STATE_UNKNOWN); + assert_neq (state, ML_PIPELINE_STATE_NULL); + + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_get_tensors_info (srchandle, &info); + assert_eq (status, ML_ERROR_NONE); + + ml_tensors_info_get_count (info, &count); + assert_eq (count, 1U); + + ml_tensors_info_get_tensor_type (info, 0, &type); + assert_eq (type, ML_TENSOR_TYPE_UINT8); + + ml_tensors_info_get_tensor_dimension (info, 0, dim); + assert_eq (dim[0], 4U); + assert_eq (dim[1], 1U); + assert_eq (dim[2], 1U); + assert_eq (dim[3], 1U); + + status = ml_tensors_data_create (info, &data1); + assert_eq (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (info); + + status = ml_tensors_data_set_tensor_data (data1, 0, uintarray1[0], 4); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + assert_eq (status, ML_ERROR_NONE); + g_usleep (50000); /* 50ms. Wait a bit. */ + + status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + assert_eq (status, ML_ERROR_NONE); + g_usleep (50000); /* 50ms. Wait a bit. */ + + status = ml_pipeline_src_release_handle (srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_get_tensors_info (srchandle, &info); + assert_eq (status, ML_ERROR_NONE); + + ml_tensors_info_get_count (info, &count); + assert_eq (count, 1U); + + ml_tensors_info_get_tensor_type (info, 0, &type); + assert_eq (type, ML_TENSOR_TYPE_UINT8); + + ml_tensors_info_get_tensor_dimension (info, 0, dim); + assert_eq (dim[0], 4U); + assert_eq (dim[1], 1U); + assert_eq (dim[2], 1U); + assert_eq (dim[3], 1U); + + for (i = 0; i < 10; i++) { + status = ml_tensors_data_set_tensor_data (data1, 0, uintarray1[i], 4); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_data_create (info, &data2); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_data_set_tensor_data (data2, 0, uintarray2[i], 4); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_input_data (srchandle, data2, ML_PIPELINE_BUF_POLICY_AUTO_FREE); + assert_eq (status, ML_ERROR_NONE); + + g_usleep (50000); /* 50ms. Wait a bit. 
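Each iteration pushes two 4-byte tensors: data1 is reused under the DO_NOT_FREE policy, while data2 is created per iteration and released by the AUTO_FREE policy.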
*/ + } + + status = ml_pipeline_src_release_handle (srchandle); + assert_eq (status, ML_ERROR_NONE); + + g_usleep (50000); /* Wait for the pipeline to flush all */ + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + assert (g_file_get_contents (file1, (gchar **) &content, &len, NULL)); + assert_eq (len, 8U * 11); + + for (i = 0; i < 10; i++) { + assert_eq (content[i * 8 + 0 + 8], i + 4); + assert_eq (content[i * 8 + 1 + 8], i + 1); + assert_eq (content[i * 8 + 2 + 8], i + 3); + assert_eq (content[i * 8 + 3 + 8], i + 2); + assert_eq (content[i * 8 + 4 + 8], i + 3); + assert_eq (content[i * 8 + 5 + 8], i + 2); + assert_eq (content[i * 8 + 6 + 8], i + 1); + assert_eq (content[i * 8 + 7 + 8], i + 4); + } + + status = ml_tensors_info_destroy (info); + status = ml_tensors_data_destroy (data1); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_src_error_case_n1 + * @since_tizen 5.5 + * @description Failure case when pipeline is NULL. + */ +int utc_nnstreamer_scenario_src_error_case_n1 (void) +{ + int status; + ml_pipeline_src_h srchandle; + + status = ml_pipeline_src_get_handle (NULL, "dummy", &srchandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_src_error_case_n2 + * @since_tizen 5.5 + * @description Failure case when the name of source node is wrong. + */ +int utc_nnstreamer_scenario_src_error_case_n2 (void) +{ + const char *pipeline = "appsrc name=mysource ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! valve name=valvex ! tensor_sink"; + ml_pipeline_h handle; + ml_pipeline_src_h srchandle; + + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + /* invalid param : pipe */ + status = ml_pipeline_src_get_handle (NULL, "mysource", &srchandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : name */ + status = ml_pipeline_src_get_handle (handle, NULL, &srchandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : wrong name */ + status = ml_pipeline_src_get_handle (handle, "wrongname", &srchandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : invalid type */ + status = ml_pipeline_src_get_handle (handle, "valvex", &srchandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : handle */ + status = ml_pipeline_src_get_handle (handle, "mysource", NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_src_error_case_n3 + * @since_tizen 5.5 + * @description Failure case when the number of tensors is 0 or bigger than ML_TENSOR_SIZE_LIMIT; + */ +int utc_nnstreamer_scenario_src_error_case_n3 (void) +{ + const char *pipeline = "appsrc name=srcx ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! 
tensor_sink"; + ml_pipeline_h handle; + ml_pipeline_src_h srchandle; + ml_tensors_data_h data; + ml_tensors_info_h info; + + int status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_src_get_tensors_info (srchandle, &info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_data_create (info, &data); + assert_eq (status, ML_ERROR_NONE); + + /* null data */ + status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_pipeline_src_release_handle (srchandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_data_destroy (data); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_switch_pipeline_p1 + * @since_tizen 5.5 + * @description Test NNStreamer pipeline switch + */ +int utc_nnstreamer_scenario_switch_pipeline_p1 (void) +{ + ml_pipeline_h handle; + ml_pipeline_switch_h switchhandle; + ml_pipeline_sink_h sinkhandle; + ml_pipeline_switch_e type; + ml_pipeline_state_e state; + gchar *pipeline; + int status; + guint *count_sink; + TestPipeState *pipe_state; + gchar **node_list = NULL; + + pipeline = g_strdup ("input-selector name=ins ! tensor_converter ! tensor_sink name=sinkx " + "videotestsrc is-live=true ! videoconvert ! ins.sink_0 " + "videotestsrc num-buffers=3 is-live=true ! videoconvert ! ins.sink_1"); + + count_sink = (guint *) g_malloc (sizeof (guint)); + *count_sink = 0; + + pipe_state = (TestPipeState *) g_new0 (TestPipeState, 1); + + status = ml_pipeline_construct (pipeline, test_pipe_state_callback, pipe_state, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_get_handle (handle, "ins", &type, &switchhandle); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_PIPELINE_SWITCH_INPUT_SELECTOR); + + status = ml_pipeline_switch_get_pad_list (switchhandle, &node_list); + assert_eq (status, ML_ERROR_NONE); + + if (node_list) { + gchar *name = NULL; + guint idx = 0; + + while ((name = node_list[idx]) != NULL) { + assert (g_str_equal (name, "sink_0") || g_str_equal (name, "sink_1")); + idx++; + g_free (name); + } + + assert_eq (idx, 2U); + g_free (node_list); + } + + status = ml_pipeline_sink_register (handle, "sinkx", test_sink_callback_count, count_sink, &sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_select (switchhandle, "sink_1"); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + g_usleep (50000); + status = ml_pipeline_get_state (handle, &state); + assert_eq (status, ML_ERROR_NONE); + wait_for_start (handle, state, status); + + g_usleep (300000); /* 300ms. Let a few frames flow. 
*/ + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_unregister (sinkhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_release_handle (switchhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + assert_eq (*count_sink, 3U); + + assert (pipe_state->paused); + assert (pipe_state->playing); + + g_free (pipeline); + g_free (count_sink); + g_free (pipe_state); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_switch_pipeline_p2 + * @since_tizen 5.5 + * @description Test NNStreamer pipeline switch + */ +int utc_nnstreamer_scenario_switch_pipeline_p2 (void) +{ + ml_pipeline_h handle; + ml_pipeline_switch_h switchhandle; + ml_pipeline_sink_h sinkhandle0, sinkhandle1; + ml_pipeline_switch_e type; + gchar *pipeline; + int status; + guint *count_sink0, *count_sink1; + gchar **node_list = NULL; + + /** + * Prerolling problem + * For running the test, set async=false in the sink element when using an output selector. + * The pipeline state can be changed to paused after all sink element receive buffer. + */ + pipeline = g_strdup ("videotestsrc is-live=true ! videoconvert ! tensor_converter ! output-selector name=outs " + "outs.src_0 ! tensor_sink name=sink0 async=false " + "outs.src_1 ! tensor_sink name=sink1 async=false"); + + count_sink0 = (guint *) g_malloc (sizeof (guint)); + *count_sink0 = 0; + + count_sink1 = (guint *) g_malloc (sizeof (guint)); + *count_sink1 = 0; + + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_get_handle (handle, "outs", &type, &switchhandle); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_PIPELINE_SWITCH_OUTPUT_SELECTOR); + + status = ml_pipeline_switch_get_pad_list (switchhandle, &node_list); + assert_eq (status, ML_ERROR_NONE); + + if (node_list) { + gchar *name = NULL; + guint idx = 0; + + while ((name = node_list[idx]) != NULL) { + assert (g_str_equal (name, "src_0") || g_str_equal (name, "src_1")); + idx++; + g_free (name); + } + + assert_eq (idx, 2U); + g_free (node_list); + } + + status = ml_pipeline_sink_register (handle, "sink0", test_sink_callback_count, count_sink0, &sinkhandle0); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_register (handle, "sink1", test_sink_callback_count, count_sink1, &sinkhandle1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_select (switchhandle, "src_1"); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_start (handle); + assert_eq (status, ML_ERROR_NONE); + + g_usleep (200000); /* 200ms. Let a few frames flow. */ + + status = ml_pipeline_stop (handle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_unregister (sinkhandle0); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_sink_unregister (sinkhandle1); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_switch_release_handle (switchhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + assert_eq (*count_sink0, 0U); + assert (*count_sink1 > 0U); + + g_free (pipeline); + g_free (count_sink0); + g_free (count_sink1); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_switch_pipeline_error_cases_n + * @since_tizen 5.5 + * @description Failure case to handle input-selector element with invalid param. 
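+ * The same pipeline is also used to exercise ml_pipeline_switch_select() with NULL and unknown pad names.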
+ */ +int utc_nnstreamer_scenario_switch_pipeline_error_cases_n (void) +{ + ml_pipeline_h handle; + ml_pipeline_switch_h switchhandle; + ml_pipeline_switch_e type; + gchar *pipeline; + int status; + + pipeline = g_strdup ("input-selector name=ins ! tensor_converter ! tensor_sink name=sinkx " + "videotestsrc is-live=true ! videoconvert ! ins.sink_0 " + "videotestsrc num-buffers=3 ! videoconvert ! ins.sink_1"); + + status = ml_pipeline_construct (pipeline, NULL, NULL, &handle); + assert_eq (status, ML_ERROR_NONE); + + /* invalid param : pipe */ + status = ml_pipeline_switch_get_handle (NULL, "ins", &type, &switchhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : name */ + status = ml_pipeline_switch_get_handle (handle, NULL, &type, &switchhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : wrong name */ + status = ml_pipeline_switch_get_handle (handle, "wrongname", &type, &switchhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : invalid type */ + status = ml_pipeline_switch_get_handle (handle, "sinkx", &type, &switchhandle); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : handle */ + status = ml_pipeline_switch_get_handle (handle, "ins", &type, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* successfully get switch handle if the param type is null */ + status = ml_pipeline_switch_get_handle (handle, "ins", NULL, &switchhandle); + assert_eq (status, ML_ERROR_NONE); + + /* invalid param : handle */ + status = ml_pipeline_switch_select (NULL, "invalidpadname"); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : pad name */ + status = ml_pipeline_switch_select (switchhandle, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid param : wrong pad name */ + status = ml_pipeline_switch_select (switchhandle, "wrongpadname"); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_pipeline_switch_release_handle (switchhandle); + assert_eq (status, ML_ERROR_NONE); + + status = ml_pipeline_destroy (handle); + assert_eq (status, ML_ERROR_NONE); + + g_free (pipeline); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_check_nnfw_availability_p + * @since_tizen 5.5 + * @description Test NNStreamer Utility for checking availability of NNFW + */ +int utc_nnstreamer_scenario_check_nnfw_availability_p (void) +{ + bool result; + int status = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_ANY, &result); + assert_eq (status, ML_ERROR_NONE); + assert_eq (result, false); + + status = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_AUTO, &result); + assert_eq (status, ML_ERROR_NONE); + assert_eq (result, false); + + status = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_NPU, &result); + assert_eq (status, ML_ERROR_NONE); + assert_eq (result, false); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_check_tensor_info_utilities_p + * @since_tizen 5.5 + * @description Test NNStreamer Utility for checking tensors info handle + */ +int utc_nnstreamer_scenario_check_tensor_info_utilities_p (void) +{ + ml_tensors_info_h info; + ml_tensor_dimension in_dim, out_dim; + ml_tensor_type_e out_type; + gchar *out_name; + int status; + + status = ml_tensors_info_create (&info); + assert_eq (status, ML_ERROR_NONE); + + in_dim[0] = 3; + in_dim[1] = 300; + in_dim[2] = 300; + in_dim[3] = 1; + + /* add tensor info */ + status = ml_tensors_info_set_count (info, 2); + assert_eq (status, ML_ERROR_NONE); + 
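+ /* Only two tensors were reserved above, so the index-2 accesses below are expected to fail with ML_ERROR_INVALID_PARAMETER. */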
+ status = ml_tensors_info_set_tensor_type (info, 0, ML_TENSOR_TYPE_UINT8); + assert_eq (status, ML_ERROR_NONE); + status = ml_tensors_info_set_tensor_dimension (info, 0, in_dim); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_type (info, 1, ML_TENSOR_TYPE_FLOAT64); + assert_eq (status, ML_ERROR_NONE); + status = ml_tensors_info_set_tensor_dimension (info, 1, in_dim); + assert_eq (status, ML_ERROR_NONE); + status = ml_tensors_info_set_tensor_name (info, 1, "tensor-name-test"); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_type (info, 2, ML_TENSOR_TYPE_UINT64); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + status = ml_tensors_info_set_tensor_dimension (info, 2, in_dim); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* get tensor info */ + status = ml_tensors_info_get_tensor_type (info, 0, &out_type); + assert_eq (status, ML_ERROR_NONE); + assert_eq (out_type, ML_TENSOR_TYPE_UINT8); + + status = ml_tensors_info_get_tensor_dimension (info, 0, out_dim); + assert_eq (status, ML_ERROR_NONE); + assert_eq (out_dim[0], 3U); + assert_eq (out_dim[1], 300U); + assert_eq (out_dim[2], 300U); + assert_eq (out_dim[3], 1U); + + status = ml_tensors_info_get_tensor_name (info, 0, &out_name); + assert_eq (status, ML_ERROR_NONE); + assert (out_name == NULL); + + status = ml_tensors_info_get_tensor_type (info, 1, &out_type); + assert_eq (status, ML_ERROR_NONE); + assert_eq (out_type, ML_TENSOR_TYPE_FLOAT64); + + status = ml_tensors_info_get_tensor_dimension (info, 1, out_dim); + assert_eq (status, ML_ERROR_NONE); + assert_eq (out_dim[0], 3U); + assert_eq (out_dim[1], 300U); + assert_eq (out_dim[2], 300U); + assert_eq (out_dim[3], 1U); + + status = ml_tensors_info_get_tensor_name (info, 1, &out_name); + assert_eq (status, ML_ERROR_NONE); + assert (out_name && g_str_equal (out_name, "tensor-name-test")); + + status = ml_tensors_info_get_tensor_type (info, 2, &out_type); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_tensors_info_get_tensor_dimension (info, 2, out_dim); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_tensors_info_get_tensor_name (info, 2, &out_name); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* get tensor size */ + // status = ml_tensors_info_get_tensor_size (info, 0, &data_size); + // assert_eq (status, ML_ERROR_NONE); + // assert (data_size == (3 * 300 * 300)); + + // status = ml_tensors_info_get_tensor_size (info, 1, &data_size); + // assert_eq (status, ML_ERROR_NONE); + // assert (data_size == (3 * 300 * 300 * 8)); + + // status = ml_tensors_info_get_tensor_size (info, -1, &data_size); + // assert_eq (status, ML_ERROR_NONE); + // assert (data_size == ((3 * 300 * 300) + (3 * 300 * 300 * 8))); + + // status = ml_tensors_info_get_tensor_size (info, 2, &data_size); + // assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_tensors_info_destroy (info); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_filter_tensorflow_lite_p1 + * @since_tizen 5.5 + * @description Test NNStreamer single shot (tensorflow-lite) + */ +int utc_nnstreamer_scenario_filter_tensorflow_lite_p1 (void) +{ + ml_single_h single; + ml_tensors_info_h in_info, out_info; + ml_tensors_info_h in_res, out_res; + ml_tensors_data_h input, output; + ml_tensor_dimension in_dim, out_dim, res_dim; + ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; + unsigned int count = 0; + char *name = NULL; + int status; + + gchar *test_model; + + char 
pszValue[CONFIG_VALUE_LEN_MAX] = {0,}; + + char *model_file = "mobilenet_v1_1.0_224_quant.tflite"; + char *model_file_path = NULL; + + if ( true == GetValueForTCTSetting("DEVICE_SUITE_TARGET_30", pszValue, API_NAMESPACE )) + { + int model_path_len = strlen(pszValue) + strlen(model_file) + 10; + model_file_path = (char*)malloc(model_path_len); + CHECK_HANDLE(model_file_path,"malloc:failure:utc_nnstreamer_scenario_filter_tensorflow_lite_p1"); + snprintf(model_file_path, model_path_len, "%s/res/res/%s", pszValue, model_file); + } + + test_model = model_file_path; + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + ml_tensors_info_create (&in_info); + ml_tensors_info_create (&out_info); + ml_tensors_info_create (&in_res); + ml_tensors_info_create (&out_res); + + in_dim[0] = 3; + in_dim[1] = 224; + in_dim[2] = 224; + in_dim[3] = 1; + ml_tensors_info_set_count (in_info, 1); + ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim); + + out_dim[0] = 1001; + out_dim[1] = 1; + out_dim[2] = 1; + out_dim[3] = 1; + ml_tensors_info_set_count (out_info, 1); + ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim); + + status = ml_single_open (&single, test_model, in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + /* input tensor in filter */ + status = ml_single_get_input_info (single, &in_res); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_get_count (in_res, &count); + assert_eq (status, ML_ERROR_NONE); + assert_eq (count, 1U); + + status = ml_tensors_info_get_tensor_name (in_res, 0, &name); + assert_eq (status, ML_ERROR_NONE); + assert (name == NULL); + + status = ml_tensors_info_get_tensor_type (in_res, 0, &type); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_TENSOR_TYPE_UINT8); + + ml_tensors_info_get_tensor_dimension (in_res, 0, res_dim); + assert (in_dim[0] == res_dim[0]); + assert (in_dim[1] == res_dim[1]); + assert (in_dim[2] == res_dim[2]); + assert (in_dim[3] == res_dim[3]); + + /* output tensor in filter */ + status = ml_single_get_output_info (single, &out_res); + assert_eq (status, ML_ERROR_NONE); + + status = ml_tensors_info_get_count (out_res, &count); + assert_eq (status, ML_ERROR_NONE); + assert_eq (count, 1U); + + status = ml_tensors_info_get_tensor_name (out_res, 0, &name); + assert_eq (status, ML_ERROR_NONE); + assert (name == NULL); + + status = ml_tensors_info_get_tensor_type (out_res, 0, &type); + assert_eq (status, ML_ERROR_NONE); + assert_eq (type, ML_TENSOR_TYPE_UINT8); + + ml_tensors_info_get_tensor_dimension (out_res, 0, res_dim); + assert (out_dim[0] == res_dim[0]); + assert (out_dim[1] == res_dim[1]); + assert (out_dim[2] == res_dim[2]); + assert (out_dim[3] == res_dim[3]); + + input = output = NULL; + + /* generate dummy data */ + status = ml_tensors_data_create (in_info, &input); + assert_eq (status, ML_ERROR_NONE); + assert (input != NULL); + + status = ml_single_invoke (single, input, &output); + assert_eq (status, ML_ERROR_NONE); + assert (output != NULL); + + ml_tensors_data_destroy (output); + ml_tensors_data_destroy (input); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + g_free (test_model); + ml_tensors_info_destroy (in_info); + ml_tensors_info_destroy (out_info); + ml_tensors_info_destroy (in_res); + ml_tensors_info_destroy (out_res); + + return 0; +} + +/** + * @testcase 
utc_nnstreamer_scenario_filter_tensorflow_lite_p2 + * @since_tizen 5.5 + * @description Test NNStreamer single shot (tensorflow-lite), Start pipeline without tensor info + */ +int utc_nnstreamer_scenario_filter_tensorflow_lite_p2 (void) +{ + ml_single_h single; + ml_tensors_info_h in_info, out_info; + ml_tensors_data_h input, output; + ml_tensor_dimension in_dim, out_dim; + int status; + + gchar *test_model; + + char pszValue[CONFIG_VALUE_LEN_MAX] = {0,}; + + char *model_file = "mobilenet_v1_1.0_224_quant.tflite"; + char *model_file_path = NULL; + + if ( true == GetValueForTCTSetting("DEVICE_SUITE_TARGET_30", pszValue, API_NAMESPACE )) + { + int model_path_len = strlen(pszValue) + strlen(model_file) + 10; + model_file_path = (char*)malloc(model_path_len); + CHECK_HANDLE(model_file_path,"malloc:failure:utc_nnstreamer_scenario_filter_tensorflow_lite_p2"); + snprintf(model_file_path, model_path_len, "%s/res/res/%s", pszValue, model_file); + } + + test_model = model_file_path; + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + ml_tensors_info_create (&in_info); + ml_tensors_info_create (&out_info); + + in_dim[0] = 3; + in_dim[1] = 224; + in_dim[2] = 224; + in_dim[3] = 1; + ml_tensors_info_set_count (in_info, 1); + ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim); + + out_dim[0] = 1001; + out_dim[1] = 1; + out_dim[2] = 1; + out_dim[3] = 1; + ml_tensors_info_set_count (out_info, 1); + ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + input = output = NULL; + + /* generate dummy data */ + status = ml_tensors_data_create (in_info, &input); + assert_eq (status, ML_ERROR_NONE); + assert (input != NULL); + + status = ml_single_invoke (single, input, &output); + assert_eq (status, ML_ERROR_NONE); + assert (output != NULL); + + ml_tensors_data_destroy (output); + ml_tensors_data_destroy (input); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + g_free (test_model); + ml_tensors_info_destroy (in_info); + ml_tensors_info_destroy (out_info); + + return 0; +} + +/** + * @testcase utc_nnstreamer_scenario_filter_tensorflow_lite_n + * @since_tizen 5.5 + * @description Test NNStreamer single shot (tensorflow-lite), Failure case with invalid param. 
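+ * Every ml_single_open() call below is expected to fail until valid tensor info and a supported framework type are supplied.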
+ */ +int utc_nnstreamer_scenario_filter_tensorflow_lite_n (void) +{ + ml_single_h single; + ml_tensors_info_h in_info, out_info; + ml_tensor_dimension in_dim, out_dim; + int status; + + gchar *test_model; + + char pszValue[CONFIG_VALUE_LEN_MAX] = {0,}; + + char *model_file = "mobilenet_v1_1.0_224_quant.tflite"; + char *model_file_path = NULL; + + if ( true == GetValueForTCTSetting("DEVICE_SUITE_TARGET_30", pszValue, API_NAMESPACE )) + { + int model_path_len = strlen(pszValue) + strlen(model_file) + 10; + model_file_path = (char*)malloc(model_path_len); + CHECK_HANDLE(model_file_path,"malloc:failure:utc_nnstreamer_scenario_filter_tensorflow_lite_n"); + snprintf(model_file_path, model_path_len, "%s/res/res/%s", pszValue, model_file); + } + + test_model = model_file_path; + + ml_tensors_info_create (&in_info); + ml_tensors_info_create (&out_info); + + /* invalid file path */ + status = ml_single_open (&single, "wrong_file_name", in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* null file path */ + status = ml_single_open (&single, NULL, in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid handle */ + status = ml_single_open (NULL, test_model, in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid input tensor info */ + status = ml_single_open (&single, test_model, in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + in_dim[0] = 3; + in_dim[1] = 224; + in_dim[2] = 224; + in_dim[3] = 1; + ml_tensors_info_set_count (in_info, 1); + ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim); + + /* invalid output tensor info */ + status = ml_single_open (&single, test_model, in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + out_dim[0] = 1001; + out_dim[1] = 1; + out_dim[2] = 1; + out_dim[3] = 1; + ml_tensors_info_set_count (out_info, 1); + ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim); + + /* invalid file extension */ + status = ml_single_open (&single, test_model, in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* invalid handle */ + status = ml_single_close (single); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + /* Successfully opened unknown fw type (tf-lite) */ + status = ml_single_open (&single, test_model, in_info, out_info, + ML_NNFW_TYPE_ANY, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + g_free (test_model); + ml_tensors_info_destroy (in_info); + ml_tensors_info_destroy (out_info); + + return 0; +} diff --git a/src/utc/nnstreamer/utc-nnstreamer-single.c b/src/utc/nnstreamer/utc-nnstreamer-single.c new file mode 100644 index 000000000..20248a3f3 --- /dev/null +++ b/src/utc/nnstreamer/utc-nnstreamer-single.c @@ -0,0 +1,324 @@ +// +// Copyright (c) 2019 Samsung Electronics Co., Ltd. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include /* GStatBuf */ + +#include "tct_common.h" + +#define API_NAMESPACE "nnstreamer" + +static ml_single_h single; +static gchar *test_model; +static int status; + +const gchar *model_file = "mobilenet_v1_1.0_224_quant.tflite"; + +/** + * @function utc_nnstreamer_single_startup + * @description Called before each test + * @parameter NA + * @return NA + */ +void utc_nnstreamer_single_startup(void) +{ + test_model = NULL; + + FPRINTF("[%s][%d][%s] [STARTUP]\\n", __FILE__, __LINE__, __FUNCTION__); + char pszValue[CONFIG_VALUE_LEN_MAX] = {0,}; + + if ( true == GetValueForTCTSetting("DEVICE_SUITE_TARGET_30", pszValue, API_NAMESPACE )) + { + int model_path_len = strlen(pszValue) + strlen(model_file) + 10; + test_model = (gchar*)malloc(model_path_len); + snprintf(test_model, model_path_len, "%s/res/res/%s", pszValue, model_file); + FPRINTF("[%s][%d][%s] %s\\n", __FILE__, __LINE__, __FUNCTION__, test_model); + } + else + { + FPRINTF("[%s][%d][%s] ERROR TO GET MODEL PATH\\n", __FILE__, __LINE__, __FUNCTION__); + } +} + +/** + * @function utc_nnstreamer_single_cleanup + * @description Called after each test + * @parameter NA + * @return NA + */ +void utc_nnstreamer_single_cleanup(void) +{ + FPRINTF("[%s][%d][%s] [CLEANUP]\\n", __FILE__, __LINE__, __FUNCTION__); + g_free (test_model); +} + +/** + * @testcase utc_ml_single_open_p + * @since_tizen 5.5 + * @description Test NNStreamer single open + */ +int utc_ml_single_open_p (void) +{ + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_single_open_n + * @since_tizen 5.5 + * @description Test NNStreamer single open, failure case + */ +int utc_ml_single_open_n (void) +{ + status = ml_single_open (&single, "wrong_file_name", NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_single_close_p + * @since_tizen 5.5 + * @description Test NNStreamer single close + */ +int utc_ml_single_close_p (void) +{ + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_single_close_n + * @since_tizen 5.5 + * @description Test NNStreamer single close, failure case + */ +int utc_ml_single_close_n (void) +{ + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + return 0; +} + +/** + * @testcase utc_ml_single_invoke_p + * 
@since_tizen 5.5 + * @description Test NNStreamer invoke model + */ +int utc_ml_single_invoke_p (void) +{ + ml_tensors_info_h in_info, out_info; + ml_tensors_data_h input, output; + ml_tensor_dimension in_dim, out_dim; + + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + ml_tensors_info_create (&in_info); + ml_tensors_info_create (&out_info); + + in_dim[0] = 3; + in_dim[1] = 224; + in_dim[2] = 224; + in_dim[3] = 1; + ml_tensors_info_set_count (in_info, 1); + ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim); + + out_dim[0] = 1001; + out_dim[1] = 1; + out_dim[2] = 1; + out_dim[3] = 1; + ml_tensors_info_set_count (out_info, 1); + ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8); + ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim); + + status = ml_single_open (&single, test_model, in_info, out_info, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + input = output = NULL; + + /* generate dummy data */ + status = ml_tensors_data_create (in_info, &input); + assert_eq (status, ML_ERROR_NONE); + assert (input != NULL); + + status = ml_single_invoke (single, input, &output); + assert_eq (status, ML_ERROR_NONE); + assert (output != NULL); + + ml_tensors_data_destroy (output); + ml_tensors_data_destroy (input); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (in_info); + ml_tensors_info_destroy (out_info); + + return 0; +} + +/** + * @testcase utc_ml_single_invoke_n + * @since_tizen 5.5 + * @description Test NNStreamer invoke model, failure case + */ +int utc_ml_single_invoke_n (void) +{ + ml_tensors_data_h input, output; + + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + input = output = NULL; + status = ml_single_invoke (single, input, &output); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_single_get_input_info_p + * @since_tizen 5.5 + * @description Test NNStreamer get input tensor's info + */ +int utc_ml_single_get_input_info_p (void) +{ + ml_tensors_info_h ts_info; + + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + ml_tensors_info_create (&ts_info); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + /* input tensor in filter */ + status = ml_single_get_input_info (single, &ts_info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_single_get_input_info_n + * @since_tizen 5.5 + * @description Test NNStreamer get input tensor's info, failure case + */ +int utc_ml_single_get_input_info_n (void) +{ + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + /* input tensor in filter */ + status = ml_single_get_input_info (single, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_single_get_output_info_p + * @since_tizen 
5.5 + * @description Test NNStreamer get output tensor's info + */ +int utc_ml_single_get_output_info_p (void) +{ + ml_tensors_info_h ts_info; + + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + ml_tensors_info_create (&ts_info); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + /* output tensor in filter */ + status = ml_single_get_output_info (single, &ts_info); + assert_eq (status, ML_ERROR_NONE); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} + +/** + * @testcase utc_ml_single_get_output_info_n + * @since_tizen 5.5 + * @description Test NNStreamer get output tensor's info, failure case + */ +int utc_ml_single_get_output_info_n (void) +{ + assert (g_file_test (test_model, G_FILE_TEST_EXISTS)); + + status = ml_single_open (&single, test_model, NULL, NULL, + ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + assert_eq (status, ML_ERROR_NONE); + + /* output tensor in filter */ + status = ml_single_get_output_info (single, NULL); + assert_eq (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_single_close (single); + assert_eq (status, ML_ERROR_NONE); + + return 0; +} \ No newline at end of file