From 75346552b36f34cc0b47ff20280650b768f38194 Mon Sep 17 00:00:00 2001
From: Parichay Kapoor
Date: Tue, 11 Feb 2020 19:24:21 +0900
Subject: [PATCH] [armnn/hw] Added supported accelerators

Added supported accelerators for armnn and added corresponding
unittests using a single API.
Tested setting accelerators with a caffe model on the device.

Signed-off-by: Parichay Kapoor
---
 .../tensor_filter/tensor_filter_armnn.cc | 62 +++++++++++++++++++---
 tests/tizen_capi/unittest_tizen_capi.cc  | 48 +++++++++++++++++
 2 files changed, 102 insertions(+), 8 deletions(-)

diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc b/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc
index 330c328..0b72085 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_armnn.cc
@@ -38,6 +38,15 @@
 #include
 #include

+static const gchar *armnn_accl_support[] = {
+  ACCL_AUTO_STR,
+  ACCL_DEFAULT_STR,
+  ACCL_CPU_NEON_STR,
+  ACCL_CPU_STR,
+  ACCL_GPU_STR,
+  NULL
+};
+
 /**
  * @brief ring cache structure
  */
@@ -82,6 +91,7 @@ private:
       GstTensorsInfo * tensorMeta);
   tensor_type getGstTensorType (armnn::DataType armType);
   int getTensorDim (int tensor_idx, tensor_dim dim);
+  armnn::Compute getBackend (const accl_hw hw);
 };

 /**
@@ -100,8 +110,8 @@ void fini_filter_armnn (void) __attribute__ ((destructor));
  * @param hw : hardware accelerator to be used at backend
  */
 ArmNNCore::ArmNNCore (const char *_model_path, accl_hw hw):
-runtime (nullptr, &armnn::IRuntime::Destroy),
-network (armnn::INetworkPtr (nullptr, nullptr))
+    accel (hw), runtime (nullptr, &armnn::IRuntime::Destroy),
+    network (armnn::INetworkPtr (nullptr, nullptr))
 {
   model_path = g_strdup (_model_path);

@@ -306,6 +316,26 @@ ArmNNCore::makeNetwork (const GstTensorFilterProperties * prop)
   return -EINVAL;
 }

+armnn::Compute ArmNNCore::getBackend (const accl_hw hw)
+{
+  switch (hw) {
+    case ACCL_GPU:
+      return armnn::Compute::GpuAcc;
+    case ACCL_NONE:
+      /** intended */
+    case ACCL_CPU:
+      return armnn::Compute::CpuRef;
+    case ACCL_CPU_NEON:
+      return armnn::Compute::CpuAcc;
+    case ACCL_AUTO:
+      /** intended */
+    case ACCL_DEFAULT:
+      /** intended */
+    default:
+      return armnn::Compute::CpuAcc;
+  }
+}
+
 /**
  * @brief load the armnn model
 * @note the model will be loaded
@@ -334,11 +364,12 @@ ArmNNCore::loadModel (const GstTensorFilterProperties * prop)
     throw std::runtime_error ("Error in building the network.");

   /* Optimize the network for the given runtime */
-  /** TODO: set the backend based on config received */
-  std::vector < armnn::BackendId > backends = {
-    armnn::Compute::CpuAcc};
-  /** TODO: add option to enable FP32 to FP16 with OptimizerOptions */
-  /** TODO: add GPU based optimizations */
+  std::vector < armnn::BackendId > backends = {getBackend (accel)};
+  /**
+   * TODO: add option to enable FP32 to FP16 with OptimizerOptions
+   * TODO: add GPU based optimizations
+   * Support these with custom_properties from tensor_filter
+   */
   runtime = armnn::IRuntime::Create (options);
   if (!runtime)
     throw std::runtime_error ("Error creating runtime");
@@ -600,7 +631,7 @@ static int
 armnn_open (const GstTensorFilterProperties * prop, void **private_data)
 {
   ArmNNCore *core;
-  accl_hw hw = ACCL_DEFAULT;
+  accl_hw hw;

   core = static_cast < ArmNNCore * >(*private_data);

@@ -615,6 +646,7 @@ armnn_open (const GstTensorFilterProperties * prop, void **private_data)
   if (prop->model_files[0] == NULL)
     return -EINVAL;

+  hw = parse_accl_hw (prop->accl_str, armnn_accl_support);
   try {
     core = new ArmNNCore (prop->model_files[0], hw);
   }
@@ -695,12 +727,26 @@ armnn_getOutputDim (const GstTensorFilterProperties * prop,
   return core->getOutputTensorDim (info);
 }

+/**
+ * @brief Check support of the backend
+ * @param hw: backend to check support of
+ */
+static int
+armnn_checkAvailability (accl_hw hw)
+{
+  if (g_strv_contains (armnn_accl_support, get_accl_hw_str (hw)))
+    return 0;
+
+  return -ENOENT;
+}
+
 static gchar filter_subplugin_armnn[] = "armnn";

 static GstTensorFilterFramework NNS_support_armnn = {
   .version = GST_TENSOR_FILTER_FRAMEWORK_V0,
   .open = armnn_open,
   .close = armnn_close,
+  .checkAvailability = armnn_checkAvailability,
 };

 /** @brief Initialize this object for tensor_filter subplugin runtime register */
diff --git a/tests/tizen_capi/unittest_tizen_capi.cc b/tests/tizen_capi/unittest_tizen_capi.cc
index cfe8e0b..b6dffa5 100644
--- a/tests/tizen_capi/unittest_tizen_capi.cc
+++ b/tests/tizen_capi/unittest_tizen_capi.cc
@@ -1507,6 +1507,54 @@ TEST (nnstreamer_capi_util, availability_fail_05_n)
 }
 #endif /** ENABLE_MOVIDIUS_NCSDK2 */

+#ifdef ENABLE_ARMNN
+/**
+ * @brief Test NNStreamer Utility for checking availability of the armnn backend
+ */
+TEST (nnstreamer_capi_util, availability_06)
+{
+  bool result;
+  int status;
+
+  status = ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_ANY, &result);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (result, true);
+
+  status = ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_AUTO, &result);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (result, true);
+
+  status = ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_CPU, &result);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (result, true);
+
+  status = ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_CPU_NEON, &result);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (result, true);
+
+  status = ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_GPU, &result);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (result, true);
+}
+
+/**
+ * @brief Test NNStreamer Utility for checking unavailability of accelerators unsupported by the armnn backend
+ */
+TEST (nnstreamer_capi_util, availability_fail_06_n)
+{
+  bool result;
+  int status;
+
+  status = ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_NPU, &result);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (result, false);
+
+  status = ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_NPU_EDGE_TPU, &result);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (result, false);
+}
+#endif /** ENABLE_ARMNN */
+
 /**
  * @brief Test NNStreamer Utility for checking tensors info handle
  */
--
2.7.4
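For reference, the availability check this patch wires into armnn_checkAvailability () is what applications observe through ml_check_nnfw_availability (), exactly as the new unittests exercise it. Below is a minimal usage sketch of that API; the header name and build setup are assumptions (a Tizen/nnstreamer build exposing the ML C API), and only enum values already used by the tests above appear.

/* Usage sketch (not part of the patch): probe armnn backend availability
 * through the ML C API, mirroring availability_06 / availability_fail_06_n.
 * Assumes <nnstreamer.h> and the nnstreamer C API library are available. */
#include <stdio.h>
#include <stdbool.h>
#include <nnstreamer.h>

int
main (void)
{
  bool available = false;

  /* GPU is listed in armnn_accl_support, so this should report true */
  if (ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_GPU,
          &available) != ML_ERROR_NONE)
    return 1;
  printf ("armnn + GPU: %s\n", available ? "available" : "not available");

  /* NPU is not in armnn_accl_support, so this should report false */
  if (ml_check_nnfw_availability (ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_NPU,
          &available) != ML_ERROR_NONE)
    return 1;
  printf ("armnn + NPU: %s\n", available ? "available" : "not available");

  return 0;
}

In a pipeline, the same accelerator choice reaches parse_accl_hw () through tensor_filter's accelerator property (something like accelerator=true:gpu; the exact string format is a platform assumption). As getBackend () above encodes, ACCL_NONE/ACCL_CPU map to the CpuRef reference backend, while NEON and the auto/default cases map to the optimized CpuAcc backend.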