#include <string.h>
#include <tuple>
+#include <string>
+#include <vector>
+#include <map>
+#include <iostream>
#include "gtest/gtest.h"
using namespace InferenceEngineInterface::Common;
typedef std::tuple<std::string, int> ParamType;
+typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Load;
-class InferenceEngineCommonTest : public testing::TestWithParam<ParamType>
-{ };
+class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
+class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
+
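+// Maps a model file extension to the matching inference_model_format_e value.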
+std::map<std::string, int> Model_Formats = {
+ { "caffemodel", INFERENCE_MODEL_CAFFE },
+ { "pb", INFERENCE_MODEL_TF },
+ { "tflite", INFERENCE_MODEL_TFLITE },
+ { "t7", INFERENCE_MODEL_TORCH },
+ { "weights", INFERENCE_MODEL_DARKNET },
+ { "bin", INFERENCE_MODEL_DLDT },
+ { "onnx", INFERENCE_MODEL_ONNX }
+};
TEST_P(InferenceEngineCommonTest, Bind)
{
delete engine;
}
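+
+// Resolve the model format from the extension of model_paths[0] and collect
+// the path(s) that Load() expects for that format into 'models'.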
+int GetModelInfo(std::vector<std::string> &model_paths, std::vector<std::string> &models)
+{
+ std::string model_path = model_paths[0];
+ std::string ext_str = model_path.substr(model_path.find_last_of(".") + 1);
+ std::map<std::string, int>::iterator key = Model_Formats.find(ext_str);
+ int ret = key != Model_Formats.end() ? key->second : -1;
+ EXPECT_NE(ret, -1);
+
+ if (ret == -1) {
+ return ret;
+ }
+
+ switch (ret) {
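+ // Formats loaded from two files (e.g. weights + network description):
+ // pass both of the given paths on to the backend.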
+ case INFERENCE_MODEL_CAFFE:
+ case INFERENCE_MODEL_TF:
+ case INFERENCE_MODEL_DARKNET:
+ case INFERENCE_MODEL_DLDT:
+ case INFERENCE_MODEL_ONNX:
+ models.push_back(model_paths[0]);
+ models.push_back(model_paths[1]);
+ break;
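+ // Single-file formats: only the first path is needed.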
+ case INFERENCE_MODEL_TFLITE:
+ case INFERENCE_MODEL_TORCH:
+ models.push_back(model_paths[0]);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
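+// Full load scenario: bind the backend, query its capacity, set the target
+// device(s), then load the model file(s) resolved by GetModelInfo().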
+TEST_P(InferenceEngineCommonTest_2, Load)
+{
+ std::string backend_name;
+ int target_devices;
+ std::vector<std::string> model_paths;
+
+ std::tie(backend_name, target_devices, model_paths) = GetParam();
+
+ std::cout << "backend : " << backend_name << "\n";
+ std::cout << "target device : " << target_devices << "\n";
+
+ inference_engine_config config = {
+ .backend_name = backend_name,
+ .target_devices = target_devices
+ };
+
+ InferenceEngineCommon *engine = new InferenceEngineCommon(&config);
+ ASSERT_TRUE(engine);
+
+ int ret = engine->BindBackend(&config);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
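+ // Query the backend capacity and select the requested target device(s)
+ // before loading the model.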
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector<std::string> models;
+ int model_type = GetModelInfo(model_paths, models);
+ ASSERT_NE(model_type, -1);
+
+ ret = engine->Load(models, (inference_model_format_e)model_type);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ engine->UnbindBackend();
+
+ delete engine;
+}
+
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
testing::Values(
- ParamType("armnn", INFERENCE_TARGET_CPU)
+ ParamType("armnn", INFERENCE_TARGET_CPU),
+ ParamType("armnn", INFERENCE_TARGET_GPU),
+ ParamType("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU)
+ /* TODO */
+ )
+ );
+
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_2,
+ testing::Values(
+ // backend name, target device, model path(s)
+ ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ ParamType_Load("armnn", INFERENCE_TARGET_GPU | INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
/* TODO */
)
);