test: add test case for Hailo NPU
author Inki Dae <inki.dae@samsung.com>
Tue, 14 Jan 2025 06:00:09 +0000 (15:00 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Jan 2025 06:00:09 +0000 (15:00 +0900)
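
Add a standalone gtest case that runs inference with the hailort backend
on a precompiled YOLOv10s HEF model. A standalone tflite CPU test that
dumps per-layer quantization parameters is added alongside it.
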
Change-Id: I6a6c6b1822274fe8dcd2f9edbd55f1f9325b8ed7
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_profiler.cpp

index ea6a899fa44dfb02f33afd14eae4ec642d150c8a..f6502ae4befc5674d80ef7d0cc574b38f63eafa8 100644 (file)
@@ -90,6 +90,7 @@
                          123, 99,  287, 381, 451, 287, 381, 475 }
 
 using namespace testing;
+using namespace std;
 
 typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
                                   int, int, int, std::vector<std::string>,
@@ -292,6 +293,109 @@ TEST_P(InferenceEngineTfliteTest, Inference)
        models.clear();
 }
 
+TEST(HAILORT, Inference)
+{
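+       // Use the hailort backend; the Hailo NPU is addressed through the CUSTOM target device type.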
+       std::string backend_name = "hailort";
+       int target_devices = INFERENCE_TARGET_CUSTOM;
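+       // Precompiled HEF (Hailo Executable Format) model; the path is specific to the local test setup.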
+       std::string model_path = "/root/hefs/yolov10s.hef";
+
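+       // Bind the backend by name; backend_type is left unset (-1).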
+       inference_engine_config config = { .backend_name = backend_name,
+                                                                          .backend_type = -1,
+                                                                          .target_devices = target_devices };
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
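+       // Load the engine configuration file before binding the backend.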
+       int ret = engine->LoadConfigFile();
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->BindBackend(&config);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->SetTargetDevices(target_devices);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       std::vector<std::string> models;
+       models.push_back(model_path);
+
+       cout << "model file = " << models[0] << endl;
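+       // Load the HEF through the HailoRT model format.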
+       ret = engine->Load(models, INFERENCE_MODEL_HAILORT);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property output_property {};
+
+       ret = engine->GetOutputLayerProperty(output_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
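+       // Prepare input and output tensor buffers for the loaded model.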
+       IETensorBuffer inputs, outputs;
+       ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->Run(inputs, outputs);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
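+       // Dump the output layer names reported by the backend.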
+       for (auto &layer : output_property.layers) {
+               cout << "layer name = " << layer.first << endl;
+       }
+
+       CleanupTensorBuffers(inputs, outputs);
+
+       engine->UnbindBackend();
+       models.clear();
+}
+
+TEST(TFLITE, Inference)
+{
+       std::string backend_name = "tflite";
+       int target_devices = INFERENCE_TARGET_CPU;
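+       // Quantized (uint8) EfficientDet-Lite1 model from the capi-media-vision resource directory.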
+       std::string model_path = "/usr/share/capi-media-vision/res/efficientdet-lite1-uint8.tflite";
+
+       inference_engine_config config = { .backend_name = backend_name,
+                                                                          .backend_type = -1,
+                                                                          .target_devices = target_devices };
+
+       auto engine = std::make_unique<InferenceEngineCommon>();
+       ASSERT_TRUE(engine);
+
+       int ret = engine->BindBackend(&config);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->SetTargetDevices(target_devices);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       std::vector<std::string> models;
+       models.push_back(model_path);
+
+       ret = engine->Load(models, INFERENCE_MODEL_TFLITE);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       inference_engine_layer_property output_property {};
+
+       ret = engine->GetOutputLayerProperty(output_property);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       IETensorBuffer inputs, outputs;
+       ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+       ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+       ret = engine->Run(inputs, outputs);
+       EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
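+       // Print per-layer quantization parameters (scale and zero point).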
+       for (auto &layer : output_property.layers) {
+               auto &tensor_info = layer.second;
+               cout << "layer name = " << layer.first << "scale = " << tensor_info.scale << " zero point = " << tensor_info.zero_point << endl;
+       }
+
+       CleanupTensorBuffers(inputs, outputs);
+
+       engine->UnbindBackend();
+       models.clear();
+}
+
 INSTANTIATE_TEST_CASE_P(
                Opensource, InferenceEngineTfliteTest,
                testing::Values(