123, 99, 287, 381, 451, 287, 381, 475 }
using namespace testing;
+using namespace std;
typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
int, int, int, std::vector<std::string>,
models.clear();
}
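+// Runs a single inference through the HailoRT backend and prints the
+// output layer names reported by the backend.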
+TEST(HAILORT, Inference)
+{
+ std::string backend_name = "hailort";
+ int target_devices = INFERENCE_TARGET_CUSTOM;
+ std::string model_path = "/root/hefs/yolov10s.hef";
+
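+	// backend_type -1 lets the engine resolve the backend by name;
+	// INFERENCE_TARGET_CUSTOM selects the vendor-specific device (here, the Hailo NPU).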
+ inference_engine_config config = { .backend_name = backend_name,
+ .backend_type = -1,
+ .target_devices = target_devices };
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
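+	// Load the engine's backend configuration file before binding.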
+ int ret = engine->LoadConfigFile();
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->BindBackend(&config);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector<std::string> models;
+ models.push_back(model_path);
+
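+	// HEF is Hailo's precompiled model format; it is loaded through the
+	// common Load() entry point like any other backend model.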
+ cout << "model file = " << models[0] << endl;
+ ret = engine->Load(models, INFERENCE_MODEL_HAILORT);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property output_property {};
+
+ ret = engine->GetOutputLayerProperty(output_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
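+	// Allocate input/output tensor buffers sized from the layer properties
+	// reported by the backend.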
+ IETensorBuffer inputs, outputs;
+ ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
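+	// Run a single synchronous inference over the prepared buffers.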
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
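+	// Print the output layer names discovered by GetOutputLayerProperty().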
+ for (auto &layer : output_property.layers) {
+ cout << "layer name = " << layer.first << endl;
+ }
+
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+}
+
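+// Same scenario against the TFLite CPU backend with a quantized
+// EfficientDet-Lite1 model; additionally prints each output tensor's
+// quantization parameters.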
+TEST(TFLITE, Inference)
+{
+ std::string backend_name = "tflite";
+ int target_devices = INFERENCE_TARGET_CPU;
+ std::string model_path = "/usr/share/capi-media-vision/res/efficientdet-lite1-uint8.tflite";
+
+ inference_engine_config config = { .backend_name = backend_name,
+ .backend_type = -1,
+ .target_devices = target_devices };
+
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = engine->BindBackend(&config);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector<std::string> models;
+ models.push_back(model_path);
+
+ ret = engine->Load(models, INFERENCE_MODEL_TFLITE);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property output_property {};
+
+ ret = engine->GetOutputLayerProperty(output_property);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ IETensorBuffer inputs, outputs;
+ ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
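+	// Quantized outputs carry per-tensor scale/zero-point, which are needed
+	// to dequantize the raw integer results to real values.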
+ for (auto &layer : output_property.layers) {
+ auto &tensor_info = layer.second;
+		cout << "layer name = " << layer.first << ", scale = " << tensor_info.scale << ", zero point = " << tensor_info.zero_point << endl;
+ }
+
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+}
+
INSTANTIATE_TEST_CASE_P(
Opensource, InferenceEngineTfliteTest,
testing::Values(