models.clear();
}
+TEST(TFLITE, Inference)
+{
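+ // Smoke test: load an EfficientDet-Lite1 (uint8) model through the tflite
+ // backend and run inference repeatedly with random input.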
+ printf("tc v2\n");
+ std::string backend_name = "tflite";
+ int target_devices = INFERENCE_TARGET_CPU;
+ std::string model_path = "/root/efficientdet-lite1-uint8.tflite";
+ float feature_input[384 * 384 * 3];
+
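+ // backend_type -1 leaves the concrete backend type unset so that it is
+ // resolved from backend_name alone.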
+ inference_engine_config config = { .backend_name = backend_name,
+ .backend_type = -1,
+ .target_devices = target_devices };
+
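+ // Create the engine wrapper and bind the requested backend library.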
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = engine->BindBackend(&config);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
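+ // Restrict inference to the CPU target selected above.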
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
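+ // Load() takes a list of model file paths plus the model format.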
+ std::vector<std::string> models;
+ models.push_back(model_path);
+
+ ret = engine->Load(models, INFERENCE_MODEL_TFLITE);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
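+ // Allocate input/output tensor buffers based on the loaded model's
+ // layer information.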
+ IETensorBuffer inputs, outputs;
+ ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::cout << inputs.size() << std::endl;
+
+ printf("input address = %p\n", inputs["predict_images:0:0"].buffer);
+ printf("output address = %p\n", outputs["StatefulPartitionedCall:0"].buffer);
+
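+ // Run ten inference passes, refilling the input with random data each
+ // time. NOTE: the tensor names used here ("serving_default_x:0",
+ // "StatefulPartitionedCall:0") are model-specific and assumed for this
+ // test; they must match the actual input/output layer names of the model.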
+ for (int k = 0; k < 10; ++k)
+ {
+ printf("iter %d\n", k);
+ for (int i = 0; i < 384 * 384 * 3; ++i)
+ {
+ feature_input[i] = (float)(rand()) / (float)(RAND_MAX);
+ }
+ memcpy(inputs["serving_default_x:0"].buffer, feature_input, sizeof(feature_input));
+
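+ // Execute a single inference pass over the prepared buffers.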
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
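+ // Inspect the first output element. NOTE: casting to float assumes the
+ // backend delivers a dequantized output; adjust the element type if the
+ // uint8 model's raw output is still quantized.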
+ auto *ori_buf = static_cast<float *>(outputs["StatefulPartitionedCall:0"].buffer);
+ printf("input address = %p\n", inputs["serving_default_x:0"].buffer);
+ printf("output address = %p\n", outputs["StatefulPartitionedCall:0"].buffer);
+
+ std::cout << *ori_buf << std::endl;
+ }
+
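+ // Release the tensor buffers and detach the backend.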
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+}
+
INSTANTIATE_TEST_CASE_P(
Opensource, InferenceEngineTfliteTest,
testing::Values(