From: Kwanghoon Son
Date: Thu, 31 Oct 2024 05:21:12 +0000 (+0900)
Subject: Add test and resource
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=refs%2Fheads%2Fbug%2Fquantization;p=platform%2Fcore%2Fmultimedia%2Finference-engine-interface.git

Add test and resource

Change-Id: I1a663b4cd31e06666d8baba0740c215ddc19f819
Signed-off-by: Kwanghoon Son
---

diff --git a/efficientdet-lite1-uint8.tflite b/efficientdet-lite1-uint8.tflite
new file mode 100644
index 0000000..19195fd
Binary files /dev/null and b/efficientdet-lite1-uint8.tflite differ
diff --git a/test/src/inference_engine_profiler.cpp b/test/src/inference_engine_profiler.cpp
index c434cf5..b0ef26d 100644
--- a/test/src/inference_engine_profiler.cpp
+++ b/test/src/inference_engine_profiler.cpp
@@ -870,6 +870,69 @@ TEST_P(InferenceEngineDldtTest, Inference)
 	models.clear();
 }
 
+TEST(TFLITE, Inference)
+{
+	printf("tc v2\n");
+	std::string backend_name = "tflite";
+	int target_devices = INFERENCE_TARGET_CPU;
+	std::string model_path = "/root/efficientdet-lite1-uint8.tflite";
+	/* The model takes a 384x384x3 uint8 input tensor. */
+	unsigned char feature_input[384 * 384 * 3];
+
+	inference_engine_config config = { .backend_name = backend_name,
+									   .backend_type = -1,
+									   .target_devices = target_devices };
+
+	auto engine = std::make_unique<InferenceEngineCommon>();
+	ASSERT_TRUE(engine);
+
+	int ret = engine->BindBackend(&config);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	ret = engine->SetTargetDevices(target_devices);
+	EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	std::vector<std::string> models;
+	models.push_back(model_path);
+
+	ret = engine->Load(models, INFERENCE_MODEL_TFLITE);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	IETensorBuffer inputs, outputs;
+	ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	std::cout << inputs.size() << std::endl;
+
+	printf("input address = %p\n", inputs["serving_default_x:0"].buffer);
+	printf("output address = %p\n", outputs["StatefulPartitionedCall:0"].buffer);
+
+	for (int k = 0; k < 10; k++)
+	{
+		printf("iter %d\n", k);
+		/* Random uint8 values are already in the quantized domain. */
+		for (int i = 0; i < 384 * 384 * 3; ++i)
+		{
+			feature_input[i] = static_cast<unsigned char>(rand() % 256);
+		}
+		memcpy(inputs["serving_default_x:0"].buffer, feature_input, sizeof(feature_input));
+
+		ret = engine->Run(inputs, outputs);
+		EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+		auto *ori_buf = static_cast<float *>(outputs["StatefulPartitionedCall:0"].buffer);
+		printf("input address = %p\n", inputs["serving_default_x:0"].buffer);
+		printf("output address = %p\n", outputs["StatefulPartitionedCall:0"].buffer);
+
+		std::cout << *ori_buf << std::endl;
+	}
+
+	CleanupTensorBuffers(inputs, outputs);
+
+	engine->UnbindBackend();
+	models.clear();
+}
+
 INSTANTIATE_TEST_CASE_P(
 		Opensource, InferenceEngineTfliteTest,
 		testing::Values(
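
A note on the quantized input path exercised above: efficientdet-lite1-uint8.tflite takes uint8 tensor data, so a caller that starts from float pixel values must apply the model's affine quantization before filling the input buffer (the test sidesteps this by generating random bytes that are already in the quantized domain). Below is a minimal standalone sketch of that conversion; the scale and zero_point values are examples only, the real ones come from the model's input tensor metadata.

// Affine quantization used by uint8 TFLite tensors:
// q = round(x / scale) + zero_point, clamped to [0, 255].
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

static uint8_t QuantizeToUint8(float x, float scale, int zero_point)
{
	int q = static_cast<int>(std::lround(x / scale)) + zero_point;
	return static_cast<uint8_t>(std::clamp(q, 0, 255));
}

int main(void)
{
	// Example parameters only (hypothetical); query the model for the
	// actual input tensor's scale/zero_point.
	const float scale = 1.0f / 128.0f;
	const int zero_point = 128;

	const float pixels[] = { -1.0f, -0.5f, 0.0f, 0.5f, 1.0f };
	for (float p : pixels)
		printf("%+.3f -> %u\n", p, QuantizeToUint8(p, scale, zero_point));

	return 0;
}

With these example parameters, -1.0 maps to 0 and 1.0 clamps to 255, i.e. float inputs in [-1, 1) cover the uint8 range exactly.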