std::vector<int> >
ParamType_Infer;
+
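+// Tuple layout : backend name, target device, CLTuner active flag,
+// CLTuner update flag, CLTuner tuning mode, test type, iteration count,
+// tensor data type, input image path/s, height, width, channel count,
+// input layer names, output layer names, model path/s, inference result.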
+typedef std::tuple<std::string, int, bool, bool, inference_engine_cltuner_mode_e,
+ int, int, int, std::vector<std::string>,
+ int, int, int, std::vector<std::string>,
+ std::vector<std::string>, std::vector<std::string>,
+ std::vector<int> >
+ ParamType_CLTuner;
+
+class InferenceEngineTfliteCLTunerTest : public testing::TestWithParam<ParamType_CLTuner>
+{};
class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer>
{};
class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer>
ret = engine->GetBackendCapacity(&capacity);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- if (capacity.cltuner_supported) {
- inference_engine_cltuner cltuner;
- cltuner.active = true;
- cltuner.update = false;
- cltuner.tuning_mode = static_cast<inference_engine_cltuner_mode_e>(INFERENCE_ENGINE_CLTUNER_READ);
-
- ret = engine->SetCLTuner(&cltuner);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- }
-
ret = engine->SetTargetDevices(target_devices);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
models.clear();
}
+
+TEST_P(InferenceEngineTfliteCLTunerTest, Inference)
+{
+ std::string backend_name;
+ int target_devices;
+ bool active;
+ bool update;
+ inference_engine_cltuner_mode_e tuning_mode;
+ int test_type;
+ int iteration;
+ int tensor_type;
+ std::vector<std::string> image_paths;
+ size_t height;
+ size_t width;
+ size_t ch;
+ std::vector<std::string> input_layers;
+ std::vector<std::string> output_layers;
+ std::vector<std::string> model_paths;
+ std::vector<int> answers;
+
+ std::tie(backend_name, target_devices, active, update, tuning_mode, test_type,
+ iteration, tensor_type, image_paths, height, width, ch, input_layers,
+ output_layers, model_paths, answers) = GetParam();
+
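+	// Clamp the iteration count so inference runs at least once.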
+ if (iteration < 1) {
+ iteration = 1;
+ }
+
+ MachineCapacity *Cap = GetMachineCapacity();
+ if (Cap == NULL) {
+ std::cout << "Failed to get machine capacity" << std::endl;
+ return;
+ }
+
+	// If the current machine doesn't support the inference engine, skip this test.
+ if (Cap->available == false) {
+ return;
+ }
+
+	// If the current machine doesn't support OpenCL, skip inference on the GPU.
+ if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+ return;
+ }
+
+ std::string test_name;
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ test_name.append("Image classification");
+ break;
+ case TEST_OBJECT_DETECTION:
+ test_name.append("Object detection");
+ break;
+ case TEST_FACE_DETECTION:
+ test_name.append("Face detection");
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ test_name.append("Facial landmark detection");
+ break;
+ case TEST_POSE_ESTIMATION:
+ test_name.append("Pose estimation");
+ break;
+ case TEST_AIC_HAND_GESTURE_1:
+ test_name.append("AIC Hand Gesture detection 1");
+ break;
+ case TEST_AIC_HAND_GESTURE_2:
+ test_name.append("AIC Hand Gesture detection 2");
+ break;
+ }
+
+ std::cout << test_name << " inference test : backend = " << backend_name
+ << ", target device = " << Target_Formats[target_devices]
+ << ", CLTuning mode = " << tuning_mode << std::endl;
+
+ int backend_type = -1;
+
+	// If the backend name is "one" then change it to "mlapi"
+	// and set backend_type to INFERENCE_BACKEND_ONE.
+ if (backend_name.compare("one") == 0) {
+ backend_name = "mlapi";
+ backend_type = INFERENCE_BACKEND_ONE;
+ }
+
+ inference_engine_config config = { .backend_name = backend_name,
+ .backend_type = backend_type,
+ .target_devices = target_devices };
+
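+	// Create the engine wrapper and enable profiling; profile data goes
+	// to the dump file configured below.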
+ auto engine = std::make_unique<InferenceEngineCommon>();
+ ASSERT_TRUE(engine);
+
+ int ret = engine->EnableProfiler(true);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ if (backend_type == INFERENCE_BACKEND_ONE)
+ backend_name = "one";
+
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+ "_" + Target_Formats[target_devices] +
+ "_tflite_model.txt");
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->LoadConfigFile();
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->BindBackend(&config);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_capacity capacity;
+ ret = engine->GetBackendCapacity(&capacity);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
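+	// Apply the parameterized CLTuner configuration only when the backend
+	// reports CLTuner support.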
+ if (capacity.cltuner_supported) {
+ inference_engine_cltuner cltuner;
+ cltuner.active = active;
+ cltuner.update = update;
+ cltuner.tuning_mode = tuning_mode;
+
+ ret = engine->SetCLTuner(&cltuner);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ }
+
+ ret = engine->SetTargetDevices(target_devices);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
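+	// GetModelInfo() resolves the model format to load from the given model paths.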
+ std::vector<std::string> models;
+ int model_type = GetModelInfo(model_paths, models);
+ ASSERT_NE(model_type, -1);
+
+ inference_engine_tensor_info tensor_info = {
+ { 1, ch, height, width },
+ INFERENCE_TENSOR_SHAPE_NCHW,
+ static_cast<inference_tensor_data_type_e>(tensor_type),
+ static_cast<size_t>(1 * ch * height * width)
+ };
+
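+	// Every input layer shares the same NCHW tensor description built above.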
+ inference_engine_layer_property input_property;
+
+ for (auto &input : input_layers) {
+ input_property.layer_names.push_back(input);
+ input_property.tensor_infos.push_back(tensor_info);
+ }
+
+ ret = engine->SetInputLayerProperty(input_property);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ inference_engine_layer_property output_property = { output_layers, {} };
+
+ ret = engine->SetOutputLayerProperty(output_property);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ ret = engine->Load(models, (inference_model_format_e) model_type);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+ std::vector<inference_engine_tensor_buffer> inputs, outputs;
+ ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
+ ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+
+	// Copy input image tensor data from the given files into the input tensor buffers.
+ for (int i = 0; i < (int) image_paths.size(); ++i) {
+ CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
+ }
+
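+	// Run inference for the requested number of iterations; with CLTuner
+	// active the first run may include tuning overhead.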
+ for (int repeat = 0; repeat < iteration; ++repeat) {
+ ret = engine->Run(inputs, outputs);
+ EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
+ }
+
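+	// Collect the raw output tensors into a single result structure for verification.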
+ tensor_t result;
+ FillOutputResult(engine.get(), outputs, result);
+
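+	// Check the inference output against the expected answers for each test type.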
+ switch (test_type) {
+ case TEST_IMAGE_CLASSIFICATION:
+ ret = VerifyImageClassificationResults(result, answers[0]);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_OBJECT_DETECTION:
+		// 1072 and 1608 are the fixed height and width of the dumped image.
+ ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACE_DETECTION:
+		// 1152 and 1536 are the fixed height and width of the dumped image.
+ ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_FACIAL_LANDMARK_DETECTION:
+ // TODO.
+ break;
+ case TEST_POSE_ESTIMATION:
+		// 563 and 750 are the fixed height and width of the dumped image.
+ ret = VerifyPoseEstimationResults(result, answers, 563, 750);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_AIC_HAND_GESTURE_1:
+ ret = VerifyAICHandGesture1Results(outputs);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_AIC_HAND_GESTURE_2:
+ ret = VerifyAICHandGesture2Results(outputs, answers);
+ EXPECT_EQ(ret, 1);
+ break;
+ }
+
+ CleanupTensorBuffers(inputs, outputs);
+
+ engine->UnbindBackend();
+ models.clear();
+}
+
TEST_P(InferenceEngineCaffeTest, Inference)
{
std::string backend_name;
/* TODO */
));
+INSTANTIATE_TEST_CASE_P(
+ Opensource, InferenceEngineTfliteCLTunerTest,
+ testing::Values(
+		// parameter order : backend name, target device, CLTuner active flag, CLTuner update flag, CLTuner tuning mode, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // ARMNN.
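+		// Each tuning mode below first generates a tuned file (update = true),
+		// then a READ-mode run consumes it.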
+ ParamType_CLTuner(
+ "armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_RAPID,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_CLTuner(
+ "armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_CLTuner(
+ "armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_NORMAL,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_CLTuner(
+ "armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_CLTuner(
+ "armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_CLTuner(
+ "armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 })
+ /* TODO */
+ ));
+
INSTANTIATE_TEST_CASE_P(
Opensource, InferenceEngineCaffeTest,
testing::Values(