std::cout << test_name << " inference test : backend = " << backend_name
<< ", target device = " << Target_Formats[target_devices]
<< std::endl;
+
+ int backend_type = -1;
+
+ // The ONE runtime is serviced through the MLAPI backend, so remap
+ // the backend name "one" to "mlapi" and select ONE via backend_type.
+ if (backend_name == "one") {
+ backend_name = "mlapi";
+ backend_type = INFERENCE_BACKEND_ONE;
+ }
+
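+ // A backend_type of -1 means "not specified"; it is only overridden
+ // by the "one" remap above.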
inference_engine_config config = { .backend_name = backend_name,
- .backend_type = -1,
+ .backend_type = backend_type,
.target_devices = target_devices };
auto engine = std::make_unique<InferenceEngineCommon>();
return;
}
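+ // Map "mlapi" back to the requested "one" name so the profile dump
+ // file below is tagged with the backend the caller asked for.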
+ if (backend_type == INFERENCE_BACKEND_ONE)
+ backend_name = "one";
+
ret = engine->DumpProfileToFile("profile_data_" + backend_name +
"_tflite_model.txt");
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
123, 99, 287, 381, 451, 287, 381, 475 }),
// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
// mobilenet based image classification test
+ // ONE via MLAPI.
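+ // These cases run the same .tflite models as the TFLITE section
+ // below, routed through the mlapi backend with the ONE runtime.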
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
// TFLITE.
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU,