class InferenceEngineCommonTest : public testing::TestWithParam<ParamType> { };
class InferenceEngineCommonTest_2 : public testing::TestWithParam<ParamType_Load> { };
+// Renamed from InferenceEngineCommonTest_3: this fixture runs the full
+// Inference test against tflite-format models (see the ParamType_Infer
+// values below), so give it a descriptive name instead of a numeric suffix.
-class InferenceEngineCommonTest_3 : public testing::TestWithParam<ParamType_Infer> { };
+class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
std::map<std::string, int> Model_Formats = {
{ "caffemodel", INFERENCE_MODEL_CAFFE },
return ret;
}
-TEST_P(InferenceEngineCommonTest_3, Inference)
+TEST_P(InferenceEngineTfliteTest, Inference)
{
std::string backend_name;
int target_devices;
+// Instantiate the (backend name, target device) combinations for the common
+// engine tests; this change adds the tflite backend alongside armnn.
+// NOTE(review): INSTANTIATE_TEST_CASE_P is deprecated in newer GoogleTest
+// releases in favour of INSTANTIATE_TEST_SUITE_P -- confirm the gtest version
+// shipped with this project before migrating.
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest,
testing::Values(
// parameter order : backend name, target device
+ // ARMNN.
ParamType("armnn", INFERENCE_TARGET_CPU),
- ParamType("armnn", INFERENCE_TARGET_GPU)
+ ParamType("armnn", INFERENCE_TARGET_GPU),
+ // TFLITE.
+ ParamType("tflite", INFERENCE_TARGET_CPU),
+ ParamType("tflite", INFERENCE_TARGET_GPU)
/* TODO */
)
);
testing::Values(
// parameter order : backend name, target device, model path/s
// mobilenet based image classification model loading test
+ // ARMNN.
ParamType_Load("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
- ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
+ ParamType_Load("armnn", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ // TFLITE.
+ ParamType_Load("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ ParamType_Load("tflite", INFERENCE_TARGET_GPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" })
/* TODO */
)
);
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCommonTest_3,
+INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTfliteTest,
testing::Values(
// parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
// mobilenet based image classification test
+ // ARMNN.
ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
// quantized mobilenet based image classification test
76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // (same parameter order as documented at the top of this Values list)
+ // mobilenet based image classification test
+ // TFLITE.
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
+ // object detection test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
+ 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 })
/* TODO */
)