{};
class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer>
{};
-class InferenceEngineHandGestureTest : public testing::TestWithParam<ParamType_Infer>
-{};
TEST_P(InferenceEngineTfliteTest, Inference)
{
case TEST_POSE_ESTIMATION:
test_name.append("Pose estimation");
break;
+ case TEST_AIC_HAND_GESTURE_1:
+ test_name.append("AIC Hand Gesture detection 1");
+ break;
+ case TEST_AIC_HAND_GESTURE_2:
+ test_name.append("AIC Hand Gesture detection 2");
+ break;
}
std::cout << test_name << " inference test : backend = " << backend_name
backend_name = "one";
ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+ "_" + Target_Formats[target_devices] +
"_tflite_model.txt");
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
ret = VerifyPoseEstimationResults(result, answers, 563, 750);
EXPECT_EQ(ret, 1);
break;
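+ // AIC hand gesture verification: model 1 is validated from its raw
+ // outputs alone, while model 2 is also compared against the expected
+ // answers from the test parameters.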
+ case TEST_AIC_HAND_GESTURE_1:
+ ret = VerifyAICHandGesture1Results(outputs);
+ EXPECT_EQ(ret, 1);
+ break;
+ case TEST_AIC_HAND_GESTURE_2:
+ ret = VerifyAICHandGesture2Results(outputs, answers);
+ EXPECT_EQ(ret, 1);
+ break;
}
CleanupTensorBuffers(inputs, outputs);
models.clear();
}
-TEST_P(InferenceEngineHandGestureTest, Inference)
-{
- std::string backend_name;
- int target_devices;
- int test_type;
- int iteration;
- int tensor_type;
- std::vector<std::string> image_paths;
- size_t height;
- size_t width;
- size_t ch;
- std::vector<std::string> input_layers;
- std::vector<std::string> output_layers;
- std::vector<std::string> model_paths;
- std::vector<int> answers;
-
- std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
-
- if (iteration < 1) {
- iteration = 1;
- }
-
- MachineCapacity *Cap = GetMachineCapacity();
- if (Cap == NULL) {
- std::cout << "Failed to get machine capacity" << std::endl;
- return;
- }
-
- // If current machine doesn't support inference engine then skip this test.
- if (Cap->available == false) {
- return;
- }
-
- // If current machine doesn't support OpenCL then skip the inference on GPU.
- if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
- return;
- }
-
- std::string test_name;
- switch (test_type) {
- case TEST_AIC_HAND_GESTURE_1:
- test_name.append("AIC Hand Gesture detection 1");
- break;
- case TEST_AIC_HAND_GESTURE_2:
- test_name.append("AIC Hand Gesture detection 2");
- break;
- }
-
-
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
-
- int backend_type = -1;
-
- // If backend name is "one" then change it to "mlapi"
- // and set backend_type to INFERENCE_BACKEND_ONE.
- if (backend_name.compare("one") == 0) {
- backend_name = "mlapi";
- backend_type = INFERENCE_BACKEND_ONE;
- }
-
- inference_engine_config config = {
- .backend_name = backend_name,
- .backend_type = backend_type,
- .target_devices = target_devices
- };
-
- auto engine = std::make_unique<InferenceEngineCommon>();
- if (engine == nullptr) {
- ASSERT_TRUE(engine);
- return;
- }
-
- int ret = engine->EnableProfiler(true);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- if (backend_type == INFERENCE_BACKEND_ONE)
- backend_name = "one";
-
- ret = engine->DumpProfileToFile("profile_data_" + backend_name +
- "_hand_gesture_model.txt");
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->LoadConfigFile();
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- ret = engine->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_capacity capacity;
- ret = engine->GetBackendCapacity(&capacity);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- ret = engine->SetTargetDevices(target_devices);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
-
- std::vector <std::string> models;
- int model_type = GetModelInfo(model_paths, models);
- if (model_type == -1) {
- ASSERT_NE(model_type, -1);
- return;
- }
-
- inference_engine_layer_property input_property;
- std::vector<std::string>::iterator iter;
-
- for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
- inference_engine_tensor_info tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
- (size_t)(1 * ch * height * width)
- };
-
- input_property.layer_names.push_back(*iter);
- input_property.tensor_infos.push_back(tensor_info);
- }
-
- ret = engine->SetInputLayerProperty(input_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- inference_engine_layer_property output_property;
-
- for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
- output_property.layer_names.push_back(*iter);
- }
-
- ret = engine->SetOutputLayerProperty(output_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- ret = engine->Load(models, (inference_model_format_e)model_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- std::vector<inference_engine_tensor_buffer> inputs, outputs;
- ret = PrepareTensorBuffers(engine.get(), inputs, outputs);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- return;
- }
-
- // Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
- CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
- }
-
- for (int repeat = 0; repeat < iteration; ++repeat) {
- ret = engine->Run(inputs, outputs);
- EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- }
-
- switch (test_type) {
- case TEST_AIC_HAND_GESTURE_1:
- ret = VerifyAICHandGesture1Results(outputs);
- EXPECT_EQ(ret, 1);
- break;
- case TEST_AIC_HAND_GESTURE_2:
- ret = VerifyAICHandGesture2Results(outputs, answers);
- EXPECT_EQ(ret, 1);
- break;
- }
-
- CleanupTensorBuffers(inputs, outputs);
-
- engine->UnbindBackend();
- models.clear();
-}
-
-
INSTANTIATE_TEST_CASE_P(
- Prefix, InferenceEngineTfliteTest,
+ Opensource, InferenceEngineTfliteTest,
testing::Values(
// parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
// mobilenet based image classification test
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
{ 3 }),
- ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/image_classification.bin" }, 224,
- 224, 3, { "input_2" }, { "dense_3/Softmax" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
- { 3 }),
// quantized mobilenet based image classification test
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU,
{ "MobilenetV1/Predictions/Reshape_1" },
{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
- ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_UINT8,
- { "/opt/usr/images/image_classification_q.bin" }, 224,
- 224, 3, { "input" },
- { "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
- { 955 }),
// object detection test
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
"TFLite_Detection_PostProcess:3" },
{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
{ 451, 474, 714, 969 }),
- ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
- { "normalized_input_image_tensor" },
- { "TFLite_Detection_PostProcess",
- "TFLite_Detection_PostProcess:1",
- "TFLite_Detection_PostProcess:2",
- "TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
- { 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
"TFLite_Detection_PostProcess:3" },
{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
{ 727, 225, 960, 555 }),
- ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
- { "normalized_input_image_tensor" },
- { "TFLite_Detection_PostProcess",
- "TFLite_Detection_PostProcess:1",
- "TFLite_Detection_PostProcess:2",
- "TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
- { 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10,
{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
- { "image" },
- { "Convolutional_Pose_Machine/stage_5_out" },
- { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
- 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
- 123, 99, 287, 381, 451, 287, 381, 475 }),
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 }),
// mobilenet based image classification test
- // ONE via MLAPI.
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU,
+ "armnn", INFERENCE_TARGET_GPU,
TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
{ 3 }),
+ // quantized mobilenet based image classification test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/image_classification.bin" }, 224,
- 224, 3, { "input_2" }, { "dense_3/Softmax" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
- { 3 }),
- // quantized mobilenet based image classification test
- ParamType_Infer(
- "one", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_UINT8,
- { "/opt/usr/images/image_classification_q.bin" }, 224,
- 224, 3, { "input" },
- { "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
- { 955 }),
- ParamType_Infer(
- "one", INFERENCE_TARGET_GPU,
+ "armnn", INFERENCE_TARGET_GPU,
TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
{ 955 }),
// object detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
"TFLite_Detection_PostProcess:3" },
{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
{ 451, 474, 714, 969 }),
+ // face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
"TFLite_Detection_PostProcess:1",
"TFLite_Detection_PostProcess:2",
"TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
- { 451, 474, 714, 969 }),
- // face detection test
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 }),
+
+ /*********************************************************************************/
+ // parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // TFLITE.
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
"TFLite_Detection_PostProcess:1",
"TFLite_Detection_PostProcess:2",
"TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
- { 727, 225, 960, 555 }),
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
- { "image" },
- { "Convolutional_Pose_Machine/stage_5_out" },
- { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
- 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
- 123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 }),
+
// mobilenet based image classification test
- // TFLITE.
- ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/image_classification.bin" }, 224,
- 224, 3, { "input_2" }, { "dense_3/Softmax" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
- { 3 }),
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU,
TEST_IMAGE_CLASSIFICATION, 10,
{ 3 }),
// quantized mobilenet based image classification test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_UINT8,
- { "/opt/usr/images/image_classification_q.bin" }, 224,
- 224, 3, { "input" },
- { "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
- { 955 }),
- ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU,
TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ 955 }),
// object detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
"TFLite_Detection_PostProcess:3" },
{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
{ 451, 474, 714, 969 }),
+ // face detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
"TFLite_Detection_PostProcess:1",
"TFLite_Detection_PostProcess:2",
"TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
- { 451, 474, 714, 969 }),
- // face detection test
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 })
+ /* TODO */
+ ));
+
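+ // In-house backends (ONE and TFLITE via MLAPI) get a separate instantiation
+ // so they can be run or filtered independently of the open-source backends.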
+INSTANTIATE_TEST_CASE_P(
+ Inhouse, InferenceEngineTfliteTest,
+ testing::Values(
+ /*********************************************************************************/
+ // parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // ONE via MLAPI.
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
"TFLite_Detection_PostProcess:1",
"TFLite_Detection_PostProcess:2",
"TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
- { 727, 225, 960, 555 }),
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 }),
+
+ // mobilenet based image classification test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer(
+ "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 }),
+
+ /*********************************************************************************/
+ // parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
// TFLITE via MLAPI.
ParamType_Infer(
"mlapi", INFERENCE_TARGET_CPU,
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
{ 3 }),
- ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/image_classification.bin" }, 224,
- 224, 3, { "input_2" }, { "dense_3/Softmax" },
- { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
- { 3 }),
// quantized mobilenet based image classification test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_CPU,
{ "MobilenetV1/Predictions/Reshape_1" },
{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
{ 955 }),
- ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
- INFERENCE_TENSOR_DATA_TYPE_UINT8,
- { "/opt/usr/images/image_classification_q.bin" }, 224,
- 224, 3, { "input" },
- { "MobilenetV1/Predictions/Reshape_1" },
- { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
- { 955 }),
// object detection test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
"TFLite_Detection_PostProcess:3" },
{ "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
{ 451, 474, 714, 969 }),
+ // face detection test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
"TFLite_Detection_PostProcess:1",
"TFLite_Detection_PostProcess:2",
"TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
- { 451, 474, 714, 969 }),
- // face detection test
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 }),
+
+ // mobilenet based image classification test
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
"TFLite_Detection_PostProcess:1",
"TFLite_Detection_PostProcess:2",
"TFLite_Detection_PostProcess:3" },
- { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
- { 727, 225, 960, 555 }),
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
"TFLite_Detection_PostProcess:3" },
{ "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
- { "image" },
- { "Convolutional_Pose_Machine/stage_5_out" },
- { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
- 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
- 123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer(
"mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
{ 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
- 123, 99, 287, 381, 451, 287, 381, 475 })
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ // Hand gesture model 1 from AIC
+ ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
+ { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
+ // Hand gesture model 2 from AIC
+ ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
+ { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
+ { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
+ 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
+ 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
+ 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
+ 38, 38, 12 })
/* TODO */
));
INSTANTIATE_TEST_CASE_P(
- Prefix, InferenceEngineCaffeTest,
+ Opensource, InferenceEngineCaffeTest,
testing::Values(
// parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
// OPENCV
));
INSTANTIATE_TEST_CASE_P(
- Prefix, InferenceEngineDldtTest,
+ Opensource, InferenceEngineDldtTest,
testing::Values(
// DLDT
ParamType_Infer(
{ "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml",
"/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" },
{ 954 })));
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineHandGestureTest,
- testing::Values(
- // TFLITE
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
- { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
- { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
- { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
- 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
- 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
- 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
- 38, 38, 12 }),
- // TFLITE via MLAPI
- ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
- { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
- ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
- { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
- { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
- 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
- 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
- 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
- 38, 38, 12 }),
- // ARMNN
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
- { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
- { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
- { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
- 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
- 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
- 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
- 38, 38, 12 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
- { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
- { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
- { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
- 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
- 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
- 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
- 38, 38, 12 }),
- // ONE via MLAPI
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
- { "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- { "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
- { "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
- { 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
- 78, 36, 82, 42, 82, 44, 83, 45, 35, 37,
- 61, 36, 59, 36, 52, 39, 35, 32, 40, 34,
- 62, 39, 70, 40, 58, 41, 34, 42, 34, 41,
- 38, 38, 12 })
- /* TODO */
- ));