#include "inference_engine_common_impl.h"
#include "inference_engine_test_common.h"
+#define INFERENCE_ITERATION 10
+
typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
int, int, int, std::vector<std::string>,
std::vector<std::string>, std::vector<std::string>,
// ARMNN.
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10,
+ "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
// NOTE(review): this change replaces the previous iteration count of 100 with
// INFERENCE_ITERATION (== 10) for all AIC hand-gesture entries — confirm the
// 10x reduction in inference iterations is intended and not an accidental
// side effect of the constant extraction.
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"armnn", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"armnn", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10,
+ "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// TFLITE.
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ "Convolutional_Pose_Machine/stage_5_out" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ "Convolutional_Pose_Machine/stage_5_out" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// ONE via MLAPI.
ParamType_Infer(
"one", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
"one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ "Convolutional_Pose_Machine/stage_5_out" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ "Convolutional_Pose_Machine/stage_5_out" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// TFLITE via MLAPI.
ParamType_Infer(
"mlapi", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ "Convolutional_Pose_Machine/stage_5_out" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ "TFLite_Detection_PostProcess",
// face detection test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
"mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
{ "Convolutional_Pose_Machine/stage_5_out" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, 100, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// squeezenet based image classification test
ParamType_Infer(
"opencv", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification_caffe.bin" },
227, 227, 3, { "data" }, { "prob" },
{ 281 }),
ParamType_Infer(
"opencv", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification_caffe.bin" },
227, 227, 3, { "data" }, { "prob" },
// mobilenet-ssd based object detection test
ParamType_Infer(
"opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
{ "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
{ 15, 19, 335, 557 }),
ParamType_Infer(
"opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
- 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
{ "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
// resnet10-ssd based face detection test
ParamType_Infer(
- "opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ "opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
"/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
{ 733, 233, 965, 539 }),
ParamType_Infer(
- "opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ "opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
// tweakcnn based facial landmark detection test
ParamType_Infer(
"opencv", INFERENCE_TARGET_CPU,
- TEST_FACIAL_LANDMARK_DETECTION, 10,
+ TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/faciallandmark_detection_caffe.bin" },
128, 128, 3, { "data" }, { "Sigmoid_fc2" },
{ 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 }),
ParamType_Infer(
"opencv", INFERENCE_TARGET_GPU,
- TEST_FACIAL_LANDMARK_DETECTION, 10,
+ TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/faciallandmark_detection_caffe.bin" },
128, 128, 3, { "data" }, { "Sigmoid_fc2" },
// DLDT
ParamType_Infer(
"dldt", INFERENCE_TARGET_CUSTOM,
- TEST_IMAGE_CLASSIFICATION, 10,
+ TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/dldt_banana_classification.bin" },
224, 224, 3, { "data" }, { "prob" },