FillOutputResult(engine.get(), outputs, result);
switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
+ case TEST_MODEL_IMAGE_CLASSIFICATION:
ret = VerifyImageClassificationResults(result, answers[0]);
EXPECT_EQ(ret, 1);
break;
- case TEST_OBJECT_DETECTION:
+ case TEST_MODEL_OBJECT_DETECTION:
// 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACE_DETECTION:
+ case TEST_MODEL_FACE_DETECTION:
// 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACIAL_LANDMARK_DETECTION:
+ case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
// TODO.
break;
- case TEST_POSE_ESTIMATION:
+ case TEST_MODEL_POSE_ESTIMATION:
// 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
ret = VerifyPoseEstimationResults(result, answers, 563, 750);
EXPECT_EQ(ret, 1);
break;
- case TEST_AIC_HAND_GESTURE_1:
+ case TEST_MODEL_AIC_HAND_GESTURE_1:
ret = VerifyAICHandGesture1Results(outputs);
EXPECT_EQ(ret, 1);
break;
- case TEST_AIC_HAND_GESTURE_2:
+ case TEST_MODEL_AIC_HAND_GESTURE_2:
ret = VerifyAICHandGesture2Results(outputs, answers);
EXPECT_EQ(ret, 1);
break;
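// For reference, every case label above uses the renamed test-type enumerators. A minimal
// sketch of the assumed declaration after this rename (the defining header is not part of
// this hunk, and the enum name below is a placeholder):
//
//     enum test_models {
//         TEST_MODEL_IMAGE_CLASSIFICATION = 0,
//         TEST_MODEL_OBJECT_DETECTION,
//         TEST_MODEL_FACE_DETECTION,
//         TEST_MODEL_FACIAL_LANDMARK_DETECTION,
//         TEST_MODEL_POSE_ESTIMATION,
//         TEST_MODEL_AIC_HAND_GESTURE_1,
//         TEST_MODEL_AIC_HAND_GESTURE_2
//     };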
FillOutputResult(engine.get(), outputs, result);
switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
+ case TEST_MODEL_IMAGE_CLASSIFICATION:
ret = VerifyImageClassificationResults(result, answers[0]);
EXPECT_EQ(ret, 1);
break;
- case TEST_OBJECT_DETECTION:
+ case TEST_MODEL_OBJECT_DETECTION:
// 1072 : fixed height size of dumped image, 1608 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 1072, 1608);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACE_DETECTION:
+ case TEST_MODEL_FACE_DETECTION:
// 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACIAL_LANDMARK_DETECTION:
+ case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
// TODO.
break;
- case TEST_POSE_ESTIMATION:
+ case TEST_MODEL_POSE_ESTIMATION:
// 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
ret = VerifyPoseEstimationResults(result, answers, 563, 750);
EXPECT_EQ(ret, 1);
break;
- case TEST_AIC_HAND_GESTURE_1:
+ case TEST_MODEL_AIC_HAND_GESTURE_1:
ret = VerifyAICHandGesture1Results(outputs);
EXPECT_EQ(ret, 1);
break;
- case TEST_AIC_HAND_GESTURE_2:
+ case TEST_MODEL_AIC_HAND_GESTURE_2:
ret = VerifyAICHandGesture2Results(outputs, answers);
EXPECT_EQ(ret, 1);
break;
FillOutputResult(engine.get(), outputs, result);
switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
+ case TEST_MODEL_IMAGE_CLASSIFICATION:
ret = VerifyImageClassificationResults(result, answers[0]);
EXPECT_EQ(ret, 1);
break;
- case TEST_OBJECT_DETECTION:
+ case TEST_MODEL_OBJECT_DETECTION:
// 636 : fixed height size of dumped image, 1024 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACE_DETECTION:
+ case TEST_MODEL_FACE_DETECTION:
// 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACIAL_LANDMARK_DETECTION:
+ case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
// 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
EXPECT_EQ(ret, 1);
break;
- case TEST_POSE_ESTIMATION:
+ case TEST_MODEL_POSE_ESTIMATION:
// 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
ret = VerifyPoseEstimationResults(result, answers, 563, 750);
EXPECT_EQ(ret, 1);
FillOutputResult(engine.get(), outputs, result);
switch (test_type) {
- case TEST_IMAGE_CLASSIFICATION:
+ case TEST_MODEL_IMAGE_CLASSIFICATION:
ret = VerifyImageClassificationResults(result, answers[0]);
EXPECT_EQ(ret, 1);
break;
- case TEST_OBJECT_DETECTION:
+ case TEST_MODEL_OBJECT_DETECTION:
// 636 : fixed height size of dumped image, 1024 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 636, 1024);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACE_DETECTION:
+ case TEST_MODEL_FACE_DETECTION:
// 1152 : fixed height size of dumped image, 1536 : fixed width size of dumped image.
ret = VerifyObjectDetectionResults(result, answers, 1152, 1536);
EXPECT_EQ(ret, 1);
break;
- case TEST_FACIAL_LANDMARK_DETECTION:
+ case TEST_MODEL_FACIAL_LANDMARK_DETECTION:
// 128 : fixed height size of dumped image, 128 : fixed width size of dumped image.
ret = VerifyFacialLandmarkDetectionResults(result, answers, 128, 128);
EXPECT_EQ(ret, 1);
break;
- case TEST_POSE_ESTIMATION:
+ case TEST_MODEL_POSE_ESTIMATION:
// 563 : fixed height size of dumped image, 750 : fixed width size of dumped image.
ret = VerifyPoseEstimationResults(result, answers, 563, 750);
EXPECT_EQ(ret, 1);
// ARMNN.
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"armnn", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ 955 }),
// object detection test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "armnn", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
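// Every ParamType_Infer entry in these instantiations follows the same positional layout.
// A hedged sketch of the tuple that layout implies, inferred only from the call sites in
// this patch (the real typedef lives outside these hunks; the ordering of the two equal
// spatial dimensions as height-then-width is an assumption):
//
//     #include <string>
//     #include <tuple>
//     #include <vector>
//
//     typedef std::tuple<std::string,              // backend name: "armnn", "tflite", "one", "opencv", "dldt"
//                        int,                      // inference target: INFERENCE_TARGET_CPU / _GPU / _CUSTOM
//                        int,                      // test model type: TEST_MODEL_*
//                        int,                      // iteration count: INFERENCE_ITERATION
//                        int,                      // tensor data type: INFERENCE_TENSOR_DATA_TYPE_*
//                        std::vector<std::string>, // input image dump file(s)
//                        int, int, int,            // tensor height, width and channel count
//                        std::vector<std::string>, // input layer names
//                        std::vector<std::string>, // output layer names
//                        std::vector<std::string>, // model file path(s)
//                        std::vector<int>>         // expected answer values
//             ParamType_Infer;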
// face detection test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "armnn", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
+ "armnn", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"armnn", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"armnn", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ 955 }),
// object detection test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ "armnn", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "armnn", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, INFERENCE_ITERATION,
+ "armnn", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// TFLITE.
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"tflite", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ 955 }),
// object detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ "tflite", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"tflite", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ 955 }),
// object detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ "tflite", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "tflite", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ "tflite", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// ONE via MLAPI.
ParamType_Infer(
"one", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ 955 }),
// object detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "one", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "one", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ 955 }),
// object detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ "one", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "one", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// TFLITE via MLAPI.
ParamType_Infer(
"one", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
{ 955 }),
// object detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "one", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "one", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_CPU, TEST_MODEL_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// quantized mobilenet based image classification test
ParamType_Infer(
"one", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
224, 3, { "input" },
// object detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ "one", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// face detection test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "one", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
- "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_GPU, TEST_MODEL_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_MODEL_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// ARMNN.
ParamType_CLTuner(
"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_RAPID,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ 3 }),
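// ParamType_CLTuner entries add three CLTuner controls between the target device and the
// test model type: an enable flag, an update flag, and a tuning mode. A hedged sketch of
// the assumed tuple, mirroring the ParamType_Infer layout sketched earlier (field meanings
// are read off the call sites here, not taken from the actual typedef):
//
//     typedef std::tuple<std::string,              // backend name
//                        int,                      // inference target device
//                        bool,                     // CLTuner active
//                        bool,                     // true: (re)generate the tuning file, false: read an existing one
//                        int,                      // tuning mode: _RAPID / _NORMAL / _EXHAUSTIVE, or _READ when reusing
//                        int,                      // test model type: TEST_MODEL_*
//                        int,                      // iteration count
//                        int,                      // tensor data type
//                        std::vector<std::string>, // input image dump file(s)
//                        int, int, int,            // tensor height, width and channel count
//                        std::vector<std::string>, // input layer names
//                        std::vector<std::string>, // output layer names
//                        std::vector<std::string>, // model file path(s)
//                        std::vector<int>>         // expected answer values
//             ParamType_CLTuner;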
ParamType_CLTuner(
"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ 3 }),
ParamType_CLTuner(
"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_NORMAL,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ 3 }),
ParamType_CLTuner(
"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ 3 }),
ParamType_CLTuner(
"armnn", INFERENCE_TARGET_GPU, true, true, INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
{ 3 }),
ParamType_CLTuner(
"armnn", INFERENCE_TARGET_GPU, true, false, INFERENCE_ENGINE_CLTUNER_READ,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
224, 3, { "input_2" }, { "dense_3/Softmax" },
// squeezenet based image classification test
ParamType_Infer(
"opencv", INFERENCE_TARGET_CPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification_caffe.bin" },
227, 227, 3, { "data" }, { "prob" },
{ 281 }),
ParamType_Infer(
"opencv", INFERENCE_TARGET_GPU,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification_caffe.bin" },
227, 227, 3, { "data" }, { "prob" },
// mobilenet-ssd based object detection test
ParamType_Infer(
- "opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "opencv", INFERENCE_TARGET_CPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
"/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" },
{ 15, 19, 335, 557 }),
ParamType_Infer(
- "opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ "opencv", INFERENCE_TARGET_GPU, TEST_MODEL_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
// resnet10-ssd based face detection test
ParamType_Infer(
- "opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "opencv", INFERENCE_TARGET_CPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
"/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
{ 733, 233, 965, 539 }),
ParamType_Infer(
- "opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "opencv", INFERENCE_TARGET_GPU, TEST_MODEL_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection_caffe.bin" }, 300,
300, 3, { "data" }, { "detection_out" },
// tweakcnn based facial landmark detection test
ParamType_Infer(
"opencv", INFERENCE_TARGET_CPU,
- TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
+ TEST_MODEL_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/faciallandmark_detection_caffe.bin" },
128, 128, 3, { "data" }, { "Sigmoid_fc2" },
{ 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 }),
ParamType_Infer(
"opencv", INFERENCE_TARGET_GPU,
- TEST_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
+ TEST_MODEL_FACIAL_LANDMARK_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/faciallandmark_detection_caffe.bin" },
128, 128, 3, { "data" }, { "Sigmoid_fc2" },
// DLDT
ParamType_Infer(
"dldt", INFERENCE_TARGET_CUSTOM,
- TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
+ TEST_MODEL_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/dldt_banana_classification.bin" },
224, 224, 3, { "data" }, { "prob" },