// mobilenet based image classification test
// TFLITE via the ONE backend (formerly "mlapi").
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU,
+ "one", INFERENCE_TARGET_CPU,
TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
{ 3 }),
// quantized mobilenet based image classification test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU,
+ "one", INFERENCE_TARGET_CPU,
TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
{ 955 }),
// object detection test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ "one", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 451, 474, 714, 969 }),
// face detection test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "one", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
{ 727, 225, 960, 555 }),
// pose estimation test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_CPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// mobilenet based image classification test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU,
+ "one", INFERENCE_TARGET_GPU,
TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/image_classification.bin" }, 224,
// quantized mobilenet based image classification test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU,
+ "one", INFERENCE_TARGET_GPU,
TEST_IMAGE_CLASSIFICATION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" }, 224,
// object detection test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ "one", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// face detection test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
+ "one", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, INFERENCE_ITERATION,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
{ "normalized_input_image_tensor" },
// pose estimation test
ParamType_Infer(
- "mlapi", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ "one", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
{ "image" },
351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
123, 99, 287, 381, 451, 287, 381, 475 }),
// Hand gesture model 1 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_1, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 224, 224, 3, { "input" }, { "mobilenetv2/boundingbox", "mobilenetv2/heatmap" },
{ "/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite" }, { 0 }),
// Hand gesture model 2 from AIC
- ParamType_Infer("mlapi", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ ParamType_Infer("one", INFERENCE_TARGET_GPU, TEST_AIC_HAND_GESTURE_2, INFERENCE_ITERATION, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
{ "/opt/usr/images/hand.bin" }, 56, 56, 21, { "input" }, { "mobilenetv2/coord_refine", "mobilenetv2/gesture" },
{ "/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite" },
{ 55, 39, 51, 40, 50, 42, 61, 43, 71, 39,
// OPENCV
ParamType_One("opencv"),
// ONE.
- ParamType_One("mlapi")
+ ParamType_One("one")
/* TODO */
));
// OPENCV.
ParamType_Two("opencv", INFERENCE_TARGET_CPU),
// ONE.
- ParamType_Two("mlapi", INFERENCE_TARGET_CPU)
+ ParamType_Two("one", INFERENCE_TARGET_CPU)
/* TODO */
));
"/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }),
// ONE.
ParamType_Three(
- "mlapi", INFERENCE_TARGET_CPU,
+ "one", INFERENCE_TARGET_CPU,
{ "/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite" })
/* TODO */
));
ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
224, 3, { "test_name" }),
// ONE.
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
224, 3, { "test_name" })
/* TODO */
));
ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
224, 3, { "" }),
// ONE.
- ParamType_Six("mlapi", -1, 224, 224, 3, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
+ ParamType_Six("one", -1, 224, 224, 3, { "test_name" }),
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 0,
224, 3, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
0, 3, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
224, 0, { "test_name" }),
- ParamType_Six("mlapi", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
+ ParamType_Six("one", INFERENCE_TENSOR_DATA_TYPE_UINT8, 224,
224, 3, { "" })
/* TODO */
));
{ 281 }),
// ONE.
ParamType_Many(
- "mlapi", INFERENCE_ENGINE_PROFILER_OFF,
+ "one", INFERENCE_ENGINE_PROFILER_OFF,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
{ 281 }),
// ONE.
ParamType_Many(
- "mlapi", INFERENCE_ENGINE_PROFILER_FILE,
+ "one", INFERENCE_ENGINE_PROFILER_FILE,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },
{ 281 }),
// ONE.
ParamType_Many(
- "mlapi", INFERENCE_ENGINE_PROFILER_CONSOLE,
+ "one", INFERENCE_ENGINE_PROFILER_CONSOLE,
INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
INFERENCE_TENSOR_DATA_TYPE_UINT8,
{ "/opt/usr/images/image_classification_q.bin" },