int InferenceOpenCV::SetInputTensorParamNode(std::string node)
{
+ mInputLayer = cv::String(node);
return INFERENCE_ENGINE_ERROR_NONE;
}
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceOpenCV::SetOutPutTensorParamNodes(std::string node)
+int InferenceOpenCV::SetOutputTensorParamNodes(std::vector<std::string> nodes)
{
+ mOutputLayer.clear();
+ for (std::vector<std::string>::iterator iter = nodes.begin();
+ iter != nodes.end(); ++iter) {
+ mOutputLayer.push_back(cv::String(*iter));
+ }
+
return INFERENCE_ENGINE_ERROR_NONE;
}
case INFERENCE_TARGET_GPU :
mNet.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
break;
+ case INFERENCE_TARGET_CUSTOM:
case INFERENCE_TARGET_NONE:
default:
LOGE("Not supported device type [%d], Set CPU mode", (int)type);
}
// This call may be changed if OpenCV version would be upgraded
- mNet = cv::dnn::readNetFromCaffe(mConfigFile, mWeightFile);
+ int nPos = mWeightFile.find_last_of(".");
+ std::string weightFileExt = mWeightFile.substr(nPos+1);
+ LOGI("%s", weightFileExt.c_str());
+ if (weightFileExt.compare("caffemodel") == 0) {
+ mNet = cv::dnn::readNetFromCaffe(mConfigFile, mWeightFile);
+ } else if (weightFileExt.compare("pb") == 0) {
+ mNet = cv::dnn::readNetFromTensorflow(mWeightFile, mConfigFile);
+ } else {
+ LOGE("Not supported model file!");
+ }
if (mNet.empty()) {
LOGE("Net is empty");
int InferenceOpenCV::CreateInputLayerPassage()
{
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ /* Do Nothing
+ * In OpenCV, there is no need to call CreateInputLayerPassage()
+ * because cv::Mat can be used directly
+ */
+
+ return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::PrepareInputLayerPassage()
{
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ /* Do Nothing
+ * In OpenCV, there is no need to call PrepareInputLayerPassage()
+ * because cv::Mat can be used directly
+ */
+ return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::PrepareInputLayerPassage(inference_input_type_e type)
{
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ /* Do Nothing
+ * In OpenCV, there is no need to call PrepareInputLayerPassage()
+ * because cv::Mat can be used directly
+ */
+ return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceOpenCV::Run(cv::Mat tensor)
{
double scaleVal = 1.0/mDeviation;
mSourceSize = tensor.size();
+ if (mInputSize == cv::Size(-1,-1)) {
+ mInputSize = mSourceSize;
+ }
+
cv::Scalar meanScalar = cv::Scalar((float)mMean,(float)mMean, (float)mMean);
mInputBlob = cv::dnn::blobFromImage(tensor, scaleVal, mInputSize, meanScalar, false, false);
return INFERENCE_ENGINE_ERROR_INTERNAL;
}
- mNet.setInput(mInputBlob);
+ mNet.setInput(mInputBlob, mInputLayer);
/* foward */
- mOutputProb = mNet.forward();
- /* TODO */
- // std::vector<cv::Mat> mOutputProb,
- // std::vector<std::string> mOuputLayer;
- // mNet.forward(mOutputProb, mOutputLayer);
+ mNet.forward(mOutputProb, mOutputLayer);
if (mOutputProb.empty()) {
LOGE("OutputProb is empty");
int InferenceOpenCV::GetInferenceResult(ImageClassificationResults& results)
{
- int dims = mOutputProb.dims;
+ int dims = mOutputProb[0].dims;
for (int k = 0; k < dims; ++k) {
- LOGE("%d: %d", k, mOutputProb.size[k]);
+ LOGE("%d: %d", k, mOutputProb[0].size[k]);
}
LOGI("dims: %d", dims);
- cv::Mat reShapedProb = mOutputProb.reshape(1,1);
+ cv::Mat reShapedProb = mOutputProb[0].reshape(1,1);
int classIdx = -1;
int InferenceOpenCV::GetInferenceResult(ObjectDetectionResults& results)
{
- int dims = mOutputProb.dims;
+ int dims = mOutputProb[0].dims;
for (int k = 0; k < dims; ++k) {
- LOGE("%d: %d", k, mOutputProb.size[k]);
+ LOGE("%d: %d", k, mOutputProb[0].size[k]);
}
LOGI("dims: %d", dims);
- cv::Mat detectionMat(mOutputProb.size[2], mOutputProb.size[3], CV_32F, mOutputProb.ptr<float>());
+ cv::Mat detectionMat(mOutputProb[0].size[2], mOutputProb[0].size[3], CV_32F, mOutputProb[0].ptr<float>());
float confidence;
size_t objectClass;
int InferenceOpenCV::GetInferenceResult(FaceDetectionResults& results)
{
- int dims = mOutputProb.dims;
+ int dims = mOutputProb[0].dims;
for (int k = 0; k < dims; ++k) {
- LOGE("%d: %d", k, mOutputProb.size[k]);
+ LOGE("%d: %d", k, mOutputProb[0].size[k]);
}
LOGI("dims: %d", dims);
- cv::Mat detectionMat(mOutputProb.size[2], mOutputProb.size[3], CV_32F, mOutputProb.ptr<float>());
+ cv::Mat detectionMat(mOutputProb[0].size[2], mOutputProb[0].size[3], CV_32F, mOutputProb[0].ptr<float>());
float confidence;
int left, top, right, bottom;
int InferenceOpenCV::GetInferenceResult(FacialLandMarkDetectionResults& results)
{
- int dims = mOutputProb.dims;
+ int dims = mOutputProb[0].dims;
for (int k = 0; k < dims; ++k) {
- LOGE("%d: %d", k, mOutputProb.size[k]);
+ LOGE("%d: %d", k, mOutputProb[0].size[k]);
}
LOGI("dims: %d", dims);
- cv::Mat outputs = mOutputProb.reshape(1,1);
+ cv::Mat outputs = mOutputProb[0].reshape(1,1);
int number_of_landmarks = (outputs.cols >> 1);
{
dimInfo.clear();
results.clear();
-
- int dims = mOutputProb.dims;
std::vector<int> tmpDimInfo;
- for (int d = 0; d < dims; ++d) {
- tmpDimInfo.push_back(mOutputProb.size[d]);
- }
+ LOGE("outputProb size: %d", mOutputProb.size());
+ for (std::vector<cv::Mat>::iterator iter = mOutputProb.begin();
+ iter != mOutputProb.end(); ++iter) {
+ tmpDimInfo.clear();
+ for (int d = 0; d < (*iter).dims; ++d) {
+ tmpDimInfo.push_back((*iter).size[d]);
+ }
- dimInfo.push_back(tmpDimInfo);
- results.push_back(mOutputProb.ptr<float>());
+ dimInfo.push_back(tmpDimInfo);
+ results.push_back((*iter).ptr<float>());
+ }
return INFERENCE_ENGINE_ERROR_NONE;
}
}
}
} /* OpenCVImpl */
-} /* InferenceEngineImpl */
\ No newline at end of file
+} /* InferenceEngineImpl */