From: Tae-Young Chung
Date: Thu, 2 Apr 2020 05:38:47 +0000 (+0900)
Subject: Fixed wrong input/output buffer allocation
X-Git-Tag: submit/tizen/20200423.063253~3
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bbf1cc99a34b9449537300f03e4febd1b6bbb95b;p=platform%2Fcore%2Fmultimedia%2Finference-engine-opencv.git

Fixed wrong input/output buffer allocation

Fixed the wrong input/output buffer sizes reported by GetInputTensorBuffers()
and GetOutputTensorBuffers(). In Run(), the output buffer address changes
after forward(), so the buffer addresses have to be updated. Also add a trick
to handle the detection output without a post-process step.

Signed-off-by: Tae-Young Chung
---

diff --git a/src/inference_engine_opencv.cpp b/src/inference_engine_opencv.cpp
index 924d634..80ce922 100644
--- a/src/inference_engine_opencv.cpp
+++ b/src/inference_engine_opencv.cpp
@@ -118,7 +118,9 @@ int InferenceOpenCV::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
         pBuff = mInputData.back().ptr<float>(0);
-        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+        size_t sizeBuff = mInputData.back().elemSize() * mInputData.back().rows * mInputData.back().cols;
+        LOGI("elemSize: %zd, rows: %d, cols: %d", mInputData.back().elemSize(), mInputData.back().rows, mInputData.back().cols );
+        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1 };
         buffers.push_back(buffer);
     }
 
@@ -143,7 +145,8 @@ int InferenceOpenCV::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
     std::vector<cv::Mat>::iterator iter;
     for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
         pBuff = (*iter).ptr(0);
-        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+        size_t sizeBuff = (*iter).total() * (*iter).elemSize();
+        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1};
         buffers.push_back(buffer);
     }
 
@@ -305,9 +308,20 @@ int InferenceOpenCV::Run(std::vector<inference_engine_tensor_buffer> &input_buff
         return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
     }
 
-    std::vector<cv::String> ouputLayers(mOutputLayers.begin(), mOutputLayers.end());
+    std::vector<cv::String> outputLayers(mOutputLayers.begin(), mOutputLayers.end());
+
+    mNet.forward(mOutputBlobs, outputLayers);
+
+    // mOutputBlobs[0] (the shape is 1x1xNx7 and the 1st of the 7
+    // indicates the image id). Use the 1st of 7 as the number of detections if a batch mode isn't supported.
+    if (outputLayers[0].compare("detection_out") == 0) {
+        cv::Mat cvOutputData(mOutputBlobs[0].size[2], mOutputBlobs[0].size[3], CV_32F, reinterpret_cast<float *>(mOutputBlobs[0].ptr(0)));
+        cvOutputData.at<float>(0, 0) = mOutputBlobs[0].size[2];
+    }
+
+    for (int k = 0; k < output_buffers.size(); ++k)
+        output_buffers[k].buffer = mOutputBlobs[k].ptr(0);
 
-    mNet.forward(mOutputBlobs, ouputLayers);
 
     LOGI("LEAVE");
 
     return INFERENCE_ENGINE_ERROR_NONE;
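
Note on the change: the patch leans on two cv::Mat properties, namely that a blob's
byte size is total() * elemSize() (or elemSize() * rows * cols for a 2-D Mat), and
that a 1x1xNx7 detection_out blob can be viewed as an N x 7 CV_32F matrix whose
element (0, 0) is repurposed to carry the detection count. The following is a
minimal standalone sketch of those two points; it is not part of the patch, and the
fabricated blob, its shape N, and the variable names are assumptions made only for
illustration.

    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        // Fabricated blob shaped like OpenCV DNN's detection_out output: 1 x 1 x N x 7, CV_32F.
        const int N = 5;
        const int dims[4] = { 1, 1, N, 7 };
        cv::Mat outputBlob(4, dims, CV_32F, cv::Scalar(0));

        // Buffer size the way the patch computes it: element count times element byte size.
        size_t sizeBuff = outputBlob.total() * outputBlob.elemSize();
        std::printf("buffer size: %zu bytes\n", sizeBuff); // 1 * 1 * 5 * 7 * 4 = 140

        // The patch's trick: view the same memory as an N x 7 matrix and store the number
        // of detections in element (0, 0), which otherwise carries the image id.
        cv::Mat view(outputBlob.size[2], outputBlob.size[3], CV_32F, outputBlob.ptr(0));
        view.at<float>(0, 0) = static_cast<float>(outputBlob.size[2]);

        // A caller that cached a raw pointer has to re-read it once the blob may have been
        // reallocated (as forward() can do); here the pointer is simply read again.
        void *buffer = outputBlob.ptr(0);
        std::printf("detections stored at (0,0): %.0f, buffer at %p\n",
                    view.at<float>(0, 0), buffer);
        return 0;
    }

The same total() * elemSize() value is what GetOutputTensorBuffers() now reports in
each inference_engine_tensor_buffer, and the loop added at the end of Run() refreshes
output_buffers[k].buffer for the same reason the sketch re-reads the pointer:
forward() may hand back reallocated cv::Mat data.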