Fixed wrong input/output buffer allocation
author Tae-Young Chung <ty83.chung@samsung.com>
Thu, 2 Apr 2020 05:38:47 +0000 (14:38 +0900)
committer Inki Dae <inki.dae@samsung.com>
Mon, 20 Apr 2020 00:51:05 +0000 (09:51 +0900)
Fixed the wrong input/output buffer sizes in GetInputTensorBuffers() and
GetOutputTensorBuffers(). In Run(), the output buffer address changes
after forward(), so the cached buffer address has to be updated.
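
The per-tensor size is just the byte size of the backing cv::Mat, and
forward() may hand back reallocated output Mats, which is why the cached
pointers go stale. A minimal sketch of the idea (the field order of
inference_engine_tensor_buffer is assumed from the hunks below, and the
trailing 1 is taken to be an ownership flag):

    #include <opencv2/core.hpp>

    // Hypothetical stand-in for inference_engine_tensor_buffer; the real
    // struct comes from inference-engine-interface and may differ.
    struct tensor_buffer_sketch {
        void *buffer;   // raw pointer into the cv::Mat payload
        int data_type;  // e.g. TENSOR_DATA_TYPE_FLOAT32
        size_t size;    // payload size in bytes
        int owner;      // trailing flag, assumed to mark ownership
    };

    static tensor_buffer_sketch WrapBlob(cv::Mat &blob)
    {
        tensor_buffer_sketch buf;
        buf.buffer = blob.data;   // start of the Mat payload
        buf.data_type = 0;        // stands in for TENSOR_DATA_TYPE_FLOAT32
        // total() is the element count over all dimensions and elemSize()
        // the bytes per element, so their product is the full payload size.
        buf.size = blob.total() * blob.elemSize();
        buf.owner = 1;
        return buf;
    }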

Also add a trick to handle the output when no post-processing step is applied.

Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
src/inference_engine_opencv.cpp

index 924d634..80ce922 100644
@@ -118,7 +118,9 @@ int InferenceOpenCV::GetInputTensorBuffers(std::vector<inference_engine_tensor_b
         mInputData.push_back(inputBlob);
 
         pBuff = mInputData.back().ptr<void*>(0);
-        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+        size_t sizeBuff = mInputData.back().elemSize() * mInputData.back().rows * mInputData.back().cols;
+        LOGI("elemSize: %zu, rows: %d, cols: %d", mInputData.back().elemSize(), mInputData.back().rows, mInputData.back().cols);
+        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1 };
         buffers.push_back(buffer);
     }
 
@@ -143,7 +145,8 @@ int InferenceOpenCV::GetOutputTensorBuffers(std::vector<inference_engine_tensor_
     std::vector<cv::Mat>::iterator iter;
     for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
         pBuff = (*iter).ptr<void*>(0);
-        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+        size_t sizeBuff = (*iter).total() * (*iter).elemSize();
+        inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1 };
         buffers.push_back(buffer);
     }
 
@@ -305,9 +308,20 @@ int InferenceOpenCV::Run(std::vector<inference_engine_tensor_buffer> &input_buff
         return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
     }
 
-    std::vector<cv::String> ouputLayers(mOutputLayers.begin(), mOutputLayers.end());
+    std::vector<cv::String> outputLayers(mOutputLayers.begin(), mOutputLayers.end());
+
+    mNet.forward(mOutputBlobs, outputLayers);
+
+    // mOutputBlobs[0] has the shape 1x1xNx7, where the 1st of the 7
+    // values indicates the image id. Reuse that slot for the number of detections, since batch mode isn't supported.
+    if (outputLayers[0].compare("detection_out") == 0) {
+        cv::Mat cvOutputData(mOutputBlobs[0].size[2], mOutputBlobs[0].size[3], CV_32F, mOutputBlobs[0].ptr<float>(0));
+        cvOutputData.at<float>(0,0) = mOutputBlobs[0].size[2];
+    }
+
+    for (size_t k = 0; k < output_buffers.size(); ++k)
+        output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);
 
-    mNet.forward(mOutputBlobs, ouputLayers);
     LOGI("LEAVE");
 
     return INFERENCE_ENGINE_ERROR_NONE;
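
As a rough illustration of the detection_out trick above (the 1x1xNx7
SSD-style layout of [image_id, label, score, x1, y1, x2, y2] per row is
an assumption here; only the first value, normally the image id, is
reused):

    #include <opencv2/core.hpp>

    // Hypothetical standalone example, not code from this repository.
    static int DetectionCountTrick()
    {
        const int dims[4] = { 1, 1, 5, 7 };          // pretend N = 5 detections
        cv::Mat blob(4, dims, CV_32F, cv::Scalar(0));

        // 2-D view (N x 7) over the same memory as the 4-D blob.
        cv::Mat view(blob.size[2], blob.size[3], CV_32F, blob.ptr<float>(0));

        // Same move as in Run(): store the detection count in the first
        // element so a caller that skips post-processing can read it back.
        view.at<float>(0, 0) = static_cast<float>(blob.size[2]);

        return static_cast<int>(view.at<float>(0, 0));   // 5
    }

Reading element (0, 0) back then yields the detection count directly,
without a separate post-processing pass over the blob.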