mInputData.push_back(inputBlob);
pBuff = mInputData.back().ptr<void*>(0);
- inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+ size_t sizeBuff = mInputData.back().elemSize() * mInputData.back().rows * mInputData.back().cols;
+ LOGI("elemSize: %zd, rows: %d, cols: %d", mInputData.back().elemSize(), mInputData.back().rows, mInputData.back().cols );
+ inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1 };
buffers.push_back(buffer);
}
std::vector<cv::Mat>::iterator iter;
for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
pBuff = (*iter).ptr<void*>(0);
- inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32 };
+ size_t sizeBuff = (*iter).total() * (*iter).elemSize();
+ inference_engine_tensor_buffer buffer = { pBuff, TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1};
buffers.push_back(buffer);
}
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- std::vector<cv::String> ouputLayers(mOutputLayers.begin(), mOutputLayers.end());
+ std::vector<cv::String> outputLayers(mOutputLayers.begin(), mOutputLayers.end());
+
+ mNet.forward(mOutputBlobs, outputLayers);
+
+	// mOutputBlobs[0] has shape 1x1xNx7, where the 1st of the 7 values per
+	// detection indicates the image id. Use it as the number of detections when a batch mode isn't supported.
+ if (outputLayers[0].compare("detection_out") == 0) {
+ cv::Mat cvOutputData(mOutputBlobs[0].size[2], mOutputBlobs[0].size[3], CV_32F, reinterpret_cast<float*>(mOutputBlobs[0].ptr<float*>(0)));
+ cvOutputData.at<float>(0,0) = mOutputBlobs[0].size[2];
+ }
+
+ for (int k = 0; k < output_buffers.size(); ++k)
+ output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);
- mNet.forward(mOutputBlobs, ouputLayers);
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;