{
LOGI("ENTER");
- LOGI("Inferece targets are: ");
+ LOGI("Inference target is [%d]", types);
switch (types) {
case INFERENCE_TARGET_CPU:
mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
case INFERENCE_TARGET_CUSTOM:
case INFERENCE_TARGET_NONE:
default:
- LOGE("Not supported device type [%d], Set CPU mode", (int) types);
+ LOGE("Not supported device type [%d], Set CPU mode", types);
mNet.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
}
LOGI("LEAVE");
}
int InferenceOpenCV::GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
mInputData.clear();
void *pBuff = NULL;
- std::vector<inference_engine_tensor_info>::iterator info_iter;
- for (info_iter = mInputTensorInfo.begin();
- info_iter != mInputTensorInfo.end(); ++info_iter) {
- cv::Mat inputBlob(cv::Size((*info_iter).shape[3],
- (*info_iter).shape[2]),
- CV_32FC3);
+ for (auto& layer : mInputLayers) {
+ std::vector<size_t>& shape = layer.second.shape;
+ cv::Mat inputBlob(cv::Size(shape[3], shape[2]), CV_32FC3);
mInputData.push_back(inputBlob);
pBuff = mInputData.back().ptr<void *>(0);
inference_engine_tensor_buffer buffer = {
pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
};
- buffers.push_back(buffer);
+ buffers.insert(std::make_pair(layer.first, buffer));
}
LOGI("LEAVE");
}
int InferenceOpenCV::GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
cv::Scalar(), false, false);
- mNet.setInput(mInputBlobs, mInputLayers.front());
+ mNet.setInput(mInputBlobs, mInputLayers.begin()->first);
- std::vector<cv::String> ouputLayers(mOutputLayers.begin(),
- mOutputLayers.end());
- mNet.forward(mOutputBlobs, ouputLayers);
+ std::vector<cv::String> outputLayers;
+ for (auto& layer : mOutputLayers) {
+ LOGI("output layer: %s", layer.first.c_str());
+ outputLayers.push_back(layer.first);
+ }
+
+ mNet.forward(mOutputBlobs, outputLayers);
void *pBuff = NULL;
- std::vector<cv::Mat>::iterator iter;
- for (iter = mOutputBlobs.begin(); iter != mOutputBlobs.end(); ++iter) {
- pBuff = (*iter).ptr<void *>(0);
- size_t sizeBuff = (*iter).total() * (*iter).elemSize();
+ std::map<std::string, int>().swap(mOutputLayerId);
+ for (size_t idx = 0; idx < mOutputBlobs.size(); ++idx) {
+ pBuff = mOutputBlobs[idx].ptr<void *>(0);
+ size_t sizeBuff = mOutputBlobs[idx].total() * mOutputBlobs[idx].elemSize();
inference_engine_tensor_buffer buffer = {
pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, sizeBuff, 1
};
- buffers.push_back(buffer);
+
+ LOGI("output layer: %s with %p", outputLayers[idx].c_str(), buffer.buffer);
+ buffers.insert(std::make_pair(outputLayers[idx], buffer));
+ mOutputLayerId.insert(std::make_pair(outputLayers[idx], mNet.getLayerId(outputLayers[idx])));
}
if (buffers.empty()) {
LOGI("buff empty");
- inference_engine_tensor_buffer buffer = {
- nullptr, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 1
- };
- buffers.push_back(buffer);
}
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- property.layer_names = mInputLayers;
- property.tensor_infos = mInputTensorInfo;
+ property.layers = mInputLayers;
LOGI("LEAVE");
}
int lid = -1;
- int idx = 0;
- std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
- for (std::vector<std::string>::iterator iter = mOutputLayers.begin();
- iter != mOutputLayers.end(); ++iter, ++idx) {
- LOGI("output layer: %s", (*iter).c_str());
- lid = mNet.getLayerId((*iter));
+ std::vector<std::vector<int>> inputTensorShapes;
+ for (auto& layer : mInputLayers) {
+ inputTensorShapes.push_back(std::vector<int>(
+ layer.second.shape.begin(),
+ layer.second.shape.end()));
+ LOGI("input layer[%s]: %zu, %zu, %zu, %zu", layer.first.c_str(),
+ layer.second.shape[0], layer.second.shape[1],
+ layer.second.shape[2], layer.second.shape[3]);
+ }
+
+ std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+ for (auto& layer : mOutputLayerId) {
+ LOGI("output layer: %s", layer.first.c_str());
+ lid = layer.second;
LOGI("output layer Id: %d", lid);
if (lid < 0) {
- LOGE("Invalid output %s layer", (*iter).c_str());
+ LOGE("Invalid output %s layer", layer.first.c_str());
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
std::vector<cv::dnn::MatShape> lInputShape, lOutputShape;
- LOGI("%zu, %zu, %zu, %zu", mInputTensorInfo[idx].shape[0],
- mInputTensorInfo[idx].shape[1], mInputTensorInfo[idx].shape[2],
- mInputTensorInfo[idx].shape[3]);
-
- std::vector<int> cvInputTensorShape(
- mInputTensorInfo[idx].shape.begin(),
- mInputTensorInfo[idx].shape.end());
- mNet.getLayerShapes(cvInputTensorShape, lid, lInputShape,
+
+ mNet.getLayerShapes(inputTensorShapes, lid, lInputShape,
lOutputShape);
inference_engine_tensor_info tensor_info;
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
tensor_info.size = 1;
LOGE("tensor_info");
- for (std::vector<size_t>::iterator iter2 =
- tensor_info.shape.begin();
- iter2 != tensor_info.shape.end(); ++iter2) {
- LOGI("%zu", (*iter2));
- tensor_info.size *= (*iter2);
+ for (auto& dim : tensor_info.shape) {
+ LOGI("%zu", dim);
+ tensor_info.size *= dim;
}
- mOutputTensorInfo.push_back(tensor_info);
+ mOutputLayers.insert(std::make_pair(layer.first, tensor_info));
}
- property.layer_names = mOutputLayers;
- property.tensor_infos = mOutputTensorInfo;
+ property.layers = mOutputLayers;
LOGI("LEAVE");
{
LOGI("ENTER");
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("input layer name = %s", name.c_str());
+ for (auto& layer : property.layers) {
+ LOGI("input layer name = %s", layer.first.c_str());
}
mInputLayers.clear();
- std::vector<std::string>().swap(mInputLayers);
+ std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
- mInputTensorInfo.clear();
- std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
- mInputLayers = property.layer_names;
- mInputTensorInfo = property.tensor_infos;
+ mInputLayers = property.layers;
LOGI("LEAVE");
int InferenceOpenCV::SetOutputLayerProperty(
inference_engine_layer_property &property)
{
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("output layer name = %s", name.c_str());
+ for (auto& layer : property.layers) {
+ LOGI("output layer name = %s", layer.first.c_str());
}
mOutputLayers.clear();
- std::vector<std::string>().swap(mOutputLayers);
+ std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
- mOutputLayers = property.layer_names;
+ mOutputLayers = property.layers;
return INFERENCE_ENGINE_ERROR_NONE;
}
}
int InferenceOpenCV::Run(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
{
LOGI("ENTER");
// need to check memoery
mInputBlobs = cv::dnn::blobFromImages(mInputData, 1.0, cv::Size(),
- cv::Scalar(), false, false);
+ cv::Scalar(), true, false);
// Currently it supports that one input layer with multiple input tensors.
// it doesn't support that mulitple input layer with multiple input tensors.
// To suppor that, setInput is called manually while we matching inputblobs
// and their corresponding input layer.
// Suppose a case that an input layer and mulitple input tensors are given.
- mNet.setInput(mInputBlobs, mInputLayers.front());
+ mNet.setInput(mInputBlobs, input_buffers.begin()->first);
if (mOutputBlobs.size() != output_buffers.size()) {
LOGE("output_buffers size is %zu but outputBlobs %zu",
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- std::vector<cv::String> outputLayers(mOutputLayers.begin(),
- mOutputLayers.end());
+ std::vector<cv::String> outputLayers;
+ for (auto& layer : mOutputLayers) {
+ outputLayers.push_back(layer.first);
+ }
mNet.forward(mOutputBlobs, outputLayers);
cvOutputData.at<float>(0, 0) = mOutputBlobs[0].size[2];
}
- for (unsigned int k = 0; k < output_buffers.size(); ++k)
- output_buffers[k].buffer = mOutputBlobs[k].ptr<void>(0);
+ int idx = 0;
+ for (auto iter = output_buffers.begin(); iter != output_buffers.end(); ++iter, ++idx) {
+ LOGI("output: %s", iter->first.c_str());
+ iter->second.buffer = mOutputBlobs[idx].ptr<void *>(0);
+ }
LOGI("LEAVE");