mInterpreter->tensors_size());
// input tensor
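+ // Start from an empty map so stale input entries are dropped.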
+ std::map<std::string, int>().swap(mInputLayerId);
if (mInterpreter->inputs().size()) {
- mInputLayerId = mInterpreter->inputs();
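+ // The model declares its inputs: map each input tensor's name to its index.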
+ for (auto iter = mInterpreter->inputs().begin();
+ iter != mInterpreter->inputs().end(); ++iter) {
+ mInputLayerId.insert(std::make_pair(mInterpreter->tensor((*iter))->name, (*iter)));
+ }
} else {
- std::vector<std::string>::iterator iter;
mInputLayerId.clear();
- for (iter = mInputLayer.begin(); iter != mInputLayer.end();
+ for (auto iter = mInputLayers.begin(); iter != mInputLayers.end();
++iter) {
- LOGI("mInputLayer list [%s]", (*iter).c_str());
+ LOGI("mInputLayer list [%s]", (iter->first).c_str());
for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
++idx) {
if (mInterpreter->tensor(idx)->name == NULL)
continue;
- if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
- mInputLayerId.push_back(idx);
+ if ((iter->first).compare(mInterpreter->tensor(idx)->name) == 0) {
+ mInputLayerId.insert(std::make_pair(mInterpreter->tensor(idx)->name, idx));
break;
}
}
}
// output tensor
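+ // Likewise, rebuild the output name-to-index map from scratch.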
+ std::map<std::string, int>().swap(mOutputLayerId);
if (mInterpreter->outputs().size()) {
- mOutputLayerId = mInterpreter->outputs();
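+ // Map each declared output tensor's name to its tensor index.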
+ for (auto iter = mInterpreter->outputs().begin();
+ iter != mInterpreter->outputs().end(); ++iter) {
+ mOutputLayerId.insert(std::make_pair(mInterpreter->tensor((*iter))->name, (*iter)));
+ }
} else {
- std::vector<std::string>::iterator iter;
mOutputLayerId.clear();
- for (iter = mOutputLayer.begin(); iter != mOutputLayer.end();
+ for (auto iter = mOutputLayers.begin(); iter != mOutputLayers.end();
++iter) {
- LOGI("mOutputLayer list [%s]", (*iter).c_str());
+ LOGI("mOutputLayer list [%s]", iter->first.c_str());
for (unsigned int idx = 0; idx < mInterpreter->tensors_size();
++idx) {
if (mInterpreter->tensor(idx)->name == NULL)
continue;
- if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
- mOutputLayerId.push_back(idx);
+ if ((iter->first).compare(mInterpreter->tensor(idx)->name) == 0) {
+ mOutputLayerId.insert(std::make_pair(mInterpreter->tensor(idx)->name, idx));
break;
}
}
return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
}
- for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
- mInputAttrType.push_back(
- mInterpreter->tensor(mInputLayerId[idx])->type);
- }
-
return ret;
}
{
LOGI("ENTER");
- if (mInputTensorInfo.empty()) {
+ if (mInputLayers.empty()) {
SetInterpreterInfo();
}
void *pBuff = NULL;
- for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
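+ // Create one tensor buffer per registered input layer, keyed by layer name.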
+ for (auto iter_layer = mInputLayers.begin(); iter_layer != mInputLayers.end(); ++iter_layer) {
+ const int layerId = mInputLayerId.find(iter_layer->first)->second;
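+ // Element count is the product of the layer's shape dimensions.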
size_t size = 1;
inference_engine_tensor_buffer buffer;
- for (std::vector<size_t>::iterator iter =
- mInputTensorInfo[idx].shape.begin();
- iter != mInputTensorInfo[idx].shape.end(); ++iter) {
+ for (auto iter = (iter_layer->second).shape.begin();
+ iter != (iter_layer->second).shape.end(); ++iter) {
size *= (*iter);
}
- if (mInputAttrType[idx] == kTfLiteUInt8) {
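+ // Wrap the interpreter-owned tensor memory; only UINT8 and FLOAT32 inputs are handled here.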
+ if ((iter_layer->second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(
- mInputLayerId[idx]));
+ layerId));
pBuff = mInputData.back();
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
- } else if (mInputAttrType[idx] == kTfLiteFloat32) {
+ } else if ((iter_layer->second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
mInputData.push_back(
- mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
+ mInterpreter->typed_tensor<float>(layerId));
pBuff = mInputData.back();
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
1 };
LOGE("Not supported");
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
- buffers.insert(std::make_pair(mInputLayer[idx], buffer));
+ buffers.insert(std::make_pair(iter_layer->first, buffer));
}
return INFERENCE_ENGINE_ERROR_NONE;
{
void *pBuff = NULL;
- for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
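+ // Create one tensor buffer per registered output layer.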
+ for (auto iter_layer = mOutputLayers.begin(); iter_layer != mOutputLayers.end(); ++iter_layer) {
+ const int layerId = mOutputLayerId.find(iter_layer->first)->second;
inference_engine_tensor_buffer buffer;
size_t size = 1;
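+ // Size the buffer from the tensor dims reported by the interpreter.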
for (int idx2 = 0;
- idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size;
+ idx2 < mInterpreter->tensor(layerId)->dims->size;
++idx2) {
- size *= mInterpreter->tensor(mOutputLayerId[idx])
- ->dims->data[idx2];
+ size *= mInterpreter->tensor(layerId)->dims->data[idx2];
}
- if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
+ if (mInterpreter->tensor(layerId)->type ==
kTfLiteUInt8) {
LOGI("type is kTfLiteUInt8");
pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(
- mOutputLayerId[idx]);
+ layerId);
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
- } else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteInt64) {
+ } else if (mInterpreter->tensor(layerId)->type == kTfLiteInt64) {
LOGI("type is kTfLiteInt64");
- pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId[idx]);
+ pBuff = (void *) mInterpreter->typed_tensor<int64_t>(layerId);
buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
- } else if (mInterpreter->tensor(mOutputLayerId[idx])->type ==
+ } else if (mInterpreter->tensor(layerId)->type ==
kTfLiteFloat32) {
LOGI("type is kTfLiteFloat32");
pBuff = (void *) mInterpreter->typed_tensor<float>(
- mOutputLayerId[idx]);
+ layerId);
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4,
1 };
} else {
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
- buffers.insert(std::make_pair(mOutputLayer[idx], buffer));
+ buffers.insert(std::make_pair(mInterpreter->tensor(layerId)->name, buffer));
}
return INFERENCE_ENGINE_ERROR_NONE;
}
LOGI("ENTER");
SetInterpreterInfo();
- property.layer_names = mInputLayer;
- property.tensor_infos = mInputTensorInfo;
+ property.layers = mInputLayers;
LOGI("LEAVE");
{
LOGI("ENTER");
- std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
+ std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
- for (std::vector<int>::iterator iter = mOutputLayerId.begin();
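+ // Gather name, shape, and data type for every output tensor.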
+ for (auto iter = mOutputLayerId.begin();
iter != mOutputLayerId.end(); ++iter) {
- LOGI("output layer ID: %d", (*iter));
- if ((*iter) < 0) {
+ LOGI("output layer ID: %d", iter->second);
+ if (iter->second < 0) {
LOGE("Invalid output layer");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- mOutputLayer.push_back(mInterpreter->tensor((*iter))->name);
-
inference_engine_tensor_info tensor_info;
LOGI("mInterpreter->tensor((*iter))->dims name[%s]",
- mInterpreter->tensor((*iter))->name);
+ mInterpreter->tensor(iter->second)->name);
LOGI("mInterpreter->tensor((*iter))->dims size[%d]",
- mInterpreter->tensor((*iter))->dims->size);
+ mInterpreter->tensor(iter->second)->dims->size);
LOGI("mInterpreter->tensor((*iter))->dims type[%d]",
- mInterpreter->tensor((*iter))->type);
+ mInterpreter->tensor(iter->second)->type);
std::vector<size_t> shape_nhwc;
- for (int idx = 0; idx < mInterpreter->tensor((*iter))->dims->size;
+ for (int idx = 0; idx < mInterpreter->tensor(iter->second)->dims->size;
idx++) {
shape_nhwc.push_back(
- mInterpreter->tensor((*iter))->dims->data[idx]);
+ mInterpreter->tensor(iter->second)->dims->data[idx]);
}
//tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
tensor_info.shape = shape_nhwc;
tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
- if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+ if (mInterpreter->tensor(iter->second)->type == kTfLiteUInt8) {
LOGI("type is kTfLiteUInt8");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- } else if (mInterpreter->tensor((*iter))->type == kTfLiteInt64) {
+ } else if (mInterpreter->tensor(iter->second)->type == kTfLiteInt64) {
LOGI("type is kTfLiteInt64");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
- } else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
+ } else if (mInterpreter->tensor(iter->second)->type == kTfLiteFloat32) {
LOGI("type is kTfLiteFloat32");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
} else {
iter2 != tensor_info.shape.end(); ++iter2) {
tensor_info.size *= (*iter2);
}
- mOutputTensorInfo.push_back(tensor_info);
+ mOutputLayers.insert(std::make_pair(mInterpreter->tensor(iter->second)->name, tensor_info));
}
- property.layer_names = mOutputLayer;
- property.tensor_infos = mOutputTensorInfo;
+ property.layers = mOutputLayers;
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
inference_engine_layer_property &property)
{
LOGI("ENTER");
-
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("input layer name = %s", name.c_str());
+ for (auto iter = property.layers.begin(); iter != property.layers.end(); ++iter) {
+ LOGI("input layer name = %s", (iter->first).c_str());
}
-
- mInputLayer.clear();
- std::vector<std::string>().swap(mInputLayer);
-
- mInputTensorInfo.clear();
- std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
- mInputLayer = property.layer_names;
- mInputTensorInfo = property.tensor_infos;
+ std::map<std::string, inference_engine_tensor_info>().swap(mInputLayers);
+ mInputLayers = property.layers;
LOGI("LEAVE");
{
LOGI("ENTER");
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("output layer name = %s", name.c_str());
+ for (auto iter = property.layers.begin(); iter != property.layers.end(); ++iter) {
+ LOGI("input layer name = %s", (iter->first).c_str());
}
-
- mOutputLayer.clear();
- std::vector<std::string>().swap(mOutputLayer);
-
- mOutputLayer = property.layer_names;
+ std::map<std::string, inference_engine_tensor_info>().swap(mOutputLayers);
+ mOutputLayers = property.layers;
LOGI("LEAVE");
int InferenceTFLite::SetInterpreterInfo()
{
- if (mInputLayer.empty() || mInputTensorInfo.empty()) {
+ if (mInputLayers.empty()) {
LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
- mInputLayer.clear();
- std::vector<std::string>().swap(mInputLayer);
-
- mInputTensorInfo.clear();
- std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
-
+ mInputLayers.clear();
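+ // Fall back to the interpreter's own tensors to build default input layer info.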
for (auto iter = mInputLayerId.begin(); iter != mInputLayerId.end();
++iter) {
- mInputLayer.push_back(mInterpreter->tensor((*iter))->name);
std::vector<size_t> shape_nhwc;
for (int idx = 0;
- idx < mInterpreter->tensor((*iter))->dims->size; idx++) {
+ idx < mInterpreter->tensor(iter->second)->dims->size; idx++) {
shape_nhwc.push_back(
- mInterpreter->tensor((*iter))->dims->data[idx]);
+ mInterpreter->tensor(iter->second)->dims->data[idx]);
}
inference_engine_tensor_info tensor_info {
INFERENCE_TENSOR_DATA_TYPE_NONE, 1
};
- if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+ if (mInterpreter->tensor(iter->second)->type == kTfLiteUInt8) {
LOGI("type is kTfLiteUInt8");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- } else if (mInterpreter->tensor((*iter))->type ==
+ } else if (mInterpreter->tensor(iter->second)->type ==
kTfLiteFloat32) {
LOGI("type is kTfLiteFloat32");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
for (auto iter2 : tensor_info.shape) {
tensor_info.size *= iter2;
}
- mInputTensorInfo.push_back(tensor_info);
+ mInputLayers.insert(std::make_pair(mInterpreter->tensor(iter->second)->name, tensor_info));
}
}