FillLayerId(mInputLayerId, mInputLayers, mInterpreter->inputs());
FillLayerId(mOutputLayerId, mOutputLayers, mInterpreter->outputs());
+ for (auto& layer : mInputLayerId)
+ LOGI("Input layer : name(%s) -> idx(%d)", layer.first.c_str(), layer.second);
+
+ for (auto& layer : mOutputLayerId)
+ LOGI("Output layer : name(%s) -> idx(%d)", layer.first.c_str(), layer.second);
+
if (mInterpreter->AllocateTensors() != kTfLiteOk) {
LOGE("Fail to allocate tensor");
return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
{
LOGI("ENTER");
- SetInterpreterInfo();
+ mOutputLayers.clear();
+
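+ // Build output layer information (shape, shape type, data type, element count)
+ // directly from the interpreter's tensors.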
+ for (auto& layer : mOutputLayerId) {
+ if (layer.second < 0) {
+ LOGE("Invalid output layer ID [%d]", layer.second);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ TfLiteTensor *tensor = mInterpreter->tensor(layer.second);
+ inference_engine_tensor_info tensor_info;
+
+ LOGI("tensor(%d) : name[%s], dims->size[%d], type[%d]",
+ layer.second, tensor->name, tensor->dims->size, tensor->type);
+
+ std::vector<size_t> shape_nhwc;
+ for (int idx = 0; idx < tensor->dims->size; idx++)
+ shape_nhwc.push_back(tensor->dims->data[idx]);
+
+ // tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
+ tensor_info.shape = shape_nhwc;
+ tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
+
+ if (tensor->type == kTfLiteUInt8) {
+ LOGI("type is kTfLiteUInt8");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ } else if (tensor->type == kTfLiteInt8) {
+ LOGI("type is kTfLiteInt8");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT8;
+ } else if (tensor->type == kTfLiteInt64) {
+ LOGI("type is kTfLiteInt64");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
+ } else if (tensor->type == kTfLiteFloat32) {
+ LOGI("type is kTfLiteFloat32");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ } else {
+ LOGE("Tensor type(%d) is not supported", tensor->type);
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+ }
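+ // tensor_info.size is the total element count, i.e. the product of all shape dimensions.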
+ tensor_info.size = 1;
+
+ for (auto& dim : tensor_info.shape)
+ tensor_info.size *= dim;
+
+ mOutputLayers.insert(std::make_pair(tensor->name, tensor_info));
+ }
+
property.layers = mOutputLayers;
LOGI("LEAVE");
LOGI("ENTER");
for (auto& layer : property.layers)
- LOGI("input layer name = %s", layer.first.c_str());
+ LOGI("output layer name = %s", layer.first.c_str());
mOutputLayers.clear();
mOutputLayers = property.layers;
std::map<std::string, inference_engine_tensor_info>& layers,
const std::vector<int>& buffer)
{
+ LOGI("ENTER");
+
layerId.clear();
if (!buffer.empty()) {
- for (auto& idx : buffer)
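+ // The interpreter already provides the tensor indices, so map each tensor name to its index.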
+ for (auto& idx : buffer) {
+ LOGI("%s : %d", mInterpreter->tensor(idx)->name, idx);
layerId[mInterpreter->tensor(idx)->name] = idx;
+ }
+
return;
}
if (layerId.size() == layers.size())
break;
}
+
+ LOGI("LEAVE");
}
int InferenceTFLite::FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,