int InferenceTFLite::SetInterpreterInfo()
{
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
LOGI("ENTER");
+
if (mInputLayers.empty()) {
LOGI("mInputLayer is empty. layers and tensors that mInterpreter has will be returned.");
- mInputLayers.clear();
- for (auto& layer : mInputLayerId) {
-
- std::vector<size_t> shape_nhwc;
-
- for (int idx = 0;
- idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
- shape_nhwc.push_back(
- mInterpreter->tensor(layer.second)->dims->data[idx]);
- }
-
- inference_engine_tensor_info tensor_info {
- shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
- INFERENCE_TENSOR_DATA_TYPE_NONE, 1
- };
-
- if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
- LOGI("type is kTfLiteUInt8");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- } else if (mInterpreter->tensor(layer.second)->type ==
- kTfLiteFloat32) {
- LOGI("type is kTfLiteFloat32");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- } else {
- LOGE("Not supported");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- for (auto& dim : tensor_info.shape) {
- tensor_info.size *= dim;
- }
- mInputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
- }
+ ret = FillLayer(mInputLayers, mInputLayerId);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
}
if (mOutputLayers.empty()) {
LOGI("mOutputLayers is empty. layers and tensors that mInterpreter has will be returned.");
-
- mOutputLayers.clear();
- for (auto& layer : mOutputLayerId) {
-
- std::vector<size_t> shape_nhwc;
-
- for (int idx = 0;
- idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
- shape_nhwc.push_back(
- mInterpreter->tensor(layer.second)->dims->data[idx]);
- }
-
- inference_engine_tensor_info tensor_info {
- shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
- INFERENCE_TENSOR_DATA_TYPE_NONE, 1
- };
-
- if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
- LOGI("type is kTfLiteUInt8");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- } else if (mInterpreter->tensor(layer.second)->type ==
- kTfLiteFloat32) {
- LOGI("type is kTfLiteFloat32");
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- } else {
- LOGE("Not supported");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- for (auto& dim : tensor_info.shape) {
- tensor_info.size *= dim;
- }
- mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
- }
+ ret = FillLayer(mOutputLayers, mOutputLayerId);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
}
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
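+
+ // Populate layers with the tensor information (shape, data type and element
+ // count) of every layer id in layerId, as reported by mInterpreter.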
+ int InferenceTFLite::FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
+ std::map<std::string, int>& layerId)
+ {
+ layers.clear();
+ for (auto& layer : layerId) {
+
+ std::vector<size_t> shape_nhwc;
+
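+ // Collect the tensor dimensions from the interpreter; they are treated as NHWC below.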
+ for (int idx = 0;
+ idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
+ shape_nhwc.push_back(
+ mInterpreter->tensor(layer.second)->dims->data[idx]);
+ }
+
+ inference_engine_tensor_info tensor_info {
+ shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
+ INFERENCE_TENSOR_DATA_TYPE_NONE, 1
+ };
+
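+ // Map the TFLite tensor type to the corresponding engine data type;
+ // only UINT8 and FLOAT32 are handled.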
+ switch (mInterpreter->tensor(layer.second)->type)
+ {
+ case kTfLiteUInt8:
+ LOGI("type is kTfLiteUInt8");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ break;
+ case kTfLiteFloat32:
+ LOGI("type is kTfLiteFloat32");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ break;
+ default:
+ LOGE("Not supported");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
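+ // The element count is the product of all dimensions.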
+ for (auto& dim : tensor_info.shape) {
+ tensor_info.size *= dim;
+ }
+ layers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+
+ }
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
}
extern "C"
{
class IInferenceEngineCommon *EngineCommonInit(void)