}
int InferenceTFLite::GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
LOGE("Not supported");
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
- buffers.push_back(buffer);
+ buffers.insert(std::make_pair(mInputLayer[idx], buffer));
}
return INFERENCE_ENGINE_ERROR_NONE;
}
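
With the map-based signature, each input buffer is now keyed by its layer name (mInputLayer[idx]) rather than by its position in a vector. A minimal caller-side sketch of the new lookup, assuming the InferenceTFLite and inference-engine-interface headers are available; FillNamedInput, the layer name, and the error code used for a missing layer are illustrative assumptions, not code from this patch:

#include <cstring>   // std::memcpy
#include <map>
#include <string>

// Hypothetical helper: copy preprocessed data into the input tensor that the
// backend exposes under layer_name.
static int FillNamedInput(InferenceTFLite &engine, const std::string &layer_name,
                          const void *data, size_t data_size)
{
    std::map<std::string, inference_engine_tensor_buffer> inputs;

    int ret = engine.GetInputTensorBuffers(inputs);
    if (ret != INFERENCE_ENGINE_ERROR_NONE)
        return ret;

    // Look the tensor up by layer name instead of relying on vector ordering.
    auto it = inputs.find(layer_name);
    if (it == inputs.end())
        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;  // assumed error code

    inference_engine_tensor_buffer &buf = it->second;
    std::memcpy(buf.buffer, data, data_size < buf.size ? data_size : buf.size);
    return INFERENCE_ENGINE_ERROR_NONE;
}
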
int InferenceTFLite::GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
void *pBuff = NULL;
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
- buffers.push_back(buffer);
+ buffers.insert(std::make_pair(mOutputLayer[idx], buffer));
}
return INFERENCE_ENGINE_ERROR_NONE;
}
}
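
On the output side the map is keyed by mOutputLayer names, so a frontend no longer needs to know the model's output ordering; note that std::map iterates in lexicographic key order, not graph order. A small sketch that dumps every output tensor by name, assuming an engine handle and the size/data_type fields of inference_engine_tensor_buffer as defined in inference-engine-interface:

std::map<std::string, inference_engine_tensor_buffer> outputs;

if (engine.GetOutputTensorBuffers(outputs) == INFERENCE_ENGINE_ERROR_NONE) {
    for (const auto &kv : outputs) {
        // kv.first is the output layer name, kv.second its preallocated buffer.
        LOGI("output '%s': %zu bytes, data_type %d", kv.first.c_str(),
             kv.second.size, static_cast<int>(kv.second.data_type));
    }
}
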
int InferenceTFLite::Run(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
{
LOGI("ENTER");
TfLiteStatus status = mInterpreter->Invoke();
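
Putting it together, the new Run() contract takes both maps, so a frontend drives one inference roughly as follows; the engine handle, layer names, and trimmed error handling are assumptions for illustration, not the actual frontend code:

std::map<std::string, inference_engine_tensor_buffer> inputs, outputs;

engine.GetInputTensorBuffers(inputs);    // keyed by input layer names
engine.GetOutputTensorBuffers(outputs);  // keyed by output layer names

// ... fill the desired input by name, e.g. via FillNamedInput() above ...

if (engine.Run(inputs, outputs) == INFERENCE_ENGINE_ERROR_NONE) {
    auto it = outputs.find("output");    // assumed output layer name
    if (it != outputs.end()) {
        // Interpret it->second.buffer according to it->second.data_type
        // and it->second.size.
    }
}

Keying the buffers by name also lets the backend bind each buffer to the matching TFLite tensor inside Run() without assuming the caller preserved any particular order.
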
inference_model_format_e model_format) override;
int GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) override;
+ std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
int GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) override;
+ std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
int GetInputLayerProperty(
inference_engine_layer_property &property) override;
int GetBackendCapacity(inference_engine_capacity *capacity) override;
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
override;
private: