inference_model_format_e model_format) override;
int GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) override;
+ std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
int GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) override;
+ std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
// Retrieves metadata about the model's input layers into `property`.
// NOTE(review): only the input-side getter is visible in this chunk;
// an output-side counterpart presumably exists above — confirm.
int GetInputLayerProperty(
inference_engine_layer_property &property) override;
// Fills *capacity with this backend's capability info (out-parameter);
// exact fields reported are defined by inference_engine_capacity.
int GetBackendCapacity(inference_engine_capacity *capacity) override;
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
override;
private:
int CheckTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers);
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
// Maps an inference-engine tensor-type value onto the backend's own type
// enum — TODO confirm direction and error value of the mapping.
int ConvertTensorType(int tensor_type);
// Refreshes the cached tensor information (the ml_tensors_* handles
// below) — presumably called after a model is (re)loaded; confirm.
int UpdateTensorsInfo();
// NNStreamer/ML-API handles cached by this backend. Ownership and release
// (ml_tensors_*_destroy) are not visible in this chunk — presumably handled
// in the destructor; verify to avoid leaks.
ml_tensors_info_h mOutputInfoHandle;
ml_tensors_data_h mInputDataHandle;
ml_tensors_data_h mOutputDataHandle;
// Layer names explicitly designated by the caller, each mapped to an int —
// presumably the layer's tensor index within the model; TODO confirm the
// value's meaning at the assignment site.
// Resolved leftover diff markers: the std::vector<std::string> members were
// the pre-change form; the map members are the intended final state,
// consistent with the map-keyed buffer API of this class.
std::map<std::string, int> mDesignated_inputs;
std::map<std::string, int> mDesignated_outputs;
// Cached copies of the input/output layer properties — presumably populated
// by UpdateTensorsInfo()/SetInputLayerProperty-style calls; confirm.
inference_engine_layer_property mInputProperty;
inference_engine_layer_property mOutputProperty;
};