return engine->GetInputTensorBuffers(buffers);
}
+// Pass-through: forwards the request to the loaded backend engine and returns
+// its status code unchanged. NOTE(review): no null check on `engine` here —
+// this matches the sibling pass-through methods in this file; assumes a
+// backend was bound earlier (confirm against the loader path).
+int InferenceEngineCommon::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+ return engine->GetOutputTensorBuffers(buffers);
+}
+
int InferenceEngineCommon::GetInputLayerProperty(inference_engine_layer_property &property)
{
return engine->GetInputLayerProperty(property);
return engine->GetBackendCapacity(capacity);
}
-int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers)
+// Run one inference: forwards both buffer lists to the backend engine's Run()
+// and returns its status code unchanged. The extra output_buffers parameter
+// replaces the old single-argument overload (signature change, not additive).
+int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
{
- return engine->Run(input_buffers);
+ return engine->Run(input_buffers, output_buffers);
}
int InferenceEngineCommon::GetInferenceResult(tensor_t& results)
*/
virtual int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+ /**
+ * @brief Get output tensor buffers from a given backend engine.
+ *
+ * @since_tizen 6.0
+ * @param buffers Vector of output tensor buffers (presumed filled by the
+ * backend, i.e. [out] — mirrors GetInputTensorBuffers; confirm
+ * against backend implementations).
+ * @return Integer status code defined by the backend.
+ */
+ virtual int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+
/**
* @brief Get input layer property information from a given backend engine.
*
virtual int GetBackendCapacity(inference_engine_capacity *capacity) = 0;
/**
- * @brief Run an inference with user-given input buffers.
+ * @brief Run an inference with user-given input and output buffers.
*
* @since_tizen 6.0
+ * @param input_buffers Tensor buffers carrying the input data for the run.
+ * @param output_buffers Tensor buffers for the inference results (presumed
+ * [out]; confirm with backend implementations).
+ * @return Integer status code defined by the backend.
*/
- virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers) = 0;
+ virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
/**
* @brief Get inference results. Deprecated.
*/
int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
+ /**
+ * @brief Get an output tensor buffer/buffers.
+ *
+ * @since_tizen 6.0
+ * @param buffers Vector of output tensor buffers (presumed [out]; the
+ * implementation forwards this to the backend engine — confirm).
+ * @return Integer status code from the backend engine.
+ */
+ int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
+
/**
* @brief Get an input layer property information from a given backend engine.
*
*
* @since_tizen 6.0
+ * @param input_buffers Tensor buffers carrying the input data for the run.
+ * @param output_buffers Tensor buffers for the inference results (presumed
+ * [out]; the implementation forwards both to the backend — confirm).
+ * @return Integer status code from the backend engine.
*/
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers);
+ int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers);
/**
* @brief Get inference results.
*/
typedef struct _inference_engine_tensor_buffer {
void *buffer; /**< a buffer which contains tensor data. */
+ inference_tensor_data_type_e data_type; /**< data type of the elements stored in buffer (not a layer type). */
// TODO.
} inference_engine_tensor_buffer;