Pass output tensor buffers to backend engine for the inference
author: Inki Dae <inki.dae@samsung.com>
Fri, 14 Feb 2020 04:26:05 +0000 (13:26 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
Change-Id: I3c5ad1499daa1953874441bef42e62accf2bedc0
Signed-off-by: Inki Dae <inki.dae@samsung.com>
common/inference_engine_common_impl.cpp
include/inference_engine_common.h
include/inference_engine_common_impl.h
include/inference_engine_type.h

index cd7edcd8d5c397e9672e53b787e996fbc51197c0..4f3c8c71d538fd140d5b347009e1e2cb891f4110 100755 (executable)
@@ -170,6 +170,11 @@ int InferenceEngineCommon::GetInputTensorBuffers(std::vector<inference_engine_te
     return engine->GetInputTensorBuffers(buffers);
 }
 
+int InferenceEngineCommon::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+    return engine->GetOutputTensorBuffers(buffers);
+}
+
 int InferenceEngineCommon::GetInputLayerProperty(inference_engine_layer_property &property)
 {
     return engine->GetInputLayerProperty(property);
@@ -201,9 +206,10 @@ int InferenceEngineCommon::GetBackendCapacity(inference_engine_capacity *capacit
     return engine->GetBackendCapacity(capacity);
 }
 
-int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers)
+int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                                std::vector<inference_engine_tensor_buffer> &output_buffers)
 {
-    return engine->Run(input_buffers);
+    return engine->Run(input_buffers, output_buffers);
 }
 
 int InferenceEngineCommon::GetInferenceResult(tensor_t& results)
index ed47a1c2239f0966f74333aee173adb4be440c1d..b16b2144456d0c3f81daf21f03b278d533e8ac14 100755 (executable)
@@ -52,6 +52,13 @@ public:
      */
     virtual int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
 
+    /**
+     * @brief Get output tensor buffers from a given backend engine.
+     *
+     * @since_tizen 6.0
+     */
+    virtual int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+
     /**
      * @brief Get input layer property information from a given backend engine.
      *
@@ -88,11 +95,12 @@ public:
     virtual int GetBackendCapacity(inference_engine_capacity *capacity) = 0;
 
     /**
-     * @brief Run an inference with user-given input buffers.
+     * @brief Run an inference with user-given input and output buffers.
      *
      * @since_tizen 6.0
      */
-    virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers) = 0;
+    virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                    std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
 
     /**
      * @brief Get inference results. Deprecated.
index 5bfe3d828607b04d795f6616e0b4224153b5c9aa..f5be834ba49f9b56632269688c6a758d4dad2765 100755 (executable)
@@ -81,6 +81,13 @@ public:
      */
     int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
 
+    /**
+     * @brief Get an output tensor buffer/buffers.
+     *
+     * @since_tizen 6.0
+     */
+    int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
+
     /**
      * @brief Get an input layer property information from a given backend engine.
      *
@@ -121,7 +128,8 @@ public:
      *
      * @since_tizen 6.0
      */
-    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers);
+    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+            std::vector<inference_engine_tensor_buffer> &output_buffers);
 
     /**
      * @brief Get inference results.
index 8f0c9fafb6d202baf467c2d2e159b91f2fb9f7b5..7b5ef62533d39eacab4f8bf1e05199ec373d313c 100644 (file)
@@ -140,6 +140,7 @@ typedef struct _inference_engine_config {
  */
 typedef struct _inference_engine_tensor_buffer {
     void *buffer; /**< a buffer which contains tensor data. */
+    inference_tensor_data_type_e data_type; /**< a tensor type of the layer. */
     // TODO.
 } inference_engine_tensor_buffer;