Pass input tensor buffers from the Inference layer to the backend engine
author Inki Dae <inki.dae@samsung.com>
Thu, 13 Feb 2020 07:35:11 +0000 (16:35 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
This patch makes the InferenceEngineCommon layer pass input tensor
buffers allocated by the Inference layer to a backend engine for
inference. To that end, it adds several new callbacks and drops
unnecessary ones.
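
The resulting call flow, as a minimal sketch against the new interface
(FillInputBuffers is a hypothetical helper that writes the input data;
error handling is trimmed):

    InferenceEngineCommon *engine = ...; // already bound to a backend, model loaded
    std::vector<inference_engine_tensor_buffer> buffers;

    // Ask the backend for its input tensor buffers.
    engine->GetInputTensorBuffers(buffers);

    // Fill them with input data (hypothetical helper).
    FillInputBuffers(buffers);

    // Pass the same buffers back to the backend for inference.
    engine->Run(buffers);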

Change-Id: I03fd9aa9ce73cb2c1092a5a50d4147ff90e4462e
Signed-off-by: Inki Dae <inki.dae@samsung.com>
common/inference_engine_common_impl.cpp
include/inference_engine_common.h
include/inference_engine_common_impl.h
include/inference_engine_type.h

index 503ecc1ca98d424c9bcf712f69df848f651280ab..cd7edcd8d5c397e9672e53b787e996fbc51197c0 100755 (executable)
@@ -143,16 +143,6 @@ void InferenceEngineCommon::UnbindBackend(void)
     LOGW("LEAVE");
 }
 
-int InferenceEngineCommon::SetOutputTensorParamNodes(std::vector<std::string> nodes)
-{
-    LOGI("ENTER");
-    int ret = engine->SetOutputTensorParamNodes(nodes);
-    if (ret != INFERENCE_ENGINE_ERROR_NONE)
-        LOGE("Fail to SetOutputTensorParamNodes");
-    LOGI("LEAVE");
-    return ret;
-}
-
 int InferenceEngineCommon::SetTargetDevices(int types)
 {
     int ret = engine->SetTargetDevices(types);
@@ -170,44 +160,24 @@ int InferenceEngineCommon::Load(std::vector<std::string> model_paths, inference_
     if (ret != INFERENCE_ENGINE_ERROR_NONE)
         LOGE("Fail to load InferenceEngineVision");
 
-    ret = engine->CreateInputLayerPassage();
-    if (ret != INFERENCE_ENGINE_ERROR_NONE)
-        LOGE("Fail to load CreateInputLayerPassage");
-
     LOGI("LEAVE");
 
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceEngineCommon::GetInputLayerAttrType()
-{
-    return engine->GetInputLayerAttrType();
-}
-
-void * InferenceEngineCommon::GetInputDataPtr()
+int InferenceEngineCommon::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
 {
-    return engine->GetInputDataPtr();
+    return engine->GetInputTensorBuffers(buffers);
 }
 
-int InferenceEngineCommon::GetInputTensorProperty(inference_engine_layer_property *property)
+int InferenceEngineCommon::GetInputLayerProperty(inference_engine_layer_property &property)
 {
-    LOGI("ENTER");
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceEngineCommon::GetOutputTensorProperty(inference_engine_layer_property *property)
-{
-    LOGI("ENTER");
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
+    return engine->GetInputLayerProperty(property);
 }
 
-int InferenceEngineCommon::SetInputDataBuffer(tensor_t data)
+int InferenceEngineCommon::GetOutputLayerProperty(inference_engine_layer_property &property)
 {
-    return engine->SetInputDataBuffer(data);
+    return engine->GetOutputLayerProperty(property);
 }
 
 int InferenceEngineCommon::SetInputTensorProperty(inference_engine_layer_property &property)
@@ -231,42 +201,9 @@ int InferenceEngineCommon::GetBackendCapacity(inference_engine_capacity *capacit
     return engine->GetBackendCapacity(capacity);
 }
 
-int InferenceEngineCommon::Run()
+int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers)
 {
-    int ret = engine->Run();
-    if (ret != INFERENCE_ENGINE_ERROR_NONE)
-        LOGE("Fail to run InferenceEngineCommon");
-
-    return ret;
-}
-
-int InferenceEngineCommon::Run(std::vector<float> tensor)
-{
-    int ret = engine->Run(tensor);
-    if (ret != INFERENCE_ENGINE_ERROR_NONE)
-        LOGE("Fail to run InferenceEngineCommon");
-
-    return ret;
-}
-
-
-int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                               std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
-    LOGI("ENTER");
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceEngineCommon::SetInputTensorParamNode(std::string node)
-{
-    LOGE("ENTER");
-    int ret = engine->SetInputTensorParamNode(node);
-    if (ret != INFERENCE_ENGINE_ERROR_NONE)
-        LOGE("Fail to SetInputTensorParamNode");
-    LOGE("LEAVE");
-    return ret;
+    return engine->Run(input_buffers);
 }
 
 int InferenceEngineCommon::GetInferenceResult(tensor_t& results)
index 886d4314e2f757486c0b876129628319fbf1bf02..ed47a1c2239f0966f74333aee173adb4be440c1d 100755 (executable)
@@ -30,20 +30,6 @@ public:
 
     virtual ~IInferenceEngineCommon() {};
 
-    /**
-     * @brief Set an input node name. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    virtual int SetInputTensorParamNode(std::string node) = 0;
-
-    /**
-     * @brief Set output nodes' names. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    virtual int SetOutputTensorParamNodes(std::vector<std::string> nodes) = 0;
-
     /**
      * @brief Set target devices.
      * @details See #inference_target_type_e
@@ -60,46 +46,25 @@ public:
     virtual int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) { return 0; }
 
     /**
-     * @brief Create a memory. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    virtual int CreateInputLayerPassage() = 0;
-
-    /**
-     * @brief Get an input layer's type such as float32, float16, and so on. Deprecated.
+     * @brief Get input tensor buffers from a given backend engine.
      *
-     * @since_tizen 5.5
-     */
-    virtual int GetInputLayerAttrType() = 0;
-
-    /**
-     * @brief Get an input data pointer. Deprecated.
-     *
-     * @since_tizen 5.5
+     * @since_tizen 6.0
      */
-    virtual void* GetInputDataPtr() = 0;
+    virtual int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
 
     /**
      * @brief Get input layer property information from a given backend engine.
      *
      * @since_tizen 6.0
      */
-    virtual int GetInputTensorProperty(inference_engine_layer_property *property) { return 0; }
+    virtual int GetInputLayerProperty(inference_engine_layer_property &property) = 0;
 
     /**
      * @brief Get output layer property information from a given backend engine.
      *
      * @since_tizen 6.0
      */
-    virtual int GetOutputTensorProperty(inference_engine_layer_property *property) { return 0; }
-
-    /**
-     * @brief Set an input data buffer. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    virtual int SetInputDataBuffer(tensor_t data) = 0;
+    virtual int GetOutputLayerProperty(inference_engine_layer_property &property) = 0;
 
     /**
      * @brief Set input layer property information to a given backend engine.
@@ -123,21 +88,11 @@ public:
     virtual int GetBackendCapacity(inference_engine_capacity *capacity) = 0;
 
     /**
-     * @brief Run an inference. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    virtual int Run() = 0;
-
-    virtual int Run(std::vector<float> tensor) = 0;
-
-    /**
-     * @brief Run an inference with user-given input and output buffers.
+     * @brief Run an inference with user-given input buffers.
      *
      * @since_tizen 6.0
      */
-    virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                    std::vector<inference_engine_tensor_buffer> &output_buffers) { return 0; }
+    virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers) = 0;
 
     /**
      * @brief Get inference results. Deprecated.
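
With this change, GetInputTensorBuffers(), GetInputLayerProperty(),
GetOutputLayerProperty() and Run() become pure virtual, so every backend
has to provide them. A hypothetical skeleton (only the callbacks touched
by this patch are shown; the remaining pure virtuals and any namespace
qualifiers are omitted):

    #include "inference_engine_common.h"

    class MyBackend : public IInferenceEngineCommon {
    public:
        int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override {
            // Expose the backend's input tensor buffers to the caller.
            return INFERENCE_ENGINE_ERROR_NONE;
        }

        int GetInputLayerProperty(inference_engine_layer_property &property) override {
            // Describe the model's input layers.
            return INFERENCE_ENGINE_ERROR_NONE;
        }

        int GetOutputLayerProperty(inference_engine_layer_property &property) override {
            // Describe the model's output layers.
            return INFERENCE_ENGINE_ERROR_NONE;
        }

        int Run(std::vector<inference_engine_tensor_buffer> &input_buffers) override {
            // Consume the caller-filled input buffers and run inference.
            return INFERENCE_ENGINE_ERROR_NONE;
        }

        // Remaining pure virtuals (SetTargetDevices, GetBackendCapacity, ...)
        // omitted for brevity.
    };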
index a299205a5d9ab4e5180535f4e565616adddc7a7b..5bfe3d828607b04d795f6616e0b4224153b5c9aa 100755 (executable)
@@ -59,20 +59,6 @@ public:
 
     void UnbindBackend(void);
 
-    /**
-     * @brief Set input node name. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    int SetInputTensorParamNode(std::string node);
-
-    /**
-     * @brief Set output nodes' names. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    int SetOutputTensorParamNodes(std::vector<std::string> nodes);
-
     /**
      * @brief Set target devices.
      * @details See #inference_target_type_e
@@ -89,39 +75,25 @@ public:
     int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
 
     /**
-     * @brief Get an input layer's type such as float32, float16, and so on. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    int GetInputLayerAttrType();
-
-    /**
-     * @brief Get an input data pointer. Deprecated.
+     * @brief Get input tensor buffers from a given backend engine.
      *
-     * @since_tizen 5.5
+     * @since_tizen 6.0
      */
-    void* GetInputDataPtr();
+    int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
 
     /**
      * @brief Get input layer property information from a given backend engine.
      *
      * @since_tizen 6.0
      */
-    int GetInputTensorProperty(inference_engine_layer_property *property);
+    int GetInputLayerProperty(inference_engine_layer_property &property);
 
     /**
      * @brief Get output layer property information from a given backend engine.
      *
      * @since_tizen 6.0
      */
-    int GetOutputTensorProperty(inference_engine_layer_property *property);
-
-    /**
-     * @brief Set an input data buffer. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    int SetInputDataBuffer(tensor_t data);
+    int GetOutputLayerProperty(inference_engine_layer_property &property);
 
     /**
      * @brief Set input layer property information to a given backend engine.
@@ -144,22 +116,12 @@ public:
      */
     int GetBackendCapacity(inference_engine_capacity *capacity);
 
-    /**
-     * @brief Run an inference. Deprecated.
-     *
-     * @since_tizen 5.5
-     */
-    int Run();
-
-    int Run(std::vector<float> tensor);
-
     /**
      * @brief Run an inference with user-given input buffers.
      *
      * @since_tizen 6.0
      */
-    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-            std::vector<inference_engine_tensor_buffer> &output_buffers);
+    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers);
 
     /**
      * @brief Get inference results.
index 925605d524bac8a6e90993b691ef2bd414254eef..8f0c9fafb6d202baf467c2d2e159b91f2fb9f7b5 100644 (file)
@@ -74,9 +74,9 @@ typedef enum {
  *
  */
 typedef enum {
-    TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, number of channel, height, width. */
-    TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, number of channel. */
-} inference_tensor_shape_e;
+    TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, number of channels, height, width. */
+    TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, number of channels. */
+} inference_tensor_shape_type_e;
 
 /**
  * @brief Enumeration for tensor data type.
@@ -89,7 +89,7 @@ typedef enum {
     TENSOR_DATA_TYPE_FLOAT32,
     TENSOR_DATA_TYPE_UINT8,
     TENSOR_DATA_TYPE_UINT16,
-    TENSOR_DATA_TYPE_UINT32,
+    TENSOR_DATA_TYPE_UINT32
 } inference_tensor_data_type_e;
 
 #define INFERENCE_TARGET_MASK   (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU | INFERENCE_TARGET_CUSTOM)
@@ -154,9 +154,10 @@ typedef struct _inference_engine_tensor_buffer {
  * @since_tizen 6.0
  */
 typedef struct _inference_engine_tensor_info {
-    std::string layer_name; /**< a given layer's name */
-    std::vector<inference_tensor_shape_e> tensor_shape; /**< a tensor shape of the layer. */
-    inference_tensor_data_type_e tensor_type; /**< a tensor type of the layer. */
+    std::vector<int> shape; /**< a tensor shape. */
+    inference_tensor_shape_type_e shape_type; /**< a tensor shape type of the layer. */
+    inference_tensor_data_type_e data_type; /**< a tensor data type of the layer. */
+    size_t size; /**< a tensor buffer size. */
     // TODO.
 } inference_engine_tensor_info;
 
@@ -185,7 +186,7 @@ typedef struct _inference_engine_layer_property {
  */
 typedef struct _inference_engine_capacity {
     int supported_accel_devices;
-    inference_tensor_shape_e supported_tensor_shape;
+    inference_tensor_shape_type_e supported_tensor_shape_type;
     std::vector<std::string> supported_nn_models;
     // TODO.
 } inference_engine_capacity;
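
Assuming the new size member counts bytes, a caller could derive it from
shape and data_type along these lines (the byte-width mapping below is an
assumption for illustration, not part of this header):

    static size_t BytesPerElement(inference_tensor_data_type_e type)
    {
        switch (type) {
        case TENSOR_DATA_TYPE_FLOAT32: return 4;
        case TENSOR_DATA_TYPE_UINT8:   return 1;
        case TENSOR_DATA_TYPE_UINT16:  return 2;
        case TENSOR_DATA_TYPE_UINT32:  return 4;
        default:                       return 0;
        }
    }

    inference_engine_tensor_info info;
    info.shape = { 1, 3, 224, 224 };           // NCHW: 1 batch, 3 channels, 224x224
    info.shape_type = TENSOR_SHAPE_NCHW;
    info.data_type = TENSOR_DATA_TYPE_FLOAT32;

    info.size = BytesPerElement(info.data_type);
    for (int dim : info.shape)
        info.size *= dim;                      // 1 * 3 * 224 * 224 * 4 = 602112 bytes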