LOGW("LEAVE");
}
-int InferenceEngineCommon::SetOutputTensorParamNodes(std::vector<std::string> nodes)
-{
- LOGI("ENTER");
- int ret = engine->SetOutputTensorParamNodes(nodes);
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to SetOutputTensorParamNodes");
- LOGI("LEAVE");
- return ret;
-}
-
int InferenceEngineCommon::SetTargetDevices(int types)
{
int ret = engine->SetTargetDevices(types);
if (ret != INFERENCE_ENGINE_ERROR_NONE)
LOGE("Fail to load InferenceEngineVision");
- ret = engine->CreateInputLayerPassage();
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to load CreateInputLayerPassage");
-
LOGI("LEAVE");
return ret;
}
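+
+/* Usage sketch (illustrative only, not part of this patch): target device
+ * types follow #inference_target_type_e and can be OR-ed together, e.g.
+ *
+ *   common.SetTargetDevices(INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU);
+ *
+ * where `common` is an InferenceEngineCommon instance with a backend bound.
+ */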
-int InferenceEngineCommon::GetInputLayerAttrType()
-{
- return engine->GetInputLayerAttrType();
-}
-
-void * InferenceEngineCommon::GetInputDataPtr()
+int InferenceEngineCommon::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
{
- return engine->GetInputDataPtr();
+ return engine->GetInputTensorBuffers(buffers);
}
-int InferenceEngineCommon::GetInputTensorProperty(inference_engine_layer_property *property)
+int InferenceEngineCommon::GetInputLayerProperty(inference_engine_layer_property &property)
{
- LOGI("ENTER");
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceEngineCommon::GetOutputTensorProperty(inference_engine_layer_property *property)
-{
- LOGI("ENTER");
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
+ return engine->GetInputLayerProperty(property);
}
-int InferenceEngineCommon::SetInputDataBuffer(tensor_t data)
+int InferenceEngineCommon::GetOutputLayerProperty(inference_engine_layer_property &property)
{
- return engine->SetInputDataBuffer(data);
+ return engine->GetOutputLayerProperty(property);
}
int InferenceEngineCommon::SetInputTensorProperty(inference_engine_layer_property &property)
return engine->GetBackendCapacity(capacity);
}
-int InferenceEngineCommon::Run()
+int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers)
{
- int ret = engine->Run();
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to run InferenceEngineCommon");
-
- return ret;
-}
-
-int InferenceEngineCommon::Run(std::vector<float> tensor)
-{
- int ret = engine->Run(tensor);
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to run InferenceEngineCommon");
-
- return ret;
-}
-
-
-int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
- LOGI("ENTER");
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceEngineCommon::SetInputTensorParamNode(std::string node)
-{
- LOGE("ENTER");
- int ret = engine->SetInputTensorParamNode(node);
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to SetInputTensorParamNode");
- LOGE("LEAVE");
- return ret;
+ return engine->Run(input_buffers);
}
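+
+/* Usage sketch for the reworked Run() path (illustrative only; `common` is
+ * an InferenceEngineCommon instance with a model already loaded):
+ *
+ *   std::vector<inference_engine_tensor_buffer> buffers;
+ *   int ret = common.GetInputTensorBuffers(buffers);
+ *   if (ret == INFERENCE_ENGINE_ERROR_NONE) {
+ *       // Fill each buffer with input data, then run the inference.
+ *       ret = common.Run(buffers);
+ *   }
+ */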
int InferenceEngineCommon::GetInferenceResult(tensor_t& results)
virtual ~IInferenceEngineCommon() {};
- /**
- * @brief Set an input node name. Deprecated.
- *
- * @since_tizen 5.5
- */
- virtual int SetInputTensorParamNode(std::string node) = 0;
-
- /**
- * @brief Set output nodes' names. Deprecated.
- *
- * @since_tizen 5.5
- */
- virtual int SetOutputTensorParamNodes(std::vector<std::string> nodes) = 0;
-
/**
* @brief Set target devices.
* @details See #inference_target_type_e
virtual int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) { return 0; }
/**
- * @brief Create a memory. Deprecated.
- *
- * @since_tizen 5.5
- */
- virtual int CreateInputLayerPassage() = 0;
-
- /**
- * @brief Get an input layer's type such as float32, float16, and so on. Deprecated.
+ * @brief Get input tensor buffers from a given backend engine.
*
- * @since_tizen 5.5
- */
- virtual int GetInputLayerAttrType() = 0;
-
- /**
- * @brief Get an input data pointer. Deprecated.
- *
- * @since_tizen 5.5
+ * @since_tizen 6.0
*/
- virtual void* GetInputDataPtr() = 0;
+ virtual int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
/**
* @brief Get input layer property information from a given backend engine.
*
* @since_tizen 6.0
*/
- virtual int GetInputTensorProperty(inference_engine_layer_property *property) { return 0; }
+ virtual int GetInputLayerProperty(inference_engine_layer_property &property) = 0;
/**
* @brief Get output layer property information from a given backend engine.
*
* @since_tizen 6.0
*/
- virtual int GetOutputTensorProperty(inference_engine_layer_property *property) { return 0; }
-
- /**
- * @brief Set an input data buffer. Deprecated.
- *
- * @since_tizen 5.5
- */
- virtual int SetInputDataBuffer(tensor_t data) = 0;
+ virtual int GetOutputLayerProperty(inference_engine_layer_property &property) = 0;
/**
* @brief Set input layer property information to a given backend engine.
virtual int GetBackendCapacity(inference_engine_capacity *capacity) = 0;
/**
- * @brief Run an inference. Deprecated.
- *
- * @since_tizen 5.5
- */
- virtual int Run() = 0;
-
- virtual int Run(std::vector<float> tensor) = 0;
-
- /**
- * @brief Run an inference with user-given input and output buffers.
+ * @brief Run an inference with user-given input buffers.
*
* @since_tizen 6.0
*/
- virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers) { return 0; }
+ virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers) = 0;
/**
* @brief Get inference results. Deprecated.
void UnbindBackend(void);
- /**
- * @brief Set input node name. Deprecated.
- *
- * @since_tizen 5.5
- */
- int SetInputTensorParamNode(std::string node);
-
- /**
- * @brief Set output nodes' names. Deprecated.
- *
- * @since_tizen 5.5
- */
- int SetOutputTensorParamNodes(std::vector<std::string> nodes);
-
/**
* @brief Set target devices.
* @details See #inference_target_type_e
int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
/**
- * @brief Get an input layer's type such as float32, float16, and so on. Deprecated.
- *
- * @since_tizen 5.5
- */
- int GetInputLayerAttrType();
-
- /**
- * @brief Get an input data pointer. Deprecated.
+ * @brief Get input tensor buffers from a given backend engine.
*
- * @since_tizen 5.5
+ * @since_tizen 6.0
*/
- void* GetInputDataPtr();
+ int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
/**
* @brief Get input layer property information from a given backend engine.
*
* @since_tizen 6.0
*/
- int GetInputTensorProperty(inference_engine_layer_property *property);
+ int GetInputLayerProperty(inference_engine_layer_property &property);
/**
* @brief Get output layer property information from a given backend engine.
*
* @since_tizen 6.0
*/
- int GetOutputTensorProperty(inference_engine_layer_property *property);
-
- /**
- * @brief Set an input data buffer. Deprecated.
- *
- * @since_tizen 5.5
- */
- int SetInputDataBuffer(tensor_t data);
+ int GetOutputLayerProperty(inference_engine_layer_property &property);
/**
* @brief Set input layer property information to a given backend engine.
*/
int GetBackendCapacity(inference_engine_capacity *capacity);
- /**
- * @brief Run an inference. Deprecated.
- *
- * @since_tizen 5.5
- */
- int Run();
-
- int Run(std::vector<float> tensor);
-
/**
* @brief Run an inference with user-given input buffers.
*
* @since_tizen 6.0
*/
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers);
+ int Run(std::vector<inference_engine_tensor_buffer> &input_buffers);
/**
* @brief Get inference results.
*
*/
typedef enum {
- TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, number of channel, height, width. */
- TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, number of channel. */
-} inference_tensor_shape_e;
+	TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, number of channels, height, width. */
+	TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, number of channels. */
+} inference_tensor_shape_type_e;
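+
+/* For example, a single 224x224 RGB image is laid out as { 1, 3, 224, 224 }
+ * under TENSOR_SHAPE_NCHW and as { 1, 224, 224, 3 } under TENSOR_SHAPE_NHWC
+ * (illustrative shapes, not mandated by this header).
+ */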
/**
* @brief Enumeration for tensor data type.
TENSOR_DATA_TYPE_FLOAT32,
TENSOR_DATA_TYPE_UINT8,
TENSOR_DATA_TYPE_UINT16,
- TENSOR_DATA_TYPE_UINT32,
+ TENSOR_DATA_TYPE_UINT32
} inference_tensor_data_type_e;
#define INFERENCE_TARGET_MASK (INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU | INFERENCE_TARGET_CUSTOM)
* @since_tizen 6.0
*/
typedef struct _inference_engine_tensor_info {
- std::string layer_name; /**< a given layer's name */
- std::vector<inference_tensor_shape_e> tensor_shape; /**< a tensor shape of the layer. */
- inference_tensor_data_type_e tensor_type; /**< a tensor type of the layer. */
+	std::vector<int> shape; /**< a tensor shape. */
+	inference_tensor_shape_type_e shape_type; /**< a tensor shape type of the layer. */
+	inference_tensor_data_type_e data_type; /**< a tensor data type of the layer. */
+	size_t size; /**< a tensor buffer size. */
// TODO.
} inference_engine_tensor_info;
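+
+/* Example (illustrative): an input layer taking one 224x224 RGB image as
+ * float32 in NCHW order could be described as follows, assuming `size`
+ * holds the buffer size in bytes:
+ *
+ *   inference_engine_tensor_info info;
+ *   info.shape = { 1, 3, 224, 224 };
+ *   info.shape_type = TENSOR_SHAPE_NCHW;
+ *   info.data_type = TENSOR_DATA_TYPE_FLOAT32;
+ *   info.size = 1 * 3 * 224 * 224 * sizeof(float); // 602112 bytes
+ */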
*/
typedef struct _inference_engine_capacity {
int supported_accel_devices;
- inference_tensor_shape_e supported_tensor_shape;
+ inference_tensor_shape_type_e supported_tensor_shape_type;
std::vector<std::string> supported_nn_models;
// TODO.
} inference_engine_capacity;
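+
+/* Example (illustrative): a backend could report its capacity as
+ *
+ *   inference_engine_capacity capacity;
+ *   capacity.supported_accel_devices = INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU;
+ *   capacity.supported_tensor_shape_type = TENSOR_SHAPE_NCHW;
+ *   capacity.supported_nn_models = { "tflite" }; // hypothetical model identifier
+ */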