#include "inference_engine_type.h"
-namespace InferenceEngineInterface {
-namespace Common {
-
-class IInferenceEngineCommon {
-public:
-
- virtual ~IInferenceEngineCommon() {};
-
- /**
- * @brief Set backend private data if needed.
- * @details This callback passes backend private data to a given backend.
- * For example, the ML Single API backend needs to know which tensor filter type of NNStreamer should be used, such as NNFW or VIVANTE.
- *
- * @since_tizen 6.0
- * @param[in] data This could be a backend specific data object.
- */
- virtual int SetPrivateData(void *data) = 0;
-
- /**
- * @brief Set target devices.
- * @details See #inference_target_type_e
- * This callback passes given device types - CPU, GPU, CUSTOM or a combined one if a backend engine supports hybrid inference - to a backend engine.
- * Some backend engines can optimize a given NN model at the graph level, such as dropping or fusing operations targeting
- * a given device or devices when the backend engine loads the NN model file.
- *
- * @since_tizen 5.5
- * @param[in] types This could be one or more among device types enumerated on inference_target_type_e.
- */
- virtual int SetTargetDevices(int types) = 0;
-
- /**
- * @brief Request to load model data with user-given model file information.
- * @details This callback requests a backend engine to load given model files for inference.
- * The backend engine should load the given model files according to a given model file format.
- *
- * @since_tizen 6.0
- * @param[in] model_paths The upper framework should add the full path strings needed according to a given model format.
- * @param[in] model_format It indicates what kind of model files should be passed to a backend engine.
- */
- virtual int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) = 0;
-
- /**
- * @brief Get input tensor buffers from a given backend engine.
- * @details This callback requests input tensor buffers from a backend engine.
- * If the backend engine is able to allocate the input tensor buffers internally, then
- * it has to add the input tensor buffers to the buffers vector. By doing this, the upper layer
- * will request an inference with the input tensor buffers.
- * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
- * the upper layer can allocate input tensor buffers according to the input layer property.
- * As for the input layer property, see the GetInputLayerProperty callback.
- *
- * @since_tizen 6.0
- * @param[out] buffers A backend engine should add input tensor buffers allocated by itself to the buffers vector.
- * Otherwise, it should leave the buffers vector empty.
- */
- virtual int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
-
- /**
- * @brief Get output tensor buffers from a given backend engine.
- * @details This callback requests output tensor buffers from a backend engine.
- * If the backend engine is able to allocate the output tensor buffers internally, then
- * it has to add the output tensor buffers to the buffers vector. By doing this, the upper layer
- * will request an inference with the output tensor buffers.
- * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
- * the upper layer can allocate output tensor buffers according to the output layer property.
- * As for the output layer property, see the GetOutputLayerProperty callback.
- *
- * @since_tizen 6.0
- * @param[out] buffers A backend engine should add output tensor buffers allocated by itself to the buffers vector.
- * Otherwise, it should leave the buffers vector empty.
- */
- virtual int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) = 0;
-
- /**
- * @brief Get input layer property information from a given backend engine.
- * @details This callback requests information from a backend engine about the layers given by
- * the SetInputLayerProperty callback.
- * If the user wants to specify the input layer that the backend engine starts from,
- * then the user can call the SetInputLayerProperty callback with the desired layer information.
- * If the user doesn't specify the input layer, then the backend engine should add tensor information
- * of the first layer in the model graph by default.
- *
- * @since_tizen 6.0
- * @param[out] property A backend engine should add the tensor information to be used as the input layer.
- */
- virtual int GetInputLayerProperty(inference_engine_layer_property &property) = 0;
-
- /**
- * @brief Get output layer property information from a given backend engine.
- * @details This callback requests information from a backend engine about the layers given by
- * the SetOutputLayerProperty callback.
- * If the user wants to specify the output layer that the backend engine stores the inference result to,
- * then the user can call the SetOutputLayerProperty callback with the desired layer information.
- * If the user doesn't specify the output layer, then the backend engine should add tensor information
- * of the last layer in the model graph by default.
- *
- * @since_tizen 6.0
- * @param[out] property A backend engine should add the tensor information to be used as the output layer.
- */
- virtual int GetOutputLayerProperty(inference_engine_layer_property &property) = 0;
-
- /**
- * @brief Set input layer property information to a given backend engine.
- * @details This callback passes given input layer information to a backend engine,
- * for when the user wants to start the inference from a given layer in the model graph.
- * The backend engine should find the input layer in the model graph with the given information
- * and then set that layer as the input layer.
- *
- * @since_tizen 6.0
- * @param[in] property The user should set the layer information to be used as the input layer.
- */
- virtual int SetInputLayerProperty(inference_engine_layer_property &property) = 0;
-
- /**
- * @brief Set output layer property information to a given backend engine.
- * @details This callback passes given output layer information to a backend engine,
- * for when the user wants to get the inference result from a given layer in the model graph.
- * The backend engine should find the output layer in the model graph with the given information
- * and then set that layer as the output layer.
- *
- * @since_tizen 6.0
- * @param[in] property The user should set the layer information to be used as the output layer.
- */
- virtual int SetOutputLayerProperty(inference_engine_layer_property &property) = 0;
-
- /**
- * @brief Get capacity from a given backend engine.
- * @details This callback requests the supported features and constraints of a given backend engine.
- * Upper layer should call this callback just after the backend engine library is loaded.
- *
- * @since_tizen 6.0
- * @param[out] capacity A backend engine should add the features and constraints it has.
- */
- virtual int GetBackendCapacity(inference_engine_capacity *capacity) = 0;
-
- /**
- * @brief Run an inference with user-given input and output buffers.
- * @details This callback requests a backend engine to run inference with the given input and output tensor buffers.
- * The input and output tensor buffers can be allocated by either the backend engine or the upper layer, depending on
- * the backend engine. So the upper layer needs to make sure that the buffers are cleaned up properly.
- *
- * @since_tizen 6.0
- * @param[in] input_buffers It contains tensor buffers to be used as input layer.
- * @param[in] output_buffers It contains tensor buffers to be used as output layer.
- */
- virtual int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
-};
-
-typedef void destroy_t(IInferenceEngineCommon*);
-typedef IInferenceEngineCommon* init_t(void);
+namespace InferenceEngineInterface
+{
+namespace Common
+{
+ class IInferenceEngineCommon
+ {
+ public:
+ virtual ~IInferenceEngineCommon() {};
+
+ /**
+ * @brief Set backend private data if needed.
+ * @details This callback passes backend private data to a given backend.
+ * For example, the ML Single API backend needs to know which tensor filter type of NNStreamer should be used, such as NNFW or VIVANTE.
+ *
+ * @since_tizen 6.0
+ * @param[in] data This could be a backend specific data object.
+ */
+ virtual int SetPrivateData(void *data) = 0;
+
+ /**
+ * @brief Set target devices.
+ * @details See #inference_target_type_e
+ * This callback passes given device types - CPU, GPU, CUSTOM or a combined one if a backend engine supports hybrid inference - to a backend engine.
+ * Some backend engines can optimize a given NN model at the graph level, such as dropping or fusing operations targeting
+ * a given device or devices when the backend engine loads the NN model file.
+ *
+ * @since_tizen 5.5
+ * @param[in] types This could be one or more among device types enumerated on inference_target_type_e.
+ */
+ virtual int SetTargetDevices(int types) = 0;
+
+ /**
+ * @brief Request to load model data with user-given model file information.
+ * @details This callback requests a backend engine to load given model files for inference.
+ * The backend engine should load the given model files according to a given model file format.
+ *
+ * @since_tizen 6.0
+ * @param[in] model_paths The upper framework should add the full path strings needed according to a given model format.
+ * @param[in] model_format It indicates what kind of model files should be passed to a backend engine.
+ */
+ virtual int Load(std::vector<std::string> model_paths,
+ inference_model_format_e model_format) = 0;
+
+ /**
+ * @brief Get input tensor buffers from a given backend engine.
+ * @details This callback requests input tensor buffers from a backend engine.
+ * If the backend engine is able to allocate the input tensor buffers internally, then
+ * it has to add the input tensor buffers to the buffers vector. By doing this, the upper layer
+ * will request an inference with the input tensor buffers.
+ * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
+ * the upper layer can allocate input tensor buffers according to the input layer property.
+ * As for the input layer property, see the GetInputLayerProperty callback.
+ *
+ * @since_tizen 6.0
+ * @param[out] buffers A backend engine should add input tensor buffers allocated by itself to the buffers vector.
+ * Otherwise, it should leave the buffers vector empty.
+ */
+ virtual int GetInputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+
+ /**
+ * @brief Get output tensor buffers from a given backend engine.
+ * @details This callback requests output tensor buffers from a backend engine.
+ * If the backend engine is able to allocate the output tensor buffers internally, then
+ * it has to add the output tensor buffers to the buffers vector. By doing this, the upper layer
+ * will request an inference with the output tensor buffers.
+ * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
+ * the upper layer can allocate output tensor buffers according to the output layer property.
+ * As for the output layer property, see the GetOutputLayerProperty callback.
+ *
+ * @since_tizen 6.0
+ * @param[out] buffers A backend engine should add output tensor buffers allocated by itself to the buffers vector.
+ * Otherwise, it should leave the buffers vector empty.
+ */
+ virtual int GetOutputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+
+ /**
+ * @brief Get input layer property information from a given backend engine.
+ * @details This callback requests information from a backend engine about the layers given by
+ * the SetInputLayerProperty callback.
+ * If the user wants to specify the input layer that the backend engine starts from,
+ * then the user can call the SetInputLayerProperty callback with the desired layer information.
+ * If the user doesn't specify the input layer, then the backend engine should add tensor information
+ * of the first layer in the model graph by default.
+ *
+ * @since_tizen 6.0
+ * @param[out] property A backend engine should add the tensor information to be used as the input layer.
+ */
+ virtual int
+ GetInputLayerProperty(inference_engine_layer_property &property) = 0;
+
+ /**
+ * @brief Get output layer property information from a given backend engine.
+ * @details This callback requests information from a backend engine about the layers given by
+ * the SetOutputLayerProperty callback.
+ * If the user wants to specify the output layer that the backend engine stores the inference result to,
+ * then the user can call the SetOutputLayerProperty callback with the desired layer information.
+ * If the user doesn't specify the output layer, then the backend engine should add tensor information
+ * of the last layer in the model graph by default.
+ *
+ * @since_tizen 6.0
+ * @param[out] property A backend engine should add the tensor information to be used as the output layer.
+ */
+ virtual int
+ GetOutputLayerProperty(inference_engine_layer_property &property) = 0;
+
+ /**
+ * @brief Set input layer property information to a given backend engine.
+ * @details This callback passes given input layer information to a backend engine,
+ * for when the user wants to start the inference from a given layer in the model graph.
+ * The backend engine should find the input layer in the model graph with the given information
+ * and then set that layer as the input layer.
+ *
+ * @since_tizen 6.0
+ * @param[in] property The user should set the layer information to be used as the input layer.
+ */
+ virtual int
+ SetInputLayerProperty(inference_engine_layer_property &property) = 0;
+
+ /**
+ * @brief Set output layer property information to a given backend engine.
+ * @details This callback passes given output layer information to a backend engine,
+ * for when the user wants to get the inference result from a given layer in the model graph.
+ * The backend engine should find the output layer in the model graph with the given information
+ * and then set that layer as the output layer.
+ *
+ * @since_tizen 6.0
+ * @param[in] property The user should set the layer information to be used as the output layer.
+ */
+ virtual int
+ SetOutputLayerProperty(inference_engine_layer_property &property) = 0;
+
+ /**
+ * @brief Get capacity from a given backend engine.
+ * @details This callback requests the supported features and constraints of a given backend engine.
+ * Upper layer should call this callback just after the backend engine library is loaded.
+ *
+ * @since_tizen 6.0
+ * @param[out] capacity A backend engine should add the features and constraints it has.
+ */
+ virtual int GetBackendCapacity(inference_engine_capacity *capacity) = 0;
+
+ /**
+ * @brief Run an inference with user-given input and output buffers.
+ * @details This callback requests a backend engine to run inference with the given input and output tensor buffers.
+ * The input and output tensor buffers can be allocated by either the backend engine or the upper layer, depending on
+ * the backend engine. So the upper layer needs to make sure that the buffers are cleaned up properly.
+ *
+ * @since_tizen 6.0
+ * @param[in] input_buffers It contains tensor buffers to be used as input layer.
+ * @param[in] output_buffers It contains tensor buffers to be used as output layer.
+ */
+ virtual int
+ Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
+ };
+
+ typedef void destroy_t(IInferenceEngineCommon *);
+ typedef IInferenceEngineCommon *init_t(void);
} /* Common */
} /* InferenceEngineInterface */
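/*
 * A minimal sketch of how a backend library might implement the interface
 * above and export the init_t/destroy_t entry points. EngineInit is the
 * entry point name resolved via dlopen() according to the BindBackend()
 * documentation below; the destroy symbol name used here is an assumption
 * for illustration only.
 *
 * @code
 * using namespace InferenceEngineInterface::Common;
 *
 * class MyBackend : public IInferenceEngineCommon {
 * 	// ... override all of the pure virtual callbacks declared above ...
 * };
 *
 * extern "C" IInferenceEngineCommon *EngineInit(void)
 * {
 * 	return new MyBackend(); // matches init_t
 * }
 *
 * extern "C" void EngineCommonDestroy(IInferenceEngineCommon *engine)
 * {
 * 	delete engine; // matches destroy_t
 * }
 * @endcode
 */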
using namespace InferenceEngineInterface::Profiler;
-namespace InferenceEngineInterface {
-namespace Common {
-
-class InferenceEngineCommon {
-public:
-
- InferenceEngineCommon();
-
- ~InferenceEngineCommon();
-
- /**
- * @brief Load a backend engine library with a given backend name.
- * @details This callback loads a backend engine library with a given backend name.
- * In order to find a backend engine library corresponding to the given backend name,
- * this function builds the full name of the library file from the given backend name.
- * After that, it opens the library file by calling the dlopen function to find an entry point
- * function - EngineInit - of the actual backend library.
- *
- * @since_tizen 6.0
- * @param[in] config Configuration data needed to load a backend library.
- */
- int BindBackend(inference_engine_config *config);
-
- /**
- * @brief Load a backend engine library with a given backend type.
- * @details This callback loads a backend engine library with a given backend type.
- * In order to find a backend engine library corresponding to the given backend type,
- * this function builds the full name of the library file from the given backend type.
- * After that, it opens the library file by calling the dlopen function to find an entry point
- * function - EngineInit - of the actual backend library.
- *
- * @since_tizen 6.0
- * @param[in] backend_type An enumeration value which indicates one of the backend types - refer to inference_backend_type_e.
- */
- int BindBackend(int backend_type);
-
- /**
- * @brief Unload a backend engine library.
- * @details This callback unloads a backend engine library.
- *
- * @since_tizen 6.0
- */
- void UnbindBackend(void);
-
- /**
- * @brief Set target devices.
- * @details See #inference_target_type_e
- * This callback passes given device types - CPU, GPU, CUSTOM or a combined one if a backend engine supports hybrid inference - to a backend engine.
- * Some backend engines can optimize a given NN model at the graph level, such as dropping or fusing operations targeting
- * a given device or devices when the backend engine loads the NN model file.
- *
- * @since_tizen 5.5
- * @param[in] types This could be one or more among device types enumerated on inference_target_type_e.
- */
- int SetTargetDevices(int types);
-
- /**
- * @brief Request to load model data with user-given model file information.
- * @details This callback requests a backend engine to load given model files for inference.
- * The backend engine should load the given model files according to a given model file format.
- *
- * @since_tizen 6.0
- * @param[in] model_paths The upper framework should add the full path strings needed according to a given model format.
- * @param[in] model_format It indicates what kind of model files should be passed to a backend engine.
- */
- int Load(std::vector<std::string> model_paths, inference_model_format_e model_format);
-
- /**
- * @brief Get input tensor buffers from a given backend engine.
- * @details This callback requests input tensor buffers from a backend engine.
- * If the backend engine is able to allocate the input tensor buffers internally, then
- * it has to add the input tensor buffers to the buffers vector. By doing this, the upper layer
- * will request an inference with the input tensor buffers.
- * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
- * the upper layer can allocate input tensor buffers according to the input layer property.
- * As for the input layer property, see the GetInputLayerProperty callback.
- *
- * @since_tizen 6.0
- * @param[out] buffers A backend engine should add input tensor buffers allocated by itself to the buffers vector.
- * Otherwise, it should leave the buffers vector empty.
- */
- int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
-
- /**
- * @brief Get output tensor buffers from a given backend engine.
- * @details This callback requests output tensor buffers from a backend engine.
- * If the backend engine is able to allocate the output tensor buffers internally, then
- * it has to add the output tensor buffers to the buffers vector. By doing this, the upper layer
- * will request an inference with the output tensor buffers.
- * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
- * the upper layer can allocate output tensor buffers according to the output layer property.
- * As for the output layer property, see the GetOutputLayerProperty callback.
- *
- * @since_tizen 6.0
- * @param[out] buffers A backend engine should add output tensor buffers allocated by itself to the buffers vector.
- * Otherwise, it should leave the buffers vector empty.
- */
- int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
-
- /**
- * @brief Get input layer property information from a given backend engine.
- * @details This callback requests information from a backend engine about the layers given by
- * the SetInputLayerProperty callback.
- * If the user wants to specify the input layer that the backend engine starts from,
- * then the user can call the SetInputLayerProperty callback with the desired layer information.
- * If the user doesn't specify the input layer, then the backend engine should add tensor information
- * of the first layer in the model graph by default.
- *
- * @since_tizen 6.0
- * @param[out] property A backend engine should add the tensor information to be used as the input layer.
- */
- int GetInputLayerProperty(inference_engine_layer_property &property);
-
- /**
- * @brief Get output layer property information from a given backend engine.
- * @details This callback requests information from a backend engine about the layers given by
- * the SetOutputLayerProperty callback.
- * If the user wants to specify the output layer that the backend engine stores the inference result to,
- * then the user can call the SetOutputLayerProperty callback with the desired layer information.
- * If the user doesn't specify the output layer, then the backend engine should add tensor information
- * of the last layer in the model graph by default.
- *
- * @since_tizen 6.0
- * @param[out] property A backend engine should add the tensor information to be used as the output layer.
- */
- int GetOutputLayerProperty(inference_engine_layer_property &property);
-
- /**
- * @brief Set input layer property information to a given backend engine.
- * @details This callback passes given input layer information to a backend engine,
- * for when the user wants to start the inference from a given layer in the model graph.
- * The backend engine should find the input layer in the model graph with the given information
- * and then set that layer as the input layer.
- *
- * @since_tizen 6.0
- * @param[in] property The user should set the layer information to be used as the input layer.
- */
- int SetInputLayerProperty(inference_engine_layer_property &property);
-
- /**
- * @brief Set output layer property information to a given backend engine.
- * @details This callback passes given output layer information to a backend engine,
- * for when the user wants to get the inference result from a given layer in the model graph.
- * The backend engine should find the output layer in the model graph with the given information
- * and then set that layer as the output layer.
- *
- * @since_tizen 6.0
- * @param[in] property The user should set the layer information to be used as the output layer.
- */
- int SetOutputLayerProperty(inference_engine_layer_property &property);
-
- /**
- * @brief Get capacity from a given backend engine.
- * @details This callback requests the supported features and constraints of a given backend engine.
- * Upper layer should call this callback just after the backend engine library is loaded.
- *
- * @since_tizen 6.0
- * @param[out] capacity A backend engine should add the features and constraints it has.
- */
- int GetBackendCapacity(inference_engine_capacity *capacity);
-
- /**
- * @brief Run an inference with user-given input and output buffers.
- * @details This callback requests a backend engine to run inference with the given input and output tensor buffers.
- * The input and output tensor buffers can be allocated by either the backend engine or the upper layer, depending on
- * the backend engine. So the upper layer needs to make sure that the buffers are cleaned up properly.
- *
- * @since_tizen 6.0
- * @param[in] input_buffers It contains tensor buffers to be used as input layer.
- * @param[in] output_buffers It contains tensor buffers to be used as output layer.
- */
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers);
-
- /**
- * @brief Enable or disable Inference engine profiler.
- * @details This function is used by the inference engine user to control the inference engine profiler.
- * If the user wants to collect profile data by themselves, then they can disable the inference engine profiler
- * by calling EnableProfiler(false) and then call the InferenceEngineProfiler interfaces directly.
- *
- * @since_tizen 6.0
- * @param[in] enable Whether to use the inference engine profiler or not; this can be true or false.
- */
- int EnableProfiler(bool enable);
-
- /**
- * @brief Dump profile data to console screen.
- * @details This function is used to print out profile data on the console screen.
- *
- * @since_tizen 6.0
- */
- int DumpProfileToConsole(void);
-
- /**
- * @brief Dump profile data to a file.
- * @details This function is used to store profile data to a file.
- *
- * @since_tizen 6.0
- * @param[in] filename A file name for profile data to be stored in.
- */
- int DumpProfileToFile(const std::string filename = "dump.txt");
-
-private:
- int InitBackendEngine(const std::string &backend_path, int backend_type);
- int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
- int CheckLayerProperty(inference_engine_layer_property &property);
-
- inference_backend_type_e mSelectedBackendEngine;
-
- // Profiler
- InferenceEngineProfiler *mProfiler;
- // By default, the profiler is used.
- bool mUseProfiler;
- unsigned int mProfilerDumpType;
-
-protected:
- void *mBackendModule;
- IInferenceEngineCommon *mBackendHandle;
-
-};
+namespace InferenceEngineInterface
+{
+namespace Common
+{
+ class InferenceEngineCommon
+ {
+ public:
+ InferenceEngineCommon();
+
+ ~InferenceEngineCommon();
+
+ /**
+ * @brief Load a backend engine library with a given backend name.
+ * @details This callback loads a backend engine library with a given backend name.
+ * In order to find a backend engine library corresponding to the given backend name,
+ * this function builds the full name of the library file from the given backend name.
+ * After that, it opens the library file by calling the dlopen function to find an entry point
+ * function - EngineInit - of the actual backend library.
+ *
+ * @since_tizen 6.0
+ * @param[in] config Configuration data needed to load a backend library.
+ */
+ int BindBackend(inference_engine_config *config);
+
+ /**
+ * @brief Load a backend engine library with a given backend type.
+ * @details This callback loads a backend engine library with a given backend type.
+ * In order to find a backend engine library corresponding to the given backend type,
+ * this function builds the full name of the library file from the given backend type.
+ * After that, it opens the library file by calling the dlopen function to find an entry point
+ * function - EngineInit - of the actual backend library.
+ *
+ * @since_tizen 6.0
+ * @param[in] backend_type An enumeration value which indicates one of the backend types - refer to inference_backend_type_e.
+ */
+ int BindBackend(int backend_type);
+
+ /**
+ * @brief Unload a backend engine library.
+ * @details This callback unloads a backend engine library.
+ *
+ * @since_tizen 6.0
+ */
+ void UnbindBackend(void);
+
+ /**
+ * @brief Set target devices.
+ * @details See #inference_target_type_e
+ * This callback passes given device types - CPU, GPU, CUSTOM or a combined one if a backend engine supports hybrid inference - to a backend engine.
+ * Some backend engines can optimize a given NN model at the graph level, such as dropping or fusing operations targeting
+ * a given device or devices when the backend engine loads the NN model file.
+ *
+ * @since_tizen 5.5
+ * @param[in] types This could be one or more among device types enumerated on inference_target_type_e.
+ */
+ int SetTargetDevices(int types);
+
+ /**
+ * @brief Request to load model data with user-given model file information.
+ * @details This callback requests a backend engine to load given model files for inference.
+ * The backend engine should load the given model files according to a given model file format.
+ *
+ * @since_tizen 6.0
+ * @param[in] model_paths The upper framework should add the full path strings needed according to a given model format.
+ * @param[in] model_format It indicates what kind of model files should be passed to a backend engine.
+ */
+ int Load(std::vector<std::string> model_paths,
+ inference_model_format_e model_format);
+
+ /**
+ * @brief Get input tensor buffers from a given backend engine.
+ * @details This callback requests input tensor buffers from a backend engine.
+ * If the backend engine is able to allocate the input tensor buffers internally, then
+ * it has to add the input tensor buffers to the buffers vector. By doing this, the upper layer
+ * will request an inference with the input tensor buffers.
+ * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
+ * the upper layer can allocate input tensor buffers according to the input layer property.
+ * As for the input layer property, see the GetInputLayerProperty callback.
+ *
+ * @since_tizen 6.0
+ * @param[out] buffers A backend engine should add input tensor buffers allocated by itself to the buffers vector.
+ * Otherwise, it should leave the buffers vector empty.
+ */
+ int GetInputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers);
+
+ /**
+ * @brief Get output tensor buffers from a given backend engine.
+ * @details This callback requests output tensor buffers from a backend engine.
+ * If the backend engine is able to allocate the output tensor buffers internally, then
+ * it has to add the output tensor buffers to the buffers vector. By doing this, the upper layer
+ * will request an inference with the output tensor buffers.
+ * Otherwise, the backend engine should just return INFERENCE_ENGINE_ERROR_NONE so that
+ * the upper layer can allocate output tensor buffers according to the output layer property.
+ * As for the output layer property, see the GetOutputLayerProperty callback.
+ *
+ * @since_tizen 6.0
+ * @param[out] buffers A backend engine should add output tensor buffers allocated by itself to the buffers vector.
+ * Otherwise, it should leave the buffers vector empty.
+ */
+ int GetOutputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers);
+
+ /**
+ * @brief Get input layer property information from a given backend engine.
+ * @details This callback requests information from a backend engine about the layers given by
+ * the SetInputLayerProperty callback.
+ * If the user wants to specify the input layer that the backend engine starts from,
+ * then the user can call the SetInputLayerProperty callback with the desired layer information.
+ * If the user doesn't specify the input layer, then the backend engine should add tensor information
+ * of the first layer in the model graph by default.
+ *
+ * @since_tizen 6.0
+ * @param[out] property A backend engine should add the tensor information to be used as the input layer.
+ */
+ int GetInputLayerProperty(inference_engine_layer_property &property);
+
+ /**
+ * @brief Get output layer property information from a given backend engine.
+ * @details This callback requests information from a backend engine about the layers given by
+ * the SetOutputLayerProperty callback.
+ * If the user wants to specify the output layer that the backend engine stores the inference result to,
+ * then the user can call the SetOutputLayerProperty callback with the desired layer information.
+ * If the user doesn't specify the output layer, then the backend engine should add tensor information
+ * of the last layer in the model graph by default.
+ *
+ * @since_tizen 6.0
+ * @param[out] property A backend engine should add the tensor information to be used as the output layer.
+ */
+ int GetOutputLayerProperty(inference_engine_layer_property &property);
+
+ /**
+ * @brief Set input layer property information to a given backend engine.
+ * @details This callback passes given input layer information to a backend engine,
+ * for when the user wants to start the inference from a given layer in the model graph.
+ * The backend engine should find the input layer in the model graph with the given information
+ * and then set that layer as the input layer.
+ *
+ * @since_tizen 6.0
+ * @param[in] property The user should set the layer information to be used as the input layer.
+ */
+ int SetInputLayerProperty(inference_engine_layer_property &property);
+
+ /**
+ * @brief Set output layer property information to a given backend engine.
+ * @details This callback passes given output layer information to a backend engine,
+ * for when the user wants to get the inference result from a given layer in the model graph.
+ * The backend engine should find the output layer in the model graph with the given information
+ * and then set that layer as the output layer.
+ *
+ * @since_tizen 6.0
+ * @param[in] property The user should set the layer information to be used as the output layer.
+ */
+ int SetOutputLayerProperty(inference_engine_layer_property &property);
+
+ /**
+ * @brief Get capacity from a given backend engine.
+ * @details This callback requests the supported features and constraints of a given backend engine.
+ * Upper layer should call this callback just after the backend engine library is loaded.
+ *
+ * @since_tizen 6.0
+ * @param[out] capacity A backend engine should add the features and constraints it has.
+ */
+ int GetBackendCapacity(inference_engine_capacity *capacity);
+
+ /**
+ * @brief Run an inference with user-given input and output buffers.
+ * @details This callback requests a backend engine to run inference with the given input and output tensor buffers.
+ * The input and output tensor buffers can be allocated by either the backend engine or the upper layer, depending on
+ * the backend engine. So the upper layer needs to make sure that the buffers are cleaned up properly.
+ *
+ * @since_tizen 6.0
+ * @param[in] input_buffers It contains tensor buffers to be used as input layer.
+ * @param[in] output_buffers It contains tensor buffers to be used as output layer.
+ */
+ int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers);
+
+ /**
+ * @brief Enable or disable Inference engine profiler.
+ * @details This function is used by the inference engine user to control the inference engine profiler.
+ * If the user wants to collect profile data by themselves, then they can disable the inference engine profiler
+ * by calling EnableProfiler(false) and then call the InferenceEngineProfiler interfaces directly.
+ *
+ * @since_tizen 6.0
+ * @param[in] enable Whether to use the inference engine profiler or not; this can be true or false.
+ */
+ int EnableProfiler(bool enable);
+
+ /**
+ * @brief Dump profile data to console screen.
+ * @details This function is used to print out profile data on the console screen.
+ *
+ * @since_tizen 6.0
+ */
+ int DumpProfileToConsole(void);
+
+ /**
+ * @brief Dump profile data to a file.
+ * @details This function is used to store profile data to a file.
+ *
+ * @since_tizen 6.0
+ * @param[in] filename A file name for profile data to be stored in.
+ */
+ int DumpProfileToFile(const std::string filename = "dump.txt");
+
+ private:
+ int InitBackendEngine(const std::string &backend_path,
+ int backend_type);
+ int CheckTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers);
+ int CheckLayerProperty(inference_engine_layer_property &property);
+
+ inference_backend_type_e mSelectedBackendEngine;
+
+ // Profiler
+ InferenceEngineProfiler *mProfiler;
+ // By default, the profiler is used.
+ bool mUseProfiler;
+ unsigned int mProfilerDumpType;
+
+ protected:
+ void *mBackendModule;
+ IInferenceEngineCommon *mBackendHandle;
+ };
} /* Common */
} /* InferenceEngineInterface */
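/*
 * A minimal usage sketch of the InferenceEngineCommon wrapper above, assuming
 * a TFLite backend; the backend name, backend_type value and model path are
 * illustrative only.
 *
 * @code
 * using namespace InferenceEngineInterface::Common;
 *
 * inference_engine_config config = { "tflite", 0, INFERENCE_TARGET_CPU };
 * InferenceEngineCommon engine;
 *
 * if (engine.BindBackend(&config) != INFERENCE_ENGINE_ERROR_NONE)
 * 	return;
 *
 * engine.SetTargetDevices(config.target_devices);
 *
 * std::vector<std::string> models = { "/path/to/model.tflite" };
 * engine.Load(models, INFERENCE_MODEL_TFLITE);
 *
 * // The buffers are either filled in by the backend or left empty so that
 * // the caller allocates them from the layer properties.
 * std::vector<inference_engine_tensor_buffer> inputs, outputs;
 * engine.GetInputTensorBuffers(inputs);
 * engine.GetOutputTensorBuffers(outputs);
 *
 * engine.Run(inputs, outputs);
 * engine.UnbindBackend();
 * @endcode
 */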
#include "inference_engine_type.h"
-namespace InferenceEngineInterface {
-namespace Profiler {
-
-/**
- * @brief Enumeration for how to dump profile data.
- *
- * @since_tizen 6.0
- *
- */
-enum {
- IE_PROFILER_DUMP_MIN,
- // Profile data will be printed out on console screen.
- IE_PROFILER_DUMP_CONSOLE,
- // Profile data will be stored on a given file.
- IE_PROFILER_DUMP_FILE,
- IE_PROFILER_DUMP_MAX
-};
-
-/**
- * @brief Enumeration for profile types.
- *
- * @since_tizen 6.0
- *
- */
-enum {
- IE_PROFILER_MIN,
- // Measure performance in milliseconds.
- IE_PROFILER_LATENCY,
- // Measure physical memory usage.
- IE_PROFILER_MEMORY,
- IE_PROFILER_MAX
-};
-
-/**
- * @brief Enumeration for the dump format of profile data.
- *
- * @since_tizen 6.0
- *
- */
-enum {
- IE_PROFILER_DUMP_FORMAT_MIN,
- // Store profiling data to a given file in Markdown syntax[1]
- // [1] https://daringfireball.net/projects/markdown/syntax
- IE_PROFILER_DUMP_FORMAT_MARKDOWN,
- IE_PROFILER_DUMP_FORMAT_MAX
-};
-
-/**
- * @brief A structure containing the inference environment.
- * @details This structure contains inference information which describes the environment the inference is performed on.
- *
- * @since_tizen 6.0
- */
-typedef struct _ProfileEnv {
- std::string backend_name; /**< backend name such as armnn, tflite, opencv and dldt. */
- std::string model_name; /**< model name which contains full path of a given model file. */
- unsigned int target_devices; /**< Hardware type the inference will be performed on. */
-} ProfileEnv;
-
-/**
- * @brief A structure containing profiled elapsed time data.
- * @details This structure contains profiling data while in inference.
- *
- * @since_tizen 6.0
- */
-typedef struct _ProfileData {
- unsigned int env_idx; /**< An index of v_mProfileEnv vector.*/
- std::string function_name; /**< A function name targeted for profiling. */
- unsigned int elapsed_time; /**< A latency indicating how long a given function took to run. */
-} ProfileData;
-
-/**
- * @brief A structure containing profiled memory usage data.
- * @details This structure contains profiling data while in inference.
- *
- * @since_tizen 6.0
- */
-typedef struct _MemoryData {
- long rss; /**< The number of physical pages consumed by the current process. */
- long gpu_memory; /**< The number of physical pages consumed by the GPU device. */
- // TODO.
-} MemoryData;
-
-/**
- * @brief A class representing the profiler.
- * @details The interfaces of this class will be called by the InferenceEngineCommon class.
- *
- * @since_tizen 6.0
- */
-class InferenceEngineProfiler {
-public:
- InferenceEngineProfiler();
- ~InferenceEngineProfiler();
-
+namespace InferenceEngineInterface
+{
+namespace Profiler
+{
/**
- * @brief Set backend name.
- * @details It will be set in BindBackend callback of InferenceEngineCommon object
- * to indicate which backend - armnn, opencv, tflite or dldt - the inference will be performed by.
+ * @brief Enumeration for how to dump profile data.
*
* @since_tizen 6.0
- * @param[in] name A backend name.
- */
- void AddBackendName(std::string &name) { mProfileEnv.backend_name = name; }
-
- /**
- * @brief Set model name.
- * @details It will be set in Load callback of InferenceEngineCommon object to indicate which pre-trained model
- * the inference will be performed on.
*
- * @since_tizen 6.0
- * @param[in] name A full path to model file.
*/
- void AddModelName(std::string &name) { mProfileEnv.model_name = name; }
+ enum {
+ IE_PROFILER_DUMP_MIN,
+ // Profile data will be printed out on console screen.
+ IE_PROFILER_DUMP_CONSOLE,
+ // Profile data will be stored on a given file.
+ IE_PROFILER_DUMP_FILE,
+ IE_PROFILER_DUMP_MAX
+ };
/**
- * @brief Set target devices the inference runs on.
- * @details It will be set in SetTargetDevices callback of InferenceEngineCommon object to indicate
- * which hardware - CPU or GPU - the inference will be performed on.
+ * @brief Enumeration for profile types.
*
* @since_tizen 6.0
- * @param[in] devices A target device type. Please refer to the inference_target_type_e enumeration of inference_engine_type.h.
+ *
*/
- void AddTargetDevices(unsigned int devices) { mProfileEnv.target_devices = devices; }
+ enum {
+ IE_PROFILER_MIN,
+ // Measure performance in milliseconds.
+ IE_PROFILER_LATENCY,
+ // Measure physical memory usage.
+ IE_PROFILER_MEMORY,
+ IE_PROFILER_MAX
+ };
/**
- * @brief Add inference env. information to a vector member, v_mProfileEnv.
- * @details It will be called in Load callback of InferenceEngineCommon object to add the inference env. information
- * collected so far to the vector member, which will be used to look up the inference env. information
- * when dumping profile data.
+ * @brief Enumeration for the dump format of profile data.
*
* @since_tizen 6.0
+ *
*/
- void PushEnv(void) { v_mProfileEnv.push_back(mProfileEnv); mEnvNum++; }
+ enum {
+ IE_PROFILER_DUMP_FORMAT_MIN,
+ // Store profiling data to a given file in Markdown syntax[1]
+ // [1] https://daringfireball.net/projects/markdown/syntax
+ IE_PROFILER_DUMP_FORMAT_MARKDOWN,
+ IE_PROFILER_DUMP_FORMAT_MAX
+ };
/**
- * @brief Start profiling with a given profile type.
- * @details It will be called at the top of a callback function of InferenceEngineCommon object to collect profile data.
+ * @brief A structure containing the inference environment.
+ * @details This structure contains inference information which describes the environment the inference is performed on.
*
* @since_tizen 6.0
- * @param[in] type Profile type which can be IE_PROFILER_LATENCY or IE_PROFILER_MEMORY for now.
*/
- void Start(const unsigned int type);
+ typedef struct _ProfileEnv {
+ std::string backend_name; /**< backend name such as armnn, tflite, opencv and dldt. */
+ std::string model_name; /**< model name which contains full path of a given model file. */
+ unsigned int target_devices; /**< Hardware type the inference will be performed on. */
+ } ProfileEnv;
/**
- * @brief Stop profiling for a given profile type.
- * @details It will be called at the bottom of a callback function of InferenceEngineCommon object to collect profile data.
+ * @brief A structure containing profiled elapsed time data.
+ * @details This structure contains profiling data while in inference.
*
* @since_tizen 6.0
- * @param[in] type Profile type which can be IE_PROFILER_LATENCY or IE_PROFILER_MEMORY for now.
- * @param[in] func_name A function name to be profiled.
*/
- void Stop(const unsigned int type, const char *func_name = "Unknown");
+ typedef struct _ProfileData {
+ unsigned int env_idx; /**< An index of v_mProfileEnv vector.*/
+ std::string function_name; /**< A function name targeted for profiling. */
+ unsigned int elapsed_time; /**< A latency indicating how long a given function took to run. */
+ } ProfileData;
/**
- * @brief Dump profiled data to console or a given file.
- * @details It will be called in the destructor of InferenceEngineCommon object to dump all of the collected profile data.
+ * @brief A structure containing profiled memory usage data.
+ * @details This structure contains profiling data while in inference.
*
* @since_tizen 6.0
- * @param[in] dump_type A dump type which can be IE_PROFILER_DUMP_CONSOLE or IE_PROFILER_DUMP_FILE for now.
*/
- void Dump(const unsigned int dump_type);
+ typedef struct _MemoryData {
+ long rss; /**< The number of physical pages consumed by the current process. */
+ long gpu_memory; /**< The number of physical pages consumed by the GPU device. */
+ // TODO.
+ } MemoryData;
/**
- * @brief Set user-given dump file name.
- * @details If a file name is set using this function, then profiled data will be stored in the given file.
+ * @brief A class representing the profiler.
+ * @details The interfaces of this class will be called by the InferenceEngineCommon class.
*
* @since_tizen 6.0
- * @param[in] filename A user-given dump file name.
*/
- void SetDumpFilename(const std::string filename) { mDumpFilename = filename; }
-
-private:
- void PushData(ProfileData &data);
- struct timespec GetTimeDiff(struct timespec &start, struct timespec &end);
- unsigned long ConvertMillisec(const struct timespec &time);
- void GetMemoryUsage(MemoryData &data);
- void DumpToConsole(void);
- void DumpToFile(const unsigned int dump_type, std::string filename);
-
- struct timespec mStartTime, mEndTime;
- unsigned int mEnvNum;
- ProfileEnv mProfileEnv;
- std::vector<ProfileEnv> v_mProfileEnv;
- std::vector<ProfileData> v_mProfileData;
- std::map<const char *, const void *> m_mDataTable;
- std::string mDumpFilename;
- MemoryData mStartMemoryData;
- MemoryData mEndMemoryData;
-};
+ class InferenceEngineProfiler
+ {
+ public:
+ InferenceEngineProfiler();
+ ~InferenceEngineProfiler();
+
+ /**
+ * @brief Set backend name.
+ * @details It will be set in BindBackend callback of InferenceEngineCommon object
+ * to indicate which backend - armnn, opencv, tflite or dldt - the inference will be performed by.
+ *
+ * @since_tizen 6.0
+ * @param[in] name A backend name.
+ */
+ void AddBackendName(std::string &name)
+ {
+ mProfileEnv.backend_name = name;
+ }
+
+ /**
+ * @brief Set model name.
+ * @details It will be set in Load callback of InferenceEngineCommon object to indicate which pre-trained model
+ * the inference will be performed on.
+ *
+ * @since_tizen 6.0
+ * @param[in] name A full path to model file.
+ */
+ void AddModelName(std::string &name)
+ {
+ mProfileEnv.model_name = name;
+ }
+
+ /**
+ * @brief Set target devices the inference runs on.
+ * @details It will be set in SetTargetDevices callback of InferenceEngineCommon object to indicate
+ * which hardware - CPU or GPU - the inference will be performed on.
+ *
+ * @since_tizen 6.0
+ * @param[in] devices A target device type. Please refer to the inference_target_type_e enumeration of inference_engine_type.h.
+ */
+ void AddTargetDevices(unsigned int devices)
+ {
+ mProfileEnv.target_devices = devices;
+ }
+
+ /**
+ * @brief Add inference env. information to a vector member, v_mProfileEnv.
+ * @details It will be called in Load callback of InferenceEngineCommon object to add the inference env. information
+ * collected so far to the vector member, which will be used to look up the inference env. information
+ * when dumping profile data.
+ *
+ * @since_tizen 6.0
+ */
+ void PushEnv(void)
+ {
+ v_mProfileEnv.push_back(mProfileEnv);
+ mEnvNum++;
+ }
+
+ /**
+ * @brief Start profiling with a given profile type.
+ * @details It will be called at the top of a callback function of InferenceEngineCommon object to collect profile data.
+ *
+ * @since_tizen 6.0
+ * @param[in] type Profile type which can be IE_PROFILER_LATENCY or IE_PROFILER_MEMORY for now.
+ */
+ void Start(const unsigned int type);
+
+ /**
+ * @brief Stop profiling for a given profile type.
+ * @details It will be called at the bottom of a callback function of InferenceEngineCommon object to collect profile data.
+ *
+ * @since_tizen 6.0
+ * @param[in] type Profile type which can be IE_PROFILER_LATENCY or IE_PROFILER_MEMORY for now.
+ * @param[in] func_name A function name to be profiled.
+ */
+ void Stop(const unsigned int type, const char *func_name = "Unknown");
+
+ /**
+ * @brief Dump profiled data to console or a given file.
+ * @details It will be called in the destructor of InferenceEngineCommon object to dump all of the collected profile data.
+ *
+ * @since_tizen 6.0
+ * @param[in] dump_type A dump type which can be IE_PROFILER_DUMP_CONSOLE or IE_PROFILER_DUMP_FILE for now.
+ */
+ void Dump(const unsigned int dump_type);
+
+ /**
+ * @brief Set user-given dump file name.
+ * @details If a file name is set using this function, then profiled data will be stored in the given file.
+ *
+ * @since_tizen 6.0
+ * @param[in] filename A user-given dump file name.
+ */
+ void SetDumpFilename(const std::string filename)
+ {
+ mDumpFilename = filename;
+ }
+
+ private:
+ void PushData(ProfileData &data);
+ struct timespec GetTimeDiff(struct timespec &start,
+ struct timespec &end);
+ unsigned long ConvertMillisec(const struct timespec &time);
+ void GetMemoryUsage(MemoryData &data);
+ void DumpToConsole(void);
+ void DumpToFile(const unsigned int dump_type, std::string filename);
+
+ struct timespec mStartTime, mEndTime;
+ unsigned int mEnvNum;
+ ProfileEnv mProfileEnv;
+ std::vector<ProfileEnv> v_mProfileEnv;
+ std::vector<ProfileData> v_mProfileData;
+ std::map<const char *, const void *> m_mDataTable;
+ std::string mDumpFilename;
+ MemoryData mStartMemoryData;
+ MemoryData mEndMemoryData;
+ };
} /* Profiler */
} /* InferenceEngineInterface */
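/*
 * A sketch of how a caller such as InferenceEngineCommon might drive the
 * profiler around one of the backend callbacks, based on the Start/Stop/Dump
 * contract documented above.
 *
 * @code
 * using namespace InferenceEngineInterface::Profiler;
 *
 * InferenceEngineProfiler profiler;
 *
 * profiler.Start(IE_PROFILER_LATENCY);
 * // ... run a backend callback such as Load() or Run() here ...
 * profiler.Stop(IE_PROFILER_LATENCY, "Run");
 *
 * profiler.Dump(IE_PROFILER_DUMP_CONSOLE);
 * @endcode
 */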
#include <tizen.h>
#ifdef __cplusplus
-extern "C" {
+extern "C"
+{
#endif /* __cplusplus */
-/**
- * @file inference_engine_error.h
- * @brief This file contains error types required by
- * the inference engine.
-*/
-
-typedef enum {
- INFERENCE_ENGINE_ERROR_NONE
- = TIZEN_ERROR_NONE, /**< Successful */
- INFERENCE_ENGINE_ERROR_NOT_SUPPORTED
- = TIZEN_ERROR_NOT_SUPPORTED, /**< Not supported */
- INFERENCE_ENGINE_ERROR_MSG_TOO_LONG
- = TIZEN_ERROR_MSG_TOO_LONG, /**< Message too long */
- INFERENCE_ENGINE_ERROR_NO_DATA
- = TIZEN_ERROR_NO_DATA, /**< No data */
- INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE
- = TIZEN_ERROR_KEY_NOT_AVAILABLE, /**< Key not available */
- INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY
- = TIZEN_ERROR_OUT_OF_MEMORY, /**< Out of memory */
- INFERENCE_ENGINE_ERROR_INVALID_PARAMETER
- = TIZEN_ERROR_INVALID_PARAMETER, /**< Invalid parameter */
- INFERENCE_ENGINE_ERROR_INVALID_OPERATION
- = TIZEN_ERROR_INVALID_OPERATION, /**< Invalid operation */
- INFERENCE_ENGINE_ERROR_PERMISSION_DENIED
- = TIZEN_ERROR_NOT_PERMITTED, /**< Not permitted */
- INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT
- = TIZEN_ERROR_MEDIA_VISION | 0x01, /**< Not supported format */
- INFERENCE_ENGINE_ERROR_INTERNAL
- = TIZEN_ERROR_MEDIA_VISION | 0x02, /**< Internal error */
- INFERENCE_ENGINE_ERROR_INVALID_DATA
- = TIZEN_ERROR_MEDIA_VISION | 0x03, /**< Invalid data */
- INFERENCE_ENGINE_ERROR_INVALID_PATH
- = TIZEN_ERROR_MEDIA_VISION | 0x04, /**< Invalid path*/
-} inference_engine_error_e;
+ /**
+ * @file inference_engine_error.h
+ * @brief This file contains error types required by
+ * the inference engine.
+ */
+ typedef enum {
+ INFERENCE_ENGINE_ERROR_NONE = TIZEN_ERROR_NONE, /**< Successful */
+ INFERENCE_ENGINE_ERROR_NOT_SUPPORTED =
+ TIZEN_ERROR_NOT_SUPPORTED, /**< Not supported */
+ INFERENCE_ENGINE_ERROR_MSG_TOO_LONG =
+ TIZEN_ERROR_MSG_TOO_LONG, /**< Message too long */
+ INFERENCE_ENGINE_ERROR_NO_DATA = TIZEN_ERROR_NO_DATA, /**< No data */
+ INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE =
+ TIZEN_ERROR_KEY_NOT_AVAILABLE, /**< Key not available */
+ INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY =
+ TIZEN_ERROR_OUT_OF_MEMORY, /**< Out of memory */
+ INFERENCE_ENGINE_ERROR_INVALID_PARAMETER =
+ TIZEN_ERROR_INVALID_PARAMETER, /**< Invalid parameter */
+ INFERENCE_ENGINE_ERROR_INVALID_OPERATION =
+ TIZEN_ERROR_INVALID_OPERATION, /**< Invalid operation */
+ INFERENCE_ENGINE_ERROR_PERMISSION_DENIED =
+ TIZEN_ERROR_NOT_PERMITTED, /**< Not permitted */
+ INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT =
+ TIZEN_ERROR_MEDIA_VISION | 0x01, /**< Not supported format */
+ INFERENCE_ENGINE_ERROR_INTERNAL = TIZEN_ERROR_MEDIA_VISION |
+ 0x02, /**< Internal error */
+ INFERENCE_ENGINE_ERROR_INVALID_DATA = TIZEN_ERROR_MEDIA_VISION |
+ 0x03, /**< Invalid data */
+ INFERENCE_ENGINE_ERROR_INVALID_PATH = TIZEN_ERROR_MEDIA_VISION |
+ 0x04, /**< Invalid path*/
+ } inference_engine_error_e;
#ifdef __cplusplus
}
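/*
 * A sketch of the error handling convention: every interface returns one of
 * the codes above, with INFERENCE_ENGINE_ERROR_NONE on success. LOGE is
 * assumed to be a logging macro available to the caller.
 *
 * @code
 * int ret = engine.Load(models, INFERENCE_MODEL_TFLITE);
 * if (ret != INFERENCE_ENGINE_ERROR_NONE) {
 * 	LOGE("Failed to load a model. error code = %d", ret);
 * 	return ret;
 * }
 * @endcode
 */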
#include <vector>
#include <inference_engine_type.h>
-namespace InferenceEngineInterface {
-namespace Common {
+namespace InferenceEngineInterface
+{
+namespace Common
+{
+ class InferenceEngineInI
+ {
+ public:
+ /**
+ * @brief Creates an Inference ini class instance.
+ *
+ * @since_tizen 5.5
+ */
+ InferenceEngineInI();
-class InferenceEngineInI {
-public:
- /**
- * @brief Creates an Inference ini class instance.
- *
- * @since_tizen 5.5
- */
- InferenceEngineInI();
+ /**
+ * @brief Destroys an Inference ini class instance including
+ * all its resources.
+ *
+ * @since_tizen 5.5
+ */
+ ~InferenceEngineInI();
- /**
- * @brief Destroys an Inference ini class instance including
- * all its resources.
- *
- * @since_tizen 5.5
- */
- ~InferenceEngineInI();
+ /**
+ * @brief Load the INI configuration file.
+ *
+ * @since_tizen 5.5
+ */
+ int LoadInI();
- /**
- * @brief Load the INI configuration file.
- *
- * @since_tizen 5.5
- */
- int LoadInI();
+ /**
+ * @brief Unload the INI configuration file.
+ *
+ * @since_tizen 5.5
+ */
+ void UnLoadInI();
- /**
- * @brief Unload the INI configuration file.
- *
- * @since_tizen 5.5
- */
- void UnLoadInI();
+ int GetSelectedBackendEngine();
- int GetSelectedBackendEngine();
-private:
- std::string mIniDefaultPath;
- inference_backend_type_e mSelectedBackendEngine;
-};
+ private:
+ std::string mIniDefaultPath;
+ inference_backend_type_e mSelectedBackendEngine;
+ };
} /* Common */
} /* InferenceEngineInterface */
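/*
 * A brief usage sketch of InferenceEngineInI, assuming LoadInI() returns 0
 * on success like the other interfaces in this library.
 *
 * @code
 * InferenceEngineInterface::Common::InferenceEngineInI ini;
 *
 * if (ini.LoadInI() == 0) {
 * 	int backend = ini.GetSelectedBackendEngine();
 * 	// map backend to inference_backend_type_e and bind it
 * }
 * ini.UnLoadInI();
 * @endcode
 */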
#define __INFERENCE_ENGINE_TYPE_H__
#ifdef __cplusplus
-extern "C" {
+extern "C"
+{
#endif /* __cplusplus */
-/**
- * @file inference_engine_type.h
- * @brief This file contains enumerations and handles definition required by
- * inference engine API.
- */
-
-/**
- * @brief Enumeration for inference target.
- *
- * @since_tizen 5.5
- *
- */
-typedef enum {
- INFERENCE_BACKEND_NONE = -1, /**< None */
- INFERENCE_BACKEND_OPENCV, /**< OpenCV */
- INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
- INFERENCE_BACKEND_ARMNN, /**< ARMNN */
- INFERENCE_BACKEND_MLAPI, /**< ML Single API of NNStreamer. */
- INFERENCE_BACKEND_NNFW, /**< NNFW */
- INFERENCE_BACKEND_MAX /**< Backend MAX */
-} inference_backend_type_e;
-
-/**
- * @brief Enumeration for inference target device.
- *
- * @since_tizen 5.5
- *
- */
-typedef enum {
- INFERENCE_TARGET_NONE = 0,
- INFERENCE_TARGET_CPU = 1 << 0, /**< CPU */
- INFERENCE_TARGET_GPU = 1 << 1, /**< GPU */
- INFERENCE_TARGET_CUSTOM = 1 << 2, /**< NPU */
- INFERENCE_TARGET_MAX = 1 << 3,
-} inference_target_type_e;
-
-/**
- * @brief Enumeration for NN model formats.
- *
- * @since_tizen 5.5
- *
- */
-typedef enum {
- INFERENCE_MODEL_NONE = 0,
- INFERENCE_MODEL_CAFFE, /**< CAFFE. *.prototxt config file is needed. */
- INFERENCE_MODEL_TF, /**< Tensorflow. *.pbtxt config file is needed. */
- INFERENCE_MODEL_TFLITE, /**< Tensorflow-Lite. */
- INFERENCE_MODEL_TORCH, /**< Torch */
- INFERENCE_MODEL_DARKNET, /**< Darknet. *.cfg config file is needed. */
- INFERENCE_MODEL_DLDT, /**< DLDT. *.xml config file is needed. */
- INFERENCE_MODEL_ONNX, /**< ONNX */
- INFERENCE_MODEL_VIVANTE, /**< Vivante. model specific so library and nb model files are needed. */
- INFERENCE_MODEL_MAX
-} inference_model_format_e;
-
-/**
- * @brief Enumeration for tensor shape type.
- *
- * @since_tizen 6.0
- *
- */
-typedef enum {
- INFERENCE_TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, number of channels, height, width. */
- INFERENCE_TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, number of channels. */
-} inference_tensor_shape_type_e;
-
-/**
- * @brief Enumeration for tensor data type.
- *
- * @since_tizen 6.0
- *
- */
-typedef enum {
- INFERENCE_TENSOR_DATA_TYPE_NONE = 0,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT16,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- INFERENCE_TENSOR_DATA_TYPE_UINT8,
- INFERENCE_TENSOR_DATA_TYPE_UINT16,
- INFERENCE_TENSOR_DATA_TYPE_UINT32,
- INFERENCE_TENSOR_DATA_TYPE_MAX
-} inference_tensor_data_type_e;
-
-/**
- * @brief Tensor defined by the dimensions and their corresponding data
- * @details @a dimInfo is the information
- * of a tensor, which is a multi-dimensional matrix. @a data is the data pointer
- * corresponding to @a dimInfo. In case of an input image tensor with
- * resolution 224 x 224 and channel 3, for example:
- * @code
- * // assume that image data address is known
- * float *pImageData = ...;
- * std::vector<int> dim{ 1, 3, 224, 224};
- * tensor_t inputTensor;
- * inputTensor.dimInfo.push_back(dim);
- * inputTensor.data.push_back((void *) pImageData);
- *
- * // Do something with inputTensor
- * @endcode
- *
- * @since_tizen 5.5
- */
-typedef struct _tensor_t {
- std::vector<std::vector<int>> dimInfo;
- std::vector<void*> data;
-} tensor_t;
-
-/**
- * @brief Inference engine backend configuration
- *
- * @details This structure should be configured before InferenceEngineCommon object is
- * created and then passed to InferenceEngineCommon's constructor.
- *
- * @since_tizen 6.0
- */
-typedef struct _inference_engine_config {
- std::string backend_name; /**< a backend name which could be one among supported backends(tflite, opencv, armnn, dldt, nnstreamer) */
- int backend_type; /**< a tensor filter plugin type for NNStreamer if a backend is NNStreamer. */
- int target_devices; /**< which device or devices to be targeted for inference. (Please, refer to inference_target_type_e) */
- // TODO.
-} inference_engine_config;
-
-/**
- * @brief Tensor buffer structure
- *
- * @details This buffer contains actual tensor data so type-casting is required
- * according to a tensor data type.
- *
- * @since_tizen 6.0
- */
-typedef struct _inference_engine_tensor_buffer {
- void *buffer; /**< a buffer which contains tensor data. */
- inference_tensor_data_type_e data_type; /**< a tensor type of the layer. */
- size_t size; /**< actual tensor buffer size in bytes. The size should be height * width * channel count * bytes per pixel. */
- int owner_is_backend; /**< It indicates whether the tensor buffer owner is the backend or not. Setting it to 1 means that the backend has to allocate and release the buffer. */
- // TODO.
-} inference_engine_tensor_buffer;
-
-/**
- * @brief Tensor information structure
- *
- * @details This structure corresponding to a tensor contains below tensor information,
- * - a name of a given layer
- * - a tensor shape of the layer
- * - a tensor type of the layer
- * - a tensor element size of the layer.
- *
- * @remarks Tensor element size is not in-memory buffer size in bytes so based on a given tensor element size,
- * upper framework should allocate actual tensor buffer according to tensor data types (i.e., uint8, float32...)
- *
- * @since_tizen 6.0
- */
-typedef struct _inference_engine_tensor_info {
- std::vector<size_t> shape; /**< a tensor shape. */
- inference_tensor_shape_type_e shape_type; /**< a tensor shape of the layer. */
- inference_tensor_data_type_e data_type; /**< a tensor type of the layer. */
- size_t size; /** tensor element size. The size should be height * width * channel count */
- // TODO.
-} inference_engine_tensor_info;
-
-/**
- * @brief A layer property structure
- *
- * @details This structure is used to get/set information to one more tensors from/to a backend engine.
- * - layer names of input or output layer.
- * - information of tensors.
- * @since_tizen 6.0
- */
-typedef struct _inference_engine_layer_property {
- std::vector<std::string> layer_names; /**< names of layers. */
- std::vector<inference_engine_tensor_info> tensor_infos; /**< information of tensors. */
- // TODO.
-} inference_engine_layer_property;
-
-/**
- * @brief A capacity structure to a backend engine.
- *
- * @details This structure is used to get information such as what features and
- * constraints a given backend engine has, and it contains below information,
- * - device list which is able to compute operations.
- * - tensor shape information a given backend engine supports for.
- * - neural network models a given backend engine supports for.
- *
- * @since_tizen 6.0
- */
-typedef struct _inference_engine_capacity {
- int supported_accel_devices;
- inference_tensor_shape_type_e supported_tensor_shape_type;
- std::vector<std::string> supported_nn_models;
- // TODO.
-} inference_engine_capacity;
+ /**
+ * @file inference_engine_type.h
+ * @brief This file contains enumerations and handle definitions required by
+ * the inference engine API.
+ */
+
+ /**
+ * @brief Enumeration for inference backend type.
+ *
+ * @since_tizen 5.5
+ *
+ */
+ typedef enum {
+ INFERENCE_BACKEND_NONE = -1, /**< None */
+ INFERENCE_BACKEND_OPENCV, /**< OpenCV */
+ INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
+ INFERENCE_BACKEND_ARMNN, /**< ARMNN */
+ INFERENCE_BACKEND_MLAPI, /**< ML Single API of NNStreamer. */
+ INFERENCE_BACKEND_NNFW, /**< NNFW */
+ INFERENCE_BACKEND_MAX /**< Backend MAX */
+ } inference_backend_type_e;
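+
+ /*
+ * Illustrative sketch (not part of this header): a backend type from the
+ * enum above can be handed straight to InferenceEngineCommon::BindBackend(),
+ * which maps the type to a backend library name internally.
+ *
+ *   InferenceEngineCommon engine;
+ *   engine.BindBackend(INFERENCE_BACKEND_TFLITE); // loads libinference-engine-tflite.so
+ */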
+
+ /**
+ * @brief Enumeration for inference target device.
+ *
+ * @since_tizen 5.5
+ *
+ */
+ typedef enum {
+ INFERENCE_TARGET_NONE = 0,
+ INFERENCE_TARGET_CPU = 1 << 0, /**< CPU */
+ INFERENCE_TARGET_GPU = 1 << 1, /**< GPU */
+ INFERENCE_TARGET_CUSTOM = 1 << 2, /**< Custom device such as an NPU */
+ INFERENCE_TARGET_MAX = 1 << 3,
+ } inference_target_type_e;
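+
+ /*
+ * Illustrative sketch: the target types above are bit flags, so a hybrid
+ * CPU/GPU request can be expressed by OR-ing them before calling
+ * SetTargetDevices().
+ *
+ *   int targets = INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU;
+ *   engine.SetTargetDevices(targets);
+ */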
+
+ /**
+ * @brief Enumeration for NN model formats.
+ *
+ * @since_tizen 5.5
+ *
+ */
+ typedef enum {
+ INFERENCE_MODEL_NONE = 0,
+ INFERENCE_MODEL_CAFFE, /**< CAFFE. *.prototxt config file is needed. */
+ INFERENCE_MODEL_TF, /**< TensorFlow. *.pbtxt config file is needed. */
+ INFERENCE_MODEL_TFLITE, /**< TensorFlow-Lite. */
+ INFERENCE_MODEL_TORCH, /**< Torch */
+ INFERENCE_MODEL_DARKNET, /**< Darknet. *.cfg config file is needed. */
+ INFERENCE_MODEL_DLDT, /**< DLDT. *.xml config file is needed. */
+ INFERENCE_MODEL_ONNX, /**< ONNX */
+ INFERENCE_MODEL_VIVANTE, /**< Vivante. A model-specific .so library and .nb model file are needed. */
+ INFERENCE_MODEL_MAX
+ } inference_model_format_e;
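+
+ /*
+ * Illustrative sketch: the model_paths vector passed to Load() depends on
+ * the format; Caffe, for instance, needs the *.prototxt config next to the
+ * weight file. The file names below are hypothetical.
+ *
+ *   std::vector<std::string> paths = { "net.prototxt", "net.caffemodel" };
+ *   engine.Load(paths, INFERENCE_MODEL_CAFFE);
+ */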
+
+ /**
+ * @brief Enumeration for tensor shape type.
+ *
+ * @since_tizen 6.0
+ *
+ */
+ typedef enum {
+ INFERENCE_TENSOR_SHAPE_NCHW = 0, /**< tensor order is batch size, number of channels, height, width. */
+ INFERENCE_TENSOR_SHAPE_NHWC, /**< tensor order is batch size, height, width, number of channels. */
+ } inference_tensor_shape_type_e;
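+
+ /*
+ * Illustrative sketch: the shape type fixes how a flat buffer is indexed.
+ * For an NCHW tensor with C channels, height H and width W, the element at
+ * (n, c, h, w) sits at:
+ *
+ *   size_t offset = ((n * C + c) * H + h) * W + w; // NCHW
+ *   // whereas NHWC uses ((n * H + h) * W + w) * C + c
+ */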
+
+ /**
+ * @brief Enumeration for tensor data type.
+ *
+ * @since_tizen 6.0
+ *
+ */
+ typedef enum {
+ INFERENCE_TENSOR_DATA_TYPE_NONE = 0,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT16,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ INFERENCE_TENSOR_DATA_TYPE_UINT16,
+ INFERENCE_TENSOR_DATA_TYPE_UINT32,
+ INFERENCE_TENSOR_DATA_TYPE_MAX
+ } inference_tensor_data_type_e;
+
+ /**
+ * @brief Tensor defined by its dimensions and the corresponding data
+ * @details @a dimInfo is the dimension information
+ * of a tensor, which is a multi-dimensional matrix. @a data holds the data
+ * pointers corresponding to @a dimInfo. For example, in case of an input
+ * image tensor with resolution 224 x 224 and 3 channels:
+ * @code
+ * // assume that the image data address is known
+ * float *pImageData = ...;
+ * std::vector<int> dim{ 1, 3, 224, 224 };
+ * tensor_t inputTensor;
+ * inputTensor.dimInfo.push_back(dim);
+ * inputTensor.data.push_back((void *) pImageData);
+ * // Do something with inputTensor
+ * @endcode
+ *
+ * @since_tizen 5.5
+ */
+ typedef struct _tensor_t {
+ std::vector<std::vector<int> > dimInfo;
+ std::vector<void *> data;
+ } tensor_t;
+
+ /**
+ * @brief Inference engine backend configuration
+ *
+ * @details This structure should be configured before InferenceEngineCommon object is
+ * created and then passed to InferenceEngineCommon's constructor.
+ *
+ * @since_tizen 6.0
+ */
+ typedef struct _inference_engine_config {
+ std::string backend_name; /**< a backend name, one of the supported backends (tflite, opencv, armnn, dldt, nnstreamer). */
+ int backend_type; /**< a tensor filter plugin type for NNStreamer if the backend is NNStreamer. */
+ int target_devices; /**< which device or devices to target for inference. (Refer to inference_target_type_e.) */
+ // TODO.
+ } inference_engine_config;
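+
+ /*
+ * Illustrative sketch, mirroring the unit tests later in this patch: fill
+ * the config and pass it to BindBackend().
+ *
+ *   inference_engine_config config = { .backend_name = "tflite",
+ *                                      .backend_type = 0,
+ *                                      .target_devices = INFERENCE_TARGET_CPU };
+ *   auto engine = std::make_unique<InferenceEngineCommon>();
+ *   engine->BindBackend(&config);
+ */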
+
+ /**
+ * @brief Tensor buffer structure
+ *
+ * @details This buffer contains actual tensor data, so type-casting is required
+ * according to the tensor data type.
+ *
+ * @since_tizen 6.0
+ */
+ typedef struct _inference_engine_tensor_buffer {
+ void *buffer; /**< a buffer which contains tensor data. */
+ inference_tensor_data_type_e data_type; /**< a tensor data type of the layer. */
+ size_t size; /**< actual tensor buffer size in bytes. The size should be height * width * channel count * bytes per element. */
+ int owner_is_backend; /**< indicates whether the tensor buffer owner is the backend. Setting this to 1 means that the backend has to allocate and release the buffer. */
+ // TODO.
+ } inference_engine_tensor_buffer;
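+
+ /*
+ * Illustrative sketch: because buffer is a raw pointer, callers cast it
+ * according to data_type before touching the values.
+ *
+ *   if (buf.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+ *       float *values = static_cast<float *>(buf.buffer);
+ *       size_t count = buf.size / sizeof(float);
+ *   }
+ */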
+
+ /**
+ * @brief Tensor information structure
+ *
+ * @details This structure describes a tensor and contains the following information:
+ * - the name of a given layer
+ * - the tensor shape of the layer
+ * - the tensor data type of the layer
+ * - the tensor element count of the layer.
+ *
+ * @remarks The tensor element count is not the in-memory buffer size in bytes, so based on a
+ * given element count, the upper framework should allocate the actual tensor buffer
+ * according to the tensor data type (e.g., uint8, float32).
+ *
+ * @since_tizen 6.0
+ */
+ typedef struct _inference_engine_tensor_info {
+ std::vector<size_t> shape; /**< a tensor shape. */
+ inference_tensor_shape_type_e shape_type; /**< a tensor shape type of the layer. */
+ inference_tensor_data_type_e data_type; /**< a tensor data type of the layer. */
+ size_t size; /**< a tensor element count. The count should be height * width * channel count. */
+ // TODO.
+ } inference_engine_tensor_info;
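+
+ /*
+ * Illustrative sketch of the @remarks above: converting the element count to
+ * an in-memory byte size before allocating a buffer. BytesPerElement is a
+ * hypothetical helper, not part of this API.
+ *
+ *   static size_t BytesPerElement(inference_tensor_data_type_e t)
+ *   {
+ *       switch (t) {
+ *       case INFERENCE_TENSOR_DATA_TYPE_FLOAT16: return 2;
+ *       case INFERENCE_TENSOR_DATA_TYPE_UINT8:   return 1;
+ *       case INFERENCE_TENSOR_DATA_TYPE_UINT16:  return 2;
+ *       default:                                 return 4; // FLOAT32/UINT32
+ *       }
+ *   }
+ *
+ *   size_t bytes = info.size * BytesPerElement(info.data_type);
+ */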
+
+ /**
+ * @brief A layer property structure
+ *
+ * @details This structure is used to get/set information on one or more tensors from/to a backend engine:
+ * - layer names of input or output layer.
+ * - information of tensors.
+ * @since_tizen 6.0
+ */
+ typedef struct _inference_engine_layer_property {
+ std::vector<std::string> layer_names; /**< names of layers. */
+ std::vector<inference_engine_tensor_info> tensor_infos; /**< information of tensors. */
+ // TODO.
+ } inference_engine_layer_property;
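+
+ /*
+ * Illustrative sketch, following the unit tests later in this patch:
+ * layer_names and tensor_infos are filled pairwise, one entry per layer.
+ *
+ *   inference_engine_layer_property input_property;
+ *   inference_engine_tensor_info info = { { 1, 3, 224, 224 },
+ *                                         INFERENCE_TENSOR_SHAPE_NCHW,
+ *                                         INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ *                                         1 * 3 * 224 * 224 };
+ *   input_property.layer_names.push_back("input_2");
+ *   input_property.tensor_infos.push_back(info);
+ *   engine->SetInputLayerProperty(input_property);
+ */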
+
+ /**
+ * @brief A capacity structure for a backend engine.
+ *
+ * @details This structure is used to get information such as what features and
+ * constraints a given backend engine has, and it contains the following information:
+ * - a device list able to compute operations.
+ * - tensor shape types a given backend engine supports.
+ * - neural network model formats a given backend engine supports.
+ *
+ * @since_tizen 6.0
+ */
+ typedef struct _inference_engine_capacity {
+ int supported_accel_devices;
+ inference_tensor_shape_type_e supported_tensor_shape_type;
+ std::vector<std::string> supported_nn_models;
+ // TODO.
+ } inference_engine_capacity;
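+
+ /*
+ * Illustrative sketch, assuming supported_accel_devices is a bitmask of
+ * inference_target_type_e values:
+ *
+ *   inference_engine_capacity capacity;
+ *   if (engine->GetBackendCapacity(&capacity) == INFERENCE_ENGINE_ERROR_NONE) {
+ *       bool gpu_ok = capacity.supported_accel_devices & INFERENCE_TARGET_GPU;
+ *   }
+ */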
#ifdef __cplusplus
}
#include <dlfcn.h>
#include <experimental/filesystem>
-extern "C" {
-
+extern "C"
+{
#include <dlog.h>
#ifdef LOG_TAG
#define LOG_TAG "INFERENCE_ENGINE_COMMON"
}
-#define CHECK_ENGINE_INSTANCE(object) \
- if (object == nullptr) { \
- LOGE("Inference engine handle is null."); \
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; \
+#define CHECK_ENGINE_INSTANCE(object) \
+ if (object == nullptr) { \
+ LOGE("Inference engine handle is null."); \
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; \
}
namespace fs = std::experimental::filesystem;
-namespace InferenceEngineInterface {
-namespace Common {
-
-InferenceEngineCommon::InferenceEngineCommon() :
- mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
- mProfiler(),
- mUseProfiler(false),
- mProfilerDumpType(IE_PROFILER_DUMP_MIN),
- mBackendModule(),
- mBackendHandle()
+namespace InferenceEngineInterface
{
- LOGI("ENTER");
- LOGI("LEAVE");
-}
-
-InferenceEngineCommon::~InferenceEngineCommon()
+namespace Common
{
- LOGW("ENTER");
-
- if (mUseProfiler == true) {
- mProfiler->Dump(mProfilerDumpType);
- delete mProfiler;
- }
-
- LOGW("LEAVE");
-}
+ InferenceEngineCommon::InferenceEngineCommon() :
+ mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
+ mProfiler(),
+ mUseProfiler(false),
+ mProfilerDumpType(IE_PROFILER_DUMP_MIN),
+ mBackendModule(),
+ mBackendHandle()
+ {
+ LOGI("ENTER");
+ LOGI("LEAVE");
+ }
+
+ InferenceEngineCommon::~InferenceEngineCommon()
+ {
+ LOGW("ENTER");
+
+ if (mUseProfiler == true) {
+ mProfiler->Dump(mProfilerDumpType);
+ delete mProfiler;
+ }
-int InferenceEngineCommon::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
- if (buffers.size() == 0) {
- LOGE("tensor buffer vector is empty.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ LOGW("LEAVE");
}
- for (std::vector<inference_engine_tensor_buffer>::const_iterator iter = buffers.begin(); iter != buffers.end(); ++iter) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
- if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
- LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
+ int InferenceEngineCommon::CheckTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers)
+ {
+ if (buffers.size() == 0) {
+ LOGE("tensor buffer vector is empty.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- if (tensor_buffer.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE || tensor_buffer.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
- LOGE("tensor data type is invalid.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ for (std::vector<inference_engine_tensor_buffer>::const_iterator iter =
+ buffers.begin();
+ iter != buffers.end(); ++iter) {
+ inference_engine_tensor_buffer tensor_buffer = *iter;
+ if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
+ LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ if (tensor_buffer.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
+ tensor_buffer.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
+ LOGE("tensor data type is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
}
- }
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceEngineCommon::CheckLayerProperty(inference_engine_layer_property &property)
-{
- // Verity tensor info values.
- std::vector<inference_engine_tensor_info>::const_iterator info_iter;
- for (info_iter = property.tensor_infos.begin(); info_iter != property.tensor_infos.end(); ++info_iter) {
- inference_engine_tensor_info tensor_info = *info_iter;
- if (tensor_info.shape.size() == 0 || tensor_info.size == 0) {
- LOGE("shape size of tensor info or size of it is 0.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
+ int InferenceEngineCommon::CheckLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ // Verify tensor info values.
+ std::vector<inference_engine_tensor_info>::const_iterator info_iter;
+ for (info_iter = property.tensor_infos.begin();
+ info_iter != property.tensor_infos.end(); ++info_iter) {
+ inference_engine_tensor_info tensor_info = *info_iter;
+ if (tensor_info.shape.size() == 0 || tensor_info.size == 0) {
+ LOGE("shape size of tensor info or size of it is 0.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ if (tensor_info.data_type < INFERENCE_TENSOR_DATA_TYPE_FLOAT16 ||
+ tensor_info.data_type > INFERENCE_TENSOR_DATA_TYPE_UINT32) {
+ LOGE("tensor data type is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ // TODO. we may need to check shape type also.
}
- if (tensor_info.data_type < INFERENCE_TENSOR_DATA_TYPE_FLOAT16 || tensor_info.data_type > INFERENCE_TENSOR_DATA_TYPE_UINT32) {
- LOGE("tensor data type is invalid.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ // Verify layer names.
+ std::vector<std::string>::const_iterator name_iter;
+ for (name_iter = property.layer_names.begin();
+ name_iter != property.layer_names.end(); ++name_iter) {
+ std::string name = *name_iter;
+
+ if (name.length() == 0) {
+ LOGE("layer name is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
}
- // TODO. we may need to check shape type also.
+ return INFERENCE_ENGINE_ERROR_NONE;
}
- // Verity layer names.
- std::vector<std::string>::const_iterator name_iter;
- for (name_iter = property.layer_names.begin(); name_iter != property.layer_names.end(); ++name_iter) {
- std::string name = *name_iter;
-
- if (name.length() == 0) {
- LOGE("layer name is invalid.");
+ int InferenceEngineCommon::EnableProfiler(bool enable)
+ {
+ if (enable != true && enable != false) {
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- }
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ mUseProfiler = enable;
-int InferenceEngineCommon::EnableProfiler(bool enable)
-{
- if (enable != true && enable != false) {
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
+ if (mUseProfiler == true) {
+ mProfiler = new InferenceEngineProfiler();
- mUseProfiler = enable;
-
- if (mUseProfiler == true) {
- mProfiler = new InferenceEngineProfiler();
+ // By default, profile data will be stored to a given file.
+ mProfilerDumpType = IE_PROFILER_DUMP_FILE;
+ }
- // In default, profile data will be stored to a given file.
- mProfilerDumpType = IE_PROFILER_DUMP_FILE;
+ return INFERENCE_ENGINE_ERROR_NONE;
}
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceEngineCommon::DumpProfileToConsole(void)
-{
- if (mUseProfiler == false) {
- std::cout << "Enable Profiler." << "\n";
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
-
- mProfilerDumpType = IE_PROFILER_DUMP_CONSOLE;
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ int InferenceEngineCommon::DumpProfileToConsole(void)
+ {
+ if (mUseProfiler == false) {
+ std::cout << "Enable Profiler."
+ << "\n";
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
-int InferenceEngineCommon::DumpProfileToFile(const std::string filename)
-{
- if (mUseProfiler == false) {
- std::cout << "Enable Profiler." << "\n";
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ mProfilerDumpType = IE_PROFILER_DUMP_CONSOLE;
+ return INFERENCE_ENGINE_ERROR_NONE;
}
- mProfilerDumpType = IE_PROFILER_DUMP_FILE;
- mProfiler->SetDumpFilename(filename);
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ int InferenceEngineCommon::DumpProfileToFile(const std::string filename)
+ {
+ if (mUseProfiler == false) {
+ std::cout << "Enable Profiler."
+ << "\n";
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
-int InferenceEngineCommon::InitBackendEngine(const std::string &backend_path, int backend_type)
-{
- LOGI("lib: %s", backend_path.c_str());
- mBackendModule = dlopen(backend_path.c_str(), RTLD_NOW);
- LOGI("HANDLE : [%p]", mBackendModule);
-
- if (!mBackendModule) {
- LOGE("Fail to dlopen %s", backend_path.c_str());
- LOGE("Error: %s\n", dlerror());
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
- }
+ mProfilerDumpType = IE_PROFILER_DUMP_FILE;
+ mProfiler->SetDumpFilename(filename);
- init_t* EngineInit = (init_t *)dlsym(mBackendModule, "EngineCommonInit");
- char *error = NULL;
- if ((error = dlerror()) != NULL) {
- LOGE("Error: %s\n", error);
- dlclose(mBackendModule);
- mBackendModule = nullptr;
- return INFERENCE_ENGINE_ERROR_INTERNAL;
+ return INFERENCE_ENGINE_ERROR_NONE;
}
- mBackendHandle = EngineInit();
- if (mBackendHandle == NULL) {
- LOGE("Fail to EngineInit");
- dlclose(mBackendModule);
- mBackendModule = nullptr;
- return INFERENCE_ENGINE_ERROR_INTERNAL;
- }
+ int
+ InferenceEngineCommon::InitBackendEngine(const std::string &backend_path,
+ int backend_type)
+ {
+ LOGI("lib: %s", backend_path.c_str());
+ mBackendModule = dlopen(backend_path.c_str(), RTLD_NOW);
+ LOGI("HANDLE : [%p]", mBackendModule);
- // If a backend is ML Single API of NNStreamer or NNFW then set a tensor filter plugin type.
- if (backend_type == INFERENCE_BACKEND_NNFW || backend_type == INFERENCE_BACKEND_MLAPI) {
- int ret = mBackendHandle->SetPrivateData(&backend_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Failed to set a tensor filter plugin.");
+ if (!mBackendModule) {
+ LOGE("Fail to dlopen %s", backend_path.c_str());
+ LOGE("Error: %s\n", dlerror());
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
- }
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ init_t *EngineInit =
+ (init_t *) dlsym(mBackendModule, "EngineCommonInit");
+ char *error = NULL;
+ if ((error = dlerror()) != NULL) {
+ LOGE("Error: %s\n", error);
+ dlclose(mBackendModule);
+ mBackendModule = nullptr;
+ return INFERENCE_ENGINE_ERROR_INTERNAL;
+ }
-int InferenceEngineCommon::BindBackend(inference_engine_config *config)
-{
- LOGI("ENTER");
+ mBackendHandle = EngineInit();
+ if (mBackendHandle == NULL) {
+ LOGE("Fail to EngineInit");
+ dlclose(mBackendModule);
+ mBackendModule = nullptr;
+ return INFERENCE_ENGINE_ERROR_INTERNAL;
+ }
- if (mBackendHandle) {
- LOGE("Already backend engine has been initialized.");
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
+ // If the backend is the ML Single API of NNStreamer or NNFW, then set a tensor filter plugin type.
+ if (backend_type == INFERENCE_BACKEND_NNFW ||
+ backend_type == INFERENCE_BACKEND_MLAPI) {
+ int ret = mBackendHandle->SetPrivateData(&backend_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to set a tensor filter plugin.");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ }
+ }
- if (config == nullptr) {
- LOGE("config object is invalid.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ return INFERENCE_ENGINE_ERROR_NONE;
}
- if (mUseProfiler == true) {
- // Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
- mProfiler->Start(IE_PROFILER_MEMORY);
- }
+ int InferenceEngineCommon::BindBackend(inference_engine_config *config)
+ {
+ LOGI("ENTER");
- std::string backendLibName = "libinference-engine-" + config->backend_name + ".so";
+ if (mBackendHandle) {
+ LOGE("Already backend engine has been initialized.");
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
- int ret = InitBackendEngine(backendLibName, config->backend_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- return ret;
- }
+ if (config == nullptr) {
+ LOGE("config object is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
- if (mUseProfiler == true) {
- mProfiler->AddBackendName(config->backend_name);
- }
+ if (mUseProfiler == true) {
+ // Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
+ mProfiler->Start(IE_PROFILER_MEMORY);
+ }
- LOGI("LEAVE");
+ std::string backendLibName =
+ "libinference-engine-" + config->backend_name + ".so";
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ int ret = InitBackendEngine(backendLibName, config->backend_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ return ret;
+ }
-int InferenceEngineCommon::BindBackend(int backend_type)
-{
- LOGI("ENTER");
+ if (mUseProfiler == true) {
+ mProfiler->AddBackendName(config->backend_name);
+ }
- if (mBackendHandle) {
- LOGE("Already backend engine has been initialized.");
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
+ LOGI("LEAVE");
- if (backend_type <= INFERENCE_BACKEND_NONE || backend_type >= INFERENCE_BACKEND_MAX) {
- LOGE("Backend type is invalid.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ return INFERENCE_ENGINE_ERROR_NONE;
}
- if (mUseProfiler == true) {
- // Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
- mProfiler->Start(IE_PROFILER_MEMORY);
- }
+ int InferenceEngineCommon::BindBackend(int backend_type)
+ {
+ LOGI("ENTER");
- std::string backendNameTable[INFERENCE_BACKEND_MAX] = {
- [INFERENCE_BACKEND_OPENCV] = "opencv",
- [INFERENCE_BACKEND_TFLITE] = "tflite",
- [INFERENCE_BACKEND_ARMNN] = "armnn",
- [INFERENCE_BACKEND_MLAPI] = "mlapi",
- [INFERENCE_BACKEND_NNFW] = "mlapi"
- };
+ if (mBackendHandle) {
+ LOGE("Already backend engine has been initialized.");
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
- std::string backendLibName = "libinference-engine-" + backendNameTable[backend_type] + ".so";
+ if (backend_type <= INFERENCE_BACKEND_NONE ||
+ backend_type >= INFERENCE_BACKEND_MAX) {
+ LOGE("Backend type is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
- int ret = InitBackendEngine(backendLibName, backend_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- return ret;
- }
+ if (mUseProfiler == true) {
+ // Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
+ mProfiler->Start(IE_PROFILER_MEMORY);
+ }
- if (mUseProfiler == true) {
- mProfiler->AddBackendName(backendNameTable[backend_type]);
- }
+ std::string backendNameTable[INFERENCE_BACKEND_MAX] = {
+ [INFERENCE_BACKEND_OPENCV] = "opencv",
+ [INFERENCE_BACKEND_TFLITE] = "tflite",
+ [INFERENCE_BACKEND_ARMNN] = "armnn",
+ [INFERENCE_BACKEND_MLAPI] = "mlapi",
+ [INFERENCE_BACKEND_NNFW] = "mlapi"
+ };
- LOGI("LEAVE");
+ std::string backendLibName =
+ "libinference-engine-" + backendNameTable[backend_type] + ".so";
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ int ret = InitBackendEngine(backendLibName, backend_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ return ret;
+ }
-void InferenceEngineCommon::UnbindBackend(void)
-{
- LOGW("ENTER");
+ if (mUseProfiler == true) {
+ mProfiler->AddBackendName(backendNameTable[backend_type]);
+ }
- if (mUseProfiler == true) {
- // Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
- mProfiler->Stop(IE_PROFILER_MEMORY);
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
}
- if (mBackendModule) {
- destroy_t *engineDestroy = (destroy_t*)dlsym(mBackendModule, "EngineCommonDestroy");
- engineDestroy(mBackendHandle);
- dlclose(mBackendModule);
- mBackendHandle = nullptr;
- mBackendModule = nullptr;
- }
+ void InferenceEngineCommon::UnbindBackend(void)
+ {
+ LOGW("ENTER");
- LOGW("LEAVE");
-}
+ if (mUseProfiler == true) {
+ // Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
+ mProfiler->Stop(IE_PROFILER_MEMORY);
+ }
-int InferenceEngineCommon::SetTargetDevices(int types)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ if (mBackendModule) {
+ destroy_t *engineDestroy =
+ (destroy_t *) dlsym(mBackendModule, "EngineCommonDestroy");
+ engineDestroy(mBackendHandle);
+ dlclose(mBackendModule);
+ mBackendHandle = nullptr;
+ mBackendModule = nullptr;
+ }
- if (types <= INFERENCE_TARGET_NONE || types >= INFERENCE_TARGET_MAX) {
- LOGE("Given target device types(%d) are invalid.", types);
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ LOGW("LEAVE");
}
- int ret = mBackendHandle->SetTargetDevices(types);
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to SetTargetDevice");
+ int InferenceEngineCommon::SetTargetDevices(int types)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
- if (mUseProfiler == true) {
- mProfiler->AddTargetDevices(types);
- }
-
- return ret;
-}
+ if (types <= INFERENCE_TARGET_NONE || types >= INFERENCE_TARGET_MAX) {
+ LOGE("Given target device types(%d) are invalid.", types);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
-int InferenceEngineCommon::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
-{
- LOGI("ENTER");
+ int ret = mBackendHandle->SetTargetDevices(types);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ LOGE("Fail to SetTargetDevice");
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ if (mUseProfiler == true) {
+ mProfiler->AddTargetDevices(types);
+ }
- if (mUseProfiler == true) {
- mProfiler->AddModelName(model_paths[0]);
- mProfiler->PushEnv();
- mProfiler->Start(IE_PROFILER_LATENCY);
+ return ret;
}
- int ret = mBackendHandle->Load(model_paths, model_format);
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to load InferenceEngineVision");
+ int InferenceEngineCommon::Load(std::vector<std::string> model_paths,
+ inference_model_format_e model_format)
+ {
+ LOGI("ENTER");
- if (mUseProfiler == true) {
- mProfiler->Stop(IE_PROFILER_LATENCY, "Load");
- }
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
- LOGI("LEAVE");
+ if (mUseProfiler == true) {
+ mProfiler->AddModelName(model_paths[0]);
+ mProfiler->PushEnv();
+ mProfiler->Start(IE_PROFILER_LATENCY);
+ }
- return ret;
-}
+ int ret = mBackendHandle->Load(model_paths, model_format);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ LOGE("Fail to load InferenceEngineVision");
-int InferenceEngineCommon::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ if (mUseProfiler == true) {
+ mProfiler->Stop(IE_PROFILER_LATENCY, "Load");
+ }
- int ret = mBackendHandle->GetInputTensorBuffers(buffers);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Failed to get input tensor buffers.");
- return ret;
- }
+ LOGI("LEAVE");
- // If backend engine doesn't provide tensor buffers then just return.
- // In this case, InferenceEngineCommon framework will allocate the tensor buffers.
- if (buffers.size() == 0) {
return ret;
}
- return CheckTensorBuffers(buffers);
-}
+ int InferenceEngineCommon::GetInputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
-int InferenceEngineCommon::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ int ret = mBackendHandle->GetInputTensorBuffers(buffers);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get input tensor buffers.");
+ return ret;
+ }
- int ret = mBackendHandle->GetOutputTensorBuffers(buffers);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Failed to get output tensor buffers.");
- return ret;
- }
+ // If backend engine doesn't provide tensor buffers then just return.
+ // In this case, InferenceEngineCommon framework will allocate the tensor buffers.
+ if (buffers.size() == 0) {
+ return ret;
+ }
- // If backend engine doesn't provide tensor buffers then just return.
- // In this case, InferenceEngineCommon framework will allocate the tensor buffers.
- if (buffers.size() == 0) {
- return ret;
+ return CheckTensorBuffers(buffers);
}
- return CheckTensorBuffers(buffers);
-}
+ int InferenceEngineCommon::GetOutputTensorBuffers(
+ std::vector<inference_engine_tensor_buffer> &buffers)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
-int InferenceEngineCommon::GetInputLayerProperty(inference_engine_layer_property &property)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ int ret = mBackendHandle->GetOutputTensorBuffers(buffers);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get output tensor buffers.");
+ return ret;
+ }
- int ret = mBackendHandle->GetInputLayerProperty(property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Failed to get input layer property.");
- return ret;
- }
+ // If backend engine doesn't provide tensor buffers then just return.
+ // In this case, InferenceEngineCommon framework will allocate the tensor buffers.
+ if (buffers.size() == 0) {
+ return ret;
+ }
- // If backend engine doesn't provide input layer property information then just return.
- // In this case, user has to provide the information manually.
- if (property.layer_names.size() == 0 && property.tensor_infos.size() == 0) {
- LOGI("backend doesn't provide input layer property.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ return CheckTensorBuffers(buffers);
}
- return CheckLayerProperty(property);
-}
+ int InferenceEngineCommon::GetInputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
-int InferenceEngineCommon::GetOutputLayerProperty(inference_engine_layer_property &property)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ int ret = mBackendHandle->GetInputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get input layer property.");
+ return ret;
+ }
- int ret = mBackendHandle->GetOutputLayerProperty(property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Failed to get output layer property.");
- return ret;
- }
+ // If backend engine doesn't provide input layer property information then just return.
+ // In this case, user has to provide the information manually.
+ if (property.layer_names.size() == 0 &&
+ property.tensor_infos.size() == 0) {
+ LOGI("backend doesn't provide input layer property.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
- // If backend engine doesn't provide output layer property information then just return.
- // In this case, user has to provide the information manually.
- if (property.layer_names.size() == 0 && property.tensor_infos.size() == 0) {
- LOGI("backend doesn't provide output layer property.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ return CheckLayerProperty(property);
}
- return CheckLayerProperty(property);
-}
+ int InferenceEngineCommon::GetOutputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
-int InferenceEngineCommon::SetInputLayerProperty(inference_engine_layer_property &property)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ int ret = mBackendHandle->GetOutputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get output layer property.");
+ return ret;
+ }
- if (property.layer_names.empty() || property.tensor_infos.empty()) {
- LOGE("layer_names or tensor_infos vector of a given property is empty.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
+ // If backend engine doesn't provide output layer property information then just return.
+ // In this case, user has to provide the information manually.
+ if (property.layer_names.size() == 0 &&
+ property.tensor_infos.size() == 0) {
+ LOGI("backend doesn't provide output layer property.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
- int ret = CheckLayerProperty(property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Given input layer property is invalid.");
- return ret;
+ return CheckLayerProperty(property);
}
- return mBackendHandle->SetInputLayerProperty(property);
-}
+ int InferenceEngineCommon::SetInputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
-int InferenceEngineCommon::SetOutputLayerProperty(inference_engine_layer_property &property)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ if (property.layer_names.empty() || property.tensor_infos.empty()) {
+ LOGE("layer_names or tensor_infos vector of a given property is empty.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
- if (property.layer_names.empty()) {
- LOGE("layer_names vector of a given property is empty.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
+ int ret = CheckLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Given input layer property is invalid.");
+ return ret;
+ }
- int ret = CheckLayerProperty(property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Given output layer property is invalid.");
- return ret;
+ return mBackendHandle->SetInputLayerProperty(property);
}
- return mBackendHandle->SetOutputLayerProperty(property);
-}
+ int InferenceEngineCommon::SetOutputLayerProperty(
+ inference_engine_layer_property &property)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
-int InferenceEngineCommon::GetBackendCapacity(inference_engine_capacity *capacity)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ if (property.layer_names.empty()) {
+ LOGE("layer_names vector of a given property is empty.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = CheckLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Given output layer property is invalid.");
+ return ret;
+ }
- if (capacity == nullptr) {
- LOGE("Given inference_engine_capacity object is invalid.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ return mBackendHandle->SetOutputLayerProperty(property);
}
- return mBackendHandle->GetBackendCapacity(capacity);
-}
+ int InferenceEngineCommon::GetBackendCapacity(
+ inference_engine_capacity *capacity)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
-int InferenceEngineCommon::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
- CHECK_ENGINE_INSTANCE(mBackendHandle);
+ if (capacity == nullptr) {
+ LOGE("Given inference_engine_capacity object is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
- if (mUseProfiler == true) {
- mProfiler->Start(IE_PROFILER_LATENCY);
+ return mBackendHandle->GetBackendCapacity(capacity);
}
- int ret = mBackendHandle->Run(input_buffers, output_buffers);
+ int InferenceEngineCommon::Run(
+ std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
+ {
+ CHECK_ENGINE_INSTANCE(mBackendHandle);
- if (mUseProfiler == true) {
- mProfiler->Stop(IE_PROFILER_LATENCY, "Run");
- }
+ if (mUseProfiler == true) {
+ mProfiler->Start(IE_PROFILER_LATENCY);
+ }
- return ret;
-}
+ int ret = mBackendHandle->Run(input_buffers, output_buffers);
+
+ if (mUseProfiler == true) {
+ mProfiler->Stop(IE_PROFILER_LATENCY, "Run");
+ }
+
+ return ret;
+ }
} /* Common */
} /* InferenceEngineInterface */
#include <time.h>
#include <unistd.h>
-extern "C" {
-
+extern "C"
+{
#include <dlog.h>
#ifdef LOG_TAG
}
#define NANO_PER_SEC ((__clock_t) 1000000000)
-#define NANO_PER_MILLI ((__clock_t) 1000000)
-#define MILLI_PER_SEC ((__clock_t) 1000)
-
-namespace InferenceEngineInterface {
-namespace Profiler {
-
-// In default, we will use Markdown syntax to print out profile data.
-static const std::string sTitleMarkdown("backend|target devices|model name|Function name|Latency(ms)\n--|--|--|--|--\n");
-
-InferenceEngineProfiler::InferenceEngineProfiler()
-{
- mStartTime = { 0, };
- mEndTime = { 0, };
- mEnvNum = 0;
-
- // In default. we will store profile data to dump.txt file.
- // If you want to use other file then use SetDumpFilename function to change the filename.
- mDumpFilename = "dump.txt";
+#define NANO_PER_MILLI ((__clock_t) 1000000)
+#define MILLI_PER_SEC ((__clock_t) 1000)
- mStartMemoryData = {0, };
- mEndMemoryData = {0, };
-}
-
-InferenceEngineProfiler::~InferenceEngineProfiler()
+namespace InferenceEngineInterface
{
- v_mProfileEnv.clear();
- v_mProfileData.clear();
- m_mDataTable.clear();
-}
-
-void InferenceEngineProfiler::PushData(ProfileData &data)
+namespace Profiler
{
- std::string key = std::to_string(mEnvNum - 1) + data.function_name;
-
- // In case of multiple 'Run' per one 'Load', update just average value of measured ones instead of adding new one.
- if (!m_mDataTable.empty()) {
- std::map<const char *, const void *>::iterator iter;
- iter = m_mDataTable.find(key.c_str());
- if (iter != m_mDataTable.end()) {
- ProfileData *item = (ProfileData *)iter->second;
- item->elapsed_time = (item->elapsed_time + data.elapsed_time) >> 1;
- return;
- }
+ // By default, we will use Markdown syntax to print out profile data.
+ static const std::string sTitleMarkdown(
+ "backend|target devices|model name|Function name|Latency(ms)\n--|--|--|--|--\n");
+
+ InferenceEngineProfiler::InferenceEngineProfiler()
+ {
+ mStartTime = {
+ 0,
+ };
+ mEndTime = {
+ 0,
+ };
+ mEnvNum = 0;
+
+ // By default, profile data will be stored to the dump.txt file.
+ // If you want to use another file, use the SetDumpFilename function to change the filename.
+ mDumpFilename = "dump.txt";
+
+ mStartMemoryData = {
+ 0,
+ };
+ mEndMemoryData = {
+ 0,
+ };
}
- v_mProfileData.push_back(data);
- m_mDataTable.insert(std::make_pair<const char *, const void *>(key.c_str(), &v_mProfileData.back()));
-}
-
-struct timespec InferenceEngineProfiler::GetTimeDiff(struct timespec &start,
- struct timespec &end)
-{
- struct timespec temp;
-
- if ((end.tv_nsec - start.tv_nsec) < 0) {
- temp.tv_sec = end.tv_sec - start.tv_sec - 1;
- temp.tv_nsec = NANO_PER_SEC + end.tv_nsec - start.tv_nsec;
- }
- else {
- temp.tv_sec = end.tv_sec - start.tv_sec;
- temp.tv_nsec = end.tv_nsec - start.tv_nsec;
- }
-
- return temp;
-}
-
-unsigned long InferenceEngineProfiler::ConvertMillisec(const struct timespec &time)
-{
- mStartTime.tv_nsec = 0;
- mStartTime.tv_sec = 0;
- mEndTime.tv_nsec = 0;
- mEndTime.tv_sec = 0;
-
- return (unsigned long)(time.tv_sec * MILLI_PER_SEC + time.tv_nsec / NANO_PER_MILLI);
-}
+ InferenceEngineProfiler::~InferenceEngineProfiler()
+ {
+ v_mProfileEnv.clear();
+ v_mProfileData.clear();
+ m_mDataTable.clear();
+ }
-void InferenceEngineProfiler::GetMemoryUsage(MemoryData &data)
-{
- unsigned long resident_set = 0, rss = 0;
-
- std::string ignore;
- std::ifstream ifs("/proc/self/stat", std::ios_base::in);
- ifs >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore
- >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore
- >> ignore >> ignore >> ignore >> rss;
-
- resident_set = (rss * getpagesize()) / 1024;
- data.rss = resident_set;
-
- // TODO. Collect GPU memory usage specific to board in case of GPU acceleration.
- //
- // If current Linux kernel used Linux DMA mapping framework which is a generic solution for GPU memory management
- // then we can get all memory usage.
- // On the other hands, GPU driver on some boards may use reserved memory which is hided
- // from Linux kernel memory subsystem so the memory usage cannot be measured in generic way.
- // In this case, board specific interface is required.
-}
+ void InferenceEngineProfiler::PushData(ProfileData &data)
+ {
+ std::string key = std::to_string(mEnvNum - 1) + data.function_name;
+
+ // In case of multiple 'Run' calls per one 'Load', just update the average of the measured values instead of adding a new entry.
+ if (!m_mDataTable.empty()) {
+ std::map<const char *, const void *>::iterator iter;
+ iter = m_mDataTable.find(key.c_str());
+ if (iter != m_mDataTable.end()) {
+ ProfileData *item = (ProfileData *) iter->second;
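+ // Average the stored value with the new measurement (the right shift halves the sum).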
+ item->elapsed_time = (item->elapsed_time + data.elapsed_time) >>
+ 1;
+ return;
+ }
+ }
-void InferenceEngineProfiler::Start(const unsigned int type)
-{
- if (IE_PROFILER_MIN >= type || IE_PROFILER_MAX <= type) {
- LOGE("Invalid profiler type.");
- return;
+ v_mProfileData.push_back(data);
+ m_mDataTable.insert(std::make_pair<const char *, const void *>(
+ key.c_str(), &v_mProfileData.back()));
}
- switch (type) {
- case IE_PROFILER_LATENCY:
- clock_gettime(CLOCK_MONOTONIC, &mStartTime);
- break;
- case IE_PROFILER_MEMORY:
- mStartMemoryData = { 0, };
- GetMemoryUsage(mStartMemoryData);
- break;
- /* TODO */
- }
-}
+ struct timespec InferenceEngineProfiler::GetTimeDiff(struct timespec &start,
+ struct timespec &end)
+ {
+ struct timespec temp;
+
+ if ((end.tv_nsec - start.tv_nsec) < 0) {
+ temp.tv_sec = end.tv_sec - start.tv_sec - 1;
+ temp.tv_nsec = NANO_PER_SEC + end.tv_nsec - start.tv_nsec;
+ } else {
+ temp.tv_sec = end.tv_sec - start.tv_sec;
+ temp.tv_nsec = end.tv_nsec - start.tv_nsec;
+ }
-void InferenceEngineProfiler::Stop(const unsigned int type, const char *func_name)
-{
- if (IE_PROFILER_MIN >= type || IE_PROFILER_MAX <= type) {
- LOGE("Invalid profiler type.");
- return;
+ return temp;
}
- ProfileData data = { mEnvNum - 1, func_name, 0 };
+ unsigned long
+ InferenceEngineProfiler::ConvertMillisec(const struct timespec &time)
+ {
+ mStartTime.tv_nsec = 0;
+ mStartTime.tv_sec = 0;
+ mEndTime.tv_nsec = 0;
+ mEndTime.tv_sec = 0;
- switch (type) {
- case IE_PROFILER_LATENCY: {
- clock_gettime(CLOCK_MONOTONIC, &mEndTime);
- data.elapsed_time = ConvertMillisec(GetTimeDiff(mStartTime, mEndTime));
- // TODO.
- PushData(data);
- break;
+ return (unsigned long) (time.tv_sec * MILLI_PER_SEC +
+ time.tv_nsec / NANO_PER_MILLI);
}
- case IE_PROFILER_MEMORY:
- mEndMemoryData = { 0, };
- GetMemoryUsage(mEndMemoryData);
- break;
- /* TODO */
+
+ void InferenceEngineProfiler::GetMemoryUsage(MemoryData &data)
+ {
+ unsigned long resident_set = 0, rss = 0;
+
+ std::string ignore;
+ std::ifstream ifs("/proc/self/stat", std::ios_base::in);
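+ // Skip the first 23 whitespace-separated fields of /proc/self/stat; the 24th field is rss, in pages.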
+ ifs >> ignore >> ignore >> ignore >> ignore >> ignore >> ignore >>
+ ignore >> ignore >> ignore >> ignore >> ignore >> ignore >>
+ ignore >> ignore >> ignore >> ignore >> ignore >> ignore >>
+ ignore >> ignore >> ignore >> ignore >> ignore >> rss;
+
+ resident_set = (rss * getpagesize()) / 1024;
+ data.rss = resident_set;
+
+ // TODO. Collect GPU memory usage specific to the board in case of GPU acceleration.
+ //
+ // If the current Linux kernel uses the Linux DMA mapping framework, which is a generic
+ // solution for GPU memory management, then we can get all memory usage.
+ // On the other hand, the GPU driver on some boards may use reserved memory which is hidden
+ // from the Linux kernel memory subsystem, so the memory usage cannot be measured in a
+ // generic way. In this case, a board-specific interface is required.
}
-}
-void InferenceEngineProfiler::DumpToConsole(void)
-{
- std::cout << sTitleMarkdown;
-
- std::vector<ProfileData>::iterator iter;
- for (iter = v_mProfileData.begin(); iter != v_mProfileData.end(); iter++) {
- ProfileData data = *iter;
- ProfileEnv env = v_mProfileEnv[data.env_idx];
- std::cout << env.backend_name << "|" << env.target_devices << "|" << env.model_name << "|";
- std::cout << data.function_name << "|" << data.elapsed_time << "\n";
+ void InferenceEngineProfiler::Start(const unsigned int type)
+ {
+ if (IE_PROFILER_MIN >= type || IE_PROFILER_MAX <= type) {
+ LOGE("Invalid profiler type.");
+ return;
+ }
+
+ switch (type) {
+ case IE_PROFILER_LATENCY:
+ clock_gettime(CLOCK_MONOTONIC, &mStartTime);
+ break;
+ case IE_PROFILER_MEMORY:
+ mStartMemoryData = {
+ 0,
+ };
+ GetMemoryUsage(mStartMemoryData);
+ break;
+ /* TODO */
+ }
}
- std::cout << "***" << "\n";
- std::cout << "Memory Usage(kb) : " << mEndMemoryData.rss - mStartMemoryData.rss << "\n";
- std::cout << "***" << "\n";
-}
+ void InferenceEngineProfiler::Stop(const unsigned int type,
+ const char *func_name)
+ {
+ if (IE_PROFILER_MIN >= type || IE_PROFILER_MAX <= type) {
+ LOGE("Invalid profiler type.");
+ return;
+ }
-void InferenceEngineProfiler::DumpToFile(const unsigned int dump_type, std::string filename)
-{
- if (mDumpFilename.empty())
- mDumpFilename = filename;
+ ProfileData data = { mEnvNum - 1, func_name, 0 };
- std::ofstream dump_file;
+ switch (type) {
+ case IE_PROFILER_LATENCY: {
+ clock_gettime(CLOCK_MONOTONIC, &mEndTime);
+ data.elapsed_time =
+ ConvertMillisec(GetTimeDiff(mStartTime, mEndTime));
+ // TODO.
+ PushData(data);
+ break;
+ }
+ case IE_PROFILER_MEMORY:
+ mEndMemoryData = {
+ 0,
+ };
+ GetMemoryUsage(mEndMemoryData);
+ break;
+ /* TODO */
+ }
+ }
- dump_file.open(mDumpFilename, std::ios::binary | std::ios::app);
- if (dump_file.is_open()) {
- dump_file.write(sTitleMarkdown.c_str(), sTitleMarkdown.length());
+ void InferenceEngineProfiler::DumpToConsole(void)
+ {
+ std::cout << sTitleMarkdown;
std::vector<ProfileData>::iterator iter;
- for (iter = v_mProfileData.begin(); iter != v_mProfileData.end(); iter++) {
+ for (iter = v_mProfileData.begin(); iter != v_mProfileData.end();
+ iter++) {
ProfileData data = *iter;
ProfileEnv env = v_mProfileEnv[data.env_idx];
- dump_file.write(env.backend_name.c_str(), env.backend_name.length());
- dump_file.write("|", 1);
- if (env.target_devices & INFERENCE_TARGET_CPU)
- dump_file.write("CPU", 3);
- if (env.target_devices & INFERENCE_TARGET_GPU)
- dump_file.write("GPU", 3);
- dump_file.write("|", 1);
- dump_file.write(env.model_name.c_str(), env.model_name.length());
- dump_file.write("|", 1);
- dump_file.write(data.function_name.c_str(), data.function_name.length());
- dump_file.write("|", 1);
- std::string sElapsedTime(std::to_string(data.elapsed_time));
- dump_file.write(sElapsedTime.c_str(), sElapsedTime.length());
- dump_file.write("\n", 1);
+ std::cout << env.backend_name << "|" << env.target_devices << "|"
+ << env.model_name << "|";
+ std::cout << data.function_name << "|" << data.elapsed_time << "\n";
}
- dump_file.write("***\n", 4);
- std::string sMemoryUsage = std::to_string(mEndMemoryData.rss - mStartMemoryData.rss) + "KB Memory used";
- dump_file.write(sMemoryUsage.c_str(), sMemoryUsage.length());
- dump_file.write("\n", 1);
- dump_file.write("***\n", 4);
+ std::cout << "***"
+ << "\n";
+ std::cout << "Memory Usage(kb) : "
+ << mEndMemoryData.rss - mStartMemoryData.rss << "\n";
+ std::cout << "***"
+ << "\n";
}
- dump_file.close();
-}
+ void InferenceEngineProfiler::DumpToFile(const unsigned int dump_type,
+ std::string filename)
+ {
+ if (mDumpFilename.empty())
+ mDumpFilename = filename;
+
+ std::ofstream dump_file;
+
+ dump_file.open(mDumpFilename, std::ios::binary | std::ios::app);
+ if (dump_file.is_open()) {
+ dump_file.write(sTitleMarkdown.c_str(), sTitleMarkdown.length());
+
+ std::vector<ProfileData>::iterator iter;
+ for (iter = v_mProfileData.begin(); iter != v_mProfileData.end();
+ iter++) {
+ ProfileData data = *iter;
+ ProfileEnv env = v_mProfileEnv[data.env_idx];
+ dump_file.write(env.backend_name.c_str(),
+ env.backend_name.length());
+ dump_file.write("|", 1);
+ if (env.target_devices & INFERENCE_TARGET_CPU)
+ dump_file.write("CPU", 3);
+ if (env.target_devices & INFERENCE_TARGET_GPU)
+ dump_file.write("GPU", 3);
+ dump_file.write("|", 1);
+ dump_file.write(env.model_name.c_str(),
+ env.model_name.length());
+ dump_file.write("|", 1);
+ dump_file.write(data.function_name.c_str(),
+ data.function_name.length());
+ dump_file.write("|", 1);
+ std::string sElapsedTime(std::to_string(data.elapsed_time));
+ dump_file.write(sElapsedTime.c_str(), sElapsedTime.length());
+ dump_file.write("\n", 1);
+ }
+
+ dump_file.write("***\n", 4);
+ std::string sMemoryUsage =
+ std::to_string(mEndMemoryData.rss - mStartMemoryData.rss) +
+ "KB Memory used";
+ dump_file.write(sMemoryUsage.c_str(), sMemoryUsage.length());
+ dump_file.write("\n", 1);
+ dump_file.write("***\n", 4);
+ }
-void InferenceEngineProfiler::Dump(const unsigned int dump_type)
-{
- if (IE_PROFILER_DUMP_MIN >= dump_type || IE_PROFILER_DUMP_MAX <= dump_type) {
- LOGE("Invalid profiler dump type.");
- return;
+ dump_file.close();
}
- if (dump_type == IE_PROFILER_DUMP_CONSOLE) {
- DumpToConsole();
- } else {
- DumpToFile(IE_PROFILER_DUMP_FORMAT_MARKDOWN, mDumpFilename);
+ void InferenceEngineProfiler::Dump(const unsigned int dump_type)
+ {
+ if (IE_PROFILER_DUMP_MIN >= dump_type ||
+ IE_PROFILER_DUMP_MAX <= dump_type) {
+ LOGE("Invalid profiler dump type.");
+ return;
+ }
+
+ if (dump_type == IE_PROFILER_DUMP_CONSOLE) {
+ DumpToConsole();
+ } else {
+ DumpToFile(IE_PROFILER_DUMP_FORMAT_MARKDOWN, mDumpFilename);
+ }
}
-}
} /* Profiler */
} /* InferenceEngineInterface */
#include <iniparser.h>
#include <unistd.h>
-extern "C" {
-
+extern "C"
+{
#include <dlog.h>
#ifdef LOG_TAG
#define LOG_TAG "INFERENCE_ENGINE_COMMON"
}
-namespace InferenceEngineInterface {
-namespace Common {
-
-const std::string INFERENCE_INI_FILENAME = "/inference/inference_engine.ini";
-
-InferenceEngineInI::InferenceEngineInI() :
- mIniDefaultPath(SYSCONFDIR),
- mSelectedBackendEngine(INFERENCE_BACKEND_NONE)
+namespace InferenceEngineInterface
{
- LOGE("ENTER");
- mIniDefaultPath += INFERENCE_INI_FILENAME;
- LOGE("LEAVE");
-}
-
-InferenceEngineInI::~InferenceEngineInI()
-{
- ;
-}
-
-int InferenceEngineInI::LoadInI()
+namespace Common
{
- LOGE("ENTER");
- dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
- if (dict == NULL) {
- LOGE("Fail to load ini");
- return -1;
+ const std::string INFERENCE_INI_FILENAME =
+ "/inference/inference_engine.ini";
+
+ InferenceEngineInI::InferenceEngineInI()
+ : mIniDefaultPath(SYSCONFDIR)
+ , mSelectedBackendEngine(INFERENCE_BACKEND_NONE)
+ {
+ LOGE("ENTER");
+ mIniDefaultPath += INFERENCE_INI_FILENAME;
+ LOGE("LEAVE");
}
- mSelectedBackendEngine = static_cast<inference_backend_type_e>(iniparser_getint(dict, "inference backend:selected backend engine", -1));
-
- if(dict) {
- iniparser_freedict(dict);
- dict = NULL;
+ InferenceEngineInI::~InferenceEngineInI()
+ {
+ ;
}
- LOGE("LEAVE");
- return 0;
-}
+ int InferenceEngineInI::LoadInI()
+ {
+ LOGE("ENTER");
+ dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
+ if (dict == NULL) {
+ LOGE("Fail to load ini");
+ return -1;
+ }
+
+ mSelectedBackendEngine =
+ static_cast<inference_backend_type_e>(iniparser_getint(
+ dict, "inference backend:selected backend engine", -1));
+
+ if (dict) {
+ iniparser_freedict(dict);
+ dict = NULL;
+ }
+
+ LOGE("LEAVE");
+ return 0;
+ }
-void InferenceEngineInI::UnLoadInI()
-{
- ;
-}
+ void InferenceEngineInI::UnLoadInI()
+ {
+ ;
+ }
-int InferenceEngineInI::GetSelectedBackendEngine()
-{
- return mSelectedBackendEngine;
-}
+ int InferenceEngineInI::GetSelectedBackendEngine()
+ {
+ return mSelectedBackendEngine;
+ }
} /* Common */
} /* InferenceEngineInterface */
#include "inference_engine_common_impl.h"
#include "inference_engine_test_common.h"
-typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Infer;
-
-class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer> { };
-class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer> { };
-class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer> { };
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>,
+ int, int, int, std::vector<std::string>,
+ std::vector<std::string>, std::vector<std::string>,
+ std::vector<int> >
+ ParamType_Infer;
+
+class InferenceEngineTfliteTest : public testing::TestWithParam<ParamType_Infer>
+{};
+class InferenceEngineCaffeTest : public testing::TestWithParam<ParamType_Infer>
+{};
+class InferenceEngineDldtTest : public testing::TestWithParam<ParamType_Infer>
+{};
TEST_P(InferenceEngineTfliteTest, Inference)
{
std::vector<std::string> model_paths;
std::vector<int> answers;
- std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+ std::tie(backend_name, target_devices, test_type, iteration, tensor_type,
+ image_paths, height, width, ch, input_layers, output_layers,
+ model_paths, answers) = GetParam();
	if (iteration < 1) {
		iteration = 1;
	}
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
- inference_engine_config config = {
- .backend_name = backend_name,
- .backend_type = 0,
- .target_devices = target_devices
- };
+ std::cout << test_name << " inference test : backend = " << backend_name
+ << ", target device = " << Target_Formats[target_devices]
+ << std::endl;
+ inference_engine_config config = { .backend_name = backend_name,
+ .backend_type = 0,
+ .target_devices = target_devices };
auto engine = std::make_unique<InferenceEngineCommon>();
if (engine == nullptr) {
return;
}
- ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_tflite_model.txt");
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+ "_tflite_model.txt");
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
ret = engine->SetTargetDevices(target_devices);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector <std::string> models;
+ std::vector<std::string> models;
int model_type = GetModelInfo(model_paths, models);
if (model_type == -1) {
ASSERT_NE(model_type, -1);
for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
input_property.layer_names.push_back(*iter);
input_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetInputLayerProperty(input_property);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return;
}
- ret = engine->Load(models, (inference_model_format_e)model_type);
+ ret = engine->Load(models, (inference_model_format_e) model_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
}
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
+ for (int i = 0; i < (int) image_paths.size(); ++i) {
CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
}
std::vector<std::string> model_paths;
std::vector<int> answers;
- std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+ std::tie(backend_name, target_devices, test_type, iteration, tensor_type,
+ image_paths, height, width, ch, input_layers, output_layers,
+ model_paths, answers) = GetParam();
	if (iteration < 1) {
		iteration = 1;
	}
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
- inference_engine_config config = {
- .backend_name = backend_name,
- .backend_type = 0,
- .target_devices = target_devices
- };
+ std::cout << test_name << " inference test : backend = " << backend_name
+ << ", target device = " << Target_Formats[target_devices]
+ << std::endl;
+ inference_engine_config config = { .backend_name = backend_name,
+ .backend_type = 0,
+ .target_devices = target_devices };
auto engine = std::make_unique<InferenceEngineCommon>();
if (engine == nullptr) {
return;
}
- ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_caffe_model.txt");
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+ "_caffe_model.txt");
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
ret = engine->SetTargetDevices(target_devices);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector <std::string> models;
+ std::vector<std::string> models;
int model_type = GetModelInfo(model_paths, models);
if (model_type == -1) {
ASSERT_NE(model_type, -1);
for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
input_property.layer_names.push_back(*iter);
input_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetInputLayerProperty(input_property);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return;
}
- ret = engine->Load(models, (inference_model_format_e)model_type);
+ ret = engine->Load(models, (inference_model_format_e) model_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
}
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
+ for (int i = 0; i < (int) image_paths.size(); ++i) {
CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
}
std::vector<std::string> model_paths;
std::vector<int> answers;
- std::tie(backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+ std::tie(backend_name, target_devices, test_type, iteration, tensor_type,
+ image_paths, height, width, ch, input_layers, output_layers,
+ model_paths, answers) = GetParam();
	if (iteration < 1) {
		iteration = 1;
	}
- std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
- inference_engine_config config = {
- .backend_name = backend_name,
- .backend_type = 0,
- .target_devices = target_devices
- };
+ std::cout << test_name << " inference test : backend = " << backend_name
+ << ", target device = " << Target_Formats[target_devices]
+ << std::endl;
+ inference_engine_config config = { .backend_name = backend_name,
+ .backend_type = 0,
+ .target_devices = target_devices };
auto engine = std::make_unique<InferenceEngineCommon>();
if (engine == nullptr) {
return;
}
- ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_dldt_model.txt");
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+ "_dldt_model.txt");
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
ret = engine->SetTargetDevices(target_devices);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector <std::string> models;
+ std::vector<std::string> models;
int model_type = GetModelInfo(model_paths, models);
if (model_type == -1) {
ASSERT_NE(model_type, -1);
for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
input_property.layer_names.push_back(*iter);
input_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetInputLayerProperty(input_property);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return;
}
- ret = engine->Load(models, (inference_model_format_e)model_type);
+ ret = engine->Load(models, (inference_model_format_e) model_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
}
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
+ for (int i = 0; i < (int) image_paths.size(); ++i) {
CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
}
models.clear();
}
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTfliteTest,
+INSTANTIATE_TEST_CASE_P(
+ Prefix, InferenceEngineTfliteTest,
testing::Values(
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
- // mobilenet based image classification test
- // ARMNN.
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // quantized mobilenet based image classification test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- // object detection test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- // face detection test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- // pose estimation test
- ParamType_Infer("armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer("armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
- // mobilenet based image classification test
- // TFLITE.
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // quantized mobilenet based image classification test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_UINT8, { "/opt/usr/images/image_classification_q.bin" }, 224, 224, 3, { "input" }, { "MobilenetV1/Predictions/Reshape_1" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" }, { 955 }),
- // object detection test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" }, { 451, 474, 714, 969 }),
- // face detection test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection.bin" }, 300, 300, 3, { "normalized_input_image_tensor" }, { "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3" }, { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" }, { 727, 225, 960, 555 }),
- // pose estimation test
- ParamType_Infer("tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 }),
- ParamType_Infer("tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3, { "image" }, { "Convolutional_Pose_Machine/stage_5_out" }, { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
- { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351, 351, 382, 382, 382,
- 76, 146, 170, 193, 216, 146, 123, 99, 287, 381, 451, 287, 381, 475 })
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineCaffeTest,
+			// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // ARMNN.
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer(
+ "armnn", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+			// parameter order : backend name, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // TFLITE.
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // quantized mobilenet based image classification test
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_UINT8,
+ { "/opt/usr/images/image_classification_q.bin" }, 224,
+ 224, 3, { "input" },
+ { "MobilenetV1/Predictions/Reshape_1" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_q_model.tflite" },
+ { 955 }),
+ // object detection test
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite" },
+ { 451, 474, 714, 969 }),
+ // face detection test
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection.bin" }, 300, 300, 3,
+ { "normalized_input_image_tensor" },
+ { "TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3" },
+ { "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite" },
+ { 727, 225, 960, 555 }),
+ // pose estimation test
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_CPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 }),
+ ParamType_Infer(
+ "tflite", INFERENCE_TARGET_GPU, TEST_POSE_ESTIMATION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/pose_estimation.bin" }, 192, 192, 3,
+ { "image" },
+ { "Convolutional_Pose_Machine/stage_5_out" },
+ { "/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite" },
+ { 382, 351, 320, 257, 226, 414, 414, 445, 351, 351,
+ 351, 382, 382, 382, 76, 146, 170, 193, 216, 146,
+ 123, 99, 287, 381, 451, 287, 381, 475 })
+ /* TODO */
+ ));
+
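Each ParamType_Infer entry above becomes one parameterized instance of InferenceEngineTfliteTest, numbered in the order it appears inside testing::Values(). With GoogleTest's standard filtering, one suite's instances can be run in isolation, e.g. --gtest_filter='Prefix/InferenceEngineTfliteTest.*'; the test binary name is build-specific and not part of this patch.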
+INSTANTIATE_TEST_CASE_P(
+ Prefix, InferenceEngineCaffeTest,
testing::Values(
- // parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
- // OPENCV
- // squeezenet based image classification test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
-
- // mobilenet-ssd based object detection test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel", "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" }, { 15, 19, 335, 557 }),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/object_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel", "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" }, { 15, 19, 335, 557 }),
-
- // mobilenet-ssd based object detection test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel", "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" }, { 733, 233, 965, 539 }),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/face_detection_caffe.bin" }, 300, 300, 3, { "data" }, { "detection_out" }, { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel", "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" }, { 733, 233, 965, 539 }),
-
- // tweakcnn based facial landmark detection test
- ParamType_Infer("opencv", INFERENCE_TARGET_CPU, TEST_FACIAL_LANDMARK_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/faciallandmark_detection_caffe.bin" }, 128, 128, 3, { "data" }, { "Sigmoid_fc2" }, { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel", "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
- { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79}),
- ParamType_Infer("opencv", INFERENCE_TARGET_GPU, TEST_FACIAL_LANDMARK_DETECTION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/faciallandmark_detection_caffe.bin" }, 128, 128, 3, { "data" }, { "Sigmoid_fc2" }, { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel", "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
- { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79})
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineDldtTest,
+ // parameter order : backend_name, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers
+ // OPENCV
+ // squeezenet based image classification test
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_CPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification_caffe.bin" },
+ 227, 227, 3, { "data" }, { "prob" },
+ { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
+ { 281 }),
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_GPU,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification_caffe.bin" },
+ 227, 227, 3, { "data" }, { "prob" },
+ { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
+ { 281 }),
+
+ // mobilenet-ssd based object detection test
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_CPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection_caffe.bin" }, 300,
+ 300, 3, { "data" }, { "detection_out" },
+ { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
+ "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" },
+ { 15, 19, 335, 557 }),
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_GPU, TEST_OBJECT_DETECTION,
+ 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/object_detection_caffe.bin" }, 300,
+ 300, 3, { "data" }, { "detection_out" },
+ { "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.caffemodel",
+ "/usr/share/capi-media-vision/models/OD/caffe/od_caffe_model_mobilenetv1ssd.prototxt" },
+ { 15, 19, 335, 557 }),
+
+			// resnet10-ssd based face detection test
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_CPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection_caffe.bin" }, 300,
+ 300, 3, { "data" }, { "detection_out" },
+ { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel",
+ "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
+ { 733, 233, 965, 539 }),
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_GPU, TEST_FACE_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/face_detection_caffe.bin" }, 300,
+ 300, 3, { "data" }, { "detection_out" },
+ { "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.caffemodel",
+ "/usr/share/capi-media-vision/models/FD/caffe/fd_caffe_model_resnet10ssd.prototxt" },
+ { 733, 233, 965, 539 }),
+
+ // tweakcnn based facial landmark detection test
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_CPU,
+ TEST_FACIAL_LANDMARK_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
+ 128, 128, 3, { "data" }, { "Sigmoid_fc2" },
+ { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel",
+ "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
+ { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 }),
+ ParamType_Infer(
+ "opencv", INFERENCE_TARGET_GPU,
+ TEST_FACIAL_LANDMARK_DETECTION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/faciallandmark_detection_caffe.bin" },
+ 128, 128, 3, { "data" }, { "Sigmoid_fc2" },
+ { "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.caffemodel",
+ "/usr/share/capi-media-vision/models/FLD/caffe/fld_caffe_model_tweak.prototxt" },
+ { 53, 45, 85, 46, 66, 64, 54, 78, 82, 79 })
+ /* TODO */
+ ));
+
+INSTANTIATE_TEST_CASE_P(
+ Prefix, InferenceEngineDldtTest,
testing::Values(
- // DLDT
- ParamType_Infer("dldt", INFERENCE_TARGET_CUSTOM, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/dldt_banana_classification.bin" }, 224, 224, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml", "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" }, { 954 })
- )
-);
+ // DLDT
+ ParamType_Infer(
+ "dldt", INFERENCE_TARGET_CUSTOM,
+ TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/dldt_banana_classification.bin" },
+ 224, 224, 3, { "data" }, { "prob" },
+ { "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.xml",
+ "/usr/share/capi-media-vision/models/IC/dldt/googlenet-v1.bin" },
+ { 954 })));
#include "inference_engine_common_impl.h"
#include "inference_engine_test_common.h"
-enum {
+enum
+{
	INFERENCE_ENGINE_PROFILER_OFF = 0, /**< Do not profile the inference engine. */
	INFERENCE_ENGINE_PROFILER_FILE, /**< Profile the inference engine and store the collected data to a file. */
	INFERENCE_ENGINE_PROFILER_CONSOLE, /**< Profile the inference engine and print the collected data to the console. */
typedef std::tuple<std::string> ParamType_One;
typedef std::tuple<std::string, int> ParamType_Two;
-typedef std::tuple<std::string, int, std::vector<std::string>> ParamType_Three;
-typedef std::tuple<std::string, int, int, int, int, std::vector<std::string>> ParamType_Six;
-typedef std::tuple<std::string, int, int, int, int, int, std::vector<std::string>, int, int, int, std::vector<std::string>, std::vector<std::string>, std::vector<std::string>, std::vector<int>> ParamType_Many;
+typedef std::tuple<std::string, int, std::vector<std::string> > ParamType_Three;
+typedef std::tuple<std::string, int, int, int, int, std::vector<std::string> >
+ ParamType_Six;
+typedef std::tuple<std::string, int, int, int, int, int,
+ std::vector<std::string>, int, int, int,
+ std::vector<std::string>, std::vector<std::string>,
+ std::vector<std::string>, std::vector<int> >
+ ParamType_Many;
typedef std::tuple<int> ParamType_One_Int;
-class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_One> { };
-class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Two> { };
-class InferenceEngineTestCase_G3 : public testing::TestWithParam<ParamType_Three> { };
-class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Six> { };
-class InferenceEngineTestCase_G5 : public testing::TestWithParam<ParamType_Six> { };
-class InferenceEngineTestCase_G6 : public testing::TestWithParam<ParamType_Many> { };
-class InferenceEngineTestCase_G7 : public testing::TestWithParam<ParamType_One_Int> { };
-class InferenceEngineTestCase_G8 : public testing::TestWithParam<ParamType_One_Int> { };
-
-static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
+class InferenceEngineTestCase_G1 : public testing::TestWithParam<ParamType_One>
+{};
+class InferenceEngineTestCase_G2 : public testing::TestWithParam<ParamType_Two>
+{};
+class InferenceEngineTestCase_G3
+ : public testing::TestWithParam<ParamType_Three>
+{};
+class InferenceEngineTestCase_G4 : public testing::TestWithParam<ParamType_Six>
+{};
+class InferenceEngineTestCase_G5 : public testing::TestWithParam<ParamType_Six>
+{};
+class InferenceEngineTestCase_G6 : public testing::TestWithParam<ParamType_Many>
+{};
+class InferenceEngineTestCase_G7
+ : public testing::TestWithParam<ParamType_One_Int>
+{};
+class InferenceEngineTestCase_G8
+ : public testing::TestWithParam<ParamType_One_Int>
+{};
+
+static auto InferenceEngineInit_One_Param =
+ [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
inference_engine_config config = { backend_name, 0, 0 };
return engine->BindBackend(&config);
};
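For reference, a minimal sketch of how a TEST_P body would drive the one-parameter initializer above, mirroring the InferenceEngineInit_Two_Params call sites later in this patch (the backend string is illustrative; real tests unpack it from GetParam()):

	auto engine = std::make_unique<InferenceEngineCommon>();
	ASSERT_TRUE(engine);
	std::string backend_name = "tflite"; // illustrative value, normally taken from GetParam()
	int ret = InferenceEngineInit_One_Param(engine.get(), backend_name);
	ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE); // BindBackend() succeeded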
-static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine, std::string &backend_name, int &target_devices) -> int {
+static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine,
+ std::string &backend_name,
+ int &target_devices) -> int {
inference_engine_config config = { backend_name, 0, target_devices };
int ret = engine->BindBackend(&config);
std::tie(backend_name) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::tie(backend_name) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::tie(backend_name) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::tie(backend_name) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::tie(backend_name) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::tie(backend_name, target_devices) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::tie(backend_name, target_devices) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::tie(backend_name, target_devices, model_paths) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
- int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name, target_devices);
+ int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name,
+ target_devices);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector <std::string> models;
+ std::vector<std::string> models;
int model_type = GetModelInfo(model_paths, models);
if (model_type == -1) {
ASSERT_NE(model_type, -1);
return;
}
- ret = engine->Load(models, (inference_model_format_e)model_type);
+ ret = engine->Load(models, (inference_model_format_e) model_type);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
engine->UnbindBackend();
std::tie(backend_name, target_devices, model_paths) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
int model_type = GetModelInfo(model_paths, models);
ASSERT_NE(model_type, -1);
- int ret = engine->Load(models, (inference_model_format_e)model_type);
+ int ret = engine->Load(models, (inference_model_format_e) model_type);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_OPERATION);
}
{
std::string backend_name;
int target_devices;
- std::vector<std::string> model_paths = { "/path/to/wrong/ic_tflite_model.tflite" };
+ std::vector<std::string> model_paths = {
+ "/path/to/wrong/ic_tflite_model.tflite"
+ };
std::tie(backend_name, target_devices) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
- int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name, target_devices);
+ int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name,
+ target_devices);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
std::vector<std::string> models;
int model_type = GetModelInfo(model_paths, models);
ASSERT_NE(model_type, -1);
- ret = engine->Load(models, (inference_model_format_e)model_type);
+ ret = engine->Load(models, (inference_model_format_e) model_type);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PATH);
}
size_t ch;
std::vector<std::string> input_layers;
- std::tie(backend_name, tensor_type, height, width, ch, input_layers) = GetParam();
+ std::tie(backend_name, tensor_type, height, width, ch, input_layers) =
+ GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
input_property.layer_names.push_back(*iter);
input_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetInputLayerProperty(input_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
}
size_t ch;
std::vector<std::string> output_layers;
- std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+ std::tie(backend_name, tensor_type, height, width, ch, output_layers) =
+ GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
output_property.layer_names.push_back(*iter);
output_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetInputLayerProperty(output_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
}
std::tie(backend_name) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
size_t ch;
std::vector<std::string> output_layers;
- std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+ std::tie(backend_name, tensor_type, height, width, ch, output_layers) =
+ GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
output_property.layer_names.push_back(*iter);
output_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetOutputLayerProperty(output_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
}
size_t ch;
std::vector<std::string> output_layers;
- std::tie(backend_name, tensor_type, height, width, ch, output_layers) = GetParam();
+ std::tie(backend_name, tensor_type, height, width, ch, output_layers) =
+ GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
for (iter = output_layers.begin(); iter != output_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
output_property.layer_names.push_back(*iter);
output_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetOutputLayerProperty(output_property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER);
}
std::tie(backend_name) = GetParam();
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
ASSERT_TRUE(engine);
std::vector<std::string> model_paths;
std::vector<int> answers;
- std::tie(backend_name, profiler, target_devices, test_type, iteration, tensor_type, image_paths, height, width, ch, input_layers, output_layers, model_paths, answers) = GetParam();
+ std::tie(backend_name, profiler, target_devices, test_type, iteration,
+ tensor_type, image_paths, height, width, ch, input_layers,
+ output_layers, model_paths, answers) = GetParam();
	if (iteration < 1) {
		iteration = 1;
	}
- std::cout <<"backend = " << backend_name << std::endl;
+ std::cout << "backend = " << backend_name << std::endl;
auto engine = std::make_unique<InferenceEngineCommon>();
if (engine == nullptr) {
return;
}
- if (profiler > INFERENCE_ENGINE_PROFILER_OFF && profiler < INFERENCE_ENGINE_PROFILER_MAX) {
+ if (profiler > INFERENCE_ENGINE_PROFILER_OFF &&
+ profiler < INFERENCE_ENGINE_PROFILER_MAX) {
int ret = engine->EnableProfiler(true);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
if (profiler == INFERENCE_ENGINE_PROFILER_FILE) {
- ret = engine->DumpProfileToFile("profile_data_" + backend_name + "_tflite_model.txt");
+ ret = engine->DumpProfileToFile("profile_data_" + backend_name +
+ "_tflite_model.txt");
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
} else {
ret = engine->DumpProfileToConsole();
}
}
- int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name, target_devices);
+ int ret = InferenceEngineInit_Two_Params(engine.get(), backend_name,
+ target_devices);
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- std::vector <std::string> models;
+ std::vector<std::string> models;
int model_type = GetModelInfo(model_paths, models);
if (model_type == -1) {
ASSERT_NE(model_type, -1);
for (iter = input_layers.begin(); iter != input_layers.end(); iter++) {
inference_engine_tensor_info tensor_info = {
{ 1, ch, height, width },
- (inference_tensor_shape_type_e)INFERENCE_TENSOR_SHAPE_NCHW,
- (inference_tensor_data_type_e)tensor_type,
+ (inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
+ (inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width)
};
input_property.layer_names.push_back(*iter);
input_property.tensor_infos.push_back(tensor_info);
- }
+ }
ret = engine->SetInputLayerProperty(input_property);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return;
}
- ret = engine->Load(models, (inference_model_format_e)model_type);
+ ret = engine->Load(models, (inference_model_format_e) model_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
ASSERT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return;
}
// Copy input image tensor data from a given file to input tensor buffer.
- for (int i = 0; i < (int)image_paths.size(); ++i) {
+ for (int i = 0; i < (int) image_paths.size(); ++i) {
CopyFileToMemory(image_paths[i].c_str(), inputs[i], inputs[i].size);
}
}
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G1,
- testing::Values(
- // parameter order : backend name
- // ARMNN.
- ParamType_One("armnn"),
- // TFLITE.
- ParamType_One("tflite"),
- // OPENCV
- ParamType_One("opencv")
- /* TODO */
- )
-);
+ testing::Values(
+ // parameter order : backend name
+ // ARMNN.
+ ParamType_One("armnn"),
+ // TFLITE.
+ ParamType_One("tflite"),
+ // OPENCV
+ ParamType_One("opencv")
+ /* TODO */
+ ));
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G2,
+ testing::Values(
+ // parameter order : backend name, target device
+ // ARMNN.
+ ParamType_Two("armnn", INFERENCE_TARGET_CPU),
+ // TFLITE.
+ ParamType_Two("tflite", INFERENCE_TARGET_CPU),
+				// OPENCV.
+ ParamType_Two("opencv", INFERENCE_TARGET_CPU)
+ /* TODO */
+ ));
+
+INSTANTIATE_TEST_CASE_P(
+ Prefix, InferenceEngineTestCase_G3,
testing::Values(
- // parameter order : backend name, target device
- // ARMNN.
- ParamType_Two("armnn", INFERENCE_TARGET_CPU),
- // TFLITE.
- ParamType_Two("tflite", INFERENCE_TARGET_CPU),
- // OPENCV,
- ParamType_Two("opencv", INFERENCE_TARGET_CPU)
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G3,
+ // parameter order : backend name, target device, model path/s
+ // mobilenet based image classification model loading test
+ // ARMNN.
+ ParamType_Three(
+ "armnn", INFERENCE_TARGET_CPU,
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ // TFLITE.
+ ParamType_Three(
+ "tflite", INFERENCE_TARGET_CPU,
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
+ // OPENCV.
+ ParamType_Three(
+ "opencv", INFERENCE_TARGET_CPU,
+ { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" })
+ /* TODO */
+ ));
+
+INSTANTIATE_TEST_CASE_P(
+ Prefix, InferenceEngineTestCase_G4,
testing::Values(
- // parameter order : backend name, target device, model path/s
- // mobilenet based image classification model loading test
- // ARMNN.
- ParamType_Three("armnn", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
- // TFLITE.
- ParamType_Three("tflite", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }),
- // OPENCV.
- ParamType_Three("opencv", INFERENCE_TARGET_CPU, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" })
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G4,
+ // parameter order : backend name, input data type, height, width, channel count, layer name
+ // set input and output layer positive test
+ // ARMNN.
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 3, { "test_name" }),
+ // TFLITE.
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 3, { "test_name" }),
+ // OPENCV.
+ ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 3, { "test_name" })
+ /* TODO */
+ ));
+
+INSTANTIATE_TEST_CASE_P(
+ Prefix, InferenceEngineTestCase_G5,
testing::Values(
- // parameter order : backend name, input data type, height, width, channel count, layer name
- // set input and output layer positive test
- // ARMNN.
- ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" }),
- // TFLITE.
- ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" }),
- // OPENCV.
- ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "test_name" })
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G5,
+ // parameter order : backend name, input data type, height, width, channel count, layer name
+ // set input and output layer negative test
+ // ARMNN.
+ ParamType_Six("armnn", -1, 224, 224, 3, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0,
+ 224, 3, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 0, 3, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 0, { "test_name" }),
+ ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 3, { "" }),
+ // TFLITE.
+ ParamType_Six("tflite", -1, 224, 224, 3, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0,
+ 224, 3, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 0, 3, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 0, { "test_name" }),
+ ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 3, { "" }),
+ // OPENCV.
+ ParamType_Six("opencv", -1, 224, 224, 3, { "test_name" }),
+ ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0,
+ 224, 3, { "test_name" }),
+ ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 0, 3, { "test_name" }),
+ ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 0, { "test_name" }),
+ ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224,
+ 224, 3, { "" })
+ /* TODO */
+ ));
+
+INSTANTIATE_TEST_CASE_P(
+ Prefix, InferenceEngineTestCase_G6,
testing::Values(
- // parameter order : backend name, input data type, height, width, channel count, layer name
- // set input and output layer negative test
- // ARMNN.
- ParamType_Six("armnn", -1, 224, 224, 3, { "test_name" }),
- ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),
- ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),
- ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),
- ParamType_Six("armnn", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" }),
- // TFLITE.
- ParamType_Six("tflite", -1, 224, 224, 3, { "test_name" }),
- ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),
- ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),
- ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),
- ParamType_Six("tflite", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" }),
- // OPENCV.
- ParamType_Six("opencv", -1, 224, 224, 3, { "test_name" }),
- ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 0, 224, 3, { "test_name" }),
- ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 0, 3, { "test_name" }),
- ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 0, { "test_name" }),
- ParamType_Six("opencv", INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 224, 224, 3, { "" })
- /* TODO */
- )
-);
-
-INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G6,
- testing::Values(
- // parameter order : backend name, target device, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
- // mobilenet based image classification test
- // ARMNN.
- ParamType_Many("armnn", INFERENCE_ENGINE_PROFILER_OFF, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // TFLITE.
- ParamType_Many("tflite", INFERENCE_ENGINE_PROFILER_OFF, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // OPENCV.
- ParamType_Many("opencv", INFERENCE_ENGINE_PROFILER_OFF, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
- // ARMNN.
- ParamType_Many("armnn", INFERENCE_ENGINE_PROFILER_FILE, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // TFLITE.
- ParamType_Many("tflite", INFERENCE_ENGINE_PROFILER_FILE, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // OPENCV.
- ParamType_Many("opencv", INFERENCE_ENGINE_PROFILER_FILE, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 }),
- // ARMNN.
- ParamType_Many("armnn", INFERENCE_ENGINE_PROFILER_CONSOLE, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // TFLITE.
- ParamType_Many("tflite", INFERENCE_ENGINE_PROFILER_CONSOLE, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification.bin" }, 224, 224, 3, { "input_2" }, { "dense_3/Softmax" }, { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" }, { 3 }),
- // OPENCV.
- ParamType_Many("opencv", INFERENCE_ENGINE_PROFILER_CONSOLE, INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, { "/opt/usr/images/image_classification_caffe.bin" }, 227, 227, 3, { "data" }, { "prob" }, { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel", "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" }, { 281 })
- /* TODO */
- )
-);
+			// parameter order : backend name, profiler mode, target device, test type, iteration count, tensor data type, input image path/s, height, width, channel count, input layer names, output layer names, model path/s, inference result
+ // mobilenet based image classification test
+ // ARMNN.
+ ParamType_Many(
+ "armnn", INFERENCE_ENGINE_PROFILER_OFF,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // TFLITE.
+ ParamType_Many(
+ "tflite", INFERENCE_ENGINE_PROFILER_OFF,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // OPENCV.
+ ParamType_Many(
+ "opencv", INFERENCE_ENGINE_PROFILER_OFF,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification_caffe.bin" },
+ 227, 227, 3, { "data" }, { "prob" },
+ { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
+ { 281 }),
+ // ARMNN.
+ ParamType_Many(
+ "armnn", INFERENCE_ENGINE_PROFILER_FILE,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // TFLITE.
+ ParamType_Many(
+ "tflite", INFERENCE_ENGINE_PROFILER_FILE,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+ // OPENCV.
+ ParamType_Many(
+ "opencv", INFERENCE_ENGINE_PROFILER_FILE,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification_caffe.bin" },
+ 227, 227, 3, { "data" }, { "prob" },
+ { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
+ { 281 }),
+ // ARMNN.
+ ParamType_Many(
+ "armnn", INFERENCE_ENGINE_PROFILER_CONSOLE,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+		// TFLITE (profiler to console).
+ ParamType_Many(
+ "tflite", INFERENCE_ENGINE_PROFILER_CONSOLE,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification.bin" }, 224,
+ 224, 3, { "input_2" }, { "dense_3/Softmax" },
+ { "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite" },
+ { 3 }),
+		// OPENCV (profiler to console).
+ ParamType_Many(
+ "opencv", INFERENCE_ENGINE_PROFILER_CONSOLE,
+ INFERENCE_TARGET_CPU, TEST_IMAGE_CLASSIFICATION, 10,
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+ { "/opt/usr/images/image_classification_caffe.bin" },
+ 227, 227, 3, { "data" }, { "prob" },
+ { "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel",
+ "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt" },
+ { 281 })
+ /* TODO */
+ ));
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G7,
- testing::Values(
- // parameter order : backend type
- // ARMNN.
- ParamType_One_Int(INFERENCE_BACKEND_ARMNN),
- // TFLITE.
- ParamType_One_Int(INFERENCE_BACKEND_TFLITE),
- // OPENCV.
- ParamType_One_Int(INFERENCE_BACKEND_OPENCV)
- /* TODO */
- )
-);
+ testing::Values(
+ // parameter order : backend type
+ // ARMNN.
+ ParamType_One_Int(INFERENCE_BACKEND_ARMNN),
+ // TFLITE.
+ ParamType_One_Int(INFERENCE_BACKEND_TFLITE),
+ // OPENCV.
+ ParamType_One_Int(INFERENCE_BACKEND_OPENCV)
+ /* TODO */
+ ));
INSTANTIATE_TEST_CASE_P(Prefix, InferenceEngineTestCase_G8,
- testing::Values(
- // parameter order : backend type
- // Wrong backend type.
- ParamType_One_Int(-1)
- /* TODO */
- )
-);
\ No newline at end of file
+ testing::Values(
+ // parameter order : backend type
+ // Wrong backend type.
+ ParamType_One_Int(-1)
+ /* TODO */
+ ));
\ No newline at end of file
#include "inference_engine_test_common.h"
static std::map<std::string, int> Model_Formats = {
- { "caffemodel", INFERENCE_MODEL_CAFFE },
- { "pb", INFERENCE_MODEL_TF },
- { "tflite", INFERENCE_MODEL_TFLITE },
- { "t7", INFERENCE_MODEL_TORCH },
- { "weights", INFERENCE_MODEL_DARKNET },
- { "xml", INFERENCE_MODEL_DLDT },
+ { "caffemodel", INFERENCE_MODEL_CAFFE }, { "pb", INFERENCE_MODEL_TF },
+ { "tflite", INFERENCE_MODEL_TFLITE }, { "t7", INFERENCE_MODEL_TORCH },
+ { "weights", INFERENCE_MODEL_DARKNET }, { "xml", INFERENCE_MODEL_DLDT },
{ "onnx", INFERENCE_MODEL_ONNX }
};
-int GetModelInfo(std::vector <std::string> &model_paths, std::vector<std::string> &models)
+int GetModelInfo(std::vector<std::string> &model_paths,
+ std::vector<std::string> &models)
{
std::string model_path = model_paths[0];
std::string ext_str = model_path.substr(model_path.find_last_of(".") + 1);
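+	// The extension is then looked up in the Model_Formats table above to
+	// pick the matching inference_model_format_e value.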
return ret;
}
-int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &inputs,
- std::vector<inference_engine_tensor_buffer> &outputs)
+int PrepareTensorBuffers(InferenceEngineCommon *engine,
+ std::vector<inference_engine_tensor_buffer> &inputs,
+ std::vector<inference_engine_tensor_buffer> &outputs)
{
int ret = engine->GetInputTensorBuffers(inputs);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
return INFERENCE_ENGINE_ERROR_NONE;
}
- for (int i = 0; i < (int)input_property.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info = input_property.tensor_infos[i];
+ for (int i = 0; i < (int) input_property.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info =
+ input_property.tensor_infos[i];
inference_engine_tensor_buffer tensor_buffer;
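+		// Allocate per data type: FLOAT32 takes four bytes per element,
+		// UINT8 one byte, so the buffer size is scaled accordingly.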
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+ tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
tensor_buffer.size = tensor_info.size * 4;
- } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+ } else if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ tensor_buffer.buffer =
+ (void *) (new unsigned char[tensor_info.size]);
tensor_buffer.size = tensor_info.size;
}
return INFERENCE_ENGINE_ERROR_NONE;
}
- for (int i = 0; i < (int)output_property.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info = output_property.tensor_infos[i];
+ for (int i = 0; i < (int) output_property.tensor_infos.size(); ++i) {
+ inference_engine_tensor_info tensor_info =
+ output_property.tensor_infos[i];
inference_engine_tensor_buffer tensor_buffer;
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
+ tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
tensor_buffer.size = tensor_info.size * 4;
- } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
+ } else if (tensor_info.data_type ==
+ INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ tensor_buffer.buffer =
+ (void *) (new unsigned char[tensor_info.size]);
tensor_buffer.size = tensor_info.size;
}
return INFERENCE_ENGINE_ERROR_NONE;
}
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs, std::vector<inference_engine_tensor_buffer> &outputs)
+void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
+ std::vector<inference_engine_tensor_buffer> &outputs)
{
if (!inputs.empty()) {
std::vector<inference_engine_tensor_buffer>::iterator iter;
}
if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
- delete[] (float *)tensor_buffer.buffer;
+ delete[](float *) tensor_buffer.buffer;
else
- delete[] (unsigned char *)tensor_buffer.buffer;
+ delete[](unsigned char *) tensor_buffer.buffer;
}
std::vector<inference_engine_tensor_buffer>().swap(inputs);
}
}
if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
- delete[] (float *)tensor_buffer.buffer;
+ delete[](float *) tensor_buffer.buffer;
else
- delete[] (unsigned char *)tensor_buffer.buffer;
+ delete[](unsigned char *) tensor_buffer.buffer;
}
std::vector<inference_engine_tensor_buffer>().swap(outputs);
}
}
-void CopyFileToMemory(const char *file_name, inference_engine_tensor_buffer &buffer, unsigned int size)
+void CopyFileToMemory(const char *file_name,
+ inference_engine_tensor_buffer &buffer, unsigned int size)
{
int fd = open(file_name, O_RDONLY);
if (fd == -1) {
close(fd);
}
-void FillOutputResult(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &outputs, tensor_t &outputData)
+void FillOutputResult(InferenceEngineCommon *engine,
+ std::vector<inference_engine_tensor_buffer> &outputs,
+ tensor_t &outputData)
{
inference_engine_layer_property property;
int ret = engine->GetOutputLayerProperty(property);
EXPECT_EQ(ret, INFERENCE_ENGINE_ERROR_NONE);
- for (int i = 0; i < (int)property.tensor_infos.size(); ++i) {
+ for (int i = 0; i < (int) property.tensor_infos.size(); ++i) {
inference_engine_tensor_info tensor_info = property.tensor_infos[i];
std::vector<int> tmpDimInfo;
- for (int i = 0; i < (int)tensor_info.shape.size(); i++) {
+ for (int i = 0; i < (int) tensor_info.shape.size(); i++) {
tmpDimInfo.push_back(tensor_info.shape[i]);
}
		// Normalize the output tensor data, converting it to float in case of a quantized model.
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- unsigned char *ori_buf = (unsigned char *)outputs[i].buffer;
+ unsigned char *ori_buf = (unsigned char *) outputs[i].buffer;
float *new_buf = new float[tensor_info.size];
ASSERT_TRUE(new_buf);
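+			// Dequantize with a fixed 1/255 scale and no zero-point offset.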
- for (int j = 0; j < (int)tensor_info.size; j++) {
- new_buf[j] = (float)ori_buf[j] / 255.0f;
+ for (int j = 0; j < (int) tensor_info.size; j++) {
+ new_buf[j] = (float) ori_buf[j] / 255.0f;
}
			// Replace the original buffer with the new one, and release the original.
}
}
- outputData.data.push_back((void *)outputs[i].buffer);
+ outputData.data.push_back((void *) outputs[i].buffer);
}
}
int VerifyImageClassificationResults(tensor_t &outputData, int answer)
{
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
int idx = -1;
int count = inferDimInfo[0][1];
float value = 0.0f;
- float *prediction = reinterpret_cast<float*>(inferResults[0]);
+ float *prediction = reinterpret_cast<float *>(inferResults[0]);
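+	// Argmax over the prediction scores: the index of the largest value is
+	// the predicted class label.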
for (int i = 0; i < count; ++i) {
if (value < prediction[i]) {
value = prediction[i];
return idx == answer;
}
-int VerifyObjectDetectionResults(tensor_t &outputData, std::vector<int> &answers, int height, int width)
+int VerifyObjectDetectionResults(tensor_t &outputData,
+ std::vector<int> &answers, int height,
+ int width)
{
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
- float* boxes = nullptr;
- float* classes = nullptr;
- float* scores = nullptr;
+ float *boxes = nullptr;
+ float *classes = nullptr;
+ float *scores = nullptr;
int num_of_detections = 0;
if (outputData.dimInfo.size() == 1) {
		// indicates the image id. But it is useless if batch mode isn't supported.
// So, use the 1st of 7.
- num_of_detections = (int)(*reinterpret_cast<float*>(outputData.data[0]));
+ num_of_detections =
+ (int) (*reinterpret_cast<float *>(outputData.data[0]));
boxes = new float[num_of_detections * 4];
classes = new float[num_of_detections];
scores = new float[num_of_detections];
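+		// Judging from the indices used below, each detection is a run of
+		// inferDimInfo[0][3] (= 7) floats laid out as
+		// [image_id, label, score, xmin, ymin, xmax, ymax].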
for (int idx = 0; idx < num_of_detections; ++idx) {
- classes[idx] = (reinterpret_cast<float*>(outputData.data[0]))[idx*inferDimInfo[0][3] + 1];
- scores[idx] = (reinterpret_cast<float*>(outputData.data[0]))[idx*inferDimInfo[0][3] + 2];
-
- boxes[idx*4] = (reinterpret_cast<float*>(outputData.data[0]))[idx*inferDimInfo[0][3] + 4];
- boxes[idx*4 + 1] = (reinterpret_cast<float*>(outputData.data[0]))[idx*inferDimInfo[0][3] + 3];
- boxes[idx*4 + 2] = (reinterpret_cast<float*>(outputData.data[0]))[idx*inferDimInfo[0][3] + 6];
- boxes[idx*4 + 3] = (reinterpret_cast<float*>(outputData.data[0]))[idx*inferDimInfo[0][3] + 5];
+ classes[idx] = (reinterpret_cast<float *>(
+ outputData.data[0]))[idx * inferDimInfo[0][3] + 1];
+ scores[idx] = (reinterpret_cast<float *>(
+ outputData.data[0]))[idx * inferDimInfo[0][3] + 2];
+
+ boxes[idx * 4] = (reinterpret_cast<float *>(
+ outputData.data[0]))[idx * inferDimInfo[0][3] + 4];
+ boxes[idx * 4 + 1] = (reinterpret_cast<float *>(
+ outputData.data[0]))[idx * inferDimInfo[0][3] + 3];
+ boxes[idx * 4 + 2] = (reinterpret_cast<float *>(
+ outputData.data[0]))[idx * inferDimInfo[0][3] + 6];
+ boxes[idx * 4 + 3] = (reinterpret_cast<float *>(
+ outputData.data[0]))[idx * inferDimInfo[0][3] + 5];
}
} else {
- boxes = reinterpret_cast<float*>(inferResults[0]);
- classes = reinterpret_cast<float*>(inferResults[1]);
- scores = reinterpret_cast<float*>(inferResults[2]);
- num_of_detections = (int)(*reinterpret_cast<float*>(inferResults[3]));
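+		// Separate boxes, classes and scores tensors plus a detection
+		// count, matching a typical SSD post-processed output layout.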
+ boxes = reinterpret_cast<float *>(inferResults[0]);
+ classes = reinterpret_cast<float *>(inferResults[1]);
+ scores = reinterpret_cast<float *>(inferResults[2]);
+ num_of_detections = (int) (*reinterpret_cast<float *>(inferResults[3]));
}
int left = 0, top = 0, right = 0, bottom = 0;
if (max_score < scores[i]) {
max_score = scores[i];
- left = (int)(boxes[i * 4 + 1] * width);
- top = (int)(boxes[i * 4 + 0] * height);
- right = (int)(boxes[i * 4 + 3] * width);
- bottom = (int)(boxes[i * 4 + 2] * height);
+ left = (int) (boxes[i * 4 + 1] * width);
+ top = (int) (boxes[i * 4 + 0] * height);
+ right = (int) (boxes[i * 4 + 3] * width);
+ bottom = (int) (boxes[i * 4 + 2] * height);
}
}
if (outputData.dimInfo.size() == 1) {
- delete [] boxes;
- delete [] classes;
- delete [] scores;
+ delete[] boxes;
+ delete[] classes;
+ delete[] scores;
}
- return (answers[0] == left && answers[1] == top && answers[2] == right && answers[3] == bottom);
+ return (answers[0] == left && answers[1] == top && answers[2] == right &&
+ answers[3] == bottom);
}
-int VerifyFacialLandmarkDetectionResults(tensor_t &outputData, std::vector<int> &answers, int height, int width)
+int VerifyFacialLandmarkDetectionResults(tensor_t &outputData,
+ std::vector<int> &answers, int height,
+ int width)
{
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
std::vector<int> result_x, result_y;
long number_of_detections = inferDimInfo[0][1];
- float* loc = reinterpret_cast<float*>(inferResults[0]);
+ float *loc = reinterpret_cast<float *>(inferResults[0]);
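+	// The landmark tensor interleaves normalized (x, y) pairs; convert
+	// them to pixel coordinates.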
- for (int idx = 0; idx < number_of_detections; idx+=2) {
- result_x.push_back((int)(loc[idx] * width));
- result_y.push_back((int)(loc[idx+1] * height));
+ for (int idx = 0; idx < number_of_detections; idx += 2) {
+ result_x.push_back((int) (loc[idx] * width));
+ result_y.push_back((int) (loc[idx + 1] * height));
}
int ret = 1;
- for (int i = 0; i < (number_of_detections>>1); i++) {
- if (result_x[i] != answers[i*2] || result_y[i] != answers[i*2 + 1]) {
+ for (int i = 0; i < (number_of_detections >> 1); i++) {
+ if (result_x[i] != answers[i * 2] ||
+ result_y[i] != answers[i * 2 + 1]) {
ret = 0;
break;
}
return ret;
}
-int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers, int height, int width)
+int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
+ int height, int width)
{
- std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
+ std::vector<std::vector<int> > inferDimInfo(outputData.dimInfo);
+ std::vector<void *> inferResults(outputData.data.begin(),
+ outputData.data.end());
std::vector<int> result_x, result_y;
const int heat_map_width = 96, heat_map_height = 96;
int num_of_pose = inferDimInfo[0][3];
float *data = static_cast<float *>(inferResults[0]);
- float ratio_x = (float)width / (float)inferDimInfo[0][2];
- float ratio_y = (float)height / (float)inferDimInfo[0][1];
+ float ratio_x = (float) width / (float) inferDimInfo[0][2];
+ float ratio_y = (float) height / (float) inferDimInfo[0][1];
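+	// For each key point, find the heat map cell with the maximum
+	// activation and scale its position back to image coordinates.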
for (int idx = 0; idx < num_of_pose; ++idx) {
float max_score = 0.0f;
for (int y = 0; y < heat_map_height; ++y) {
for (int x = 0; x < heat_map_width; ++x) {
				// heat_map[y][x][idx] = (y * heat_map_width * num_of_pose) + (x * num_of_pose) + idx
- float score = data[(y * heat_map_width * num_of_pose) + (x * num_of_pose) + idx];
+ float score = data[(y * heat_map_width * num_of_pose) +
+ (x * num_of_pose) + idx];
if (score > max_score) {
max_score = score;
max_x = x;
}
}
- result_x.push_back((int)((float)(max_x + 1) * ratio_x));
- result_y.push_back((int)((float)(max_y + 1) * ratio_y));
+ result_x.push_back((int) ((float) (max_x + 1) * ratio_x));
+ result_y.push_back((int) ((float) (max_y + 1) * ratio_y));
}
int ret = 1;
for (int i = 0; i < num_of_pose; ++i) {
- if (result_x[i] != answers[i] || result_y[i] != answers[num_of_pose + i]) {
+ if (result_x[i] != answers[i] ||
+ result_y[i] != answers[num_of_pose + i]) {
ret = 0;
break;
}
TEST_POSE_ESTIMATION
};
-int GetModelInfo(std::vector <std::string> &model_paths, std::vector<std::string> &models);
+int GetModelInfo(std::vector<std::string> &model_paths,
+ std::vector<std::string> &models);
-int PrepareTensorBuffers(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &inputs,
- std::vector<inference_engine_tensor_buffer> &outputs);
+int PrepareTensorBuffers(InferenceEngineCommon *engine,
+ std::vector<inference_engine_tensor_buffer> &inputs,
+ std::vector<inference_engine_tensor_buffer> &outputs);
-void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs, std::vector<inference_engine_tensor_buffer> &outputs);
+void CleanupTensorBuffers(std::vector<inference_engine_tensor_buffer> &inputs,
+ std::vector<inference_engine_tensor_buffer> &outputs);
-void CopyFileToMemory(const char *file_name, inference_engine_tensor_buffer &buffer, unsigned int size);
+void CopyFileToMemory(const char *file_name,
+ inference_engine_tensor_buffer &buffer,
+ unsigned int size);
-void FillOutputResult(InferenceEngineCommon *engine, std::vector<inference_engine_tensor_buffer> &outputs, tensor_t &outputData);
+void FillOutputResult(InferenceEngineCommon *engine,
+ std::vector<inference_engine_tensor_buffer> &outputs,
+ tensor_t &outputData);
int VerifyImageClassificationResults(tensor_t &outputData, int answer);
-int VerifyObjectDetectionResults(tensor_t &outputData, std::vector<int> &answers, int height, int width);
+int VerifyObjectDetectionResults(tensor_t &outputData,
+ std::vector<int> &answers, int height,
+ int width);
-int VerifyFacialLandmarkDetectionResults(tensor_t &outputData, std::vector<int> &answers, int height, int width);
+int VerifyFacialLandmarkDetectionResults(tensor_t &outputData,
+ std::vector<int> &answers, int height,
+ int width);
-int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers, int height, int width);
\ No newline at end of file
+int VerifyPoseEstimationResults(tensor_t &outputData, std::vector<int> &answers,
+ int height, int width);
\ No newline at end of file
int main(int argc, char **argv)
{
- ::testing::InitGoogleTest(&argc, argv);
- int ret = RUN_ALL_TESTS();
- return ret;
+ ::testing::InitGoogleTest(&argc, argv);
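+	// RUN_ALL_TESTS() returns 0 only when every registered test passes,
+	// so its result doubles as the process exit code.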
+ int ret = RUN_ALL_TESTS();
+ return ret;
}