- void CheckSupportedInferenceBackend();
- int CheckBackendType(const mv_inference_backend_type_e backendType);
- bool IsTargetDeviceSupported(const int targetDevices);
- int ConvertEngineErrorToVisionError(int error);
- int ConvertTargetTypes(int given_types);
- int ConvertToCv(int given_type);
- int ConvertOutputDataTypeToFloat();
- int Preprocess(std::vector<mv_source_h> &mv_sources, std::vector<cv::Mat> &cv_sources);
- inference_tensor_data_type_e ConvertToIE(int given_type);
- int PrepareTenosrBuffers(void);
- void CleanupTensorBuffers(void);
- int SetUserFile(std::string filename);
+ // Method declarations renamed PascalCase -> lowerCamelCase; signatures otherwise unchanged.
+ void checkSupportedInferenceBackend();
+ int checkBackendType(const mv_inference_backend_type_e backendType);
+ bool isTargetDeviceSupported(const int targetDevices);
+ int convertEngineErrorToVisionError(int error);
+ int convertTargetTypes(int given_types);
+ int convertToCv(int given_type);
+ int convertOutputDataTypeToFloat();
+ int preprocess(std::vector<mv_source_h> &mv_sources, std::vector<cv::Mat> &cv_sources);
+ inference_tensor_data_type_e convertToIE(int given_type);
+ int prepareTenosrBuffers(void); // TODO(review): "Tenosr" typo predates this rename — consider prepareTensorBuffers in a follow-up patch that also updates all callers/definitions
+ void cleanupTensorBuffers(void);
+ int setUserFile(std::string filename); // NOTE(review): std::string taken by value forces a copy — const std::string& (or std::string_view) would avoid it; verify no caller relies on the sink semantics