--- /dev/null
+/**
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IIMAGE_CLASSIFICATION_H__
+#define __IIMAGE_CLASSIFICATION_H__
+
+#include <string>
+
+#include <mv_common.h>
+
+#include "image_classification_type.h"
+
+namespace mediavision
+{
+namespace machine_learning
+{
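+/**
+ * Common interface implemented by image classification tasks.
+ *
+ * It exposes queries for the available inference engines and devices,
+ * configuration and model preparation, synchronous inference on a single
+ * media source (perform) and asynchronous inference (performAsync), whose
+ * latest result is retrieved with getOutput().
+ */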
+class IImageClassification
+{
+public:
+ virtual ~IImageClassification() {};
+
+ virtual void preDestroy() = 0;
+ virtual void setEngineInfo(std::string engine_type, std::string device_type) = 0;
+ virtual void getNumberOfEngines(unsigned int *number_of_engines) = 0;
+ virtual void getEngineType(unsigned int engine_index, char **engine_type) = 0;
+ virtual void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) = 0;
+ virtual void getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type) = 0;
+ virtual void configure() = 0;
+ virtual void prepare() = 0;
+ virtual void perform(mv_source_h &mv_src) = 0;
+ virtual void performAsync(ImageClassificationInput &input) = 0;
+ virtual ImageClassificationResult &getOutput() = 0;
+};
+
+} // machine_learning
+} // mediavision
+
+#endif
\ No newline at end of file
{
namespace machine_learning
{
-ImageClassification::ImageClassification(std::shared_ptr<MachineLearningConfig> config) : _config(config)
+template<typename T>
+ImageClassification<T>::ImageClassification(std::shared_ptr<MachineLearningConfig> config) : _config(config)
{
_inference = make_unique<Inference>();
}
-void ImageClassification::preDestroy()
+template<typename T> void ImageClassification<T>::preDestroy()
{
if (!_async_manager)
return;
_async_manager->stop();
}
-void ImageClassification::configure()
+template<typename T> void ImageClassification<T>::configure()
{
- _config->loadMetaFile(make_unique<ImageClassificationParser>());
loadLabel();
	int ret = _inference->bind(_config->getBackendType(), _config->getTargetDeviceType());
	if (ret != MEDIA_VISION_ERROR_NONE)
		throw InvalidOperation("Fail to bind a backend engine.");
}
-void ImageClassification::loadLabel()
+template<typename T> void ImageClassification<T>::loadLabel()
{
if (_config->getLabelFilePath().empty())
return;
readFile.close();
}
-void ImageClassification::getEngineList()
+template<typename T> void ImageClassification<T>::getEngineList()
{
for (auto idx = MV_INFERENCE_BACKEND_NONE + 1; idx < MV_INFERENCE_BACKEND_MAX; ++idx) {
auto backend = _inference->getSupportedInferenceBackend(idx);
}
}
-void ImageClassification::getDeviceList(const char *engine_type)
+template<typename T> void ImageClassification<T>::getDeviceList(const char *engine_type)
{
// TODO. add device types available for a given engine type later.
	// By default, cpu and gpu only.
_valid_devices.push_back("gpu");
}
-void ImageClassification::setEngineInfo(std::string engine_type_name, std::string device_type_name)
+template<typename T>
+void ImageClassification<T>::setEngineInfo(std::string engine_type_name, std::string device_type_name)
{
if (engine_type_name.empty() || device_type_name.empty())
throw InvalidParameter("Invalid engine info.");
device_type_name.c_str(), device_type);
}
-void ImageClassification::getNumberOfEngines(unsigned int *number_of_engines)
+template<typename T> void ImageClassification<T>::getNumberOfEngines(unsigned int *number_of_engines)
{
if (!_valid_backends.empty()) {
*number_of_engines = _valid_backends.size();
*number_of_engines = _valid_backends.size();
}
-void ImageClassification::getEngineType(unsigned int engine_index, char **engine_type)
+template<typename T> void ImageClassification<T>::getEngineType(unsigned int engine_index, char **engine_type)
{
if (!_valid_backends.empty()) {
if (_valid_backends.size() <= engine_index)
*engine_type = const_cast<char *>(_valid_backends[engine_index].data());
}
-void ImageClassification::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+template<typename T>
+void ImageClassification<T>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
if (!_valid_devices.empty()) {
*number_of_devices = _valid_devices.size();
*number_of_devices = _valid_devices.size();
}
-void ImageClassification::getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type)
+template<typename T>
+void ImageClassification<T>::getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type)
{
if (!_valid_devices.empty()) {
if (_valid_devices.size() <= device_index)
*device_type = const_cast<char *>(_valid_devices[device_index].data());
}
-void ImageClassification::prepare()
+template<typename T> void ImageClassification<T>::prepare()
{
int ret = _inference->configureInputMetaInfo(_config->getInputMetaMap());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to load model files.");
}
-shared_ptr<MetaInfo> ImageClassification::getInputMetaInfo()
+template<typename T> shared_ptr<MetaInfo> ImageClassification<T>::getInputMetaInfo()
{
TensorBuffer &tensor_buffer = _inference->getInputTensorBuffer();
IETensorBuffer &tensor_info_map = tensor_buffer.getIETensorBuffer();
}
template<typename T>
-void ImageClassification::preprocess(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo, vector<T> &inputVector)
+void ImageClassification<T>::preprocess(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo, vector<T> &inputVector)
{
LOGI("ENTER");
LOGI("LEAVE");
}
-template<typename T> void ImageClassification::inference(vector<vector<T> > &inputVectors)
+template<typename T> void ImageClassification<T>::inference(vector<vector<T> > &inputVectors)
{
LOGI("ENTER");
LOGI("LEAVE");
}
-template<typename T> void ImageClassification::perform(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo)
+template<typename T> void ImageClassification<T>::perform(mv_source_h &mv_src)
{
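+	// Derive preprocessing parameters from the model's input meta information,
+	// convert the media source into an input tensor of type T and run inference.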
+ shared_ptr<MetaInfo> metaInfo = getInputMetaInfo();
vector<T> inputVector;
- preprocess<T>(mv_src, metaInfo, inputVector);
+ preprocess(mv_src, metaInfo, inputVector);
vector<vector<T> > inputVectors = { inputVector };
-
- inference<T>(inputVectors);
-}
-
-void ImageClassification::perform(mv_source_h &mv_src)
-{
- shared_ptr<MetaInfo> metaInfo = getInputMetaInfo();
-
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
- perform<unsigned char>(mv_src, metaInfo);
- else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
- perform<float>(mv_src, metaInfo);
- else
- throw InvalidOperation("Invalid model data type.");
+ inference(inputVectors);
}
-ImageClassificationResult &ImageClassification::getOutput()
+template<typename T> ImageClassificationResult &ImageClassification<T>::getOutput()
{
if (_async_manager) {
if (!_async_manager->isWorking())
return _current_result;
}
-template<typename T>
-void ImageClassification::performAsync(ImageClassificationInput &input, shared_ptr<MetaInfo> metaInfo)
+template<typename T> void ImageClassification<T>::performAsync(ImageClassificationInput &input)
{
if (!_async_manager) {
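+		// Create the async manager on first use; its worker callback pops queued
+		// input tensors and runs inference on them.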
_async_manager = make_unique<AsyncManager<ImageClassificationResult> >([this]() {
AsyncInputQueue<T> inputQueue = _async_manager->popFromInput<T>();
- inference<T>(inputQueue.inputs);
+ inference(inputQueue.inputs);
ImageClassificationResult &resultQueue = result();
});
}
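+	// Preprocess the incoming source and queue the resulting input tensors for the worker.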
+ shared_ptr<MetaInfo> metaInfo = getInputMetaInfo();
vector<T> inputVector;
- preprocess<T>(input.inference_src, metaInfo, inputVector);
+ preprocess(input.inference_src, metaInfo, inputVector);
vector<vector<T> > inputVectors = { inputVector };
-
_async_manager->push(inputVectors);
}
-void ImageClassification::performAsync(ImageClassificationInput &input)
-{
- shared_ptr<MetaInfo> metaInfo = getInputMetaInfo();
-
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8) {
- performAsync<unsigned char>(input, metaInfo);
- } else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32) {
- performAsync<float>(input, metaInfo);
- // TODO
- } else {
- throw InvalidOperation("Invalid model data type.");
- }
-}
-
-void ImageClassification::getOutputNames(vector<string> &names)
+template<typename T> void ImageClassification<T>::getOutputNames(vector<string> &names)
{
TensorBuffer &tensor_buffer_obj = _inference->getOutputTensorBuffer();
IETensorBuffer &ie_tensor_buffer = tensor_buffer_obj.getIETensorBuffer();
names.push_back(it->first);
}
-void ImageClassification::getOutpuTensor(string &target_name, vector<float> &tensor)
+template<typename T> void ImageClassification<T>::getOutpuTensor(string &target_name, vector<float> &tensor)
{
LOGI("ENTER");
LOGI("LEAVE");
}
-template void ImageClassification::preprocess<float>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo,
- vector<float> &inputVector);
-template void ImageClassification::inference<float>(vector<vector<float> > &inputVectors);
-template void ImageClassification::perform<float>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo);
-template void ImageClassification::performAsync<float>(ImageClassificationInput &input, shared_ptr<MetaInfo> metaInfo);
-
-template void ImageClassification::preprocess<unsigned char>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo,
- vector<unsigned char> &inputVector);
-template void ImageClassification::inference<unsigned char>(vector<vector<unsigned char> > &inputVectors);
-template void ImageClassification::perform<unsigned char>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo);
-template void ImageClassification::performAsync<unsigned char>(ImageClassificationInput &input,
- shared_ptr<MetaInfo> metaInfo);
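+// Explicit instantiations for the supported tensor data types
+// (MV_INFERENCE_DATA_UINT8 and MV_INFERENCE_DATA_FLOAT32).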
+template class ImageClassification<unsigned char>;
+template class ImageClassification<float>;
}
}