)
INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name_common}.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
-ADD_SUBDIRECTORY(${PROJECT_SOURCE_DIR}/test)
+#ADD_SUBDIRECTORY(${PROJECT_SOURCE_DIR}/test)
IF(UNIX)
#ifndef __INFERENCE_ENGINE_COMMON_H__
#define __INFERENCE_ENGINE_COMMON_H__
+#include <map>
#include <vector>
#include <string>
* Otherwise, it should leave buffers empty.
*/
virtual int GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+ std::map<std::string, inference_engine_tensor_buffer> &buffers) = 0;
/**
* @brief Get output tensor buffers from a given backend engine.
* Otherwise, it should leave buffers empty.
*/
virtual int GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers) = 0;
+ std::map<std::string, inference_engine_tensor_buffer> &buffers) = 0;
/**
* @brief Get input layer property information from a given backend engine.
* @param[in] output_buffers It contains tensor buffers to be used as the output layers.
*/
virtual int
- Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers) = 0;
+ Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers) = 0;
};
typedef void destroy_t(IInferenceEngineCommon *);
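With these signature changes, callers index tensor buffers by layer name instead of by position. A minimal caller-side sketch follows; it assumes the header name inference_engine_common.h (per the include guard above), an already-created engine handed in by the caller, a placeholder layer name "input", and that 0 denotes success — none of these details come from this patch itself.

	#include <cstddef>
	#include <cstring>
	#include <map>
	#include <string>

	#include "inference_engine_common.h"

	// Fetch the per-layer buffer maps, copy user data into one input layer,
	// then run inference. Error handling is simplified for illustration.
	static int RunOnce(IInferenceEngineCommon *engine, const void *data, size_t size)
	{
		std::map<std::string, inference_engine_tensor_buffer> inputs, outputs;

		if (engine->GetInputTensorBuffers(inputs) != 0 ||
				engine->GetOutputTensorBuffers(outputs) != 0)
			return -1;

		// "input" is a placeholder; real layer names come from the loaded model.
		auto input = inputs.find("input");
		if (input == inputs.end() || input->second.size < size)
			return -1;

		std::memcpy(input->second.buffer, data, size);

		return engine->Run(inputs, outputs);
	}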
#ifndef __INFERENCE_ENGINE_COMMON_IMPL_H__
#define __INFERENCE_ENGINE_COMMON_IMPL_H__
+#include <map>
#include <vector>
#include <string>
* Otherwise, it should leave buffers empty.
*/
int GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers);
+ std::map<std::string, inference_engine_tensor_buffer> &buffers);
/**
* @brief Get output tensor buffers from a given backend engine.
* Otherwise, it should leave buffers empty.
*/
int GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers);
+ std::map<std::string, inference_engine_tensor_buffer> &buffers);
/**
* @brief Get input layer property information from a given backend engine.
* @param[in] input_buffers It contains tensor buffers to be used as the input layers.
* @param[in] output_buffers It contains tensor buffers to be used as the output layers.
*/
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers);
+ int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
/**
* @brief Enable or disable the inference engine profiler.
int InitBackendEngine(const std::string &backend_path,
int backend_type, int device_type);
int CheckTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers);
+ std::map<std::string, inference_engine_tensor_buffer> &buffers);
int CheckLayerProperty(inference_engine_layer_property &property);
inference_backend_type_e mSelectedBackendEngine;
mkdir -p %{buildroot}/opt/usr/images/
%make_install
-install -m 755 test/bin/inference_engine_profiler %{buildroot}%{_bindir}
-install -m 755 test/bin/inference_engine_tc %{buildroot}%{_bindir}
-install -m 755 start_profiler.sh %{buildroot}%{_bindir}
-install -m 666 test/res/*.bin %{buildroot}/opt/usr/images
+#install -m 755 test/bin/inference_engine_profiler %{buildroot}%{_bindir}
+#install -m 755 test/bin/inference_engine_tc %{buildroot}%{_bindir}
+#install -m 755 start_profiler.sh %{buildroot}%{_bindir}
+#install -m 666 test/res/*.bin %{buildroot}/opt/usr/images
%post -p /sbin/ldconfig
%postun -p /sbin/ldconfig
%{_includedir}/media/*.h
%{_libdir}/pkgconfig/*common.pc
%{_libdir}/lib*-common.so
-%{_bindir}/inference_engine_profiler
-%{_bindir}/inference_engine_tc
-%{_bindir}/start_profiler.sh
-/opt/usr/images/*.bin
+#%{_bindir}/inference_engine_profiler
+#%{_bindir}/inference_engine_tc
+#%{_bindir}/start_profiler.sh
+#/opt/usr/images/*.bin
}
int InferenceEngineCommon::CheckTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
if (buffers.size() == 0) {
LOGE("tensor buffer vector is empty.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- for (std::vector<inference_engine_tensor_buffer>::const_iterator iter =
- buffers.begin();
- iter != buffers.end(); ++iter) {
- inference_engine_tensor_buffer tensor_buffer = *iter;
+ for (auto iter = buffers.begin(); iter != buffers.end(); ++iter) {
+ inference_engine_tensor_buffer tensor_buffer = iter->second;
if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
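The iterator loop above copies each inference_engine_tensor_buffer by value, mirroring the old vector-based code. An equivalent form of the same check (a sketch only, no behavior change intended) binds a const reference to each map entry instead:

	for (const auto &buffer : buffers) {
		const inference_engine_tensor_buffer &tensor_buffer = buffer.second;

		if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
			LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}
	}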
int InferenceEngineCommon::GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
CHECK_ENGINE_INSTANCE(mBackendHandle);
}
int InferenceEngineCommon::GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
CHECK_ENGINE_INSTANCE(mBackendHandle);
return mBackendHandle->GetBackendCapacity(capacity);
}
- int InferenceEngineCommon::Run(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ int InferenceEngineCommon::Run(
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
{
CHECK_ENGINE_INSTANCE(mBackendHandle);