mv_inference: change enumeration values to new ones
author Inki Dae <inki.dae@samsung.com>
Fri, 17 Apr 2020 08:08:19 +0000 (17:08 +0900)
committer Inki Dae <inki.dae@samsung.com>
Fri, 17 Apr 2020 08:08:19 +0000 (17:08 +0900)
Some enumeration values of inference-engine-interface have been
renamed (given an INFERENCE_ prefix), so update their uses in
mv_inference accordingly.

Change-Id: Iea466768e7ffe4089dc63938c8c9be921628d654
Signed-off-by: Inki Dae <inki.dae@samsung.com>
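
Note: the rename is a one-to-one prefix change; every enumerator this patch
touches maps old name -> INFERENCE_-prefixed name, as visible in the diff
below. Purely as an illustration of that mapping, a hypothetical compatibility
shim for out-of-tree code still using the old names could look like this
(not part of this patch, and not needed by mv_inference after this change):

    // Hypothetical aliases, for illustration only; the real enumerators are
    // defined by inference-engine-interface under the new INFERENCE_ names.
    #define TENSOR_DATA_TYPE_UINT8   INFERENCE_TENSOR_DATA_TYPE_UINT8
    #define TENSOR_DATA_TYPE_FLOAT32 INFERENCE_TENSOR_DATA_TYPE_FLOAT32
    #define TENSOR_SHAPE_NCHW        INFERENCE_TENSOR_SHAPE_NCHW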
mv_inference/inference/src/Inference.cpp

index 730e7530b9c92965b604d572b1c1035834925372..08c9e22022dcc4c25d8fb7c38c5ac18cdc89e129 100755 (executable)
@@ -214,11 +214,11 @@ int Inference::ConvertToCv(int given_type)
        int type = 0;
 
        switch (given_type) {
-       case TENSOR_DATA_TYPE_UINT8:
+       case INFERENCE_TENSOR_DATA_TYPE_UINT8:
                LOGI("Type is %d ch with UINT8", mCh);
                type = mCh == 1 ? CV_8UC1 : CV_8UC3;
                break;
-       case TENSOR_DATA_TYPE_FLOAT32:
+       case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
                LOGI("Type is %d ch with FLOAT32", mCh);
                type = mCh == 1 ? CV_32FC1 : CV_32FC3;
                break;
@@ -233,14 +233,14 @@ int Inference::ConvertToCv(int given_type)
 
 inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
 {
-       inference_tensor_data_type_e type = TENSOR_DATA_TYPE_FLOAT32;
+       inference_tensor_data_type_e type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
 
        switch (given_type) {
        case MV_INFERENCE_DATA_FLOAT32:
-               type = TENSOR_DATA_TYPE_FLOAT32;
+               type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
                break;
        case MV_INFERENCE_DATA_UINT8:
-               type = TENSOR_DATA_TYPE_UINT8;
+               type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
                break;
        default:
                LOGI("unknown data type so FLOAT32 data type will be used in default");
@@ -367,7 +367,7 @@ void Inference::ConfigureInputInfo(int width,
        tensor_info.data_type = ConvertToIE(dataType);
 
        // In case of OpenCV, only supports NCHW
-       tensor_info.shape_type = TENSOR_SHAPE_NCHW;
+       tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
        // modify to handle multiple tensor infos
        tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
        tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
@@ -521,7 +521,7 @@ void Inference::CleanupTensorBuffers(void)
                                continue;
                        }
 
-                       if (tensor_buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+                       if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
                                delete[] (float *)tensor_buffer.buffer;
                        else
                                delete[] (unsigned char *)tensor_buffer.buffer;
@@ -542,7 +542,7 @@ void Inference::CleanupTensorBuffers(void)
                                continue;
                        }
 
-                       if (tensor_buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+                       if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
                                delete[] (float *)tensor_buffer.buffer;
                        else
                                delete[] (unsigned char *)tensor_buffer.buffer;
@@ -592,10 +592,10 @@ int Inference::PrepareTenosrBuffers(void)
                for (int i = 0; i < mInputLayerProperty.tensor_infos.size(); ++i) {
                        inference_engine_tensor_info tensor_info = mInputLayerProperty.tensor_infos[i];
                        inference_engine_tensor_buffer tensor_buffer;
-                       if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+                       if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                tensor_buffer.buffer = (void *)(new float[tensor_info.size]);
                                tensor_buffer.size = tensor_info.size * 4;
-                       } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+                       } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
                                tensor_buffer.buffer = (void *)(new unsigned char[tensor_info.size]);
                                tensor_buffer.size = tensor_info.size;
                        } else {
@@ -636,10 +636,10 @@ int Inference::PrepareTenosrBuffers(void)
                for (int i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
                        inference_engine_tensor_info tensor_info = mOutputLayerProperty.tensor_infos[i];
                        inference_engine_tensor_buffer tensor_buffer;
-                       if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+                       if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
                                tensor_buffer.buffer = new float[tensor_info.size];
                                tensor_buffer.size = tensor_info.size * 4;
-                       } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+                       } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
                                tensor_buffer.buffer = new char[tensor_info.size];
                                tensor_buffer.size = tensor_info.size;
                        } else {
@@ -680,7 +680,7 @@ int Inference::FillOutputResult(tensor_t &outputData)
                outputData.dimInfo.push_back(tmpDimInfo);
 
                // Normalize output tensor data converting it to float type in case of quantized model.
-               if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+               if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
                        unsigned char *ori_buf = (unsigned char *)mOutputTensorBuffers[i].buffer;
                        float *new_buf = new float[tensor_info.size];
                        if (new_buf == NULL) {
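
The last hunk touches the quantized-output path of FillOutputResult(), which
copies a UINT8 tensor buffer into a newly allocated float buffer. For context,
a minimal standalone sketch of that kind of dequantization is shown below; the
1/255 scale is an assumption made for illustration, as the scale actually
applied in Inference.cpp lies outside this hunk.

    // Standalone sketch of dequantizing a UINT8 output tensor into floats.
    // Mirrors the shape of the FillOutputResult() code above, but is not the
    // exact implementation.
    #include <cstddef>
    #include <new>

    static float *DequantizeUint8(const unsigned char *ori_buf, size_t size)
    {
            float *new_buf = new (std::nothrow) float[size];
            if (new_buf == nullptr)
                    return nullptr;

            for (size_t idx = 0; idx < size; ++idx)
                    new_buf[idx] = ori_buf[idx] / 255.0f; // assumed scale

            return new_buf;
    }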