int type = 0;
switch (given_type) {
- case TENSOR_DATA_TYPE_UINT8:
+ case INFERENCE_TENSOR_DATA_TYPE_UINT8:
LOGI("Type is %d ch with UINT8", mCh);
type = mCh == 1 ? CV_8UC1 : CV_8UC3;
break;
- case TENSOR_DATA_TYPE_FLOAT32:
+ case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
LOGI("Type is %d ch with FLOAT32", mCh);
type = mCh == 1 ? CV_32FC1 : CV_32FC3;
break;
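// A hedged usage sketch of the mapping above: the computed int is an OpenCV
// element type, so downstream code can wrap a tensor buffer in a cv::Mat
// header without copying. mInputSize is an illustrative assumption, not a
// name taken from this patch:
cv::Mat input(cv::Size(mInputSize.width, mInputSize.height), type,
              tensor_buffer.buffer);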
inference_tensor_data_type_e Inference::ConvertToIE(int given_type)
{
- inference_tensor_data_type_e type = TENSOR_DATA_TYPE_FLOAT32;
+ inference_tensor_data_type_e type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
switch (given_type) {
case MV_INFERENCE_DATA_FLOAT32:
- type = TENSOR_DATA_TYPE_FLOAT32;
+ type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
break;
case MV_INFERENCE_DATA_UINT8:
- type = TENSOR_DATA_TYPE_UINT8;
+ type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
break;
default:
LOGI("unknown data type so FLOAT32 data type will be used in default");
tensor_info.data_type = ConvertToIE(dataType);
// The OpenCV backend supports only NCHW.
- tensor_info.shape_type = TENSOR_SHAPE_NCHW;
+ tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
// TODO: modify to handle multiple tensor infos.
tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
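// The two push_back calls above cover N and C; presumably H and W follow to
// complete the NCHW layout noted earlier. A sketch of the remaining
// dimensions (the height/width member names are assumptions):
tensor_info.shape.push_back(mConfig.mTensorInfo.height);
tensor_info.shape.push_back(mConfig.mTensorInfo.width);
// Final shape order: { N, C, H, W }, matching INFERENCE_TENSOR_SHAPE_NCHW.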
continue;
}
- if (tensor_buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+ if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
delete[] (float *)tensor_buffer.buffer;
else
delete[] (unsigned char *)tensor_buffer.buffer;
continue;
}
- if (tensor_buffer.data_type == TENSOR_DATA_TYPE_FLOAT32)
+ if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
delete[] (float *)tensor_buffer.buffer;
else
delete[] (unsigned char *)tensor_buffer.buffer;
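// The typed delete above appears twice in this patch (input and output
// cleanup); a small helper like this sketch, not part of the patch itself,
// would keep the new[]/delete[] element types paired in one place:
static void ReleaseTensorBuffer(inference_engine_tensor_buffer &tensor_buffer)
{
	if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
		delete[] static_cast<float *>(tensor_buffer.buffer);
	else
		delete[] static_cast<unsigned char *>(tensor_buffer.buffer);
	tensor_buffer.buffer = nullptr;
}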
for (size_t i = 0; i < mInputLayerProperty.tensor_infos.size(); ++i) {
inference_engine_tensor_info tensor_info = mInputLayerProperty.tensor_infos[i];
inference_engine_tensor_buffer tensor_buffer;
- if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
tensor_buffer.buffer = new float[tensor_info.size];
tensor_buffer.size = tensor_info.size * sizeof(float);
- } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+ } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
tensor_buffer.buffer = new unsigned char[tensor_info.size];
tensor_buffer.size = tensor_info.size;
} else {
for (size_t i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
inference_engine_tensor_info tensor_info = mOutputLayerProperty.tensor_infos[i];
inference_engine_tensor_buffer tensor_buffer;
- if (tensor_info.data_type == TENSOR_DATA_TYPE_FLOAT32) {
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
tensor_buffer.buffer = new float[tensor_info.size];
tensor_buffer.size = tensor_info.size * sizeof(float);
- } else if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+ } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
tensor_buffer.buffer = new unsigned char[tensor_info.size];
tensor_buffer.size = tensor_info.size;
} else {
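// A sketch (not part of the patch) of the allocation above factored into a
// helper, mirroring the cleanup helper suggested earlier; sizeof(float) makes
// the byte count explicit where the patch writes the literal 4:
static bool AllocateTensorBuffer(const inference_engine_tensor_info &tensor_info,
                                 inference_engine_tensor_buffer &tensor_buffer)
{
	if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
		tensor_buffer.buffer = new float[tensor_info.size];
		tensor_buffer.size = tensor_info.size * sizeof(float);
	} else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
		tensor_buffer.buffer = new unsigned char[tensor_info.size];
		tensor_buffer.size = tensor_info.size;
	} else {
		return false; // unsupported data type
	}
	tensor_buffer.data_type = tensor_info.data_type;
	return true;
}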
outputData.dimInfo.push_back(tmpDimInfo);
// For a quantized model, normalize the output tensor data by converting it to float.
- if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
unsigned char *ori_buf = (unsigned char *)mOutputTensorBuffers[i].buffer;
float *new_buf = new (std::nothrow) float[tensor_info.size];
if (new_buf == NULL) {
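// A sketch of the conversion the comment above describes, continuing past
// the cut-off allocation check; the 1/255 scale is the usual convention for
// quantized uint8 outputs and is an assumption here, as is the presumption
// that the patch then swaps new_buf into mOutputTensorBuffers[i]:
for (size_t j = 0; j < tensor_info.size; ++j)
	new_buf[j] = static_cast<float>(ori_buf[j]) / 255.0f;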