if (mInputAttrType[idx] == kTfLiteUInt8) {
    mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[idx]));
    pBuff = mInputData.back();
-   buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
+   buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1};
}
else if (mInputAttrType[idx] == kTfLiteFloat32) {
    mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
    pBuff = mInputData.back();
-   buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
+   buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
}
else {
    LOGE("Not supported");
}
if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteUInt8) {
    LOGI("type is kTfLiteUInt8");
    pBuff = (void*)mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[idx]);
-   buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
+   buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1};
}
else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteFloat32) {
    LOGI("type is kTfLiteFloat32");
    pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
-   buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
+   buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
}
else {
    LOGE("Not supported");
}
// TFLite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
tensor_info.shape = shape_nhwc;
-tensor_info.shape_type = TENSOR_SHAPE_NHWC;
+tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
    LOGI("type is kTfLiteUInt8");
-   tensor_info.data_type = TENSOR_DATA_TYPE_UINT8;
+   tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
}
else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
    LOGI("type is kTfLiteFloat32");
-   tensor_info.data_type = TENSOR_DATA_TYPE_FLOAT32;
+   tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
}
else {
    LOGE("Not supported");
}