@@ ... @@
		case INFERENCE_TENSOR_DATA_TYPE_UINT8:
			pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
			buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
+			LOGD("buffer type is UINT8");
+			break;
+		case INFERENCE_TENSOR_DATA_TYPE_INT8:
+			pBuff = static_cast<void *>(mInterpreter->typed_tensor<int8_t>(mInputLayerId[layer.first]));
+			buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_INT8, size, 1 };
+			LOGD("buffer type is INT8");
			break;
		case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
			pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
			buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
			break;
		default:
			LOGE("Not supported");
			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
		}
+
		buffers.insert(std::make_pair(layer.first, buffer));
	}
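
For context: each case above wraps the tensor storage returned by typed_tensor<T>() in an engine buffer descriptor of { pointer, data type, byte size, flag }. UINT8 and INT8 can share the `size` expression because both are one byte per element, while a FLOAT32 buffer scales by four. A standalone sketch of that per-type sizing (illustrative names only, not the backend's API):

#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Stand-ins for inference_tensor_data_type_e; illustrative only. */
enum DataType { kUInt8, kInt8, kInt64, kFloat32 };

static size_t ElementBytes(DataType t)
{
	switch (t) {
	case kUInt8:
	case kInt8:
		return sizeof(int8_t);   /* 1 byte: INT8 sizing matches UINT8 */
	case kInt64:
		return sizeof(int64_t);  /* 8 bytes */
	case kFloat32:
		return sizeof(float);    /* 4 bytes, hence `size * 4` */
	}
	return 0;
}

int main(void)
{
	const size_t elems = 224 * 224 * 3;  /* hypothetical input shape */
	printf("INT8 buffer: %zu bytes\n", elems * ElementBytes(kInt8));
	printf("FLOAT32 buffer: %zu bytes\n", elems * ElementBytes(kFloat32));
	return 0;
}
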
@@ ... @@
			pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
			buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
			break;
+		case kTfLiteInt8:
+			LOGI("type is kTfLiteInt8");
+			pBuff = static_cast<void *>(mInterpreter->typed_tensor<int8_t>(mOutputLayerId[layer.first]));
+			buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_INT8, size, 1 };
+			break;
		case kTfLiteInt64:
			LOGI("type is kTfLiteInt64");
			pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
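
The `size` used in these descriptors is the tensor's element count; for TFLite tensors that is the product of the entries in tensor->dims. A minimal sketch of that computation, with a plain array standing in for TfLiteIntArray:

#include <cstddef>
#include <cstdio>

int main(void)
{
	const int dims[] = { 1, 224, 224, 3 };  /* hypothetical NHWC shape */
	size_t size = 1;
	for (int d : dims)
		size *= static_cast<size_t>(d);
	printf("element count: %zu\n", size);  /* 150528 */
	return 0;
}
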
@@ ... @@
		if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
			LOGI("type is kTfLiteUInt8");
			tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+		} else if (mInterpreter->tensor(layer.second)->type == kTfLiteInt8) {
+			LOGI("type is kTfLiteInt8");
+			tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT8;
		} else if (mInterpreter->tensor(layer.second)->type == kTfLiteInt64) {
			LOGI("type is kTfLiteInt64");
			tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
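
The same TfLiteType-to-engine-type mapping appears again in the output-property switch at the end of this patch; as the sketch below shows, it could be expressed once as a helper. Standalone, with stand-in enums for TfLiteType and inference_tensor_data_type_e:

#include <cstdio>

/* Stand-ins for TfLiteType and inference_tensor_data_type_e. */
enum TfType { kTfUInt8, kTfInt8, kTfInt64, kTfFloat32, kTfOther };
enum EngineType { TYPE_UINT8, TYPE_INT8, TYPE_INT64, TYPE_FLOAT32, TYPE_NONE };

/* One place for the mapping that the if/else chain above and the
 * final switch in this patch both implement. */
static EngineType ToEngineType(TfType t)
{
	switch (t) {
	case kTfUInt8:
		return TYPE_UINT8;
	case kTfInt8:
		return TYPE_INT8;
	case kTfInt64:
		return TYPE_INT64;
	case kTfFloat32:
		return TYPE_FLOAT32;
	default:
		return TYPE_NONE;
	}
}

int main(void)
{
	printf("kTfInt8 maps to %d\n", ToEngineType(kTfInt8));  /* TYPE_INT8 == 1 */
	return 0;
}
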
@@ ... @@
	if (mIsDynamicTensorMode)
		for (auto &input_buffer : input_buffers) {
			void *pBuff;
+
			switch (mInterpreter->tensor(mInputLayerId[input_buffer.first])->type) {
			case kTfLiteUInt8:
				LOGI("type is kTfLiteUInt8");
				pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[input_buffer.first]));
				break;
+			case kTfLiteInt8:
+				LOGI("type is kTfLiteInt8");
+				pBuff = static_cast<void *>(mInterpreter->typed_tensor<int8_t>(mInputLayerId[input_buffer.first]));
+				break;
			case kTfLiteInt64:
				LOGI("type is kTfLiteInt64");
				pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mInputLayerId[input_buffer.first]));
				break;
			default:
				LOGE("Not supported");
				return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
			}
+
			memcpy(pBuff, input_buffer.second.buffer, input_buffer.second.size);
		}
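
The copy in this dynamic-tensor path is byte-for-byte: the switch only picks which element type the interpreter's storage pointer is taken as before memcpy(). A reduced sketch, with a std::vector standing in for the storage typed_tensor<int8_t>() would return:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

int main(void)
{
	const int8_t user_input[] = { -3, 0, 5, 127 };  /* hypothetical caller data */
	std::vector<int8_t> storage(4);                 /* stands in for typed_tensor<int8_t>() */

	void *pBuff = static_cast<void *>(storage.data());
	std::memcpy(pBuff, user_input, sizeof(user_input));

	printf("storage[3] = %d\n", storage[3]);  /* 127 */
	return 0;
}
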
LOGI("type is kTfLiteUInt8");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[output_buffer.first]));
break;
+ case kTfLiteInt8:
+ LOGI("type is kTfLiteInt8");
+ pBuff = static_cast<void *>(mInterpreter->typed_tensor<int8_t>(mOutputLayerId[output_buffer.first]));
+ break;
case kTfLiteInt64:
LOGI("type is kTfLiteInt64");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[output_buffer.first]));
LOGI("type is kTfLiteUInt8");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
break;
+ case kTfLiteInt8:
+ LOGI("type is kTfLiteInt8");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT8;
+ break;
case kTfLiteFloat32:
LOGI("type is kTfLiteFloat32");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
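
A closing note on why the INT8 cases matter: fully-quantized TFLite models produce int8 tensors, and consumers recover real values from the tensor's quantization parameters as real = scale * (q - zero_point). A standalone sketch with hypothetical parameters:

#include <cstdint>
#include <cstdio>

/* TFLite affine dequantization: real = scale * (q - zero_point). */
static float Dequantize(int8_t q, float scale, int zero_point)
{
	return scale * static_cast<float>(static_cast<int>(q) - zero_point);
}

int main(void)
{
	const float scale = 1.0f / 128.0f;  /* hypothetical */
	const int zero_point = 0;           /* hypothetical */
	const int8_t out[] = { -128, 0, 127 };

	for (int8_t q : out)
		printf("%4d -> %f\n", q, Dequantize(q, scale, zero_point));
	return 0;
}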