{
namespace TFLiteImpl
{
- InferenceTFLite::InferenceTFLite(void) : mTargetTypes(INFERENCE_TARGET_NONE)
+ InferenceTFLite::InferenceTFLite()
{
LOGI("ENTER");
LOGI("LEAVE");
InferenceTFLite::~InferenceTFLite()
{
- ;
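+ // Release the GPU delegate, if one was created; the empty destructor previously leaked it.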
+ if (mDelegate)
+ TfLiteGpuDelegateV2Delete(mDelegate);
}

int InferenceTFLite::SetPrivateData(void *data)
{
if (mTargetTypes == INFERENCE_TARGET_GPU) {
TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
- TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(&options);
- if (!delegate){
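+ // Keep the delegate in the mDelegate member so the destructor can release it.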
+ mDelegate = TfLiteGpuDelegateV2Create(&options);
+ if (!mDelegate){
LOGE("Failed to GPU delegate");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- if (mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk)
+ if (mInterpreter->ModifyGraphWithDelegate(mDelegate) != kTfLiteOk)
{
LOGE("Failed to construct GPU delegate");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
}

for (auto& dim : layer.second.shape)
size *= dim;
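+ // Hoist the declaration out of the switch: the case labels share one scope, so the per-case 'auto pBuff' declarations collide and their initializations can be jumped past, which C++ rejects.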
+ void *pBuff;
switch (layer.second.data_type) {
case INFERENCE_TENSOR_DATA_TYPE_UINT8:
- auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
+ pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
break;
case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
- auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
+ pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
break;
default:

for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2)
size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
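+ // Same scoping fix as the input path: declare pBuff once before the switch.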
+ void *pBuff;
switch (mInterpreter->tensor(mOutputLayerId[layer.first])->type) {
case kTfLiteUInt8:
LOGI("type is kTfLiteUInt8");
- auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
+ pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
break;
case kTfLiteInt64:
LOGI("type is kTfLiteInt64");
- auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
+ pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1 };
break;
case kTfLiteFloat32:
LOGI("type is kTfLiteFloat32");
- auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
+ pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
break;
default:
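
A possible follow-up, not part of this change: the delegate's lifetime could be managed with RAII instead of a manual TfLiteGpuDelegateV2Delete call in the destructor. A minimal sketch, assuming only the TFLite GPU delegate API already used above; GpuDelegatePtr and CreateGpuDelegate are illustrative names, not existing code:

#include <memory>

#include "tensorflow/lite/delegates/gpu/delegate.h"

// Owns a TfLiteDelegate; TfLiteGpuDelegateV2Delete runs automatically when the
// pointer goes out of scope (and is skipped for a null pointer).
using GpuDelegatePtr = std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate *)>;

GpuDelegatePtr CreateGpuDelegate()
{
	TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
	return GpuDelegatePtr(TfLiteGpuDelegateV2Create(&options),
	                      TfLiteGpuDelegateV2Delete);
}

With mDelegate declared as a GpuDelegatePtr, Load() would pass mDelegate.get() to ModifyGraphWithDelegate() and the explicit destructor body above would become unnecessary.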