SetInterpreterInfo();
}
- mInputData.clear();
-
- void *pBuff = NULL;
-
for (auto& layer : mInputLayers) {
size_t size = 1;
inference_engine_tensor_buffer buffer;
for (auto& dim : layer.second.shape)
size *= dim;
- if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- mInputData.push_back(
- mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
- pBuff = mInputData.back();
+ switch (layer.second.data_type) {
+ case INFERENCE_TENSOR_DATA_TYPE_UINT8: {
+ // Braces give pBuff its own scope. A declaration with an initializer
+ // directly under a case label is ill-formed C++ ("jump to case label
+ // crosses initialization"), and declaring pBuff in two cases of the
+ // same unbraced switch scope is a redeclaration error.
+ auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
+ break;
+ }
- } else if ((layer.second).data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- mInputData.push_back(
- mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
- pBuff = mInputData.back();
+ case INFERENCE_TENSOR_DATA_TYPE_FLOAT32: {
+ // float32 tensor: 4 bytes per element, hence size * 4 below.
+ auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
+ break;
+ }
- } else {
+ default:
LOGE("Not supported");
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
+ // NOTE(review): 'buffer' is fully built here but never inserted into
+ // 'buffers' inside this loop in this hunk — confirm the insert exists
+ // outside the visible context, or add
+ // buffers.insert(std::make_pair(layer.first, buffer));
+ // to mirror the output-layer loop below.
SetInterpreterInfo();
}
- void *pBuff = NULL;
for (auto& layer : mOutputLayers) {
inference_engine_tensor_buffer buffer;
size_t size = 1;
for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[layer.first])->dims->size; ++idx2)
size *= mInterpreter->tensor(mOutputLayerId[layer.first])->dims->data[idx2];
- if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteUInt8) {
+ switch (mInterpreter->tensor(mOutputLayerId[layer.first])->type) {
+ case kTfLiteUInt8: {
LOGI("type is kTfLiteUInt8");
- pBuff = (void *) mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]);
+ auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
+ break;
+ }
- } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteInt64) {
+ case kTfLiteInt64: {
LOGI("type is kTfLiteInt64");
- pBuff = (void*)mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]);
+ // int64 tensor: 8 bytes per element, hence size * 8 below.
+ auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
+ break;
+ }
- } else if (mInterpreter->tensor(mOutputLayerId[layer.first])->type == kTfLiteFloat32) {
+ case kTfLiteFloat32: {
LOGI("type is kTfLiteFloat32");
- pBuff = (void *) mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]);
+ auto pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
+ break;
+ }
- } else {
+ default:
LOGE("Not supported");
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}
- buffers.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layer.first])->name, buffer));
+ // Key the map by layer.first — the same key used by mOutputLayers /
+ // mOutputLayerId — instead of the TFLite tensor's internal name, so
+ // lookups by callers stay consistent with the rest of this class.
+ buffers.insert(std::make_pair(layer.first, buffer));
}
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;