void *pBuff = NULL;
for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx ) {
-
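+ // Compute this input tensor's element count as the product of its shape dimensions.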
+ size_t size = 1;
inference_engine_tensor_buffer buffer;
-
+ for (std::vector<int>::iterator iter = mInputTensorInfo[idx].shape.begin();
+ iter != mInputTensorInfo[idx].shape.end(); ++iter) {
+ size *= (*iter);
+ }
if (mInputAttrType[idx] == kTfLiteUInt8) {
mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[idx]));
pBuff = mInputData.back();
- buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, 0, 1};
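+ // One byte per element for UINT8, so the byte size equals the element count.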
+ buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
}
else if (mInputAttrType[idx] == kTfLiteFloat32) {
mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
pBuff = mInputData.back();
- buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1};
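+ // FLOAT32 elements are 4 bytes each, hence size * 4.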
+ buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
}
else {
LOGE("Not supported");
for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
inference_engine_tensor_buffer buffer;
+ size_t size = 1;
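+ // For output tensors, take the dimensions straight from the interpreter.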
+ for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size; ++idx2) {
+ size *= mInterpreter->tensor(mOutputLayerId[idx])->dims->data[idx2];
+ }
- pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
- buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1};
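+ // Choose the buffer's data type and byte size from the tensor's actual TfLite type.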
+ if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteUInt8) {
+ LOGI("type is kTfLiteUInt8");
+ pBuff = (void*)mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[idx]);
+ buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
+ }
+ else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteFloat32) {
+ LOGI("type is kTfLiteFloat32");
+ pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
+ buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
+ }
+ else {
+ LOGE("Not supported");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+ }
buffers.push_back(buffer);
}
@@ ... @@
std::vector<int> shape_nhwc;
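+ // Collect the tensor's dimensions; TFLite tensor shapes are in NHWC order.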
for (int idx = 0; idx < mInterpreter->tensor((*iter))->dims->size; idx++) {
- LOGI("mInterpreter->tensor((*iter))->dims[%d]= [%d]", idx, mInterpreter->tensor((*iter))->dims->data[idx]);
shape_nhwc.push_back(mInterpreter->tensor((*iter))->dims->data[idx]);
}
@@ ... @@ int InferenceTFLite::SetInputLayerProperty(inference_engine_layer_property &property)
mInputLayer = property.layer_names;
mInputTensorInfo = property.tensor_infos;
+ LOGI("LEAVE");
+
return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceTFLite::SetOutputLayerProperty(inference_engine_layer_property &property)
{
+ LOGI("ENTER");
std::vector<std::string>::iterator iter;
for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
std::string name = *iter;
}

mOutputLayer = property.layer_names;
+ LOGI("LEAVE");
+
return INFERENCE_ENGINE_ERROR_NONE;
}