return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
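+ // Look up the TfLite tensor once and reuse the cached pointer below
+ // instead of calling mInterpreter->tensor(layer.second) on every access.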
+ const TfLiteTensor *tensor = mInterpreter->tensor(layer.second);
+ if (!tensor) {
+ LOGE("tensor for tensor index(%d) is null", layer.second);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
inference_engine_tensor_info tensor_info;
LOGI("mInterpreter->tensor(%d)->dims name[%s] size[%d] type[%d]",
- layer.second,
- mInterpreter->tensor(layer.second)->name,
- mInterpreter->tensor(layer.second)->dims->size,
- mInterpreter->tensor(layer.second)->type);
+ layer.second, tensor->name, tensor->dims->size, tensor->type);
std::vector<size_t> shape_nhwc;
- for (int idx = 0; idx < mInterpreter->tensor(layer.second)->dims->size; idx++)
- shape_nhwc.push_back(mInterpreter->tensor(layer.second)->dims->data[idx]);
+ for (int idx = 0; idx < tensor->dims->size; idx++)
+ shape_nhwc.push_back(tensor->dims->data[idx]);
//tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
tensor_info.shape = shape_nhwc;
tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
- if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
+ if (tensor->type == kTfLiteUInt8) {
LOGI("type is kTfLiteUInt8");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- } else if (mInterpreter->tensor(layer.second)->type == kTfLiteInt8) {
+ } else if (tensor->type == kTfLiteInt8) {
LOGI("type is kTfLiteInt8");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT8;
- } else if (mInterpreter->tensor(layer.second)->type == kTfLiteInt64) {
+ } else if (tensor->type == kTfLiteInt64) {
LOGI("type is kTfLiteInt64");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
- } else if (mInterpreter->tensor(layer.second)->type == kTfLiteFloat32) {
+ } else if (tensor->type == kTfLiteFloat32) {
LOGI("type is kTfLiteFloat32");
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
} else {
LOGE("Not supported");
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
}

tensor_info.size = 1;
for (auto & dim : tensor_info.shape)
tensor_info.size *= dim;
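+ // If the tensor is affine-quantized, expose its scale and zero point so
+ // callers can dequantize the raw output buffer.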
+ if (tensor->quantization.type == kTfLiteAffineQuantization) {
+ auto *quant_params = reinterpret_cast<TfLiteAffineQuantization *>(tensor->quantization.params);
+
+ if (quant_params) {
+ LOGD("This layer has quantization parameters.");
+ tensor_info.scale = quant_params->scale->data[0];
+ tensor_info.zero_point = quant_params->zero_point->data[0];
+ tensor_info.quantization_type = INFERENCE_TENSOR_QUANTIZATION_AFFINE;
+
+ LOGD("Quantization params : type(%d), scale(%f), zero point(%d)", tensor_info.quantization_type, tensor_info.scale, tensor_info.zero_point);
+ }
+ }
+
- mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+ mOutputLayers.insert(std::make_pair(tensor->name, tensor_info));
}