/** Supported with tf, tflite and caffe */
return _NNS_FLOAT32;
case armnn::DataType::Float16:
- ml_logw ("Unsupported armnn datatype Float16.");
+#ifdef FLOAT16_SUPPORT
+ return _NNS_FLOAT16;
+#else
+ ml_logw ("Unsupported armnn datatype Float16. Recompile with -DFLOAT16_SUPPORT option.");
+#endif
break;
case armnn::DataType::QAsymmU8:
/** Supported with tflite */
res = _NNS_FLOAT64;
break;
case NNTensorType_F16:
+#ifdef FLOAT16_SUPPORT
+ res = _NNS_FLOAT16;
+ break;
+#endif
default:
nns_logw ("Tensor type not supported: %d", (gint)_type);
return -EINVAL;
case _NNS_FLOAT32:
value = (double) ((float *) lt->data)[tidx];
break;
+ case _NNS_FLOAT16:
+#ifdef FLOAT16_SUPPORT
+ value = (double) ((float16 *) lt->data)[tidx];
+#else
+ nns_loge
+ ("NNStreamer requires -DFLOAT16_SUPPORT as a build option to enable float16 type. This binary does not have float16 feature enabled; thus, float16 type is not supported in this instance.\n");
+ throw std::runtime_error ("Float16 not supported. Recompile with -DFLOAT16_SUPPORT.");
+#endif
+ break;
case _NNS_INT64:
value = (double) ((int64_t *) lt->data)[tidx];
break;
case _NNS_FLOAT32:
((float *) lt->data)[tidx] = (float) value;
break;
+ case _NNS_FLOAT16:
+#ifdef FLOAT16_SUPPORT
+ ((float16 *) lt->data)[tidx] = (float16) value;
+#else
+ nns_loge
+ ("NNStreamer requires -DFLOAT16_SUPPORT as a build option to enable float16 type. This binary does not have float16 feature enabled; thus, float16 type is not supported in this instance.\n");
+ throw std::runtime_error ("Float16 not supported. Recompile with -DFLOAT16_SUPPORT.");
+#endif
+ break;
case _NNS_INT64:
((int64_t *) lt->data)[tidx] = (int64_t) value;
break;
case torch::kF64:
return _NNS_FLOAT64;
case torch::kF16:
+#ifdef FLOAT16_SUPPORT
+ return _NNS_FLOAT16;
+#else
+ ml_loge
+ ("NNStreamer requires -DFLOAT16_SUPPORT as a build option to enable float16 type. This binary does not have float16 feature enabled; thus, float16 type is not supported in this instance.\n");
+ break;
+#endif
default:
break;
}
case _NNS_INT64:
*torchType = torch::kI64;
break;
+ case _NNS_FLOAT16:
+#ifdef FLOAT16_SUPPORT
+ *torchType = torch::kF16;
+#else
+ ml_loge
+ ("NNStreamer requires -DFLOAT16_SUPPORT as a build option to enable float16 type. This binary does not have float16 feature enabled; thus, float16 type is not supported in this instance.\n");
+#endif
+ break;
case _NNS_FLOAT32:
*torchType = torch::kF32;
break;
return _NNS_FLOAT32;
case TF_DOUBLE:
return _NNS_FLOAT64;
+ case TF_HALF:
+#ifdef FLOAT16_SUPPORT
+ return _NNS_FLOAT16;
+#else
+ ml_loge
+ ("NNStreamer requires -DFLOAT16_SUPPORT as a build option to enable float16 type. This binary does not have float16 feature enabled; thus, float16 type is not supported in this instance.\n");
+ break;
+#endif
default:
/** @todo Support other types */
break;
return TF_INT64;
case _NNS_UINT64:
return TF_UINT64;
+ case _NNS_FLOAT16:
+#ifdef FLOAT16_SUPPORT
+ return TF_HALF;
+#else
+ ml_loge
+ ("NNStreamer requires -DFLOAT16_SUPPORT as a build option to enable float16 type. This binary does not have float16 feature enabled; thus, float16 type is not supported in this instance.\n");
+ break;
+#endif
case _NNS_FLOAT32:
return TF_FLOAT;
case _NNS_FLOAT64:
#endif
case kTfLiteInt64:
return _NNS_INT64;
+#ifdef TFLITE_FLOAT16
+ case kTfLiteFloat16:
+#ifdef FLOAT16_SUPPORT
+ return _NNS_FLOAT16;
+#else
+ ml_loge
+ ("NNStreamer requires -DFLOAT16_SUPPORT as a build option to enable float16 type. This binary does not have float16 feature enabled; thus, float16 type is not supported in this instance.\n");
+ break;
+#endif
+#endif
case kTfLiteString:
#ifdef TFLITE_COMPLEX64
case kTfLiteComplex64:
#endif
-#ifdef TFLITE_FLOAT16
- case kTfLiteFloat16:
-#endif
default:
ml_loge ("Not supported Tensorflow Data Type: [%d].", tfType);
/** @todo Support other types */
return _NNS_INT16;
case VSI_NN_TYPE_UINT16:
return _NNS_UINT16;
- /** Note that the current nnstreamer (tensor_typedef.h) does not support FLOAT16.
- * Let's use UINT16 as a workaround.
- */
case VSI_NN_TYPE_FLOAT16:
+#ifdef FLOAT16_SUPPORT
+ return _NNS_FLOAT16;
+#else
+ /**
+ * Let's use UINT16 as a workaround if FLOAT16 is not supported.
+ */
return _NNS_UINT16;
+#endif
case VSI_NN_TYPE_FLOAT32:
return _NNS_FLOAT32;
default:
/** Get an input data type: VSI_NN_TYPE_UINT8 (u8) in case of inceptionv3 */
pdata->input_tensor.info[i].type =
convert_tensortype (i_tensor->attr.dtype.vx_type);
- asprintf (&pdata->input_tensor.info[i].name, "%i", pdata->graph->input.tensors[i]);
+ asprintf (&pdata->input_tensor.info[i].name, "%i",
+ pdata->graph->input.tensors[i]);
/** dummy name */
pdata->input_tensor.num_tensors = pdata->graph->input.num; /** number of tensors */
}
/** Get an output data type: VSI_NN_TYPE_FLOAT16 (f16) in case of inceptionv3 */
pdata->output_tensor.info[i].type =
convert_tensortype (o_tensor->attr.dtype.vx_type);
- asprintf (&pdata->output_tensor.info[i].name, "%i", pdata->graph->output.tensors[i]);
+ asprintf (&pdata->output_tensor.info[i].name, "%i",
+ pdata->graph->output.tensors[i]);
/** dummy name */
pdata->output_tensor.num_tensors = pdata->graph->output.num; /** number of tensors */
}
for (int i = 0; i < _NNS_END; ++i) {
tensor_type ttype = (tensor_type) i;
+#ifndef FLOAT16_SUPPORT
+ if (i == _NNS_FLOAT16)
+ continue;
+#endif
model = g_strdup_printf ("\
inputTensorsInfo={num=1,dim={{1,2,2,1},},type={'%s',}} \
outputTensorsInfo={num=1,dim={{1,2,2,1},},type={'%s',}} \