From 5926a2612af40cb328cc2e329fbf352a28147ab0 Mon Sep 17 00:00:00 2001 From: HyoungjooAhn Date: Wed, 18 Jul 2018 16:48:17 +0900 Subject: [PATCH] [Filter/TF-Lite] update the methods that get the in/out dimensions of tensors 1. improve the robustness by using memcpy 2. when the upper layer calls get in/output tensor dimension, return the data type of the tensor rather than the length, because the length is fixed at NNS_TENSOR_RANK_LIMIT Signed-off-by: HyoungjooAhn --- .../tensor_filter_tensorflow_lite_core.cc | 85 +++++++++++++++++----- .../tensor_filter_tensorflow_lite_core.h | 16 ++-- 2 files changed, 78 insertions(+), 23 deletions(-) rename {gst/tensor_filter => include}/tensor_filter_tensorflow_lite_core.h (84%) diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc index b02fb50..84a7277 100644 --- a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc +++ b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc @@ -103,21 +103,61 @@ TFLiteCore::loadModel () } /** + * @brief return the data type of the tensor + * @param tensor_idx : the index of the tensor + * @param[out] type : the data type of the tensor + * @return 0 if OK. non-zero if error. + */ +int +TFLiteCore::getTensorType (int tensor_idx, tensor_type * type) +{ + switch (interpreter->tensor (tensor_idx)->type) { + case kTfLiteFloat32: + *type = _NNS_FLOAT32; + break; + case kTfLiteUInt8: + *type = _NNS_UINT8; + break; + case kTfLiteInt32: + *type = _NNS_INT32; + break; + case kTfLiteBool: + *type = _NNS_INT8; + break; + case kTfLiteInt64: + case kTfLiteString: + default: + *type = _NNS_END; + return -1; + break; + } + return 0; +} + +/** * @brief return the Dimension of Input Tensor. * @param idx : the index of the input tensor * @param[out] dim : the array of the input tensor - * @param[out] len : the length of the input tensor array + * @param[out] type : the data type of the input tensor * @return 0 if OK. non-zero if error. 
*/ int -TFLiteCore::getInputTensorDim (int idx, int **dim, int *len) +TFLiteCore::getInputTensorDim (int idx, tensor_dim dim, tensor_type * type) { if (idx >= input_size) { return -1; } - *dim = interpreter->tensor (input_idx_list[idx])->dims->data; - *len = interpreter->tensor (input_idx_list[idx])->dims->size; + if (getTensorType (input_idx_list[idx], type)) { + return -2; + } + int len = interpreter->tensor (input_idx_list[idx])->dims->size; + g_assert (len <= NNS_TENSOR_RANK_LIMIT); + memcpy (dim, interpreter->tensor (input_idx_list[idx])->dims->data, + sizeof (tensor_dim) / NNS_TENSOR_RANK_LIMIT * len); + for (int i = len; i < NNS_TENSOR_RANK_LIMIT; i++) { + dim[i] = 1; + } return 0; } @@ -125,18 +165,26 @@ TFLiteCore::getInputTensorDim (int idx, int **dim, int *len) * @brief return the Dimension of Output Tensor. * @param idx : the index of the output tensor * @param[out] dim : the array of the output tensor - * @param[out] len : the length of the output tensor array + * @param[out] type : the data type of the output tensor * @return 0 if OK. non-zero if error. 
*/ int -TFLiteCore::getOutputTensorDim (int idx, int **dim, int *len) +TFLiteCore::getOutputTensorDim (int idx, tensor_dim dim, tensor_type * type) { if (idx >= output_size) { return -1; } - *dim = interpreter->tensor (output_idx_list[idx])->dims->data; - *len = interpreter->tensor (output_idx_list[idx])->dims->size; + if (getTensorType (output_idx_list[idx], type)) { + return -2; + } + int len = interpreter->tensor (output_idx_list[idx])->dims->size; + g_assert (len <= NNS_TENSOR_RANK_LIMIT); + memcpy (dim, interpreter->tensor (output_idx_list[idx])->dims->data, + sizeof (tensor_dim) / NNS_TENSOR_RANK_LIMIT * len); + for (int i = len; i < NNS_TENSOR_RANK_LIMIT; i++) { + dim[i] = 1; + } return 0; } @@ -171,9 +219,10 @@ TFLiteCore::invoke (uint8_t * inptr, uint8_t ** outptr) { int output_number_of_pixels = 1; - int sizeOfArray = 0; - int *inputTensorDim; - int ret = getInputTensorDim (0, &inputTensorDim, &sizeOfArray); + int sizeOfArray = NNS_TENSOR_RANK_LIMIT; + tensor_type type; + tensor_dim inputTensorDim; + int ret = getInputTensorDim (0, inputTensorDim, &type); if (ret) { return -1; } @@ -241,14 +290,15 @@ tflite_core_getModelPath (void *tflite) * @param tflite : the class object * @param idx : the index of the input tensor * @param[out] dim : the array of the input tensor - * @param[out] len : the length of the input tensor array + * @param[out] type : the data type of the input tensor * @return 0 if OK. non-zero if error. 
*/ int -tflite_core_getInputDim (void *tflite, int idx, int **dim, int *len) +tflite_core_getInputDim (void *tflite, int idx, tensor_dim dim, + tensor_type * type) { TFLiteCore *c = (TFLiteCore *) tflite; - return c->getInputTensorDim (idx, dim, len); + return c->getInputTensorDim (idx, dim, type); } /** @@ -256,14 +306,15 @@ tflite_core_getInputDim (void *tflite, int idx, int **dim, int *len) * @param tflite : the class object * @param idx : the index of the output tensor * @param[out] dim : the array of the output tensor - * @param[out] len : the length of the output tensor array + * @param[out] type : the data type of the output tensor * @return 0 if OK. non-zero if error. */ int -tflite_core_getOutputDim (void *tflite, int idx, int **dim, int *len) +tflite_core_getOutputDim (void *tflite, int idx, tensor_dim dim, + tensor_type * type) { TFLiteCore *c = (TFLiteCore *) tflite; - return c->getOutputTensorDim (idx, dim, len); + return c->getOutputTensorDim (idx, dim, type); } /** diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.h b/include/tensor_filter_tensorflow_lite_core.h similarity index 84% rename from gst/tensor_filter/tensor_filter_tensorflow_lite_core.h rename to include/tensor_filter_tensorflow_lite_core.h index 9971c12..43d6495 100644 --- a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.h +++ b/include/tensor_filter_tensorflow_lite_core.h @@ -26,6 +26,9 @@ #ifdef __cplusplus #include +#include +#include +#include "tensor_typedef.h" #include "tensorflow/contrib/lite/model.h" #include "tensorflow/contrib/lite/optional_debug_tools.h" @@ -58,8 +61,8 @@ public: int getInputTensorSize (); int getOutputTensorSize (); - int getInputTensorDim (int idx, int **dim, int *len); - int getOutputTensorDim (int idx, int **dim, int *len); + int getInputTensorDim (int idx, tensor_dim dim, tensor_type * type); + int getOutputTensorDim (int idx, tensor_dim dim, tensor_type * type); int getInputTensorDimSize (); int getOutputTensorDimSize (); int invoke 
(uint8_t * inptr, uint8_t ** outptr); @@ -79,6 +82,7 @@ private: int output_idx_list_len; std::unique_ptr < tflite::Interpreter > interpreter; std::unique_ptr < tflite::FlatBufferModel > model; + int getTensorType(int tensor_idx, tensor_type *type); }; /** @@ -91,10 +95,10 @@ extern "C" extern void *tflite_core_new (const char *_model_path); extern void tflite_core_delete (void *tflite); extern const char *tflite_core_getModelPath (void *tflite); - extern int tflite_core_getInputDim (void *tflite, int idx, int **dim, - int *len); - extern int tflite_core_getOutputDim (void *tflite, int idx, int **dim, - int *len); + extern int tflite_core_getInputDim (void *tflite, int idx, tensor_dim dim, + tensor_type * type); + extern int tflite_core_getOutputDim (void *tflite, int idx, tensor_dim dim, + tensor_type * type); extern int tflite_core_getInputSize (void *tflite); extern int tflite_core_getOutputSize (void *tflite); extern int tflite_core_invoke (void *tflite, uint8_t * inptr, -- 2.7.4