* @brief connection with tflite libraries.
*
* @bug No known bugs.
- * @todo Invoke() should be implemented.
* @todo If required, this class will be implemented as a singleton.
*/
+
#include "tensor_filter_tensorflow_lite_core.h"
/**
+ * @brief TFLiteCore creator
+ * @param _model_path : the logical path to the '{model_name}.tffile' model file
+ * @note the model at _model_path is loaded when the object is constructed
+ * @return Nothing
+ */
+TFLiteCore::TFLiteCore (char *_model_path)
+{
+ model_path = _model_path;
+ input_idx_list_len = 0;
+ output_idx_list_len = 0;
+
+ loadModel ();
+}
+
+/**
+ * @brief TFLiteCore Destructor
+ * @return Nothing
+ */
+TFLiteCore::~TFLiteCore ()
+{
+ delete[] input_idx_list;
+ delete[] output_idx_list;
+}
+
+/**
+ * @brief load the tflite model
+ * @note if the interpreter has already been built, the model is not reloaded
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::loadModel ()
+{
+ if (!interpreter) {
+ model =
+ std::unique_ptr < tflite::FlatBufferModel >
+ (tflite::FlatBufferModel::BuildFromFile (model_path));
+ if (!model) {
+ std::cout << "Failed to mmap model" << std::endl;
+ return -1;
+ }
+ model->error_reporter ();
+ std::cout << "model loaded" << std::endl;
+
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+ tflite::InterpreterBuilder (*model, resolver) (&interpreter);
+ if (!interpreter) {
+ std::cout << "Failed to construct interpreter" << std::endl;
+ return -2;
+ }
+ }
+ // fill class parameters
+ tensor_size = interpreter->tensors_size ();
+ node_size = interpreter->nodes_size ();
+ input_size = interpreter->inputs ().size ();
+ output_size = interpreter->outputs ().size ();
+
+ // allocate the lists of the input/output tensor indexes;
+ // they can be used to get tensor names, e.g. 'interpreter->GetOutputName (0);'
+ input_idx_list = new int[input_size];
+ output_idx_list = new int[output_size];
+
+ int t_size = interpreter->tensors_size ();
+ for (int i = 0; i < t_size; i++) {
+ for (int j = 0; j < input_size; j++) {
+ if (strcmp (interpreter->tensor (i)->name,
+ interpreter->GetInputName (j)) == 0)
+ input_idx_list[input_idx_list_len++] = i;
+ }
+ for (int j = 0; j < output_size; j++) {
+ if (strcmp (interpreter->tensor (i)->name,
+ interpreter->GetOutputName (j)) == 0)
+ output_idx_list[output_idx_list_len++] = i;
+ }
+ }
+ return 0;
+}
+
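The name-matching scan above records, for the j-th model input/output, the interpreter's global tensor index, so later calls such as getInputTensorDim () can reach tensor metadata without another search. A minimal standalone sketch of the same lookup, assuming only a built interpreter (the helper name is hypothetical):

/* Hypothetical helper, not part of this patch: resolve the global tensor
 * index of the j-th model input by name, mirroring the scan in loadModel (). */
#include <cstring>
#include "tensorflow/contrib/lite/interpreter.h"

static int
find_input_tensor_index (tflite::Interpreter * interpreter, int j)
{
  for (int i = 0; i < (int) interpreter->tensors_size (); i++) {
    if (strcmp (interpreter->tensor (i)->name,
            interpreter->GetInputName (j)) == 0)
      return i;                 /* usable with interpreter->tensor (i) */
  }
  return -1;                    /* no tensor carries that name */
}

Note that interpreter->inputs ()[j] already yields this index directly; the scan is only needed when starting from a tensor name.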
+/**
+ * @brief return the Dimension of Input Tensor.
+ * @param idx : the index of the input tensor
+ * @param[out] dim : the dimension array of the input tensor
+ * @param[out] len : the number of dimensions (rank) of the input tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::getInputTensorDim (int idx, int **dim, int *len)
+{
+ if (idx < 0 || idx >= input_size) {
+ return -1;
+ }
+ *dim = interpreter->tensor (input_idx_list[idx])->dims->data;
+ *len = interpreter->tensor (input_idx_list[idx])->dims->size;
+
+ return 0;
+}
+
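Because dim points into the interpreter's own dims->data array and len is its rank, a caller can derive a tensor's total element count without copying anything, which is the same computation invoke () performs below. A minimal caller sketch, with a hypothetical helper name:

/* Hypothetical helper, not part of this patch: total element count of the
 * idx-th input tensor, derived via getInputTensorDim (). */
#include "tensor_filter_tensorflow_lite_core.h"

static int
count_input_elements (TFLiteCore * core, int idx)
{
  int *dim = NULL;
  int len = 0;
  if (core->getInputTensorDim (idx, &dim, &len) != 0)
    return -1;                  /* invalid index */
  int count = 1;
  for (int i = 0; i < len; i++)
    count *= dim[i];            /* e.g. {1, 224, 224, 3} -> 150528 */
  return count;
}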
+/**
+ * @brief return the Dimension of Output Tensor.
+ * @param idx : the index of the output tensor
+ * @param[out] dim : the dimension array of the output tensor
+ * @param[out] len : the number of dimensions (rank) of the output tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::getOutputTensorDim (int idx, int **dim, int *len)
+{
+ if (idx < 0 || idx >= output_size) {
+ return -1;
+ }
+ *dim = interpreter->tensor (output_idx_list[idx])->dims->data;
+ *len = interpreter->tensor (output_idx_list[idx])->dims->size;
+
+ return 0;
+}
+
+/**
+ * @brief return the number of Input Tensors.
+ * @return the number of Input Tensors.
+ */
+int
+TFLiteCore::getInputTensorSize ()
+{
+ return input_size;
+}
+
+/**
+ * @brief return the number of Output Tensors.
+ * @return the number of Output Tensors
+ */
+int
+TFLiteCore::getOutputTensorSize ()
+{
+ return output_size;
+}
+
+/**
+ * @brief run the model with the input.
+ * @param[in] inptr : The input tensor
+ * @param[out] outptr : The output tensor
+ * @note outptr is set to memory owned by the interpreter; the caller must not free it
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::invoke (uint8_t * inptr, uint8_t ** outptr)
+{
+ int input_element_count = 1;
+
+ int sizeOfArray = 0;
+ int *inputTensorDim;
+ int ret = getInputTensorDim (0, &inputTensorDim, &sizeOfArray);
+ if (ret) {
+ return -1;
+ }
+ for (int i = 0; i < sizeOfArray; i++) {
+ input_element_count *= inputTensorDim[i];
+ }
+
+ int input = interpreter->inputs ()[0];
+
+ if (interpreter->AllocateTensors () != kTfLiteOk) {
+ std::cout << "Failed to allocate tensors!" << std::endl;
+ return -2;
+ }
+
+ for (int i = 0; i < input_element_count; i++) {
+ (interpreter->typed_tensor < uint8_t > (input))[i] = (uint8_t) inptr[i];
+ }
+
+ if (interpreter->Invoke () != kTfLiteOk) {
+ return -3;
+ }
+
+ *outptr = interpreter->typed_output_tensor < uint8_t > (0);
+
+ return 0;
+}
+
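Putting the pieces together, a caller constructs the core, sizes its input buffer from the tensor dimensions, and invokes. A minimal end-to-end sketch, assuming a hypothetical uint8 model file 'sample.tflite'; the buffer returned through outptr is owned by the interpreter and must not be freed:

/* Hypothetical end-to-end sketch, not part of this patch. */
#include <cstdint>
#include <cstdlib>
#include "tensor_filter_tensorflow_lite_core.h"

int
main (void)
{
  TFLiteCore core ((char *) "sample.tflite");   /* the model loads here */

  int *dim = NULL;
  int len = 0, count = 1;
  if (core.getInputTensorDim (0, &dim, &len) != 0)
    return 1;
  for (int i = 0; i < len; i++)
    count *= dim[i];            /* total input elements */

  uint8_t *in = (uint8_t *) calloc (count, sizeof (uint8_t));
  if (in == NULL)
    return 1;
  uint8_t *out = NULL;
  int ret = core.invoke (in, &out);

  free (in);                    /* 'out' belongs to the interpreter */
  return ret;
}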
+/**
* @brief call the creator of TFLiteCore class.
* @param _model_path : the logical path to the '{model_name}.tffile' model file
* @return TFLiteCore class
/**
* @brief delete the TFLiteCore class.
- * @param _tflite : the class object
+ * @param tflite : the class object
* @return Nothing
*/
extern void
/**
* @brief get model path
- * @param _tflite : the class object
+ * @param tflite : the class object
* @return model path
*/
extern char *
/**
* @brief get the Dimension of Input Tensor of model
- * @param _tflite : the class object
- * @return the input dimension
+ * @param tflite : the class object
+ * @param idx : the index of the input tensor
+ * @param[out] dim : the dimension array of the input tensor
+ * @param[out] len : the number of dimensions (rank) of the input tensor
+ * @return 0 if OK. non-zero if error.
*/
-int *
-tflite_core_getInputDim (void *tflite)
+int
+tflite_core_getInputDim (void *tflite, int idx, int **dim, int *len)
{
TFLiteCore *c = (TFLiteCore *) tflite;
- return c->getInputTensorDim ();
+ return c->getInputTensorDim (idx, dim, len);
}
/**
* @brief get the Dimension of Output Tensor of model
- * @param _tflite : the class object
- * @return the output dimension
+ * @param tflite : the class object
+ * @param idx : the index of the output tensor
+ * @param[out] dim : the dimension array of the output tensor
+ * @param[out] len : the number of dimensions (rank) of the output tensor
+ * @return 0 if OK. non-zero if error.
*/
-int *
-tflite_core_getOutputDim (void *tflite)
+int
+tflite_core_getOutputDim (void *tflite, int idx, int **dim, int *len)
{
TFLiteCore *c = (TFLiteCore *) tflite;
- return c->getOutputTensorDim ();
+ return c->getOutputTensorDim (idx, dim, len);
}
/**
* @brief get the size of Input Tensor of model
- * @param _tflite : the class object
- * @return how many input tensors are
+ * @param tflite : the class object
+ * @return the number of Input Tensors.
*/
int
tflite_core_getInputSize (void *tflite)
/**
* @brief get the size of Output Tensor of model
- * @param _tflite : the class object
- * @return how many output tensors are
+ * @param tflite : the class object
+ * @return the number of Output Tensors.
*/
int
tflite_core_getOutputSize (void *tflite)
TFLiteCore *c = (TFLiteCore *) tflite;
return c->getOutputTensorSize ();
}
+
+/**
+ * @brief invoke the model
+ * @param tflite : the class object
+ * @param[in] inptr : The input tensor
+ * @param[out] outptr : The output tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+tflite_core_invoke (void *tflite, uint8_t * inptr, uint8_t ** outptr)
+{
+ TFLiteCore *c = (TFLiteCore *) tflite;
+ return c->invoke (inptr, outptr);
+}
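From plain C, the same flow goes through the opaque handle returned by tflite_core_new (). A minimal sketch with the same hypothetical 'sample.tflite':

/* Hypothetical C-interface sketch, not part of this patch. */
#include <cstdint>
#include <cstdlib>
#include "tensor_filter_tensorflow_lite_core.h"

static int
run_once (void)
{
  void *tf = tflite_core_new ((char *) "sample.tflite");
  if (tf == NULL)
    return -1;

  int *dim = NULL;
  int len = 0, count = 1;
  if (tflite_core_getInputDim (tf, 0, &dim, &len) != 0) {
    tflite_core_delete (tf);
    return -1;
  }
  for (int i = 0; i < len; i++)
    count *= dim[i];

  uint8_t *in = (uint8_t *) calloc (count, sizeof (uint8_t));
  uint8_t *out = NULL;
  int ret = (in != NULL) ? tflite_core_invoke (tf, in, &out) : -1;

  free (in);
  tflite_core_delete (tf);      /* 'out' dies with the core: use it before this */
  return ret;
}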
* @file tensor_filter_tensorflow_lite_core.h
* @author HyoungJoo Ahn <hello.ahn@samsung.com>
* @date 7/5/2018
- * @brief connection with tflite libraries.
+ * @brief connection with tflite libraries.
*
* @bug No known bugs.
- * @todo Invoke() should be implemented.
* @todo If required, this class will be implemented as a singleton.
*/
#ifndef TENSOR_FILTER_TENSORFLOW_LITE_H
#include "tensorflow/contrib/lite/kernels/register.h"
/**
- * @brief ring cache structure
+ * @brief Class to load and run a tensorflow-lite model
*/
class TFLiteCore
{
* member functions.
*/
TFLiteCore (char *_model_path);
+ ~TFLiteCore ();
+
+ /**
+ * @brief get the model path.
+ * @return saved model path.
+ */
char *getModelPath ()
{
return model_path;
const char *getInputTensorName ();
const char *getOutputTensorName ();
- /**
- * @brief @todo fill this in
- */
- int getInputTensorSize ()
- {
- return input_size;
- }
-
- /**
- * @brief @todo fill this in
- */
- int getOutputTensorSize ()
- {
- return output_size;
- }
- int *getInputTensorDim ();
- int *getOutputTensorDim ();
+ int getInputTensorSize ();
+ int getOutputTensorSize ();
+ int getInputTensorDim (int idx, int **dim, int *len);
+ int getOutputTensorDim (int idx, int **dim, int *len);
+ int getInputTensorDimSize ();
+ int getOutputTensorDimSize ();
+ int invoke (uint8_t * inptr, uint8_t ** outptr);
private:
/**
int node_size;
int input_size;
int output_size;
- const char *input_name;
- const char *output_name;
- int input_idx;
- int output_idx;
+ int *input_idx_list;
+ int *output_idx_list;
+ int input_idx_list_len;
+ int output_idx_list_len;
std::unique_ptr < tflite::Interpreter > interpreter;
std::unique_ptr < tflite::FlatBufferModel > model;
};
/**
- * @brief TFLiteCore creator
- * @param _model_path : the logical path to '{model_name}.tffile' file
- * @note the model of _model_path will be loaded simultaneously
- * @return Nothing
- */
-TFLiteCore::TFLiteCore (char *_model_path)
-{
- model_path = _model_path;
- loadModel ();
-}
-
-/**
- * @brief load the tflite model
- * @note the model will be loaded
- * @return Nothing
- */
-int
-TFLiteCore::loadModel ()
-{
- if (!interpreter) {
- model =
- std::unique_ptr < tflite::FlatBufferModel >
- (tflite::FlatBufferModel::BuildFromFile (model_path));
- if (!model) {
- std::cout << "Failed to mmap model" << std::endl;
- return -1;
- }
- model->error_reporter ();
- std::cout << "model loaded" << std::endl;
-
- tflite::ops::builtin::BuiltinOpResolver resolver;
- tflite::InterpreterBuilder (*model, resolver) (&interpreter);
- if (!interpreter) {
- std::cout << "Failed to construct interpreter" << std::endl;
- return -2;
- }
- }
- // fill class parameters
- tensor_size = interpreter->tensors_size ();
- node_size = interpreter->nodes_size ();
- input_size = interpreter->inputs ().size ();
- input_name = interpreter->GetInputName (0);
- output_size = interpreter->outputs ().size ();
- output_name = interpreter->GetOutputName (0);
-
- int t_size = interpreter->tensors_size ();
- for (int i = 0; i < t_size; i++) {
- if (strcmp (interpreter->tensor (i)->name,
- interpreter->GetInputName (0)) == 0)
- input_idx = i;
- if (strcmp (interpreter->tensor (i)->name,
- interpreter->GetOutputName (0)) == 0)
- output_idx = i;
- }
- return 1;
-}
-
-/**
- * @brief return the Dimension of Input Tensor.
- * @return the array of integer.
- */
-int *
-TFLiteCore::getInputTensorDim ()
-{
- return interpreter->tensor (input_idx)->dims->data;
-}
-
-/**
- * @brief return the Dimension of Output Tensor.
- * @return the array of integer.
- */
-int *
-TFLiteCore::getOutputTensorDim ()
-{
- return interpreter->tensor (output_idx)->dims->data;
-}
-
-/**
* @brief the definition of functions to be used at C files.
*/
extern "C"
extern void *tflite_core_new (char *_model_path);
extern void tflite_core_delete (void *tflite);
extern char *tflite_core_getModelPath (void *tflite);
- extern int *tflite_core_getInputDim (void *tflite);
- extern int *tflite_core_getOutputDim (void *tflite);
+ extern int tflite_core_getInputDim (void *tflite, int idx, int **dim,
+ int *len);
+ extern int tflite_core_getOutputDim (void *tflite, int idx, int **dim,
+ int *len);
extern int tflite_core_getInputSize (void *tflite);
extern int tflite_core_getOutputSize (void *tflite);
+ extern int tflite_core_invoke (void *tflite, uint8_t * inptr,
+ uint8_t ** outptr);
#ifdef __cplusplus
}