"(?<!!)" ACCL_NEON_STR ")?"
/**
- * @brief TFLiteCore creator
- * @param _model_path : the logical path to '{model_name}.tflite' file
- * @param accelerators : the accelerators property set for this subplugin
- * @note the model of _model_path will be loaded simultaneously
- * @return Nothing
+ * @brief TFLiteInterpreter constructor
*/
-TFLiteCore::TFLiteCore (const char * _model_path, const char * accelerators)
+TFLiteInterpreter::TFLiteInterpreter ()
{
- g_assert (_model_path != NULL);
- model_path = g_strdup (_model_path);
interpreter = nullptr;
model = nullptr;
+#ifdef ENABLE_TFLITE_NNAPI_DELEGATE
+ nnfw_delegate = nullptr;
+#endif
+ model_path = nullptr;
- setAccelerator (accelerators);
- g_warning ("nnapi = %d, accl = %s", use_nnapi, get_accl_hw_str(accelerator));
+ g_mutex_init (&mutex);
gst_tensors_info_init (&inputTensorMeta);
gst_tensors_info_init (&outputTensorMeta);
}
/**
- * @brief TFLiteCore Destructor
- * @return Nothing
+ * @brief TFLiteInterpreter destructor
*/
-TFLiteCore::~TFLiteCore ()
+TFLiteInterpreter::~TFLiteInterpreter ()
{
+ g_mutex_clear (&mutex);
+ g_free (model_path);
+
gst_tensors_info_free (&inputTensorMeta);
gst_tensors_info_free (&outputTensorMeta);
}
-void TFLiteCore::setAccelerator (const char * accelerators)
+/**
+ * @brief Internal implementation of TFLiteCore's invoke()
+ */
+int
+TFLiteInterpreter::invoke (const GstTensorMemory * input,
+ GstTensorMemory * output, bool use_nnapi)
{
- GRegex * nnapi_elem;
- GMatchInfo * match_info;
+#if (DBG)
+ gint64 start_time = g_get_real_time ();
+#endif
- if (accelerators == NULL) {
- goto use_nnapi_ini;
- }
+ std::vector <int> tensors_idx;
+ int tensor_idx;
+ TfLiteTensor *tensor_ptr;
+ TfLiteStatus status;
- /* If set by user, get the precise accelerator */
- use_nnapi = (bool) g_regex_match_simple (REGEX_ACCL_NNAPI, accelerators,
- G_REGEX_CASELESS, G_REGEX_MATCH_NOTEMPTY);
- if (use_nnapi == TRUE) {
- /** Default to auto mode */
- accelerator = ACCL_AUTO;
- nnapi_elem = g_regex_new (REGEX_ACCL_NNAPI_ELEM, G_REGEX_CASELESS,
- G_REGEX_MATCH_NOTEMPTY, NULL);
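+ /* bind the caller's output buffers directly to the tflite output tensors (no extra copy) */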
+ for (int i = 0; i < outputTensorMeta.num_tensors; ++i) {
+ tensor_idx = interpreter->outputs ()[i];
+ tensor_ptr = interpreter->tensor (tensor_idx);
- /** Now match each provided element and get specific accelerator */
- if (g_regex_match (nnapi_elem, accelerators, G_REGEX_MATCH_NOTEMPTY,
- &match_info)) {
+ g_assert (tensor_ptr->bytes == output[i].size);
+ tensor_ptr->data.raw = (char *) output[i].data;
+ tensors_idx.push_back (tensor_idx);
+ }
- while (g_match_info_matches (match_info)) {
- gchar *word = g_match_info_fetch (match_info, 0);
- accelerator = get_accl_hw_type (word);
- g_free (word);
- break;
- }
- }
- g_match_info_free (match_info);
- g_regex_unref (nnapi_elem);
- } else {
- goto use_nnapi_ini;
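+ /* bind the caller's input buffers to the tflite input tensors in the same way */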
+ for (int i = 0; i < inputTensorMeta.num_tensors; ++i) {
+ tensor_idx = interpreter->inputs ()[i];
+ tensor_ptr = interpreter->tensor (tensor_idx);
+
+ g_assert (tensor_ptr->bytes == input[i].size);
+ tensor_ptr->data.raw = (char *) input[i].data;
+ tensors_idx.push_back (tensor_idx);
}
- return;
+#ifdef ENABLE_TFLITE_NNAPI_DELEGATE
+ if (use_nnapi)
+ status = nnfw_delegate->Invoke (interpreter.get());
+ else
+#endif
+ status = interpreter->Invoke ();
-use_nnapi_ini:
- use_nnapi = nnsconf_get_custom_value_bool ("tensorflowlite", "enable_nnapi",
- FALSE);
- if (use_nnapi == FALSE) {
- accelerator = ACCL_NONE;
- } else {
- accelerator = ACCL_AUTO;
+ /** if data.raw is not reset to `nullptr`, tensorflow-lite will `free()` the memory itself. */
+ int tensorSize = tensors_idx.size ();
+ for (int i = 0; i < tensorSize; ++i) {
+ interpreter->tensor (tensors_idx[i])->data.raw = nullptr;
}
-}
-/**
- * @brief initialize the object with tflite model
- * @return 0 if OK. non-zero if error.
- * -1 if the model is not loaded.
- * -2 if the initialization of input tensor is failed.
- * -3 if the initialization of output tensor is failed.
- */
-int
-TFLiteCore::init ()
-{
- if (loadModel ()) {
- g_critical ("Failed to load model\n");
+#if (DBG)
+ gint64 stop_time = g_get_real_time ();
+ g_message ("Invoke() is finished: %" G_GINT64_FORMAT,
+ (stop_time - start_time));
+#endif
+
+ if (status != kTfLiteOk) {
+ g_critical ("Failed to invoke");
return -1;
}
- if (setInputTensorProp ()) {
- g_critical ("Failed to initialize input tensor\n");
- return -2;
- }
- if (setOutputTensorProp ()) {
- g_critical ("Failed to initialize output tensor\n");
- return -3;
- }
- return 0;
-}
-/**
- * @brief get the model path
- * @return the model path.
- */
-const char *
-TFLiteCore::getModelPath ()
-{
- return model_path;
+ return 0;
}
/**
- * @brief load the tflite model
- * @note the model will be loaded
+ * @brief Internal implementation of TFLiteCore's loadModel()
* @return 0 if OK. non-zero if error.
*/
int
-TFLiteCore::loadModel ()
+TFLiteInterpreter::loadModel (bool use_nnapi)
{
#if (DBG)
gint64 start_time = g_get_real_time ();
#endif
+ if (!g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
+ g_critical ("the file of model_path (%s) is not valid (not regular)\n", model_path);
+ return -1;
+ }
+ model = tflite::FlatBufferModel::BuildFromFile (model_path);
+ if (!model) {
+ g_critical ("Failed to mmap model\n");
+ return -1;
+ }
+ /* If you run into any trouble with the model, enable the code below; it will help you analyze the problem. */
+ /* model->error_reporter (); */
+
+ interpreter = nullptr;
+
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+ tflite::InterpreterBuilder (*model, resolver) (&interpreter);
if (!interpreter) {
- if (!g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
- g_critical ("the file of model_path (%s) is not valid (not regular)\n", model_path);
- return -1;
- }
- model =
- std::unique_ptr <tflite::FlatBufferModel>
- (tflite::FlatBufferModel::BuildFromFile (model_path));
- if (!model) {
- g_critical ("Failed to mmap model\n");
- return -1;
- }
- /* If got any trouble at model, active below code. It'll be help to analyze. */
- /* model->error_reporter (); */
-
- tflite::ops::builtin::BuiltinOpResolver resolver;
- tflite::InterpreterBuilder (*model, resolver) (&interpreter);
- if (!interpreter) {
- g_critical ("Failed to construct interpreter\n");
- return -2;
- }
+ g_critical ("Failed to construct interpreter\n");
+ return -2;
+ }
- interpreter->UseNNAPI(use_nnapi);
+ interpreter->UseNNAPI (use_nnapi);
#ifdef ENABLE_TFLITE_NNAPI_DELEGATE
- if (use_nnapi) {
- nnfw_delegate.reset (new ::nnfw::tflite::NNAPIDelegate);
- if (nnfw_delegate->BuildGraph (interpreter.get()) != kTfLiteOk) {
- g_critical ("Fail to BuildGraph");
- return -3;
- }
+ if (use_nnapi) {
+ nnfw_delegate.reset (new ::nnfw::tflite::NNAPIDelegate);
+ if (nnfw_delegate->BuildGraph (interpreter.get ()) != kTfLiteOk) {
+ g_critical ("Fail to BuildGraph");
+ return -3;
}
+ }
#endif
- /** set allocation type to dynamic for in/out tensors */
- int tensor_idx;
+ /** set allocation type to dynamic for in/out tensors */
+ int tensor_idx;
- int tensorSize = interpreter->inputs ().size ();
- for (int i = 0; i < tensorSize; ++i) {
- tensor_idx = interpreter->inputs ()[i];
- interpreter->tensor (tensor_idx)->allocation_type = kTfLiteDynamic;
- }
+ int tensorSize = interpreter->inputs ().size ();
+ for (int i = 0; i < tensorSize; ++i) {
+ tensor_idx = interpreter->inputs ()[i];
+ interpreter->tensor (tensor_idx)->allocation_type = kTfLiteDynamic;
+ }
- tensorSize = interpreter->outputs ().size ();
- for (int i = 0; i < tensorSize; ++i) {
- tensor_idx = interpreter->outputs ()[i];
- interpreter->tensor (tensor_idx)->allocation_type = kTfLiteDynamic;
- }
+ tensorSize = interpreter->outputs ().size ();
+ for (int i = 0; i < tensorSize; ++i) {
+ tensor_idx = interpreter->outputs ()[i];
+ interpreter->tensor (tensor_idx)->allocation_type = kTfLiteDynamic;
+ }
- if (interpreter->AllocateTensors () != kTfLiteOk) {
- g_critical ("Failed to allocate tensors\n");
- return -2;
- }
+ if (interpreter->AllocateTensors () != kTfLiteOk) {
+ g_critical ("Failed to allocate tensors\n");
+ return -2;
}
#if (DBG)
gint64 stop_time = g_get_real_time ();
* @return the enum of defined _NNS_TYPE
*/
tensor_type
-TFLiteCore::getTensorType (TfLiteType tfType)
+TFLiteInterpreter::getTensorType (TfLiteType tfType)
{
switch (tfType) {
case kTfLiteFloat32:
}
/**
+ * @brief return the Dimension of Tensor.
+ * @param tensor_idx : the index of the tensor in the tflite interpreter
+ * @param[out] dim : the dimension array to be filled from the tensor
+ * @return 0 if OK. non-zero if error.
+ * @note assumes that the interpreter lock is already held.
+ */
+int
+TFLiteInterpreter::getTensorDim (int tensor_idx, tensor_dim dim)
+{
+ TfLiteIntArray *tensor_dims = interpreter->tensor (tensor_idx)->dims;
+ int len = tensor_dims->size;
+ g_assert (len <= NNS_TENSOR_RANK_LIMIT);
+
+ /* the order of dimension is reversed at CAPS negotiation */
+ std::reverse_copy (tensor_dims->data, tensor_dims->data + len, dim);
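+ /* e.g., tflite dims {1, 224, 224, 3} are stored into dim as {3, 224, 224, 1} */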
+
+ /* fill the remnants with 1 */
+ for (int i = len; i < NNS_TENSOR_RANK_LIMIT; ++i) {
+ dim[i] = 1;
+ }
+
+ return 0;
+}
+
+/**
* @brief extract and store the information of given tensor list
* @param tensor_idx_list list of index of tensors in tflite interpreter
* @param[out] tensorMeta tensors to set the info into
* @return 0 if OK. non-zero if error.
*/
int
-TFLiteCore::setTensorProp (const std::vector<int> &tensor_idx_list,
+TFLiteInterpreter::setTensorProp (const std::vector<int> &tensor_idx_list,
GstTensorsInfo * tensorMeta)
{
tensorMeta->num_tensors = tensor_idx_list.size ();
* @return 0 if OK. non-zero if error.
*/
int
-TFLiteCore::setInputTensorProp ()
+TFLiteInterpreter::setInputTensorProp ()
{
return setTensorProp (interpreter->inputs (), &inputTensorMeta);
}
* @return 0 if OK. non-zero if error.
*/
int
-TFLiteCore::setOutputTensorProp ()
+TFLiteInterpreter::setOutputTensorProp ()
{
return setTensorProp (interpreter->outputs (), &outputTensorMeta);
}
/**
- * @brief return the Dimension of Tensor.
- * @param tensor_idx : the real index of model of the tensor
- * @param[out] dim : the array of the tensor
- * @return 0 if OK. non-zero if error.
- */
-int
-TFLiteCore::getTensorDim (int tensor_idx, tensor_dim dim)
-{
- TfLiteIntArray *tensor_dims = interpreter->tensor (tensor_idx)->dims;
- int len = tensor_dims->size;
- g_assert (len <= NNS_TENSOR_RANK_LIMIT);
-
- /* the order of dimension is reversed at CAPS negotiation */
- std::reverse_copy (tensor_dims->data, tensor_dims->data + len, dim);
-
- /* fill the remnants with 1 */
- for (int i = len; i < NNS_TENSOR_RANK_LIMIT; ++i) {
- dim[i] = 1;
- }
-
- return 0;
-}
-
-/**
- * @brief return the Dimension of Input Tensor.
- * @param[out] info Structure for tensor info.
- * @todo return whole array rather than index 0
- * @return 0 if OK. non-zero if error.
- */
-int
-TFLiteCore::getInputTensorDim (GstTensorsInfo * info)
-{
- gst_tensors_info_copy (info, &inputTensorMeta);
- return 0;
-}
-
-/**
- * @brief return the Dimension of Tensor.
- * @param[out] info Structure for tensor info.
- * @todo return whole array rather than index 0
- * @return 0 if OK. non-zero if error.
- */
-int
-TFLiteCore::getOutputTensorDim (GstTensorsInfo * info)
-{
- gst_tensors_info_copy (info, &outputTensorMeta);
- return 0;
-}
-
-/**
* @brief set the Dimension for Input Tensor.
* @param info Structure for input tensor info.
* @return 0 if OK. non-zero if error.
* @note rank can be changed dependent on the model
*/
int
-TFLiteCore::setInputTensorDim (const GstTensorsInfo * info)
+TFLiteInterpreter::setInputTensorsInfo (const GstTensorsInfo * info)
{
TfLiteStatus status;
- const std::vector<int> &input_idx_list = interpreter->inputs ();
+ const std::vector<int> &input_idx_list = interpreter->inputs ();
/** Cannot change the number of inputs */
if (info->num_tensors != input_idx_list.size ())
dims[idx] = tensor_info->dimension[rank - idx - 1];
}
- status = interpreter->ResizeInputTensor(input_idx_list[tensor_idx], dims);
+ status = interpreter->ResizeInputTensor (input_idx_list[tensor_idx], dims);
if (status != kTfLiteOk)
continue;
/** return error when none of the ranks worked */
if (status != kTfLiteOk)
return -EINVAL;
-
}
- status = interpreter->AllocateTensors();
+ status = interpreter->AllocateTensors ();
if (status != kTfLiteOk)
return -EINVAL;
}
/**
- * @brief run the model with the input.
- * @param[in] input : The array of input tensors
- * @param[out] output : The array of output tensors
- * @return 0 if OK. non-zero if error.
+ * @brief update the model path
*/
-int
-TFLiteCore::invoke (const GstTensorMemory * input, GstTensorMemory * output)
+void
+TFLiteInterpreter::setModelPath (const char *_model_path)
{
-#if (DBG)
- gint64 start_time = g_get_real_time ();
+ if (_model_path) {
+ g_free (model_path);
+ model_path = g_strdup (_model_path);
+ }
+}
+
+/**
+ * @brief Move the ownership of interpreter internal members
+ */
+void
+TFLiteInterpreter::moveInternals (TFLiteInterpreter& interp)
+{
+ interpreter = std::move (interp.interpreter);
+ model = std::move (interp.model);
+#ifdef ENABLE_TFLITE_NNAPI_DELEGATE
+ nnfw_delegate = std::move (interp.nnfw_delegate);
#endif
+ setModelPath (interp.getModelPath ());
+}
- std::vector <int> tensors_idx;
- int tensor_idx;
- TfLiteTensor *tensor_ptr;
- TfLiteStatus status;
+/**
+ * @brief TFLiteCore creator
+ * @param _model_path : the logical path to '{model_name}.tflite' file
+ * @param accelerators : the accelerators property set for this subplugin
+ * @note the model is not loaded here; it is loaded when init () is called.
+ * @return Nothing
+ */
+TFLiteCore::TFLiteCore (const char * _model_path, const char * accelerators)
+{
+ g_assert (_model_path != NULL);
- for (int i = 0; i < outputTensorMeta.num_tensors; ++i) {
- tensor_idx = interpreter->outputs ()[i];
- tensor_ptr = interpreter->tensor (tensor_idx);
+ interpreter.setModelPath (_model_path);
- g_assert (tensor_ptr->bytes == output[i].size);
- tensor_ptr->data.raw = (char *) output[i].data;
- tensors_idx.push_back (tensor_idx);
- }
+ setAccelerator (accelerators);
- for (int i = 0; i < inputTensorMeta.num_tensors; ++i) {
- tensor_idx = interpreter->inputs ()[i];
- tensor_ptr = interpreter->tensor (tensor_idx);
+#if (DBG)
+ g_message ("nnapi = %d, accl = %s", use_nnapi, get_accl_hw_str(accelerator));
+#endif
+}
- g_assert (tensor_ptr->bytes == input[i].size);
- tensor_ptr->data.raw = (char *) input[i].data;
- tensors_idx.push_back (tensor_idx);
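+
+/**
+ * @brief Set the accelerator (NNAPI) options from the given property string
+ */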
+void
+TFLiteCore::setAccelerator (const char * accelerators)
+{
+ GRegex * nnapi_elem;
+ GMatchInfo * match_info;
+
+ if (accelerators == NULL) {
+ goto use_nnapi_ini;
}
-#ifdef ENABLE_TFLITE_NNAPI_DELEGATE
- if (use_nnapi)
- status = nnfw_delegate->Invoke (interpreter.get());
- else
-#endif
- status = interpreter->Invoke ();
+ /* If set by user, get the precise accelerator */
+ use_nnapi = (bool) g_regex_match_simple (REGEX_ACCL_NNAPI, accelerators,
+ G_REGEX_CASELESS, G_REGEX_MATCH_NOTEMPTY);
+ if (use_nnapi == TRUE) {
+ /** Default to auto mode */
+ accelerator = ACCL_AUTO;
+ nnapi_elem = g_regex_new (REGEX_ACCL_NNAPI_ELEM, G_REGEX_CASELESS,
+ G_REGEX_MATCH_NOTEMPTY, NULL);
- /** if it is not `nullptr`, tensorflow makes `free()` the memory itself. */
- int tensorSize = tensors_idx.size ();
- for (int i = 0; i < tensorSize; ++i) {
- interpreter->tensor (tensors_idx[i])->data.raw = nullptr;
+ /** Now match each provided element and get specific accelerator */
+ if (g_regex_match (nnapi_elem, accelerators, G_REGEX_MATCH_NOTEMPTY,
+ &match_info)) {
+
+ while (g_match_info_matches (match_info)) {
+ gchar *word = g_match_info_fetch (match_info, 0);
+ accelerator = get_accl_hw_type (word);
+ g_free (word);
+ break;
+ }
+ }
+ g_match_info_free (match_info);
+ g_regex_unref (nnapi_elem);
+ } else {
+ goto use_nnapi_ini;
}
-#if (DBG)
- gint64 stop_time = g_get_real_time ();
- g_message ("Invoke() is finished: %" G_GINT64_FORMAT,
- (stop_time - start_time));
-#endif
+ return;
- if (status != kTfLiteOk) {
- g_critical ("Failed to invoke");
+use_nnapi_ini:
+ use_nnapi = nnsconf_get_custom_value_bool ("tensorflowlite", "enable_nnapi",
+ FALSE);
+ if (use_nnapi == FALSE) {
+ accelerator = ACCL_NONE;
+ } else {
+ accelerator = ACCL_AUTO;
+ }
+}
+
+/**
+ * @brief initialize the object with tflite model
+ * @return 0 if OK. non-zero if error.
+ * -1 if the model is not loaded.
+ * -2 if the initialization of input tensor is failed.
+ * -3 if the initialization of output tensor is failed.
+ */
+int
+TFLiteCore::init ()
+{
+ if (loadModel ()) {
+ g_critical ("Failed to load model\n");
return -1;
}
+ if (setInputTensorProp ()) {
+ g_critical ("Failed to initialize input tensor\n");
+ return -2;
+ }
+ if (setOutputTensorProp ()) {
+ g_critical ("Failed to initialize output tensor\n");
+ return -3;
+ }
+ return 0;
+}
+
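+/**
+ * A typical call sequence for this class, as driven by the subplugin callbacks,
+ * is roughly the following (the model file name is only an example):
+ *   TFLiteCore core ("model.tflite", NULL);
+ *   if (core.init () == 0)
+ *     core.invoke (input, output);
+ */
+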
+/**
+ * @brief compare the model path
+ * @param model_path : the path of model file to be compared
+ * @return TRUE if tflite core has the same model path
+ */
+gboolean
+TFLiteCore::compareModelPath (const char *model_path)
+{
+ gboolean is_same;
+
+ interpreter.lock ();
+ is_same = (g_strcmp0 (model_path, interpreter.getModelPath ()) == 0);
+ interpreter.unlock ();
+
+ return is_same;
+}
+
+/**
+ * @brief load the tflite model
+ * @note the model will be loaded
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::loadModel ()
+{
+ int err;
+
+ interpreter.lock ();
+ err = interpreter.loadModel (use_nnapi);
+ interpreter.unlock ();
+
+ return err;
+}
+
+/**
+ * @brief extract and store the information of input tensors
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::setInputTensorProp ()
+{
+ int err;
+
+ interpreter.lock ();
+ err = interpreter.setInputTensorProp ();
+ interpreter.unlock ();
+
+ return err;
+}
+
+/**
+ * @brief extract and store the information of output tensors
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::setOutputTensorProp ()
+{
+ int err;
+
+ interpreter.lock ();
+ err = interpreter.setOutputTensorProp ();
+ interpreter.unlock ();
+
+ return err;
+}
+
+/**
+ * @brief return the Dimension of Input Tensor.
+ * @param[out] info Structure for tensor info.
+ * @todo return whole array rather than index 0
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::getInputTensorDim (GstTensorsInfo * info)
+{
+ interpreter.lock ();
+ gst_tensors_info_copy (info, interpreter.getInputTensorsInfo ());
+ interpreter.unlock ();
+
+ return 0;
+}
+
+/**
+ * @brief return the Dimension of Output Tensor.
+ * @param[out] info Structure for tensor info.
+ * @todo return whole array rather than index 0
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::getOutputTensorDim (GstTensorsInfo * info)
+{
+ interpreter.lock ();
+ gst_tensors_info_copy (info, interpreter.getOutputTensorsInfo ());
+ interpreter.unlock ();
return 0;
}
/**
+ * @brief set the Dimension for Input Tensor.
+ * @param info Structure for input tensor info.
+ * @return 0 if OK. non-zero if error.
+ * @note rank can be changed depending on the model
+ */
+int
+TFLiteCore::setInputTensorDim (const GstTensorsInfo * info)
+{
+ int err;
+
+ interpreter.lock ();
+ err = interpreter.setInputTensorsInfo (info);
+ interpreter.unlock ();
+
+ return err;
+}
+
+/**
+ * @brief reload a model
+ * @param[in] _model_path : the path of the model file to be reloaded
+ * @return 0 if OK. non-zero if error.
+ * @note reloadModel() may be called asynchronously along with other callbacks.
+ *       It requires enough extra memory to hold both the old and the new model
+ *       temporarily while this function runs.
+ */
+int
+TFLiteCore::reloadModel (const char * _model_path)
+{
+ int err;
+
+ interpreter_sub.lock ();
+ interpreter_sub.setModelPath (_model_path);
+
+ /**
+ * load the model into the sub interpreter. This loading overhead is
+ * independent of the main interpreter's activities.
+ */
+ err = interpreter_sub.loadModel (use_nnapi);
+ if (err != 0) {
+ g_critical ("Failed to load model %s\n", _model_path);
+ goto out_unlock;
+ }
+ err = interpreter_sub.setInputTensorProp ();
+ if (err != 0) {
+ g_critical ("Failed to initialize input tensor\n");
+ goto out_unlock;
+ }
+ err = interpreter_sub.setOutputTensorProp ();
+ if (err != 0) {
+ g_critical ("Failed to initialize output tensor\n");
+ goto out_unlock;
+ }
+
+ /* Also, check that the new model has the same input/output tensors info as the current one */
+ if (!gst_tensors_info_is_equal (
+ interpreter.getInputTensorsInfo (),
+ interpreter_sub.getInputTensorsInfo ()) ||
+ !gst_tensors_info_is_equal (
+ interpreter.getOutputTensorsInfo (),
+ interpreter_sub.getOutputTensorsInfo ())) {
+ g_critical ("The model has unmatched tensors info\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /**
+ * Everything is ready; move the model from the sub interpreter to the main one.
+ * This has to wait if the main interpreter is busy (e.g., in invoke ()).
+ */
+ interpreter.lock ();
+ interpreter.moveInternals (interpreter_sub);
+ /* after this, all callbacks will handle operations for the reloaded model */
+ interpreter.unlock ();
+
+out_unlock:
+ interpreter_sub.unlock ();
+
+ return err;
+}
+
+/**
+ * @brief run the model with the input.
+ * @param[in] input : The array of input tensors
+ * @param[out] output : The array of output tensors
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFLiteCore::invoke (const GstTensorMemory * input, GstTensorMemory * output)
+{
+ int err;
+
+ interpreter.lock ();
+ err = interpreter.invoke (input, output, use_nnapi);
+ interpreter.unlock ();
+
+ return err;
+}
+
+/**
* @brief call the creator of TFLiteCore class.
* @param _model_path : the logical path to '{model_name}.tffile' file
* @param accelerators : the accelerators property set for this subplugin
}
/**
- * @brief get the model path
+ * @brief compare the model path
* @param tflite : the class object
- * @return the model path.
+ * @return TRUE if tflite core has the same model path
*/
-const char *
-tflite_core_getModelPath (void * tflite)
+gboolean
+tflite_core_compareModelPath (void * tflite, const char * model_path)
{
TFLiteCore *c = (TFLiteCore *) tflite;
- return c->getModelPath ();
+ return c->compareModelPath (model_path);
}
/**
}
/**
+ * @brief reload a model
+ * @param tflite : the class object
+ * @param[in] model_path : the path of model file
+ * @return 0 if OK. non-zero if error.
+ */
+int
+tflite_core_reloadModel (void * tflite, const char * model_path)
+{
+ TFLiteCore *c = (TFLiteCore *) tflite;
+ return c->reloadModel (model_path);
+}
+
+/**
* @brief invoke the model
* @param tflite : the class object
* @param[in] input : The array of input tensors