/**
 * @brief TFLiteCore constructor: store the model path and load the model
 *        immediately via loadModel ().
 * @param _model_path path to the .tflite model file (pointer is stored,
 *        not copied — caller must keep it alive; TODO confirm ownership)
 * @note the `-` lines below are removed by this patch: the cached
 *       input/output index-list lengths are being dropped in favor of
 *       querying the interpreter directly.
 */
TFLiteCore::TFLiteCore (const char *_model_path)
{
model_path = _model_path;
- input_idx_list_len = 0;
- output_idx_list_len = 0;
loadModel ();
}
*/
/**
 * @brief TFLiteCore destructor.
 * @note the `-` lines below are removed by this patch: the raw
 *       input/output index arrays (allocated with new[] in loadModel)
 *       are gone, so the destructor becomes empty. With no remaining
 *       raw resources this now follows the Rule of Zero.
 */
TFLiteCore::~TFLiteCore ()
{
- delete[]input_idx_list;
- delete[]output_idx_list;
}
/**
return -2;
}
}
- // fill class parameters
- tensor_size = interpreter->tensors_size ();
- node_size = interpreter->nodes_size ();
- input_size = interpreter->inputs ().size ();
- output_size = interpreter->outputs ().size ();
-
- // allocate the idx of input/output tensors
- // it could be used for get name of the tensors by using 'interpreter->GetOutputName(0);'
- input_idx_list = new int[input_size];
- output_idx_list = new int[output_size];
-
- int t_size = interpreter->tensors_size ();
- for (int i = 0; i < t_size; i++) {
- for (int j = 0; j < input_size; j++) {
- if (strcmp (interpreter->tensor (i)->name,
- interpreter->GetInputName (j)) == 0)
- input_idx_list[input_idx_list_len++] = i;
- }
- for (int j = 0; j < output_size; j++) {
- if (strcmp (interpreter->tensor (i)->name,
- interpreter->GetOutputName (j)) == 0)
- output_idx_list[output_idx_list_len++] = i;
- }
- }
#if (DBG)
gettimeofday (&stop_time, nullptr);
int
TFLiteCore::getInputTensorDim (int idx, tensor_dim dim, tensor_type * type)
{
+ auto input_idx_list = interpreter->inputs ();
+ int input_size = input_idx_list.size ();
+
if (idx >= input_size) {
return -1;
}
+
int ret = getTensorDim (input_idx_list[idx], dim, type);
+#if (DBG)
+ if (ret) {
+ _print_log ("Failed to getInputTensorDim");
+ } else {
+ _print_log ("InputTensorDim idx[%d] type[%d] dim[%d:%d:%d:%d]",
+ idx, *type, dim[0], dim[1], dim[2], dim[3]);
+ }
+#endif
return ret;
}
int
TFLiteCore::getOutputTensorDim (int idx, tensor_dim dim, tensor_type * type)
{
+ auto output_idx_list = interpreter->outputs ();
+ int output_size = output_idx_list.size ();
+
if (idx >= output_size) {
return -1;
}
+
int ret = getTensorDim (output_idx_list[idx], dim, type);
+#if (DBG)
+ if (ret) {
+ _print_log ("Failed to getOutputTensorDim");
+ } else {
+ _print_log ("OutputTensorDim idx[%d] type[%d] dim[%d:%d:%d:%d]",
+ idx, *type, dim[0], dim[1], dim[2], dim[3]);
+ }
+#endif
return ret;
}
int
TFLiteCore::getTensorDim (int tensor_idx, tensor_dim dim, tensor_type * type)
{
-
if (getTensorType (tensor_idx, type)) {
return -2;
}
/**
 * @brief Get the number of input tensors of the loaded model.
 * @return number of input tensors
 * @note this patch replaces the cached input_size member with a direct
 *       query on the interpreter, so the count can never go stale.
 */
int
TFLiteCore::getInputTensorSize ()
{
- return input_size;
+ return interpreter->inputs ().size ();
}
/**
/**
 * @brief Get the number of output tensors of the loaded model.
 * @return number of output tensors
 * @note this patch replaces the cached output_size member with a direct
 *       query on the interpreter, so the count can never go stale.
 */
int
TFLiteCore::getOutputTensorSize ()
{
- return output_size;
+ return interpreter->outputs ().size ();
}
/**
tensor_dim inputTensorDim;
int ret = getInputTensorDim (0, inputTensorDim, &type);
if (ret) {
+ _print_log ("Failed to get input tensor dim");
return -1;
}
for (int i = 0; i < sizeOfArray; i++) {
output_number_of_pixels *= inputTensorDim[i];
}
+ /**
+ * @todo how to handle input/output tensor type? (for example, float32)
+ * also, we have to check multi tensor output.
+ */
int input = interpreter->inputs ()[0];
if (interpreter->AllocateTensors () != kTfLiteOk) {
- std::cout << "Failed to allocate tensors!" << std::endl;
+ _print_log ("Failed to allocate tensors");
return -2;
}
}
if (interpreter->Invoke () != kTfLiteOk) {
+ _print_log ("Failed to invoke");
return -3;
}