debhelper (>=9),
gstreamer1.0-tools, gstreamer1.0-plugins-base, gstreamer1.0-plugins-good,
libpng-dev, tensorflow-lite-dev, tensorflow-dev [amd64 arm64], libcairo2-dev, libopencv-dev,
- liborc-0.4-dev, ssat, python, python-numpy
+ liborc-0.4-dev, ssat, python, python-numpy, libprotobuf-dev [amd64 arm64]
Standards-Version: 3.9.6
Homepage: https://github.com/nnsuite/nnstreamer
-/usr/lib/*/libtensor_filter_tflitecore.so
+/usr/lib/*/libtensor_filter_tf*core.so
/usr/lib/*/gstreamer-1.0/*.so
g_return_if_fail (info != NULL);
+ info->name = NULL;
info->type = _NNS_END;
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
*/
typedef struct
{
+ char * name; /**< Name of each element in the tensor. User must designate this. */
tensor_type type; /**< Type of each element in the tensor. User must designate this. */
tensor_dim dimension; /**< Dimension. We support up to 4th ranks. */
} GstTensorInfo;
list(APPEND FILTER_SOURCE tensor_filter_tensorflow.c)
list(APPEND FILTER_TARGET tensor_filter_tfcore)
list(APPEND FILTER_TARGET tensor_filter_tfcoreStatic)
+ PKG_CHECK_MODULES(protobuf_pkg REQUIRED protobuf)
PKG_CHECK_MODULES(TENSORFLOW REQUIRED tensorflow)
LINK_DIRECTORIES(${TENSORFLOW_LIBRARY_DIRS})
ADD_LIBRARY(tensor_filter_tfcore SHARED tensor_filter_tensorflow_core.cc)
- TARGET_LINK_LIBRARIES(tensor_filter_tfcore ${pkgs_LIBRARIES} ${TENSORFLOW_LIBRARIES})
+ TARGET_LINK_LIBRARIES(tensor_filter_tfcore ${pkgs_LIBRARIES} ${TENSORFLOW_LIBRARIES} ${protobuf_pkg_LIBRARIES})
TARGET_INCLUDE_DIRECTORIES(tensor_filter_tfcore PUBLIC ${pkgs_INCLUDE_DIRS} ${TENSORFLOW_INCLUDE_DIRS})
TARGET_COMPILE_OPTIONS(tensor_filter_tfcore PUBLIC ${pkgs_CFLAGS_OTHER})
if get_option('ENABLE_TENSORFLOW')
tensor_filter_sources += 'tensor_filter_tensorflow.c'
dependencies += dependency('tensorflow')
+ dependencies += dependency('protobuf')
tensor_filter_args += ['-DENABLE_TENSORFLOW']
tensor_filter_tfcoreOBJ = static_library('tensor_filter_tfcore',
* @see http://github.com/nnsuite/nnstreamer
* @author MyungJoo Ham <myungjoo.ham@samsung.com>
* @bug No known bugs except for NYI items
+ * @todo set priority among properties
+ * @todo logic for dynamic properties(like model change)
*
* This is the main plugin for per-NN-framework plugins.
* Specific implementations for each NN framework must be written
PROP_MODEL,
PROP_INPUT,
PROP_INPUTTYPE,
+ PROP_INPUTNAME,
PROP_OUTPUT,
PROP_OUTPUTTYPE,
+ PROP_OUTPUTNAME,
PROP_CUSTOM,
};
g_param_spec_string ("input", "Input dimension",
"Input tensor dimension from inner array, upto 4 dimensions ?", "",
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_INPUTNAME,
+ g_param_spec_string ("inputname", "Name of Input Tensor",
+ "The Name of Input Tensor", "",
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_INPUTTYPE,
g_param_spec_string ("inputtype", "Input tensor element type",
"Type of each element of the input tensor ?", "",
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_OUTPUTNAME,
+ g_param_spec_string ("outputname", "Name of Output Tensor",
+ "The Name of Output Tensor", "",
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_OUTPUT,
g_param_spec_string ("output", "Output dimension",
"Output tensor dimension from inner array, upto 4 dimensions ?", "",
tmp = g_value_dup_string (value);
silent_debug ("Model = %s\n", tmp);
if (!g_file_test (tmp, G_FILE_TEST_IS_REGULAR)) {
- GST_ERROR ("Cannot find the model file: %s\n", tmp);
+ GST_ERROR_OBJECT (self, "Cannot find the model file: %s\n", tmp);
g_free (tmp);
} else {
prop->model_file = tmp;
g_strfreev (str_types);
}
break;
+ case PROP_INPUTNAME:
+ /* INPUTNAME is required by tensorflow to designate the order of tensors */
+ g_assert (!prop->input_configured && value);
+      /* Once configured, it cannot be changed in runtime */
+ {
+ int i;
+ gchar **str_names;
+
+ str_names = g_strsplit (g_value_get_string (value), ",", -1);
+ prop->input_meta.num_tensors = g_strv_length (str_names);
+
+      for (i = 0; i < prop->input_meta.num_tensors; i++) {
+        /* name may still be NULL before first configuration; g_free is NULL-safe */
+        g_free ((char *) prop->input_meta.info[i].name);
+        prop->input_meta.info[i].name = g_strdup (str_names[i]);
+        g_assert (prop->input_meta.info[i].name != NULL);
+      }
+
+ g_strfreev (str_names);
+ }
+ break;
+ case PROP_OUTPUTNAME:
+ /* OUTPUTNAME is required by tensorflow to designate the order of tensors */
+ g_assert (!prop->output_configured && value);
+      /* Once configured, it cannot be changed in runtime */
+ {
+ int i;
+ gchar **str_names;
+
+ str_names = g_strsplit (g_value_get_string (value), ",", -1);
+ prop->output_meta.num_tensors = g_strv_length (str_names);
+
+      for (i = 0; i < prop->output_meta.num_tensors; i++) {
+        /* name may still be NULL before first configuration; g_free is NULL-safe */
+        g_free ((char *) prop->output_meta.info[i].name);
+        prop->output_meta.info[i].name = g_strdup (str_names[i]);
+        g_assert (prop->output_meta.info[i].name != NULL);
+      }
+
+ g_strfreev (str_names);
+ }
+ break;
case PROP_CUSTOM:
/* In case updated custom properties in runtime! */
g_free ((char *) prop->custom_properties); /* g_free cannot handle const char * */
g_value_set_string (value, "");
}
break;
+ case PROP_INPUTNAME:
+ if (prop->input_meta.num_tensors > 0) {
+ GString *names = g_string_new (NULL);
+ int i;
+
+ for (i = 0; i < prop->input_meta.num_tensors; i++) {
+ g_string_append (names, prop->input_meta.info[i].name);
+
+ if (i < prop->input_meta.num_tensors - 1) {
+ g_string_append (names, ",");
+ }
+ }
+
+ g_value_set_string (value, names->str);
+ g_string_free (names, TRUE);
+ } else {
+ g_value_set_string (value, "");
+ }
+ break;
+ case PROP_OUTPUTNAME:
+ if (prop->output_meta.num_tensors > 0) {
+ GString *names = g_string_new (NULL);
+ int i;
+
+ for (i = 0; i < prop->output_meta.num_tensors; i++) {
+ g_string_append (names, prop->output_meta.info[i].name);
+
+ if (i < prop->output_meta.num_tensors - 1) {
+ g_string_append (names, ",");
+ }
+ }
+
+ g_value_set_string (value, names->str);
+ g_string_free (names, TRUE);
+ } else {
+ g_value_set_string (value, "");
+ }
+ break;
case PROP_CUSTOM:
g_value_set_string (value, prop->custom_properties);
break;
#include "tensor_filter.h"
#include "tensor_filter_tensorflow_core.h"
#include <glib.h>
+#include <string.h>
/**
- * @brief internal data of tensorflow lite
+ * @brief internal data of tensorflow
*/
struct _Tf_data
{
};
typedef struct _Tf_data tf_data;
+
+/**
+ * @brief Free privateData and move on.
+ */
+static void
+tf_close (const GstTensorFilter * filter, void **private_data)
+{
+ tf_data *tf;
+ tf = *private_data;
+ tf_core_delete (tf->tf_private_data);
+ g_free (tf);
+ *private_data = NULL;
+ g_assert (filter->privateData == NULL);
+}
+
/**
- * @brief Load tensorflow lite modelfile
+ * @brief Load tensorflow modelfile
* @param filter : tensor_filter instance
- * @param private_data : tensorflow lite plugin's private data
- * @return 0 if successfully loaded. 1 if skipped (already loaded). -1 if error
+ * @param private_data : tensorflow plugin's private data
+ * @return 0 if successfully loaded. 1 if skipped (already loaded).
+ * -1 if the object construction is failed.
+ *         -2 if the object initialization is failed
*/
static int
tf_loadModelFile (const GstTensorFilter * filter, void **private_data)
tf_data *tf;
if (filter->privateData != NULL) {
/** @todo : Check the integrity of filter->data and filter->model_file, nnfw */
- return 1;
+ tf = *private_data;
+ if (strcmp (filter->prop.model_file,
+ tf_core_getModelPath (tf->tf_private_data))) {
+ tf_close (filter, private_data);
+ } else {
+ return 1;
+ }
}
tf = g_new0 (tf_data, 1); /** initialize tf Fill Zero! */
*private_data = tf;
tf->tf_private_data = tf_core_new (filter->prop.model_file);
if (tf->tf_private_data) {
+ if (tf_core_init (tf->tf_private_data, &filter->prop))
+ return -2;
return 0;
} else {
return -1;
/**
* @brief The open callback for GstTensorFilterFramework. Called before anything else
* @param filter : tensor_filter instance
- * @param private_data : tensorflow lite plugin's private data
+ * @param private_data : tensorflow plugin's private data
*/
static int
tf_open (const GstTensorFilter * filter, void **private_data)
* @param[out] output The array of output tensors
*/
static int
-tf_invoke (const GstTensorFilter * filter, void **private_data,
+tf_run (const GstTensorFilter * filter, void **private_data,
const GstTensorMemory * input, GstTensorMemory * output)
{
int retval;
tf_data *tf;
tf = *private_data;
g_assert (filter->privateData && *private_data == filter->privateData);
- retval = tf_core_invoke (tf->tf_private_data, input, output);
+ retval = tf_core_run (tf->tf_private_data, input, output);
g_assert (retval == 0);
return retval;
}
tf_getInputDim (const GstTensorFilter * filter, void **private_data,
GstTensorsInfo * info)
{
- int temp_idx = 0;
tf_data *tf;
tf = *private_data;
- temp_idx = tf_core_getInputSize (tf->tf_private_data);
- if (temp_idx > 0)
- temp_idx--;
- else
- temp_idx = 0;
g_assert (filter->privateData && *private_data == filter->privateData);
- return tf_core_getInputDim (tf->tf_private_data, info);
+ int ret = tf_core_getInputDim (tf->tf_private_data, info);
+ return ret;
}
/**
tf_getOutputDim (const GstTensorFilter * filter, void **private_data,
GstTensorsInfo * info)
{
- int temp_idx = 0;
tf_data *tf;
tf = *private_data;
- temp_idx = tf_core_getOutputSize (tf->tf_private_data);
- if (temp_idx > 0)
- temp_idx--;
- else
- temp_idx = 0;
g_assert (filter->privateData && *private_data == filter->privateData);
- return tf_core_getOutputDim (tf->tf_private_data, info);
-}
-
-/**
- * @brief The set-input-dim callback for GstTensorFilterFramework
- */
-static int
-tf_setInputDim (const GstTensorFilter * filter, void **private_data,
- const GstTensorsInfo * in_info, GstTensorsInfo * out_info)
-{
- /** @todo call tflite core apis */
- return 0; /** NYI */
-}
-
-/**
- * @brief Free privateData and move on.
- */
-static void
-tf_close (const GstTensorFilter * filter, void **private_data)
-{
- tf_data *tf;
- tf = *private_data;
- tf_core_delete (tf->tf_private_data);
- g_free (tf);
- *private_data = NULL;
- g_assert (filter->privateData == NULL);
+ int ret = tf_core_getOutputDim (tf->tf_private_data, info);
+ return ret;
}
GstTensorFilterFramework NNS_support_tensorflow = {
.name = "tensorflow",
.allow_in_place = FALSE, /** @todo: support this to optimize performance later. */
- .allocate_in_invoke = TRUE,
- .invoke_NN = tf_invoke,
+ .allocate_in_invoke = FALSE,
+ .invoke_NN = tf_run,
.getInputDimension = tf_getInputDim,
.getOutputDimension = tf_getOutputDim,
- .setInputDimension = tf_setInputDim,
.open = tf_open,
.close = tf_close,
};
* @bug No known bugs.
*/
-#include <sys/time.h>
-#include <unistd.h>
-#include <algorithm>
-
#include "tensor_filter_tensorflow_core.h"
/**
/**
* @brief TFCore creator
- * @param _model_path : the logical path to '{model_name}.tffile' file
+ * @param _model_path : the logical path to '{model_name}.pb' file
* @note the model of _model_path will be loaded simultaneously
* @return Nothing
*/
{
model_path = _model_path;
- loadModel ();
+ gst_tensors_info_init (&inputTensorMeta);
+ gst_tensors_info_init (&outputTensorMeta);
}
/**
}
/**
+ * @brief initialize the object with tensorflow model
+ * @return 0 if OK. non-zero if error.
+ * -1 if the model is not loaded.
+ * -2 if the initialization of input tensor is failed.
+ * -3 if the initialization of output tensor is failed.
+ */
+int
+TFCore::init (const GstTensorFilterProperties * prop)
+{
+ if (setTensorProp (&inputTensorMeta, &prop->input_meta)) {
+ GST_ERROR("Failed to initialize input tensor\n");
+ return -2;
+ }
+ if (setTensorProp (&outputTensorMeta, &prop->output_meta)) {
+ GST_ERROR("Failed to initialize output tensor\n");
+ return -3;
+ }
+ if (loadModel ()) {
+ GST_ERROR("Failed to load model\n");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * @brief get the model path
+ * @return the model path.
+ */
+const char *
+TFCore::getModelPath ()
+{
+ return model_path;
+}
+
+/**
* @brief load the tf model
* @note the model will be loaded
* @return 0 if OK. non-zero if error.
+ * -1 if the pb file is not loaded.
+ * -2 if the input properties is different with model.
+ * -3 if the Tensorflow session is not created.
*/
int
TFCore::loadModel ()
{
+#if (DBG)
+ gint64 start_time = g_get_real_time ();
+#endif
+ GraphDef graph_def;
+ Status load_graph_status =
+ ReadBinaryProto(Env::Default(), model_path, &graph_def);
+ if (!load_graph_status.ok()) {
+ GST_ERROR("Failed to load compute graph at '%s'", model_path);
+ return -1;
+ }
+ /* get input tensor */
+ std::vector<const NodeDef*> placeholders;
+ for (const NodeDef& node : graph_def.node()) {
+ if (node.op() == "Placeholder") {
+ placeholders.push_back(&node);
+ }
+ }
+
+ if (placeholders.empty()) {
+ g_message ("No inputs spotted.");
+ } else {
+    g_message ("Found possible inputs: %zu", placeholders.size());
+ if(inputTensorValidation(placeholders)){
+ GST_ERROR("Input Tensor Information is not valid");
+ return -2;
+ }
+ }
+
+  /* get session */
+  Status new_session_status = NewSession(SessionOptions(), &session);
+  if (!new_session_status.ok()) {
+    GST_ERROR("Create Tensorflow Session was Failed");
+    return -3;
+  }
+  /* session may be NULL if NewSession() failed; only use it after the check */
+  Status session_create_status = session->Create(graph_def);
+  if (!session_create_status.ok()) {
+    GST_ERROR("Create Tensorflow Session was Failed");
+    return -3;
+  }
+#if (DBG)
+ gint64 stop_time = g_get_real_time ();
+ g_message ("Model is loaded: %" G_GINT64_FORMAT, (stop_time - start_time));
+#endif
return 0;
}
/**
* @brief return the data type of the tensor
- * @param tensor_idx : the index of the tensor
- * @param[out] type : the data type of the input tensor
- * @return 0 if OK. non-zero if error.
+ * @param tfType : the defined type of Tensorflow
+ * @return the enum of defined _NNS_TYPE
*/
-int
-TFCore::getTensorType (int tensor_idx, tensor_type * type)
+tensor_type
+TFCore::getTensorTypeFromTF (DataType tfType)
{
+ switch (tfType) {
+ case DT_INT32:
+ return _NNS_INT32;
+ case DT_UINT32:
+ return _NNS_UINT32;
+ case DT_INT16:
+ return _NNS_INT16;
+ case DT_UINT16:
+ return _NNS_UINT16;
+ case DT_INT8:
+ return _NNS_INT8;
+ case DT_UINT8:
+ return _NNS_UINT8;
+ case DT_INT64:
+ return _NNS_INT64;
+ case DT_UINT64:
+ return _NNS_UINT64;
+ case DT_FLOAT:
+ return _NNS_FLOAT32;
+ case DT_DOUBLE:
+ return _NNS_FLOAT64;
+ default:
+ /** @todo Support other types */
+ break;
+ }
- return 0;
+ return _NNS_END;
}
/**
- * @brief return the Dimension of Input Tensor.
- * @param[out] info Structure for tensor info.
- * @return 0 if OK. non-zero if error.
+ * @brief return the data type of the tensor for Tensorflow
+ * @param tType : the defined type of NNStreamer
+ * @return the enum of defined tensorflow::DataType
*/
-int
-TFCore::getInputTensorDim (GstTensorsInfo * info)
+DataType
+TFCore::getTensorTypeToTF (tensor_type tType)
{
- /**
- * @todo fill here
- */
- return 0;
+ switch (tType) {
+ case _NNS_INT32:
+ return DT_INT32;
+ case _NNS_UINT32:
+ return DT_UINT32;
+ case _NNS_INT16:
+ return DT_INT16;
+ case _NNS_UINT16:
+ return DT_UINT16;
+ case _NNS_INT8:
+ return DT_INT8;
+ case _NNS_UINT8:
+ return DT_UINT8;
+ case _NNS_INT64:
+ return DT_INT64;
+ case _NNS_UINT64:
+ return DT_UINT64;
+ case _NNS_FLOAT32:
+ return DT_FLOAT;
+ case _NNS_FLOAT64:
+ return DT_DOUBLE;
+ default:
+ /** @todo Support other types */
+ break;
+ }
+
+ return DT_INVALID;
}
/**
- * @brief return the Dimension of Output Tensor.
- * @param[out] info Structure for tensor info.
+ * @brief check the inserted information about input tensor with model
* @return 0 if OK. non-zero if error.
+ * -1 if the number of input tensors is not matched.
+ * -2 if the name of input tensors is not matched.
+ * -3 if the type of input tensors is not matched.
+ * -4 if the dimension of input tensors is not matched.
*/
int
-TFCore::getOutputTensorDim (GstTensorsInfo * info)
+TFCore::inputTensorValidation (std::vector<const NodeDef*> placeholders)
{
- /**
- * @todo fill here
- */
+ if (inputTensorMeta.num_tensors != placeholders.size()){
+ GST_ERROR ("Input Tensor is not valid: the number of input tensor is different\n");
+ return -1;
+ }
+ int length = placeholders.size();
+ for (int i = 0; i < length; i++) {
+ const NodeDef* node = placeholders[i];
+ string shape_description = "None";
+ if (node->attr().count("shape")) {
+ TensorShapeProto shape_proto = node->attr().at("shape").shape();
+ Status shape_status = PartialTensorShape::IsValidShape(shape_proto);
+ if (shape_status.ok()) {
+ shape_description = PartialTensorShape(shape_proto).DebugString();
+ } else {
+ shape_description = shape_status.error_message();
+ }
+ }
+    char chars[] = "[]";
+    /* use a distinct loop variable: 'i' is the outer placeholder index */
+    for (unsigned int k = 0; k < strlen(chars); ++k)
+    {
+      shape_description.erase (
+        std::remove(
+          shape_description.begin(),
+          shape_description.end(),
+          chars[k]
+        ),
+        shape_description.end()
+      );
+    }
+
+ DataType dtype = DT_INVALID;
+ if (node->attr().count("dtype")) {
+ dtype = node->attr().at("dtype").type();
+ }
+
+ if (strcmp (inputTensorMeta.info[i].name, node->name().c_str())){
+ GST_ERROR ("Input Tensor is not valid: the name of input tensor is different\n");
+ return -2;
+ }
+ if (inputTensorMeta.info[i].type != getTensorTypeFromTF(dtype)){
+ GST_ERROR ("Input Tensor is not valid: the type of input tensor is different\n");
+ return -3;
+ }
+
+    gchar **str_dims;
+    str_dims = g_strsplit (shape_description.c_str(), ",", -1);
+    guint num_dims = g_strv_length (str_dims);
+
+    /* g_strsplit may yield fewer tokens than NNS_TENSOR_RANK_LIMIT;
+     * bound the loop by the actual count to avoid reading past the
+     * NULL terminator, and free the vector on every path. */
+    for (guint j = 0; j < num_dims && j < NNS_TENSOR_RANK_LIMIT; j++){
+      if (!strcmp (str_dims[j], "?"))
+        continue;
+
+      if (inputTensorMeta.info[i].dimension[NNS_TENSOR_RANK_LIMIT - j - 1] != atoi (str_dims[j])){
+        GST_ERROR ("Input Tensor is not valid: the dim of input tensor is different\n");
+        g_strfreev (str_dims);
+        return -4;
+      }
+    }
+    g_strfreev (str_dims);
+ }
return 0;
}
/**
- * @brief return the Dimension of Tensor.
- * @param tensor_idx : the real index of model of the tensor
- * @param[out] dim : the array of the tensor
- * @param[out] type : the data type of the tensor
+ * @brief extract and store the information of src tensors
* @return 0 if OK. non-zero if error.
*/
int
-TFCore::getTensorDim (tensor_dim dim, tensor_type * type)
+TFCore::setTensorProp (GstTensorsInfo * dest, const GstTensorsInfo * src)
{
-
+ dest->num_tensors = src->num_tensors;
+ for (int i = 0; i < src->num_tensors; i++){
+    /* NOTE(review): shallow copy — name pointer is shared with src; confirm
+     * src outlives dest (or strdup here) to avoid dangling/double-free */
+    dest->info[i].name = src->info[i].name;
+ dest->info[i].type = src->info[i].type;
+ for (int j = 0; j < NNS_TENSOR_RANK_LIMIT; j++){
+ dest->info[i].dimension[j] = src->info[i].dimension[j];
+ }
+ }
return 0;
}
int
TFCore::getInputTensorSize ()
{
- return input_size;
+ return inputTensorMeta.num_tensors;
}
/**
int
TFCore::getOutputTensorSize ()
{
- return output_size;
+ return outputTensorMeta.num_tensors;
}
/**
+ * @brief return the Dimension of Input Tensor.
+ * @param[out] info Structure for tensor info.
+ * @todo return whole array rather than index 0
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::getInputTensorDim (GstTensorsInfo * info)
+{
+ info->num_tensors = inputTensorMeta.num_tensors;
+ memcpy (info->info, inputTensorMeta.info,
+ sizeof (GstTensorInfo) * inputTensorMeta.num_tensors);
+ return 0;
+}
+
+/**
+ * @brief return the Dimension of Tensor.
+ * @param[out] info Structure for tensor info.
+ * @todo return whole array rather than index 0
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::getOutputTensorDim (GstTensorsInfo * info)
+{
+ info->num_tensors = outputTensorMeta.num_tensors;
+ memcpy (info->info, outputTensorMeta.info,
+ sizeof (GstTensorInfo) * outputTensorMeta.num_tensors);
+ return 0;
+}
+
+#define copyInputWithType(type) \
+ inputTensor.flat<type>()(i) = ((type*)input->data)[i];
+
+#define copyOutputWithType(type) \
+ for(int j = 0; j < n; j++) \
+ ((type *)output[i].data)[j] = outputs[i].flat<type>()(j); \
+
+/**
* @brief run the model with the input.
* @param[in] input : The array of input tensors
* @param[out] output : The array of output tensors
* @return 0 if OK. non-zero if error.
*/
int
-TFCore::invoke (const GstTensorMemory * input, GstTensorMemory * output)
+TFCore::run (const GstTensorMemory * input, GstTensorMemory * output)
{
+ /* TODO: Convert input -> inputTensor before run */
+
+ Tensor inputTensor(
+ getTensorTypeToTF(input->type),
+ TensorShape({
+ inputTensorMeta.info[0].dimension[3],
+ inputTensorMeta.info[0].dimension[2],
+ inputTensorMeta.info[0].dimension[1],
+ inputTensorMeta.info[0].dimension[0]
+ })
+ );
+ int len = input->size / tensor_element_size[input->type];
+
+ for(int i = 0; i < len; i++){
+ switch (input->type) {
+ case _NNS_INT32:
+ copyInputWithType(int32);
+ break;
+ case _NNS_UINT32:
+ copyInputWithType(uint32);
+ break;
+ case _NNS_INT16:
+ copyInputWithType(int16);
+ break;
+ case _NNS_UINT16:
+ copyInputWithType(uint16);
+ break;
+ case _NNS_INT8:
+ copyInputWithType(int8);
+ break;
+ case _NNS_UINT8:
+ copyInputWithType(uint8);
+ break;
+ case _NNS_INT64:
+ copyInputWithType(int64);
+ break;
+ case _NNS_UINT64:
+ copyInputWithType(uint64);
+ break;
+ case _NNS_FLOAT32:
+ copyInputWithType(float);
+ break;
+ case _NNS_FLOAT64:
+ copyInputWithType(double);
+ break;
+ default:
+ /** @todo Support other types */
+ break;
+ }
+ }
+
+ std::vector<std::pair<string, Tensor>> input_feeds;
+ std::vector<string> output_tensor_names;
+ std::vector<Tensor> outputs;
+
+ for (int i = 0; i < inputTensorMeta.num_tensors; i++) {
+ input_feeds.push_back({inputTensorMeta.info[i].name, inputTensor});
+ }
+
+ for (int i = 0; i < outputTensorMeta.num_tensors; i++) {
+ output_tensor_names.push_back(outputTensorMeta.info[i].name);
+ }
+
+  Status run_status =
+      session->Run(input_feeds, output_tensor_names, {}, &outputs);
+
+  if (!run_status.ok()) {
+    GST_ERROR ("Failed to run the tensorflow session: %s",
+        run_status.ToString().c_str());
+    return -1;
+  }
+
+ for (int i = 0; i < outputTensorMeta.num_tensors; i++) {
+ output[i].type = getTensorTypeFromTF(outputs[i].dtype());
+ output[i].size = tensor_element_size[output[i].type];
+ for(int j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
+ output[i].size *= outputTensorMeta.info[i].dimension[j];
+
+ int n = output[i].size / tensor_element_size[output[i].type];
+
+ switch (output[i].type) {
+ case _NNS_INT32:{
+ copyOutputWithType(int32);
+ break;
+ }
+ case _NNS_UINT32:{
+ copyOutputWithType(uint32);
+ break;
+ }
+ case _NNS_INT16:{
+ copyOutputWithType(int16);
+ break;
+ }
+ case _NNS_UINT16:{
+ copyOutputWithType(uint16);
+ break;
+ }
+ case _NNS_INT8:{
+ copyOutputWithType(int8);
+ break;
+ }
+ case _NNS_UINT8:{
+ copyOutputWithType(uint8);
+ break;
+ }
+ case _NNS_INT64:{
+ copyOutputWithType(int64);
+ break;
+ }
+ case _NNS_UINT64:{
+ copyOutputWithType(uint64);
+ break;
+ }
+ case _NNS_FLOAT32:{
+ copyOutputWithType(float);
+ break;
+ }
+ case _NNS_FLOAT64:{
+ copyOutputWithType(double);
+ break;
+ }
+ default:
+ /** @todo Support other types */
+ break;
+ }
+ }
+
return 0;
}
}
/**
+ * @brief initialize the object with tf model
+ * @param tf : the class object
+ * @return 0 if OK. non-zero if error.
+ */
+int
+tf_core_init (void *tf, const GstTensorFilterProperties * prop)
+{
+ TFCore *c = (TFCore *) tf;
+ int ret = c->init (prop);
+ return ret;
+}
+
+/**
* @brief get model path
* @param tf : the class object
* @return model path
}
/**
- * @brief get the size of Input Tensor of model
- * @param tf : the class object
- * @return the number of Input Tensors.
- */
-int
-tf_core_getInputSize (void *tf)
-{
- TFCore *c = (TFCore *) tf;
- return c->getInputTensorSize ();
-}
-
-/**
- * @brief get the size of Output Tensor of model
- * @param tf : the class object
- * @return the number of Output Tensors.
- */
-int
-tf_core_getOutputSize (void *tf)
-{
- TFCore *c = (TFCore *) tf;
- return c->getOutputTensorSize ();
-}
-
-/**
- * @brief invoke the model
+ * @brief run the model
* @param tf : the class object
* @param[in] input : The array of input tensors
* @param[out] output : The array of output tensors
* @return 0 if OK. non-zero if error.
*/
int
-tf_core_invoke (void *tf, const GstTensorMemory * input,
+tf_core_run (void *tf, const GstTensorMemory * input,
GstTensorMemory * output)
{
TFCore *c = (TFCore *) tf;
- return c->invoke (input, output);
+ return c->run (input, output);
}
#define TENSOR_FILTER_TENSORFLOW_H
#ifdef __cplusplus
-#include <iostream>
#include <stdint.h>
#include <glib.h>
+#include <setjmp.h>
+#include <stdio.h>
+#include <string.h>
+#include <iostream>
+#include <fstream>
+#include <algorithm>
+#include <vector>
+#include <tensorflow/cc/ops/const_op.h>
+#include <tensorflow/cc/ops/image_ops.h>
+#include <tensorflow/cc/ops/standard_ops.h>
+#include <tensorflow/core/lib/io/path.h>
#include <tensorflow/core/platform/init_main.h>
#include <tensorflow/core/public/session.h>
-#include <tensorflow/core/framework/tensor_shape.h>
+#include <tensorflow/cc/client/client_session.h>
+#include <tensorflow/core/util/command_line_flags.h>
+#include <tensorflow/core/lib/strings/str_util.h>
+#include <tensorflow/tools/graph_transforms/transform_utils.h>
#include <tensor_common.h>
+using namespace tensorflow;
+
/**
* @brief ring cache structure
*/
TFCore (const char *_model_path);
~TFCore ();
- /**
- * @brief get the model path.
- * @return saved model path.
- */
- const char *getModelPath ()
- {
- return model_path;
- }
+ int init(const GstTensorFilterProperties * prop);
int loadModel ();
- const char *getInputTensorName ();
- const char *getOutputTensorName ();
-
+ const char* getModelPath();
+ int setInputTensorProp ();
+ int setOutputTensorProp ();
+
int getInputTensorSize ();
int getOutputTensorSize ();
int getInputTensorDim (GstTensorsInfo * info);
int getOutputTensorDim (GstTensorsInfo * info);
- int getInputTensorDimSize ();
- int getOutputTensorDimSize ();
- int invoke (const GstTensorMemory * input, GstTensorMemory * output);
+ int run (const GstTensorMemory * input, GstTensorMemory * output);
private:
- /**
- * member variables.
- */
+
const char *model_path;
- int tensor_size;
- int node_size;
- int input_size;
- int output_size;
- int getTensorType (int tensor_idx, tensor_type * type);
- int getTensorDim (tensor_dim dim, tensor_type * type);
+
+ GstTensorsInfo inputTensorMeta; /**< The tensor info of input tensors */
+ GstTensorsInfo outputTensorMeta; /**< The tensor info of output tensors */
+
+ Session * session;
+
+ tensor_type getTensorTypeFromTF (DataType tfType);
+ DataType getTensorTypeToTF (tensor_type tType);
+ int setTensorProp (GstTensorsInfo * dest, const GstTensorsInfo * src);
+ int inputTensorValidation (std::vector<const NodeDef*> placeholders);
+ /*TODO*/
+ // int outputTensorValidation ();
};
/**
extern void *tf_core_new (const char *_model_path);
extern void tf_core_delete (void *tf);
+ extern int tf_core_init (void *tf, const GstTensorFilterProperties * prop);
extern const char *tf_core_getModelPath (void *tf);
extern int tf_core_getInputDim (void *tf, GstTensorsInfo * info);
extern int tf_core_getOutputDim (void *tf, GstTensorsInfo * info);
- extern int tf_core_getInputSize (void *tf);
- extern int tf_core_getOutputSize (void *tf);
- extern int tf_core_invoke (void *tf, const GstTensorMemory * input, GstTensorMemory * output);
+ extern int tf_core_run (void *tf, const GstTensorMemory * input,
+ GstTensorMemory * output);
#ifdef __cplusplus
}
option('ENABLE_TEST', type: 'boolean', value: true)
option('ENABLE_TENSORFLOW_LITE', type: 'boolean', value: true)
-option('ENABLE_TENSORFLOW', type: 'boolean', value: false)
+option('ENABLE_TENSORFLOW', type: 'boolean', value: true)
BuildRequires: opencv-devel
# For './testAll.sh' time limit.
BuildRequires: procps
+# for tensorflow
+%ifarch x86_64 aarch64
+BuildRequires: protobuf-devel >= 3.4.0
+BuildRequires: tensorflow
+BuildRequires: tensorflow-devel
+%endif
%if 0%{?testcoverage}
BuildRequires: lcov
pushd build
export GST_PLUGIN_PATH=$(pwd)
export LD_LIBRARY_PATH=$(pwd):$(pwd)/gst/tensor_filter
-%cmake .. -DGST_INSTALL_DIR=%{gstlibdir} -DINSTALL_EXAMPLE_APP=ON
+
+%ifarch x86_64 aarch64
+ %cmake .. -DGST_INSTALL_DIR=%{gstlibdir} -DINSTALL_EXAMPLE_APP=ON -DENABLE_TENSORFLOW=ON
+%else
+ %cmake .. -DGST_INSTALL_DIR=%{gstlibdir} -DINSTALL_EXAMPLE_APP=ON
+%endif
+
make %{?_smp_mflags}
%if 0%{?unit_test}
./tests/unittest_common
%defattr(-,root,root,-)
%license LICENSE
%{_libdir}/libtensor_filter_tflitecore.so
+%ifarch x86_64 aarch64
+%{_libdir}/libtensor_filter_tfcore.so
+%endif
%{gstlibdir}/*.so
%files devel
%{_libdir}/*.so
/usr/lib/nnstreamer/bin/*
%exclude %{_libdir}/libtensor_filter_tflitecore.so
+%ifarch x86_64 aarch64
+%exclude %{_libdir}/libtensor_filter_tfcore.so
+%endif
%changelog
* Mon Dec 03 2018 MyungJoo Ham <myungjoo.ham@samsung.com>
Requires: gst-plugins-good-extra
Requires: gst-plugins-base
BuildRequires: pkg-config
+BuildRequires: coregl-devel
BuildRequires: pkgconfig(nnstreamer)
BuildRequires: pkgconfig(gstreamer-1.0)
BuildRequires: pkgconfig(gstreamer-video-1.0)