From: Jaeyun
Date: Wed, 11 Sep 2019 12:16:44 +0000 (+0900)
Subject: [CodeClean] remove duplicates
X-Git-Tag: accepted/tizen/unified/20190918.102219~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1e8fff92d2548071922fd6a73c00c392f31cbf39;p=platform%2Fupstream%2Fnnstreamer.git

[CodeClean] remove duplicates

remove duplicates and unnecessary code.

Signed-off-by: Jaeyun Jung
---

diff --git a/ext/nnstreamer/tensor_decoder/tensordec-boundingbox.c b/ext/nnstreamer/tensor_decoder/tensordec-boundingbox.c
index a7bb1d1..bee9822 100644
--- a/ext/nnstreamer/tensor_decoder/tensordec-boundingbox.c
+++ b/ext/nnstreamer/tensor_decoder/tensordec-boundingbox.c
@@ -148,6 +148,25 @@ _init_modes (bounding_boxes * bdata)
   return TRUE;
 }
 
+/**
+ * @brief Free the allocated lables
+ */
+static void
+_free_labels (bounding_boxes * data)
+{
+  guint i;
+
+  if (data->labels) {
+    for (i = 0; i < data->total_labels; i++)
+      g_free (data->labels[i]);
+    g_free (data->labels);
+  }
+
+  data->labels = NULL;
+  data->total_labels = 0;
+  data->max_word_length = 0;
+}
+
 /** @brief tensordec-plugin's GstTensorDecoderDef callback */
 static int
 bb_init (void **pdata)
@@ -207,12 +226,8 @@ bb_exit (void **pdata)
 {
   bounding_boxes *bdata = *pdata;
 
-  if (bdata->labels) {
-    int i;
-    for (i = 0; i < bdata->total_labels; i++)
-      g_free (bdata->labels[i]);
-    g_free (bdata->labels);
-  }
+  _free_labels (bdata);
+
   if (bdata->label_path)
     g_free (bdata->label_path);
   _exit_modes (bdata);
@@ -234,14 +249,7 @@ loadImageLabels (bounding_boxes * data)
   guint i, len;
 
   /* Clean up previously configured data first */
-  if (data->labels) {
-    for (i = 0; i < data->total_labels; i++)
-      g_free (data->labels[i]);
-    g_free (data->labels);
-  }
-  data->labels = NULL;
-  data->total_labels = 0;
-  data->max_word_length = 0;
+  _free_labels (data);
 
   /* Read file contents */
   if (!g_file_get_contents (data->label_path, &contents, NULL, &err)) {
diff --git a/ext/nnstreamer/tensor_decoder/tensordec-imagelabel.c b/ext/nnstreamer/tensor_decoder/tensordec-imagelabel.c
index 99b9bbb..9aeffa2 100644
--- a/ext/nnstreamer/tensor_decoder/tensordec-imagelabel.c
+++ b/ext/nnstreamer/tensor_decoder/tensordec-imagelabel.c
@@ -49,6 +49,25 @@ typedef struct
   guint max_word_length; /**< The max size of labels */
 } ImageLabelData;
 
+/**
+ * @brief Free the allocated lables
+ */
+static void
+_free_labels (ImageLabelData * data)
+{
+  guint i;
+
+  if (data->labels) {
+    for (i = 0; i < data->total_labels; i++)
+      g_free (data->labels[i]);
+    g_free (data->labels);
+  }
+
+  data->labels = NULL;
+  data->total_labels = 0;
+  data->max_word_length = 0;
+}
+
 /** @brief tensordec-plugin's GstTensorDecoderDef callback */
 static int
 il_init (void **pdata)
@@ -63,12 +82,9 @@ static void
 il_exit (void **pdata)
 {
   ImageLabelData *data = *pdata;
-  if (data->labels) {
-    int i;
-    for (i = 0; i < data->total_labels; i++)
-      g_free (data->labels[i]);
-    g_free (data->labels);
-  }
+
+  _free_labels (data);
+
   if (data->label_path)
     g_free (data->label_path);
 
@@ -89,14 +105,7 @@ loadImageLabels (ImageLabelData * data)
   guint i, len;
 
   /* Clean up previously configured data first */
-  if (data->labels) {
-    for (i = 0; i < data->total_labels; i++)
-      g_free (data->labels[i]);
-    g_free (data->labels);
-  }
-  data->labels = NULL;
-  data->total_labels = 0;
-  data->max_word_length = 0;
+  _free_labels (data);
 
   /* Read file contents */
   if (!g_file_get_contents (data->label_path, &contents, NULL, &err)) {
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_python_core.cc b/ext/nnstreamer/tensor_filter/tensor_filter_python_core.cc
index 6631491..73cc457 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_python_core.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_python_core.cc
@@ -48,8 +48,8 @@ PYCore::PYCore (const char* _script_path, const char* _custom)
    */
   gchar libname[32];
 
-  g_snprintf (libname, sizeof(libname), 
-#if PY_VERSION_HEX >= 0x03000000 
+  g_snprintf (libname, sizeof(libname),
+#if PY_VERSION_HEX >= 0x03000000
       "libpython%d.%dm.so.1.0",
 #else
       "libpython%d.%d.so.1.0",
@@ -94,7 +94,7 @@ PYCore::PYCore (const char* _script_path, const char* _custom)
   /** Find nnstreamer_api module */
   PyObject *api_module = PyImport_ImportModule("nnstreamer_python");
   g_assert(api_module);
-  shape_cls = PyObject_GetAttrString(api_module, "TensorShape"); 
+  shape_cls = PyObject_GetAttrString(api_module, "TensorShape");
   g_assert(shape_cls);
   Py_XDECREF(api_module);
 
@@ -179,7 +179,7 @@ PYCore::loadScript ()
       while (*(args++) != NULL)
         argc++;
       g_assert(argc > 0);
-      
+
       py_args = PyTuple_New(argc);
 
       for (int i = 0; i < argc; i++)
@@ -189,14 +189,14 @@ PYCore::loadScript ()
 
       Py_XDECREF(py_args);
       g_strfreev(g_args);
-    } else 
+    } else
       core_obj = PyObject_CallObject(cls, NULL);
 
     if (core_obj) {
       /** check whther either setInputDim or getInputDim/getOutputDim are defined */
       if (PyObject_HasAttrString(core_obj, (char*) "setInputDim"))
        callback_type = CB_SETDIM;
-      else if (PyObject_HasAttrString(core_obj, (char*) "getInputDim") && 
+      else if (PyObject_HasAttrString(core_obj, (char*) "getInputDim") &&
          PyObject_HasAttrString(core_obj, (char*) "getOutputDim"))
        callback_type = CB_GETDIM;
       else
@@ -224,7 +224,7 @@ PYCore::loadScript ()
   gint64 stop_time = g_get_real_time ();
   g_message ("Script is loaded: %" G_GINT64_FORMAT, (stop_time - start_time));
 #endif
-  
+
   return 0;
 }
 
@@ -273,7 +273,7 @@ PYCore::checkTensorSize (GstTensorMemory *output, PyArrayObject *array)
 
   size_t total_size = PyArray_ITEMSIZE(array);
 
-  for (int i = 0; i < PyArray_NDIM(array); i++) 
+  for (int i = 0; i < PyArray_NDIM(array); i++)
     total_size *= PyArray_DIM(array, i);
 
   return (output->size == total_size);
@@ -298,7 +298,7 @@ PYCore::getInputTensorDim (GstTensorsInfo * info)
   if (result) {
     res = parseOutputTensors(result, info);
     Py_XDECREF(result);
-  } else { 
+  } else {
     Py_ERRMSG("Fail to call 'getInputDim'");
     res = -1;
   }
@@ -318,7 +318,7 @@ int
 PYCore::getOutputTensorDim (GstTensorsInfo * info)
 {
   int res = 0;
-  
+
   g_assert (info);
 
   Py_LOCK();
@@ -327,7 +327,7 @@ PYCore::getOutputTensorDim (GstTensorsInfo * info)
   if (result) {
     res = parseOutputTensors(result, info);
     Py_XDECREF(result);
-  } else { 
+  } else {
     Py_ERRMSG("Fail to call 'getOutputDim'");
     res = -1;
   }
@@ -362,7 +362,7 @@ PYCore::setInputTensorDim (const GstTensorsInfo * in_info, GstTensorsInfo * out_
   for (int i = 0; i < in_info->num_tensors; i++) {
     PyObject *shape = PyTensorShape_New (&in_info->info[i]);
     assert (shape);
-    
+
     PyList_Append(param, shape);
   }
 
@@ -381,7 +381,7 @@ PYCore::setInputTensorDim (const GstTensorsInfo * in_info, GstTensorsInfo * out_
     if (res == 0)
       outputTensorMeta.num_tensors = out_info->num_tensors;
     Py_XDECREF(result);
-  } else { 
+  } else {
     Py_ERRMSG("Fail to call 'setInputDim'");
     res = -1;
   }
@@ -396,7 +396,7 @@ PYCore::setInputTensorDim (const GstTensorsInfo * in_info, GstTensorsInfo * out_
  * @param info : the tensor info
  * @return created object
  */
-PyObject* 
+PyObject*
 PYCore::PyTensorShape_New (const GstTensorInfo* info)
 {
   PyObject *args = PyTuple_New(2);
@@ -407,12 +407,12 @@ PYCore::PyTensorShape_New (const GstTensorInfo* info)
   g_assert(dims);
   g_assert(type);
 
-  for (int i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) 
+  for (int i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
     PyList_Append(dims, PyLong_FromLong((uint64_t) info->dimension[i]));
 
   PyTuple_SetItem(args, 0, dims);
   PyTuple_SetItem(args, 1, type);
-  
+
   PyObject *obj = PyObject_CallObject(shape_cls, args);
 
   g_assert(obj);
@@ -435,7 +435,7 @@ PYCore::parseOutputTensors(PyObject* result, GstTensorsInfo * info)
 
   for (int i = 0; i < info->num_tensors; i++) {
     /** don't own the reference */
-    PyObject *tensor_shape = PyList_GetItem(result, (Py_ssize_t) i); 
+    PyObject *tensor_shape = PyList_GetItem(result, (Py_ssize_t) i);
     g_assert(tensor_shape);
 
     PyObject *shape_dims = PyObject_CallMethod(tensor_shape, (char*) "getDims", NULL);
@@ -447,7 +447,7 @@ PYCore::parseOutputTensors(PyObject* result, GstTensorsInfo * info)
     /** convert numpy type to tensor type */
     info->info[i].type = getTensorType((NPY_TYPES)(((PyArray_Descr*) shape_type)->type_num));
     for (int j = 0; j < PyList_Size(shape_dims); j++)
-      info->info[i].dimension[j] = 
+      info->info[i].dimension[j] =
           (uint32_t) PyLong_AsLong(PyList_GetItem(shape_dims, (Py_ssize_t) j));
 
     Py_XDECREF (shape_dims);
@@ -495,8 +495,8 @@ PYCore::run (const GstTensorMemory * input, GstTensorMemory * output)
     for (int i = 0; i < outputTensorMeta.num_tensors; i++) {
       PyArrayObject* output_array = (PyArrayObject*) PyList_GetItem(result, (Py_ssize_t) i);
       /** type/size checking */
-      if (checkTensorType(&output[i], output_array) && 
-          checkTensorSize(&output[i], output_array)) { 
+      if (checkTensorType(&output[i], output_array) &&
+          checkTensorSize(&output[i], output_array)) {
         /** obtain the pointer to the buffer for the output array */
         output[i].data = PyArray_DATA(output_array);
         Py_XINCREF(output_array);
diff --git a/gst/nnstreamer/nnstreamer_conf.c b/gst/nnstreamer/nnstreamer_conf.c
index 9984b6d..4e7be8e 100644
--- a/gst/nnstreamer/nnstreamer_conf.c
+++ b/gst/nnstreamer/nnstreamer_conf.c
@@ -166,6 +166,39 @@ _get_filenames (nnsconf_type_path type, const gchar * dir, GSList ** listF,
 }
 
 /**
+ * @brief Private function to get sub-plugins list with type.
+ */
+static gboolean
+_get_subplugin_with_type (nnsconf_type_path type, gchar *** basename,
+    gchar *** filepath)
+{
+  gchar **vstr, **vstrFull;
+
+  switch (type) {
+    case NNSCONF_PATH_FILTERS:
+      vstr = conf.basenameFILTERS;
+      vstrFull = conf.filesFILTERS;
+      break;
+    case NNSCONF_PATH_DECODERS:
+      vstr = conf.basenameDECODERS;
+      vstrFull = conf.filesDECODERS;
+      break;
+    case NNSCONF_PATH_CUSTOM_FILTERS:
+      vstr = conf.basenameCUSTOM_FILTERS;
+      vstrFull = conf.filesCUSTOM_FILTERS;
+      break;
+    default:
+      /* unknown type */
+      g_critical ("Failed to get sub-plugins, unknown sub-plugin type.");
+      return FALSE;
+  }
+
+  *basename = vstr;
+  *filepath = vstrFull;
+  return TRUE;
+}
+
+/**
  * @brief Data structure for _g_list_foreach_vstr_helper
  */
 typedef struct
@@ -341,22 +374,8 @@ nnsconf_get_fullpath_from_file (const gchar * file2find, nnsconf_type_path type)
   gchar **vstr, **vstrFull;
   guint i;
 
-  switch (type) {
-    case NNSCONF_PATH_FILTERS:
-      vstr = conf.basenameFILTERS;
-      vstrFull = conf.filesFILTERS;
-      break;
-    case NNSCONF_PATH_DECODERS:
-      vstr = conf.basenameDECODERS;
-      vstrFull = conf.filesDECODERS;
-      break;
-    case NNSCONF_PATH_CUSTOM_FILTERS:
-      vstr = conf.basenameCUSTOM_FILTERS;
-      vstrFull = conf.filesCUSTOM_FILTERS;
-      break;
-    default:
-      return NULL;
-  }
+  if (!_get_subplugin_with_type (type, &vstr, &vstrFull))
+    return NULL;
 
   if (vstr == NULL)
     return NULL;
@@ -426,23 +445,8 @@ nnsconf_get_subplugin_info (nnsconf_type_path type, subplugin_info_s * info)
 
   nnsconf_loadconf (FALSE);
 
-  switch (type) {
-    case NNSCONF_PATH_FILTERS:
-      vstr = conf.basenameFILTERS;
-      vstrFull = conf.filesFILTERS;
-      break;
-    case NNSCONF_PATH_DECODERS:
-      vstr = conf.basenameDECODERS;
-      vstrFull = conf.filesDECODERS;
-      break;
-    case NNSCONF_PATH_CUSTOM_FILTERS:
-      vstr = conf.basenameCUSTOM_FILTERS;
-      vstrFull = conf.filesCUSTOM_FILTERS;
-      break;
-    default:
-      g_critical ("Failed to get sub-plugins, unknown sub-plugin type.");
-      return 0;
-  }
+  if (!_get_subplugin_with_type (type, &vstr, &vstrFull))
+    return 0;
 
   info->names = vstr;
   info->paths = vstrFull;
diff --git a/gst/nnstreamer/tensor_demux/gsttensordemux.c b/gst/nnstreamer/tensor_demux/gsttensordemux.c
index 8eb8be9..aff3fa3 100644
--- a/gst/nnstreamer/tensor_demux/gsttensordemux.c
+++ b/gst/nnstreamer/tensor_demux/gsttensordemux.c
@@ -248,7 +248,7 @@ gst_tensor_demux_event (GstPad * pad, GstObject * parent, GstEvent * event)
       GstCaps *caps;
       gst_event_parse_caps (event, &caps);
       gst_tensor_demux_parse_caps (tensor_demux, caps);
-      return gst_pad_event_default (pad, parent, event);
+      break;
     }
     case GST_EVENT_EOS:
      if (!tensor_demux->srcpads) {
@@ -257,13 +257,13 @@ gst_tensor_demux_event (GstPad * pad, GstObject * parent, GstEvent * event)
             ("Got EOS before adding any pads"));
         gst_event_unref (event);
         return FALSE;
-      } else {
-        return gst_pad_event_default (pad, parent, event);
       }
       break;
     default:
-      return gst_pad_event_default (pad, parent, event);
+      break;
   }
+
+  return gst_pad_event_default (pad, parent, event);
 }
 
 /**
diff --git a/gst/nnstreamer/tensor_merge/gsttensormerge.c b/gst/nnstreamer/tensor_merge/gsttensormerge.c
index 12dc1b9..9131767 100644
--- a/gst/nnstreamer/tensor_merge/gsttensormerge.c
+++ b/gst/nnstreamer/tensor_merge/gsttensormerge.c
@@ -107,8 +107,8 @@ static GstStaticPadTemplate sink_templ = GST_STATIC_PAD_TEMPLATE ("sink_%u",
     GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT)
     );
 
-static gboolean gst_tensor_merge_handle_src_event (GstPad * pad,
-    GstObject * parent, GstEvent * event);
+static gboolean gst_tensor_merge_src_event (GstPad * pad, GstObject * parent,
+    GstEvent * event);
 static GstPad *gst_tensor_merge_request_new_pad (GstElement * element,
     GstPadTemplate * templ, const gchar * name, const GstCaps * caps);
 static GstStateChangeReturn gst_tensor_merge_change_state (GstElement * element,
@@ -199,8 +199,7 @@ gst_tensor_merge_init (GstTensorMerge * tensor_merge)
   tensor_merge->srcpad =
       gst_pad_new_from_template (gst_element_class_get_pad_template (klass,
           "src"), "src");
-  gst_pad_set_event_function (tensor_merge->srcpad,
-      gst_tensor_merge_handle_src_event);
+  gst_pad_set_event_function (tensor_merge->srcpad, gst_tensor_merge_src_event);
 
   gst_element_add_pad (GST_ELEMENT (tensor_merge), tensor_merge->srcpad);
 
@@ -331,13 +330,13 @@ gst_tensor_merge_request_new_pad (GstElement * element, GstPadTemplate * templ,
  * @brief src event vmethod
  */
 static gboolean
-gst_tensor_merge_handle_src_event (GstPad * pad, GstObject * parent,
-    GstEvent * event)
+gst_tensor_merge_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
 {
-  GstEventType type;
-  type = event ? GST_EVENT_TYPE (event) : GST_EVENT_UNKNOWN;
-  switch (type) {
+  g_return_val_if_fail (event != NULL, FALSE);
+
+  switch (GST_EVENT_TYPE (event)) {
     case GST_EVENT_SEEK:
+      gst_event_unref (event);
       return FALSE;
     default:
       break;
@@ -353,19 +352,17 @@ static gboolean
 gst_tensor_merge_sink_event (GstCollectPads * pads, GstCollectData * data,
     GstEvent * event, GstTensorMerge * tensor_merge)
 {
-  gboolean ret;
+  g_return_val_if_fail (event != NULL, FALSE);
+
   switch (GST_EVENT_TYPE (event)) {
     case GST_EVENT_FLUSH_STOP:
-    {
       tensor_merge->need_segment = TRUE;
       break;
-    }
     default:
       break;
   }
 
-  ret = gst_collect_pads_event_default (pads, data, event, FALSE);
-  return ret;
+  return gst_collect_pads_event_default (pads, data, event, FALSE);
 }
 
 /**
diff --git a/gst/nnstreamer/tensor_mux/gsttensormux.c b/gst/nnstreamer/tensor_mux/gsttensormux.c
index 4f0117c..9778556 100644
--- a/gst/nnstreamer/tensor_mux/gsttensormux.c
+++ b/gst/nnstreamer/tensor_mux/gsttensormux.c
@@ -110,8 +110,8 @@ static GstStaticPadTemplate sink_templ = GST_STATIC_PAD_TEMPLATE ("sink_%u",
     GST_STATIC_CAPS (CAPS_STRING_SINK)
     );
 
-static gboolean gst_tensor_mux_handle_src_event (GstPad * pad,
-    GstObject * parent, GstEvent * event);
+static gboolean gst_tensor_mux_src_event (GstPad * pad, GstObject * parent,
+    GstEvent * event);
 static GstPad *gst_tensor_mux_request_new_pad (GstElement * element,
     GstPadTemplate * templ, const gchar * name, const GstCaps * caps);
 static GstStateChangeReturn gst_tensor_mux_change_state (GstElement * element,
@@ -195,8 +195,7 @@ gst_tensor_mux_init (GstTensorMux * tensor_mux)
   tensor_mux->srcpad =
       gst_pad_new_from_template (gst_element_class_get_pad_template (klass,
           "src"), "src");
-  gst_pad_set_event_function (tensor_mux->srcpad,
-      gst_tensor_mux_handle_src_event);
+  gst_pad_set_event_function (tensor_mux->srcpad, gst_tensor_mux_src_event);
 
   gst_element_add_pad (GST_ELEMENT (tensor_mux), tensor_mux->srcpad);
 
@@ -280,13 +279,13 @@ gst_tensor_mux_request_new_pad (GstElement * element, GstPadTemplate * templ,
  * @brief src event vmethod
  */
 static gboolean
-gst_tensor_mux_handle_src_event (GstPad * pad, GstObject * parent,
-    GstEvent * event)
+gst_tensor_mux_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
 {
-  GstEventType type;
-  type = event ? GST_EVENT_TYPE (event) : GST_EVENT_UNKNOWN;
-  switch (type) {
+  g_return_val_if_fail (event != NULL, FALSE);
+
+  switch (GST_EVENT_TYPE (event)) {
     case GST_EVENT_SEEK:
+      gst_event_unref (event);
       return FALSE;
     default:
       break;
@@ -302,19 +301,17 @@ static gboolean
 gst_tensor_mux_sink_event (GstCollectPads * pads, GstCollectData * data,
     GstEvent * event, GstTensorMux * tensor_mux)
 {
-  gboolean ret;
+  g_return_val_if_fail (event != NULL, FALSE);
+
   switch (GST_EVENT_TYPE (event)) {
     case GST_EVENT_FLUSH_STOP:
-    {
       tensor_mux->need_segment = TRUE;
       break;
-    }
     default:
       break;
   }
 
-  ret = gst_collect_pads_event_default (pads, data, event, FALSE);
-  return ret;
+  return gst_collect_pads_event_default (pads, data, event, FALSE);
 }
 
 /**
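
Editor's note (not part of the patch): the sketch below is a standalone illustration of the first refactor above, where the duplicated label-freeing code in bb_exit()/il_exit() and loadImageLabels() is consolidated into a single _free_labels() helper. Only the helper body mirrors the patch; the struct name label_data and the main() driver are invented for the example. It builds with: gcc sketch.c $(pkg-config --cflags --libs glib-2.0)

/* Standalone sketch: shared cleanup helper called from both the exit path
 * and the reload path, instead of two copies of the same loop. */
#include <glib.h>

typedef struct
{
  gchar **labels;         /* loaded label strings */
  guint total_labels;     /* number of loaded labels */
  guint max_word_length;  /* longest label length */
} label_data;

/* Free the allocated labels and reset the bookkeeping fields, as in the patch. */
static void
_free_labels (label_data * data)
{
  guint i;

  if (data->labels) {
    for (i = 0; i < data->total_labels; i++)
      g_free (data->labels[i]);
    g_free (data->labels);
  }

  data->labels = NULL;
  data->total_labels = 0;
  data->max_word_length = 0;
}

int
main (void)
{
  label_data data = { 0 };

  /* simulate a loaded label list */
  data.labels = g_new0 (gchar *, 2);
  data.labels[0] = g_strdup ("orange");
  data.labels[1] = g_strdup ("banana");
  data.total_labels = 2;
  data.max_word_length = 6;

  /* reload path: clean up previously configured data first */
  _free_labels (&data);

  /* exit path: safe to call again because the helper resets the fields */
  _free_labels (&data);

  return 0;
}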
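A similar illustrative sketch for the nnstreamer_conf.c change, where two callers previously repeated the same switch over the sub-plugin type and now share _get_subplugin_with_type(). The enum, the mocked tables, and both callers below are stand-ins invented for the example, not nnstreamer APIs.

/* Standalone sketch: one type-to-table lookup helper shared by two callers. */
#include <glib.h>
#include <stdio.h>

typedef enum
{
  PATH_FILTERS,
  PATH_DECODERS
} plugin_type;

/* mocked configuration tables */
static gchar *filter_names[] = { (gchar *) "tensorflow", NULL };
static gchar *filter_paths[] = { (gchar *) "/usr/lib/libnnstreamer_filter_tensorflow.so", NULL };
static gchar *decoder_names[] = { (gchar *) "bounding_boxes", NULL };
static gchar *decoder_paths[] = { (gchar *) "/usr/lib/libnnstreamer_decoder_bounding_boxes.so", NULL };

/* Single helper used by both callers instead of duplicated switch blocks. */
static gboolean
_get_subplugin_with_type (plugin_type type, gchar *** names, gchar *** paths)
{
  switch (type) {
    case PATH_FILTERS:
      *names = filter_names;
      *paths = filter_paths;
      break;
    case PATH_DECODERS:
      *names = decoder_names;
      *paths = decoder_paths;
      break;
    default:
      g_critical ("Failed to get sub-plugins, unknown sub-plugin type.");
      return FALSE;
  }
  return TRUE;
}

/* caller 1: find the full path for a given base name */
static const gchar *
get_fullpath (const gchar * name, plugin_type type)
{
  gchar **names, **paths;
  guint i;

  if (!_get_subplugin_with_type (type, &names, &paths))
    return NULL;

  for (i = 0; names[i] != NULL; i++)
    if (g_strcmp0 (names[i], name) == 0)
      return paths[i];
  return NULL;
}

/* caller 2: report how many sub-plugins of a type are known */
static guint
count_subplugins (plugin_type type)
{
  gchar **names, **paths;

  if (!_get_subplugin_with_type (type, &names, &paths))
    return 0;
  (void) paths;                 /* only the name list is needed here */
  return g_strv_length (names);
}

int
main (void)
{
  printf ("%s\n", get_fullpath ("tensorflow", PATH_FILTERS));
  printf ("%u decoder(s)\n", count_subplugins (PATH_DECODERS));
  return 0;
}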