From fd5b20f989b0779fc62f69270ceb10bd9b598262 Mon Sep 17 00:00:00 2001
From: Sangjung Woo
Date: Thu, 13 Jun 2019 15:01:51 +0900
Subject: [PATCH] [Tizen/Api] Remove Tizen dependency from CAPI set

Since the NNStreamer CAPI will be used on Android, Tizen, and Linux
distros, this patch removes the Tizen dependency from the API set.
The detailed changes are as follows.

* Support multiple logging environments (Android, Tizen, and Linux
  distro) instead of relying on the Tizen dlog API.
* Use standard error codes instead of the Tizen-specific ones.

Signed-off-by: Jaeyun Jung
Signed-off-by: Sangjung Woo
---
 packaging/nnstreamer.spec             |   2 +-
 tizen-api/include/nnstreamer.h        |  14 +++--
 tizen-api/include/tizen-api-private.h |  38 +++++++---
 tizen-api/src/nnstreamer-single.c     |  41 +++++------
 tizen-api/src/tizen-api-pipeline.c    | 111 +++++++++++++++++-----------------
 tizen-api/src/tizen-api-util.c        |   4 +-
 6 files changed, 116 insertions(+), 94 deletions(-)

diff --git a/packaging/nnstreamer.spec b/packaging/nnstreamer.spec
index 04aaa8b..55736ae 100644
--- a/packaging/nnstreamer.spec
+++ b/packaging/nnstreamer.spec
@@ -179,7 +179,7 @@ mkdir -p build
 %define enable_tf false
 %endif
 
-meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} --libdir=%{_libdir} --bindir=%{nnstexampledir} --includedir=%{_includedir} -Dinstall-example=true -Denable-tensorflow=%{enable_tf} -Denable-pytorch=false %{api} -Denable-env-var=false -Denable-symbolic-link=false build
+meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} --libdir=%{_libdir} --bindir=%{nnstexampledir} --includedir=%{_includedir} -Dinstall-example=true -Denable-tensorflow=%{enable_tf} -Denable-pytorch=false %{api} -Denable-env-var=false -Denable-symbolic-link=false -D__TIZEN__=1 build
 
 ninja -C build %{?_smp_mflags}
diff --git a/tizen-api/include/nnstreamer.h b/tizen-api/include/nnstreamer.h
index b54ca52..810f1e4 100644
--- a/tizen-api/include/nnstreamer.h
+++ b/tizen-api/include/nnstreamer.h
@@ -25,7 +25,7 @@
 #define __TIZEN_MACHINELEARNING_NNSTREAMER_H__
 
 #include 
-#include 
+#include 
 
 #ifdef __cplusplus
 extern "C" {
@@ -130,11 +130,13 @@ typedef enum _ml_tensor_type_e
  * @since_tizen 5.5
  */
 typedef enum {
-  ML_ERROR_NONE = TIZEN_ERROR_NONE, /**< Success! */
-  ML_ERROR_INVALID_PARAMETER = TIZEN_ERROR_INVALID_PARAMETER, /**< Invalid parameter */
-  ML_ERROR_NOT_SUPPORTED = TIZEN_ERROR_NOT_SUPPORTED, /**< The feature is not supported */
-  ML_ERROR_STREAMS_PIPE = TIZEN_ERROR_STREAMS_PIPE, /**< Cannot create or access GStreamer pipeline. */
-  ML_ERROR_TRY_AGAIN = TIZEN_ERROR_TRY_AGAIN, /**< The pipeline is not ready, yet (not negotiated, yet) */
+  ML_ERROR_NONE = 0, /**< Success! */
+  ML_ERROR_INVALID_PARAMETER = -EINVAL, /**< Invalid parameter */
+  ML_ERROR_STREAMS_PIPE = -ESTRPIPE, /**< Cannot create or access GStreamer pipeline. */
+  ML_ERROR_TRY_AGAIN = -EAGAIN, /**< The pipeline is not ready, yet (not negotiated, yet) */
+  ML_ERROR_UNKNOWN = (-1073741824LL),
+  ML_ERROR_TIMED_OUT,
+  ML_ERROR_NOT_SUPPORTED, /**< The feature is not supported */
 } ml_error_e;
 
 /**
diff --git a/tizen-api/include/tizen-api-private.h b/tizen-api/include/tizen-api-private.h
index c5365dc..7acb8b0 100644
--- a/tizen-api/include/tizen-api-private.h
+++ b/tizen-api/include/tizen-api-private.h
@@ -29,23 +29,41 @@
 #include 
 #include 
-#include 
-#include 
-
 #include 
 
 #include "nnstreamer.h"
 
-#define DLOG_TAG "nnstreamer-capi"
+#define TAG_NAME "nnstreamer-capi"
+
+#if defined(__TIZEN__)
+  #include 
+
+  #define ml_logi(...) \
+    dlog_print (DLOG_INFO, TAG_NAME, __VA_ARGS__)
+
+  #define ml_logw(...) \
+    dlog_print (DLOG_WARN, TAG_NAME, __VA_ARGS__)
+
+  #define ml_loge(...) \
+    dlog_print (DLOG_ERROR, TAG_NAME, __VA_ARGS__)
+
+#elif defined(__ANDROID__)
+  #include 
+
+  #define ml_logi(...) \
+    __android_log_print (ANDROID_LOG_INFO, TAG_NAME, __VA_ARGS__)
 
-#define dlogi(...) \
-  dlog_print (DLOG_INFO, DLOG_TAG, __VA_ARGS__)
+  #define ml_logw(...) \
+    __android_log_print (ANDROID_LOG_WARNING, TAG_NAME, __VA_ARGS__)
 
-#define dlogw(...) \
-  dlog_print (DLOG_WARN, DLOG_TAG, __VA_ARGS__)
+  #define ml_loge(...) \
+    __android_log_print (ANDROID_LOG_ERROR, TAG_NAME, __VA_ARGS__)
 
-#define dloge(...) \
-  dlog_print (DLOG_ERROR, DLOG_TAG, __VA_ARGS__)
+#else /* Linux distro */
+  #define ml_logi g_message
+  #define ml_loge g_message
+  #define ml_logw g_message
+#endif
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/tizen-api/src/nnstreamer-single.c b/tizen-api/src/nnstreamer-single.c
index 8590efc..fcfdff0 100644
--- a/tizen-api/src/nnstreamer-single.c
+++ b/tizen-api/src/nnstreamer-single.c
@@ -23,6 +23,7 @@
  * @bug No known bugs except for NYI items
  */
 
+#include 
 #include 
 #include  /* Uses NNStreamer/Pipeline C-API */
@@ -96,7 +97,7 @@ ml_single_open (ml_single_h * single, const char *model_path,
 
   /* Validate the params */
   if (!single) {
-    dloge ("The given param, single is invalid.");
+    ml_loge ("The given param, single is invalid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -104,26 +105,26 @@ ml_single_open (ml_single_h * single, const char *model_path,
   *single = NULL;
 
   if (!g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
-    dloge ("The given param, model path [%s] is invalid.",
+    ml_loge ("The given param, model path [%s] is invalid.",
         GST_STR_NULL (model_path));
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (input_info &&
       ml_util_validate_tensors_info (input_info) != ML_ERROR_NONE) {
-    dloge ("The given param, input tensor info is invalid.");
+    ml_loge ("The given param, input tensor info is invalid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (output_info &&
      ml_util_validate_tensors_info (output_info) != ML_ERROR_NONE) {
-    dloge ("The given param, output tensor info is invalid.");
+    ml_loge ("The given param, output tensor info is invalid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   status = ml_util_check_nnfw (nnfw, hw);
   if (status < 0) {
-    dloge ("The given nnfw is not available.");
+    ml_loge ("The given nnfw is not available.");
     return status;
   }
@@ -138,7 +139,7 @@ ml_single_open (ml_single_h * single, const char *model_path,
       break;
     case ML_NNFW_TENSORFLOW_LITE:
       if (!g_str_has_suffix (model_path, ".tflite")) {
-        dloge ("The given model file [%s] has invalid extension.", model_path);
+        ml_loge ("The given model file [%s] has invalid extension.", model_path);
         return ML_ERROR_INVALID_PARAMETER;
       }
@@ -149,7 +150,7 @@ ml_single_open (ml_single_h * single, const char *model_path,
       break;
     default:
       /** @todo Add other fw later. */
-      dloge ("The given nnfw is not supported.");
+      ml_loge ("The given nnfw is not supported.");
       return ML_ERROR_NOT_SUPPORTED;
   }
@@ -189,7 +190,7 @@ ml_single_open (ml_single_h * single, const char *model_path,
 
     status = ml_util_validate_tensors_info (&single_h->in_info);
     if (status != ML_ERROR_NONE) {
-      dloge ("Failed to get the input tensor info.");
+      ml_loge ("Failed to get the input tensor info.");
      goto error;
     }
@@ -207,7 +208,7 @@ ml_single_open (ml_single_h * single, const char *model_path,
 
     status = ml_util_validate_tensors_info (&single_h->out_info);
     if (status != ML_ERROR_NONE) {
-      dloge ("Failed to get the output tensor info.");
+      ml_loge ("Failed to get the output tensor info.");
       goto error;
     }
@@ -242,7 +243,7 @@ ml_single_close (ml_single_h single)
   int ret;
 
   if (!single) {
-    dloge ("The given param, single is invalid.");
+    ml_loge ("The given param, single is invalid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -288,7 +289,7 @@ ml_single_inference (ml_single_h single,
   int i, status = ML_ERROR_NONE;
 
   if (!single || !input) {
-    dloge ("The given param is invalid.");
+    ml_loge ("The given param is invalid.");
     status = ML_ERROR_INVALID_PARAMETER;
     goto error;
   }
@@ -298,7 +299,7 @@ ml_single_inference (ml_single_h single,
   /* Validate output memory and size */
   if (output) {
     if (output->num_tensors != single_h->out_info.num_tensors) {
-      dloge ("Invalid output data, the number of output is different.");
+      ml_loge ("Invalid output data, the number of output is different.");
       status = ML_ERROR_INVALID_PARAMETER;
       goto error;
     }
@@ -307,7 +308,7 @@ ml_single_inference (ml_single_h single,
      if (!output->tensors[i].tensor ||
          output->tensors[i].size != ml_util_get_tensor_size (&single_h->out_info.info[i])) {
-        dloge ("Invalid output data, the size of output is different.");
+        ml_loge ("Invalid output data, the size of output is different.");
        status = ML_ERROR_INVALID_PARAMETER;
        goto error;
      }
@@ -325,7 +326,7 @@ ml_single_inference (ml_single_h single,
   ret = gst_app_src_push_buffer (GST_APP_SRC (single_h->src), buffer);
   if (ret != GST_FLOW_OK) {
-    dloge ("Cannot push a buffer into source element.");
+    ml_loge ("Cannot push a buffer into source element.");
     status = ML_ERROR_STREAMS_PIPE;
     goto error;
   }
@@ -334,7 +335,7 @@ ml_single_inference (ml_single_h single,
   sample = gst_app_sink_try_pull_sample (GST_APP_SINK (single_h->sink), GST_SECOND);
   if (!sample) {
-    dloge ("Failed to get the result from sink element.");
+    ml_loge ("Failed to get the result from sink element.");
     status = ML_ERROR_STREAMS_PIPE;
     goto error;
   }
@@ -345,7 +346,7 @@ ml_single_inference (ml_single_h single,
   result = ml_util_allocate_tensors_data (&single_h->out_info);
   if (!result) {
-    dloge ("Failed to allocate the memory block.");
+    ml_loge ("Failed to allocate the memory block.");
     status = ml_util_get_last_error ();
     goto error;
   }
@@ -401,7 +402,7 @@ ml_single_get_input_info (ml_single_h single,
   g_free (val);
 
   if (info.num_tensors != rank) {
-    dlogw ("Invalid state, input tensor type is mismatched in filter.");
+    ml_logw ("Invalid state, input tensor type is mismatched in filter.");
   }
 
   g_object_get (single_h->filter, "inputname", &val, NULL);
@@ -409,7 +410,7 @@ ml_single_get_input_info (ml_single_h single,
   g_free (val);
 
   if (info.num_tensors != rank) {
-    dlogw ("Invalid state, input tensor name is mismatched in filter.");
+    ml_logw ("Invalid state, input tensor name is mismatched in filter.");
   }
 
   ml_util_copy_tensors_info_from_gst (input_info, &info);
@@ -448,7 +449,7 @@ ml_single_get_output_info (ml_single_h single,
   g_free (val);
 
   if (info.num_tensors != rank) {
-    dlogw ("Invalid state, output tensor type is mismatched in filter.");
+    ml_logw ("Invalid state, output tensor type is mismatched in filter.");
   }
 
   g_object_get (single_h->filter, "outputname", &val, NULL);
@@ -456,7 +457,7 @@ ml_single_get_output_info (ml_single_h single,
   g_free (val);
 
   if (info.num_tensors != rank) {
-    dlogw ("Invalid state, output tensor name is mismatched in filter.");
+    ml_logw ("Invalid state, output tensor name is mismatched in filter.");
   }
 
   ml_util_copy_tensors_info_from_gst (output_info, &info);
diff --git a/tizen-api/src/tizen-api-pipeline.c b/tizen-api/src/tizen-api-pipeline.c
index c2c63ba..21ddef7 100644
--- a/tizen-api/src/tizen-api-pipeline.c
+++ b/tizen-api/src/tizen-api-pipeline.c
@@ -21,6 +21,7 @@
  * @bug No known bugs except for NYI items
  */
 
+#include 
 #include 
 #include  /* Get GType from GObject Instances */
 #include 
@@ -38,14 +39,14 @@
   ml_pipeline_element *elem; \
   int ret = ML_ERROR_NONE; \
   if (h == NULL) { \
-    dloge ("The given handle is invalid"); \
+    ml_loge ("The given handle is invalid"); \
     return ML_ERROR_INVALID_PARAMETER; \
   } \
 \
   p = name->pipe; \
   elem = name->element; \
   if (p == NULL || elem == NULL || p != elem->pipe) { \
-    dloge ("The handle appears to be broken."); \
+    ml_loge ("The handle appears to be broken."); \
     return ML_ERROR_INVALID_PARAMETER; \
   } \
 \
@@ -53,7 +54,7 @@
   g_mutex_lock (&elem->lock); \
 \
   if (NULL == g_list_find (elem->handles, name)) { \
-    dloge ("The handle does not exists."); \
+    ml_loge ("The handle does not exists."); \
     ret = ML_ERROR_INVALID_PARAMETER; \
     goto unlock_return; \
   }
@@ -135,7 +136,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
   num_mems = gst_buffer_n_memory (b);
 
   if (num_mems > ML_TENSOR_SIZE_LIMIT) {
-    dloge ("Number of memory chunks in a GstBuffer exceed the limit: %u > %u",
+    ml_loge ("Number of memory chunks in a GstBuffer exceed the limit: %u > %u",
        num_mems, ML_TENSOR_SIZE_LIMIT);
     return;
   }
@@ -175,7 +176,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
       elem->size = 0;
 
       if (elem->tensors_info.num_tensors != num_mems) {
-        dloge
+        ml_loge
            ("The sink event of [%s] cannot be handled because the number of tensors mismatches.",
            elem->name);
@@ -188,7 +189,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
         size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
 
         if (sz != tensors_data.tensors[i].size) {
-          dloge
+          ml_loge
              ("The sink event of [%s] cannot be handled because the tensor dimension mismatches.",
              elem->name);
@@ -212,7 +213,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
   /* Get the data! */
   if (gst_buffer_get_size (b) != total_size ||
       (elem->size > 0 && total_size != elem->size)) {
-    dloge
+    ml_loge
        ("The buffersize mismatches. All the three values must be the same: %zu, %zu, %zu",
        total_size, elem->size, gst_buffer_get_size (b));
     goto error;
@@ -309,10 +310,10 @@ ml_pipeline_construct (const char *pipeline_description, ml_pipeline_h * pipe)
 
   if (FALSE == gst_init_check (NULL, NULL, &err)) {
     if (err) {
-      dloge ("GStreamer has the following error: %s", err->message);
+      ml_loge ("GStreamer has the following error: %s", err->message);
       g_error_free (err);
     } else {
-      dloge ("Cannot initialize GStreamer. Unknown reason.");
+      ml_loge ("Cannot initialize GStreamer. Unknown reason.");
     }
     return ML_ERROR_STREAMS_PIPE;
   }
@@ -320,11 +321,11 @@ ml_pipeline_construct (const char *pipeline_description, ml_pipeline_h * pipe)
   pipeline = gst_parse_launch (pipeline_description, &err);
   if (pipeline == NULL || err) {
     if (err) {
-      dloge ("Cannot parse and launch the given pipeline = [%s], %s",
+      ml_loge ("Cannot parse and launch the given pipeline = [%s], %s",
          pipeline_description, err->message);
       g_error_free (err);
     } else {
-      dloge
+      ml_loge
          ("Cannot parse and launch the given pipeline = [%s], unknown reason",
          pipeline_description);
     }
@@ -402,7 +403,7 @@ ml_pipeline_construct (const char *pipeline_description, ml_pipeline_h * pipe)
         break;
       case GST_ITERATOR_RESYNC:
       case GST_ITERATOR_ERROR:
-        dlogw
+        ml_logw
            ("There is an error or a resync-event while inspecting a pipeline. However, we can still execute the pipeline.");
       case GST_ITERATOR_DONE:
         done = TRUE;
@@ -562,7 +563,7 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name,
   int ret = ML_ERROR_NONE;
 
   if (h == NULL) {
-    dloge ("The argument sink handle is not valid.");
+    ml_loge ("The argument sink handle is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -570,17 +571,17 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name,
   *h = NULL;
 
   if (pipe == NULL) {
-    dloge ("The first argument, pipeline handle is not valid.");
+    ml_loge ("The first argument, pipeline handle is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (sink_name == NULL) {
-    dloge ("The second argument, sink name is not valid.");
+    ml_loge ("The second argument, sink name is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (cb == NULL) {
-    dloge ("The callback argument, cb, is not valid.");
+    ml_loge ("The callback argument, cb, is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -588,21 +589,21 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name,
   elem = g_hash_table_lookup (p->namednodes, sink_name);
 
   if (elem == NULL) {
-    dloge ("There is no element named [%s] in the pipeline.", sink_name);
+    ml_loge ("There is no element named [%s] in the pipeline.", sink_name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
   }
 
   if (elem->type != ML_PIPELINE_ELEMENT_SINK &&
       elem->type != ML_PIPELINE_ELEMENT_APP_SINK) {
-    dloge ("The element [%s] in the pipeline is not a sink element.",
+    ml_loge ("The element [%s] in the pipeline is not a sink element.",
        sink_name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
   }
 
   if (elem->handle_id > 0) {
-    dlogw ("Sink callback is already registered.");
+    ml_logw ("Sink callback is already registered.");
     ret = ML_ERROR_NONE;
     goto unlock_return;
   }
@@ -625,7 +626,7 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name,
   }
 
   if (elem->handle_id == 0) {
-    dloge ("Failed to connect a signal to the element [%s].", sink_name);
+    ml_loge ("Failed to connect a signal to the element [%s].", sink_name);
     ret = ML_ERROR_STREAMS_PIPE;
     goto unlock_return;
   }
@@ -710,7 +711,7 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name,
   int ret = ML_ERROR_NONE, i;
 
   if (h == NULL) {
-    dloge ("The argument source handle is not valid.");
+    ml_loge ("The argument source handle is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -718,17 +719,17 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name,
   *h = NULL;
 
   if (pipe == NULL) {
-    dloge ("The first argument, pipeline handle is not valid.");
+    ml_loge ("The first argument, pipeline handle is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (src_name == NULL) {
-    dloge ("The second argument, source name is not valid.");
+    ml_loge ("The second argument, source name is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (tensors_info == NULL) {
-    dloge ("The 3rd argument, tensors info is not valid.");
+    ml_loge ("The 3rd argument, tensors info is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -737,13 +738,13 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name,
   elem = g_hash_table_lookup (p->namednodes, src_name);
 
   if (elem == NULL) {
-    dloge ("There is no element named [%s] in the pipeline.", src_name);
+    ml_loge ("There is no element named [%s] in the pipeline.", src_name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
   }
 
   if (elem->type != ML_PIPELINE_ELEMENT_APP_SRC) {
-    dloge ("The element [%s] in the pipeline is not a source element.",
+    ml_loge ("The element [%s] in the pipeline is not a source element.",
        src_name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
@@ -771,7 +772,7 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name,
       elem->size += sz;
     }
   } else {
-    dlogw
+    ml_logw
        ("Cannot find caps. The pipeline is not yet negotiated for tensor_src, [%s].",
        src_name);
     gst_object_unref (elem->src);
@@ -836,13 +837,13 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data,
   handle_init (src, src, h);
 
   if (!data) {
-    dloge ("The given param data is invalid.");
+    ml_loge ("The given param data is invalid.");
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
   }
 
   if (data->num_tensors < 1 || data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
-    dloge ("The tensor size is invalid. It should be 1 ~ %u; where it is %u",
+    ml_loge ("The tensor size is invalid. It should be 1 ~ %u; where it is %u",
        ML_TENSOR_SIZE_LIMIT, data->num_tensors);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
@@ -868,7 +869,7 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data,
     elem->size = 0;
 
     if (elem->tensors_info.num_tensors != data->num_tensors) {
-      dloge
+      ml_loge
          ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u",
          elem->name, elem->tensors_info.num_tensors, data->num_tensors);
@@ -882,7 +883,7 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data,
       size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
 
       if (sz != data->tensors[i].size) {
-        dloge
+        ml_loge
           ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)",
           i, data->tensors[i].size, sz);
@@ -905,7 +906,7 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data,
   }
 
   if (elem->size == 0) {
-    dlogw ("The pipeline is not ready to accept inputs. The input is ignored.");
+    ml_logw ("The pipeline is not ready to accept inputs. The input is ignored.");
     ret = ML_ERROR_TRY_AGAIN;
     goto unlock_return;
   }
@@ -926,10 +927,10 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data,
   gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer);
 
   if (gret == GST_FLOW_FLUSHING) {
-    dlogw ("The pipeline is not in PAUSED/PLAYING. The input may be ignored.");
+    ml_logw ("The pipeline is not in PAUSED/PLAYING. The input may be ignored.");
     ret = ML_ERROR_TRY_AGAIN;
   } else if (gret == GST_FLOW_EOS) {
-    dlogw ("THe pipeline is in EOS state. The input is ignored.");
+    ml_logw ("THe pipeline is in EOS state. The input is ignored.");
     ret = ML_ERROR_STREAMS_PIPE;
   }
@@ -953,7 +954,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name,
   int ret = ML_ERROR_NONE;
 
   if (h == NULL) {
-    dloge ("The argument switch handle is not valid.");
+    ml_loge ("The argument switch handle is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -961,12 +962,12 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name,
   *h = NULL;
 
   if (pipe == NULL) {
-    dloge ("The first argument, pipeline handle, is not valid.");
+    ml_loge ("The first argument, pipeline handle, is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (switch_name == NULL) {
-    dloge ("The second argument, switch name, is not valid.");
+    ml_loge ("The second argument, switch name, is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -974,7 +975,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name,
   elem = g_hash_table_lookup (p->namednodes, switch_name);
 
   if (elem == NULL) {
-    dloge ("There is no switch element named [%s] in the pipeline.",
+    ml_loge ("There is no switch element named [%s] in the pipeline.",
        switch_name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
@@ -982,7 +983,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name,
   if (elem->type != ML_PIPELINE_ELEMENT_SWITCH_INPUT &&
       elem->type != ML_PIPELINE_ELEMENT_SWITCH_OUTPUT) {
-    dloge
+    ml_loge
        ("There is an element named [%s] in the pipeline, but it is not an input/output switch",
        switch_name);
     ret = ML_ERROR_INVALID_PARAMETER;
@@ -1001,7 +1002,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name,
   else if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_OUTPUT)
     *type = ML_PIPELINE_SWITCH_OUTPUT_SELECTOR;
   else {
-    dloge ("Internal data of switch-handle [%s] is broken. It is fatal.",
+    ml_loge ("Internal data of switch-handle [%s] is broken. It is fatal.",
        elem->name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
@@ -1046,7 +1047,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name)
   handle_init (switch, swtc, h);
 
   if (pad_name == NULL) {
-    dloge ("The second argument, pad name, is not valid.");
+    ml_loge ("The second argument, pad name, is not valid.");
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
   }
@@ -1055,7 +1056,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name)
   active_name = gst_pad_get_name (active_pad);
 
   if (!g_strcmp0 (pad_name, active_name)) {
-    dlogi ("Switch is called, but there is no effective changes: %s->%s.",
+    ml_logi ("Switch is called, but there is no effective changes: %s->%s.",
        active_name, pad_name);
     g_free (active_name);
     gst_object_unref (active_pad);
@@ -1069,7 +1070,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name)
   new_pad = gst_element_get_static_pad (elem->element, pad_name);
   if (new_pad == NULL) {
     /* Not Found! */
-    dloge ("Cannot find the pad, [%s], from the switch, [%s].",
+    ml_loge ("Cannot find the pad, [%s], from the switch, [%s].",
        pad_name, elem->name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
@@ -1078,7 +1079,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name)
   g_object_set (G_OBJECT (elem->element), "active-pad", new_pad, NULL);
   gst_object_unref (new_pad);
 
-  dlogi ("Switched to [%s] successfully at switch [%s].", pad_name, elem->name);
+  ml_logi ("Switched to [%s] successfully at switch [%s].", pad_name, elem->name);
 
   handle_exit (h);
 }
@@ -1099,7 +1100,7 @@ ml_pipeline_switch_nodelist (ml_pipeline_switch_h h, char ***list)
   handle_init (switch, swtc, h);
 
   if (list == NULL) {
-    dloge ("The second argument, list, is not valid.");
+    ml_loge ("The second argument, list, is not valid.");
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
   }
@@ -1112,7 +1113,7 @@ ml_pipeline_switch_nodelist (ml_pipeline_switch_h h, char ***list)
   else if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_OUTPUT)
     it = gst_element_iterate_src_pads (elem->element);
   else {
-    dloge
+    ml_loge
        ("The element, [%s], is supposed to be input/output switch, but it is not. Internal data structure is broken.",
        elem->name);
     ret = ML_ERROR_STREAMS_PIPE;
@@ -1134,7 +1135,7 @@ ml_pipeline_switch_nodelist (ml_pipeline_switch_h h, char ***list)
         gst_iterator_resync (it);
         break;
       case GST_ITERATOR_ERROR:
-        dloge ("Cannot access the list of pad properly of a switch, [%s].",
+        ml_loge ("Cannot access the list of pad properly of a switch, [%s].",
            elem->name);
         ret = ML_ERROR_STREAMS_PIPE;
         break;
@@ -1159,7 +1160,7 @@ ml_pipeline_switch_nodelist (ml_pipeline_switch_h h, char ***list)
     g_list_free_full (dllist, g_free);  /* This frees all strings as well */
     g_free (list);
 
-    dloge
+    ml_loge
        ("Internal data inconsistency. This could be a bug in nnstreamer. Switch [%s].",
        elem->name);
     ret = ML_ERROR_STREAMS_PIPE;
@@ -1185,7 +1186,7 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name,
   int ret = ML_ERROR_NONE;
 
   if (h == NULL) {
-    dloge ("The argument valve handle is not valid.");
+    ml_loge ("The argument valve handle is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -1193,12 +1194,12 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name,
   *h = NULL;
 
   if (pipe == NULL) {
-    dloge ("The first argument, pipeline handle, is not valid.");
+    ml_loge ("The first argument, pipeline handle, is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
   if (valve_name == NULL) {
-    dloge ("The second argument, valve name, is not valid.");
+    ml_loge ("The second argument, valve name, is not valid.");
     return ML_ERROR_INVALID_PARAMETER;
   }
@@ -1206,13 +1207,13 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name,
   elem = g_hash_table_lookup (p->namednodes, valve_name);
 
   if (elem == NULL) {
-    dloge ("There is no valve element named [%s] in the pipeline.", valve_name);
+    ml_loge ("There is no valve element named [%s] in the pipeline.", valve_name);
     ret = ML_ERROR_INVALID_PARAMETER;
     goto unlock_return;
   }
 
   if (elem->type != ML_PIPELINE_ELEMENT_VALVE) {
-    dloge
+    ml_loge
        ("There is an element named [%s] in the pipeline, but it is not a valve",
        valve_name);
     ret = ML_ERROR_INVALID_PARAMETER;
@@ -1264,13 +1265,13 @@ ml_pipeline_valve_control (ml_pipeline_valve_h h, int drop)
 
   if ((drop != 0) == (current_val != FALSE)) {
     /* Nothing to do */
-    dlogi ("Valve is called, but there is no effective changes: %d->%d",
+    ml_logi ("Valve is called, but there is no effective changes: %d->%d",
        ! !current_val, ! !drop);
     goto unlock_return;
   }
 
   g_object_set (G_OBJECT (elem->element), "drop", ! !drop, NULL);
-  dlogi ("Valve is changed: %d->%d", ! !current_val, ! !drop);
+  ml_logi ("Valve is changed: %d->%d", ! !current_val, ! !drop);
 
   handle_exit (h);
 }
diff --git a/tizen-api/src/tizen-api-util.c b/tizen-api/src/tizen-api-util.c
index 7b37770..bf204b2 100644
--- a/tizen-api/src/tizen-api-util.c
+++ b/tizen-api/src/tizen-api-util.c
@@ -142,7 +142,7 @@ ml_util_get_tensor_size (const ml_tensor_info_s * info)
       tensor_size = 8;
       break;
     default:
-      dloge ("In the given param, tensor type is invalid.");
+      ml_loge ("In the given param, tensor type is invalid.");
       return 0;
   }
@@ -232,7 +232,7 @@ ml_util_allocate_tensors_data (const ml_tensors_info_s * info)
 
   data = g_new0 (ml_tensors_data_s, 1);
   if (!data) {
-    dloge ("Failed to allocate the memory block.");
+    ml_loge ("Failed to allocate the memory block.");
     ml_util_set_error (ML_ERROR_STREAMS_PIPE);
     return NULL;
   }
-- 
2.7.4
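
Note on using the new error codes: after this patch, ml_error_e values are plain negative errno-style integers (e.g., -EINVAL, -EAGAIN, -ESTRPIPE) rather than TIZEN_ERROR_* macros, so the same comparisons work on Android and Linux as well as Tizen. Below is a minimal caller-side sketch only; the header name nnstreamer.h, the pipeline string, and the ml_pipeline_destroy () teardown call are illustrative assumptions rather than something defined by this patch.

#include <nnstreamer.h>
#include <stdio.h>

int
main (void)
{
  ml_pipeline_h pipe;
  int status;

  /* Hypothetical pipeline description; any valid NNStreamer pipeline
   * string can be used here. */
  status = ml_pipeline_construct ("videotestsrc ! tensor_converter ! tensor_sink", &pipe);

  if (status == ML_ERROR_STREAMS_PIPE) {
    /* Maps to -ESTRPIPE: the GStreamer pipeline could not be created. */
    fprintf (stderr, "Cannot construct the pipeline.\n");
    return 1;
  } else if (status != ML_ERROR_NONE) {
    /* Other failures are negative errno-style values, e.g. -EINVAL. */
    fprintf (stderr, "Unexpected error: %d\n", status);
    return 1;
  }

  /* ml_pipeline_destroy () is assumed to be the matching teardown call. */
  ml_pipeline_destroy (pipe);
  return 0;
}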