Fix clang-format errors reported by TAOS-CI and clean up code style.
Signed-off-by: Jaeyun <jy1210.jung@samsung.com>
Priority: 10
- Regex: '<.*>'
Priority: 0
-IndentCaseLabels: false
+IndentCaseLabels: true
+IndentCaseBlocks: true
IndentFunctionDeclarationAfterType: true
IndentWidth: 2
IndentWrappedFunctionNames: false
#include <glib.h>
#include <gst/gstinfo.h>
#include <iostream>
-#include <nnstreamer_generated.h> // Generated by `flatc`.
+#include <nnstreamer_generated.h> /* Generated by `flatc`. */
#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_decoder.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
-void init_fbd (void) __attribute__((constructor));
-void fini_fbd (void) __attribute__((destructor));
+void init_fbd (void) __attribute__ ((constructor));
+void fini_fbd (void) __attribute__ ((destructor));
#ifdef __cplusplus
}
#endif /* __cplusplus */
static gchar decoder_subplugin_flatbuf[] = "flatbuf";
/** @brief flatbuffer tensordec-plugin GstTensorDecoderDef instance */
-static GstTensorDecoderDef flatBuf = {.modename = decoder_subplugin_flatbuf,
+static GstTensorDecoderDef flatBuf = { .modename = decoder_subplugin_flatbuf,
.init = fbd_init,
.exit = fbd_exit,
.setOption = fbd_setOption,
* This is an example of a callback type custom mode.
* @code
* // Define custom callback function
- * int * tensor_decoder_custom_cb (const GstTensorMemory *input,
- * const GstTensorsConfig *config, void * data, GstBuffer * out_buf) {
+ * int tensor_decoder_custom_cb (const GstTensorMemory *input,
+ * const GstTensorsConfig *config, void *data, GstBuffer *out_buf) {
* // Write a code to convert tensors to flexbuffers.
* }
*
* @endcode
*/
-
+#include <flatbuffers/flexbuffers.h>
#include <glib.h>
#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_decoder.h>
#include "tensordecutil.h"
-#include <flatbuffers/flexbuffers.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
-void init_flxd (void) __attribute__((constructor));
-void fini_flxd (void) __attribute__((destructor));
+void init_flxd (void) __attribute__ ((constructor));
+void fini_flxd (void) __attribute__ ((destructor));
#ifdef __cplusplus
}
#endif /* __cplusplus */
fbb.Int ("rate_n", config->rate_n);
fbb.Int ("rate_d", config->rate_d);
for (i = 0; i < num_tensors; i++) {
- gchar * tensor_key = g_strdup_printf ("tensor_%d", i);
- gchar * tensor_name = NULL;
+ gchar *tensor_key = g_strdup_printf ("tensor_%d", i);
+ gchar *tensor_name = NULL;
if (config->info.info[i].name == NULL) {
tensor_name = g_strdup ("");
fbb += tensor_name;
fbb += type;
fbb.Vector (config->info.info[i].dimension, NNS_TENSOR_RANK_LIMIT);
- fbb.Blob (input[i].data, (size_t) input[i].size);
+ fbb.Blob (input[i].data, input[i].size);
});
g_free (tensor_key);
g_free (tensor_name);
static gchar decoder_subplugin_flexbuf[] = "flexbuf";
/** @brief flexbuffer tensordec-plugin GstTensorDecoderDef instance */
-static GstTensorDecoderDef flexBuf = {.modename = decoder_subplugin_flexbuf,
+static GstTensorDecoderDef flexBuf = { .modename = decoder_subplugin_flexbuf,
.init = flxd_init,
.exit = flxd_exit,
.setOption = flxd_setOption,
#include <tensorflow/lite/delegates/gpu/delegate.h>
#endif
-#if !defined (TFLITE_SUBPLUGIN_NAME)
+#if !defined(TFLITE_SUBPLUGIN_NAME)
#warning "The sub-plugin name for tensorflow-lite is not defined."
#define TFLITE_SUBPLUGIN_NAME "tensorflow-lite"
#endif
int getTensorDim (int tensor_idx, tensor_dim dim);
int setTensorProp (const std::vector<int> &tensor_idx_list, GstTensorsInfo *tensorMeta);
- TfLiteDelegate *delegate_
- = nullptr; /**< The delegate for tflite interpreter */
+ TfLiteDelegate *delegate_ = nullptr; /**< The delegate for tflite interpreter */
#ifdef TFLITE_NNAPI_DELEGATE_SUPPORTED
- std::unique_ptr<tflite::StatefulNnApiDelegate>
- stateful_nnapi_delegate; /**< The pointer of NNAPI delegate */
+ std::unique_ptr<tflite::StatefulNnApiDelegate> stateful_nnapi_delegate; /**< The pointer of NNAPI delegate */
#endif
#ifdef TFLITE_GPU_DELEGATE_SUPPORTED
- std::unique_ptr<TfLiteDelegate> gpu_delegate; /**< The pointer of GPU delegate
- */
+ std::unique_ptr<TfLiteDelegate> gpu_delegate; /**< The pointer of GPU delegate */
#endif
};
void setAccelerator (const char *accelerators, tflite_delegate_e d);
};
-extern "C" { /* accessed by android api */
-void init_filter_tflite (void) __attribute__((constructor));
-void fini_filter_tflite (void) __attribute__((destructor));
+extern "C" {
+void init_filter_tflite (void) __attribute__ ((constructor));
+void fini_filter_tflite (void) __attribute__ ((destructor));
}
/**
tflite_internal_stats.total_invoke_num += 1;
#if (DBG)
- g_message ("Invoke() is finished: %" G_GINT64_FORMAT "ms, model path: %s", (stop_time - start_time) / 1000, getModelPath());
- g_message ("%" G_GINT64_FORMAT " invoke average %" G_GINT64_FORMAT ", total overhead %" G_GINT64_FORMAT,
+ ml_logi ("Invoke() is finished: %" G_GINT64_FORMAT "ms, model path: %s",
+ (stop_time - start_time) / 1000, getModelPath ());
+ ml_logi ("%" G_GINT64_FORMAT " invoke average %" G_GINT64_FORMAT
+ ", total overhead %" G_GINT64_FORMAT,
tflite_internal_stats.total_invoke_num,
(tflite_internal_stats.total_invoke_latency / tflite_internal_stats.total_invoke_num),
tflite_internal_stats.total_overhead_latency);
TFLiteInterpreter::loadModel (int num_threads, tflite_delegate_e delegate)
{
#if (DBG)
- gint64 start_time = g_get_monotonic_time ();
+ gint64 start_time, stop_time;
+ start_time = g_get_monotonic_time ();
#endif
model = tflite::FlatBufferModel::BuildFromFile (model_path);
ml_loge ("Failed to mmap model\n");
return -1;
}
- /**If got any trouble at model, active below code. It'll be help to analyze.
- * model->error_reporter (); */
+
+ /**
+ * If you run into any trouble with the model, activate the code below; it will help you analyze the issue.
+ * model->error_reporter ();
+ */
interpreter = nullptr;
}
#if (DBG)
- gint64 stop_time = g_get_monotonic_time ();
- g_message ("Model is loaded: %" G_GINT64_FORMAT, (stop_time - start_time));
+ stop_time = g_get_monotonic_time ();
+ ml_logi ("Model is loaded: %" G_GINT64_FORMAT, (stop_time - start_time));
#endif
return 0;
}
#if (DBG)
gchar *dim_str = gst_tensor_get_dimension_string (tensorMeta->info[i].dimension);
- g_message ("tensorMeta[%d] >> name[%s], type[%d], dim[%s]", i,
+ ml_logi ("tensorMeta[%d] >> name[%s], type[%d], dim[%s]", i,
tensorMeta->info[i].name, tensorMeta->info[i].type, dim_str);
g_free (dim_str);
#endif
static gchar filter_subplugin_tensorflow_lite[] = TFLITE_SUBPLUGIN_NAME;
static GstTensorFilterFramework NNS_support_tensorflow_lite
- = {.version = GST_TENSOR_FILTER_FRAMEWORK_V0,
+ = { .version = GST_TENSOR_FILTER_FRAMEWORK_V0,
.open = tflite_open,
.close = tflite_close,
- {.v0 = {
- .name = filter_subplugin_tensorflow_lite,
- .allow_in_place = FALSE, /** @todo: support this to optimize performance later. */
- .allocate_in_invoke = FALSE,
- .run_without_model = FALSE,
- .verify_model_path = TRUE,
- .statistics = &tflite_internal_stats,
- .invoke_NN = tflite_invoke,
- .getInputDimension = tflite_getInputDim,
- .getOutputDimension = tflite_getOutputDim,
- .setInputDimension = tflite_setInputDim,
- .destroyNotify = nullptr,
- .reloadModel = tflite_reloadModel,
- .checkAvailability = tflite_checkAvailability,
- .allocateInInvoke = nullptr,
- } } };
+ { .v0 = {
+ .name = filter_subplugin_tensorflow_lite,
+ .allow_in_place = FALSE, /** @todo: support this to optimize performance later. */
+ .allocate_in_invoke = FALSE,
+ .run_without_model = FALSE,
+ .verify_model_path = TRUE,
+ .statistics = &tflite_internal_stats,
+ .invoke_NN = tflite_invoke,
+ .getInputDimension = tflite_getInputDim,
+ .getOutputDimension = tflite_getOutputDim,
+ .setInputDimension = tflite_setInputDim,
+ .destroyNotify = nullptr,
+ .reloadModel = tflite_reloadModel,
+ .checkAvailability = tflite_checkAvailability,
+ .allocateInInvoke = nullptr,
+ } } };
/** @brief Initialize this object for tensor_filter subplugin runtime register */
void
-/** * NNStreamer Configurations / Environmental Variable Manager.
+/**
+ * NNStreamer Configurations / Environmental Variable Manager.
* Copyright (C) 2018 MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This library is free software; you can redistribute it and/or
+/* SPDX-License-Identifier: LGPL-2.1-only */
/**
- * SPDX-License-Identifier: LGPL-2.1-only
* Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved.
*
* @file nnstreamer_internal.h
*/
TEST (commonTensorsInfo, parsingDimInvalidParam0_n)
{
- const gchar * dim_str = "1:2:3:4";
+ const gchar *dim_str = "1:2:3:4";
EXPECT_EQ (0U, gst_tensors_info_parse_dimensions_string (NULL, dim_str));
}
*/
TEST (commonTensorsInfo, parsingTypeInvalidParam0_n)
{
- const gchar * dim_str = "uint8";
+ const gchar *dim_str = "uint8";
EXPECT_EQ (0U, gst_tensors_info_parse_types_string (NULL, dim_str));
}
*/
TEST (commonTensorsInfo, parsingNameInvalidParam0_n)
{
- const gchar * dim_str = "tname";
+ const gchar *dim_str = "tname";
EXPECT_EQ (0U, gst_tensors_info_parse_names_string (NULL, dim_str));
}
gboolean is_tensor;
/* Create a nnstreamer pipeline */
- pipeline = g_strdup_printf (
- "fakesrc name=fsrc ! fakesink name=fsink");
+ pipeline = g_strdup_printf ("fakesrc name=fsrc ! fakesink name=fsink");
gstpipe = gst_parse_launch (pipeline, NULL);
EXPECT_NE (pipeline, nullptr);
*/\r
\r
#include <gtest/gtest.h>\r
+#include <flatbuffers/flexbuffers.h>\r
#include <glib.h>\r
#include <gst/gst.h>\r
+#include <nnstreamer_plugin_api_decoder.h>\r
#include <tensor_common.h>\r
#include <unittest_util.h>\r
#include <tensor_decoder_custom.h>\r
-#include <flatbuffers/flexbuffers.h>\r
-#include <nnstreamer_plugin_api_decoder.h>\r
\r
#define TEST_TIMEOUT_MS (1000U)\r
\r
/**\r
* @brief custom callback function\r
*/\r
-int tensor_decoder_custom_cb (const GstTensorMemory *input,\r
+static int\r
+tensor_decoder_custom_cb (const GstTensorMemory *input,\r
const GstTensorsConfig *config, void * data, GstBuffer *out_buf)\r
{\r
GstMapInfo out_info;\r
fbb.Int ("rate_n", config->rate_n);\r
fbb.Int ("rate_d", config->rate_d);\r
for (i = 0; i < num_tensors; i++) {\r
- gchar * tensor_key = g_strdup_printf ("tensor_%d", i);\r
- gchar * tensor_name = NULL;\r
+ gchar *tensor_key = g_strdup_printf ("tensor_%d", i);\r
+ gchar *tensor_name = NULL;\r
\r
if (config->info.info[i].name == NULL) {\r
tensor_name = g_strdup ("");\r
}\r
tensor_type type = config->info.info[i].type;\r
\r
- fbb.Vector (tensor_key, [&] () {\r
+ fbb.Vector (tensor_key, [&]() {\r
fbb += tensor_name;\r
fbb += type;\r
fbb.Vector (config->info.info[i].dimension, NNS_TENSOR_RANK_LIMIT);\r
- fbb.Blob (input[i].data, (size_t) input[i].size);\r
+ fbb.Blob (input[i].data, input[i].size);\r
});\r
g_free (tensor_key);\r
g_free (tensor_name);\r
\r
/** @brief tensordec-plugin's decode callback */\r
static GstFlowReturn\r
-decsub_decode (void **pdata, const GstTensorsConfig * config,\r
- const GstTensorMemory * input, GstBuffer * outbuf)\r
+decsub_decode (void **pdata, const GstTensorsConfig *config,\r
+ const GstTensorMemory *input, GstBuffer *outbuf)\r
{\r
return GST_FLOW_OK;\r
}\r
GstTensorDecoderDef *sub = g_try_new0 (GstTensorDecoderDef, 1);\r
g_assert (sub);\r
\r
- sub->modename = (char *) g_strdup (name);\r
+ sub->modename = g_strdup (name);\r
sub->init = decsub_init;\r
sub->getOutCaps = decsub_getOutCaps;\r
sub->decode = decsub_decode;\r
static void\r
free_default_decoder (GstTensorDecoderDef *sub)\r
{\r
- g_free ((char *)sub->modename);\r
+ g_free (sub->modename);\r
g_free (sub);\r
}\r
\r