/**
* SECTION:element-tensor_aggregator
*
- * @file tensor_aggregator.c
+ * @file gsttensor_aggregator.c
* @date 29 August 2018
* @brief GStreamer plugin to aggregate tensor stream
* @see https://github.com/nnstreamer/nnstreamer
#endif
#include <string.h>
-#include "tensor_aggregator.h"
+#include "gsttensor_aggregator.h"
#include "tensor_meta.h"
#include <nnstreamer_util.h>
*/
/**
- * @file tensor_aggregator.h
+ * @file gsttensor_aggregator.h
* @date 29 August 2018
* @brief GStreamer plugin to aggregate tensor stream
* @see https://github.com/nnstreamer/nnstreamer
*/
/**
- * @file tensor_converter.c
+ * @file gsttensor_converter.c
* @date 26 Mar 2018
* @brief GStreamer plugin to convert media types to tensors (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
#endif
#include <string.h>
-#include "tensor_converter.h"
+#include "gsttensor_converter.h"
#include "tensor_meta.h"
#ifdef NO_VIDEO
-#include "converter-media-info-no-video.h"
+#include "gsttensor_converter_media_no_video.h"
#else
-#include "converter-media-info-video.h"
+#include "gsttensor_converter_media_info_video.h"
#endif
#ifdef NO_AUDIO
-#include "converter-media-info-no-audio.h"
+#include "gsttensor_converter_media_no_audio.h"
#else
-#include "converter-media-info-audio.h"
+#include "gsttensor_converter_media_info_audio.h"
#endif
#include <nnstreamer_log.h>
#include <nnstreamer_subplugin.h>
*/
/**
- * @file tensor_converter.h
+ * @file gsttensor_converter.h
* @date 26 Mar 2018
* @brief GStreamer plugin to convert media types to tensors (as a filter for other general neural network filters)
*
*/
/**
- * @file converter-media-info-audio.h
+ * @file gsttensor_converter_media_info_audio.h
* @date 26 Mar 2019
* @brief Define collection of media type and functions to parse media info for audio support
* @see https://github.com/nnstreamer/nnstreamer
* @bug No known bugs except for NYI items
*/
-#ifndef __CONVERTER_MEDIA_INFO_AUDIO_H__
-#define __CONVERTER_MEDIA_INFO_AUDIO_H__
+#ifndef __GST_TENSOR_CONVERTER_MEDIA_INFO_AUDIO_H__
+#define __GST_TENSOR_CONVERTER_MEDIA_INFO_AUDIO_H__
#ifdef NO_AUDIO
#error This header is not supported if NO_AUDIO is defined
gst_caps_append (caps, gst_caps_from_string (AUDIO_CAPS_STR))
#define is_audio_supported(...) TRUE
-#endif /* __CONVERTER_MEDIA_INFO_AUDIO_H__ */
+#endif /* __GST_TENSOR_CONVERTER_MEDIA_INFO_AUDIO_H__ */
*/
/**
- * @file converter-media-info-video.h
+ * @file gsttensor_converter_media_info_video.h
* @date 26 Mar 2019
* @brief Define collection of media type and functions to parse media info for video support
* @see https://github.com/nnstreamer/nnstreamer
* @bug No known bugs except for NYI items
*/
-#ifndef __CONVERTER_MEDIA_INFO_VIDEO_H__
-#define __CONVERTER_MEDIA_INFO_VIDEO_H__
+#ifndef __GST_TENSOR_CONVERTER_MEDIA_INFO_VIDEO_H__
+#define __GST_TENSOR_CONVERTER_MEDIA_INFO_VIDEO_H__
#ifdef NO_VIDEO
#error This header is not supported if NO_VIDEO is defined
gst_caps_append (caps, gst_caps_from_string (VIDEO_CAPS_STR))
#define is_video_supported(...) TRUE
-#endif /* __CONVERTER_MEDIA_INFO_VIDEO_H__ */
+#endif /* __GST_TENSOR_CONVERTER_MEDIA_INFO_VIDEO_H__ */
*/
/**
- * @file converter-media-info-no-audio.h
+ * @file gsttensor_converter_media_no_audio.h
* @date 26 Mar 2019
* @brief Define collection of media type and functions to parse media info for audio if there is no audio support
* @see https://github.com/nnstreamer/nnstreamer
* @bug No known bugs except for NYI items
*/
-#ifndef __CONVERTER_MEDIA_INFO_NO_AUDIO_H__
-#define __CONVERTER_MEDIA_INFO_NO_AUDIO_H__
+#ifndef __GST_TENSOR_CONVERTER_MEDIA_NO_AUDIO_H__
+#define __GST_TENSOR_CONVERTER_MEDIA_NO_AUDIO_H__
#ifndef NO_AUDIO
#error This header is not supported if NO_AUDIO is not defined
#define GST_AUDIO_INFO_BPF(...) 0
-#endif /* __CONVERTER_MEDIA_INFO_NO_AUDIO_H__ */
+#endif /* __GST_TENSOR_CONVERTER_MEDIA_NO_AUDIO_H__ */
*/
/**
- * @file converter-media-info-no-video.h
+ * @file gsttensor_converter_media_no_video.h
* @date 26 Mar 2019
* @brief Define collection of media type and functions to parse media info for video if there is no video support
* @see https://github.com/nnstreamer/nnstreamer
* @bug No known bugs except for NYI items
*/
-#ifndef __CONVERTER_MEDIA_INFO_NO_VIDEO_H__
-#define __CONVERTER_MEDIA_INFO_NO_VIDEO_H__
+#ifndef __GST_TENSOR_CONVERTER_MEDIA_NO_VIDEO_H__
+#define __GST_TENSOR_CONVERTER_MEDIA_NO_VIDEO_H__
#ifndef NO_VIDEO
#error This header is not supported if NO_VIDEO is not defined
#define GST_VIDEO_INFO_FPS_N(...) 0
#define GST_VIDEO_INFO_FPS_D(...) 1
-#endif /* __CONVERTER_MEDIA_INFO_NO_VIDEO_H__ */
+#endif /* __GST_TENSOR_CONVERTER_MEDIA_NO_VIDEO_H__ */
/**
* Copyright (C) 2021 Samsung Electronics Co., Ltd.
*
- * @file tensor_crop.c
+ * @file gsttensor_crop.c
* @date 10 May 2021
* @brief GStreamer element to crop the regions of incoming tensor
* @see https://github.com/nnstreamer/nnstreamer
#include <string.h>
#include <nnstreamer_util.h>
-#include "tensor_crop.h"
+#include "gsttensor_crop.h"
#include "tensor_data.h"
/**
/**
* Copyright (C) 2021 Samsung Electronics Co., Ltd.
*
- * @file tensor_crop.c
+ * @file gsttensor_crop.h
* @date 10 May 2021
* @brief GStreamer element to crop the regions of incoming tensor
* @see https://github.com/nnstreamer/nnstreamer
*
*/
/**
- * @file tensordec.c
+ * @file gsttensor_decoder.c
* @date 26 Mar 2018
* @brief GStreamer plugin to convert tensors (as a filter for other general neural network filters) to other media types
* @see https://github.com/nnstreamer/nnstreamer
#endif
#include <string.h>
-#include "tensordec.h"
+#include "gsttensor_decoder.h"
/**
* @brief Macro for debug mode.
GST_STATIC_CAPS ("ANY"));
#define gst_tensordec_parent_class parent_class
-G_DEFINE_TYPE (GstTensorDec, gst_tensordec, GST_TYPE_BASE_TRANSFORM);
+G_DEFINE_TYPE (GstTensorDecoder, gst_tensordec, GST_TYPE_BASE_TRANSFORM);
/** GObject vmethod implementations */
static void gst_tensordec_set_property (GObject * object, guint prop_id,
* @return caps for media type
*/
static GstCaps *
-gst_tensordec_media_caps_from_tensor (GstTensorDec * self,
+gst_tensordec_media_caps_from_tensor (GstTensorDecoder * self,
const GstTensorsConfig * config)
{
g_return_val_if_fail (config != NULL, NULL);
* @param structure structure to be interpreted
*/
static GstCaps *
-gst_tensordec_media_caps_from_structure (GstTensorDec * self,
+gst_tensordec_media_caps_from_structure (GstTensorDecoder * self,
const GstStructure * structure)
{
GstTensorsConfig config;
* @param t_info newly configured tensor metadata
*/
static gboolean
-gst_tensordec_check_consistency (GstTensorDec * self, GstTensorsConfig * config)
+gst_tensordec_check_consistency (GstTensorDecoder * self,
+ GstTensorsConfig * config)
{
g_return_val_if_fail (self != NULL, FALSE);
g_return_val_if_fail (config != NULL, FALSE);
* @brief initialize the tensordec's class
*/
static void
-gst_tensordec_class_init (GstTensorDecClass * klass)
+gst_tensordec_class_init (GstTensorDecoderClass * klass)
{
GObjectClass *gobject_class;
GstElementClass *gstelement_class;
* initialize instance structure
*/
static void
-gst_tensordec_init (GstTensorDec * self)
+gst_tensordec_init (GstTensorDecoder * self)
{
guint i;
* @retval FALSE if error. TRUE if OK (or SKIP)
*/
static gboolean
-gst_tensordec_process_plugin_options (GstTensorDec * self, guint opnum)
+gst_tensordec_process_plugin_options (GstTensorDecoder * self, guint opnum)
{
g_assert (opnum < TensorDecMaxOpNum); /* Internal logic error! */
if (self->decoder == NULL)
gst_tensordec_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
- GstTensorDec *self;
+ GstTensorDecoder *self;
self = GST_TENSOR_DECODER (object);
gst_tensordec_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
- GstTensorDec *self;
+ GstTensorDecoder *self;
self = GST_TENSOR_DECODER (object);
static void
gst_tensordec_class_finalize (GObject * object)
{
- GstTensorDec *self;
+ GstTensorDecoder *self;
guint i;
self = GST_TENSOR_DECODER (object);
* @brief Configure tensor metadata from sink caps
*/
static gboolean
-gst_tensordec_configure (GstTensorDec * self, const GstCaps * in_caps,
+gst_tensordec_configure (GstTensorDecoder * self, const GstCaps * in_caps,
const GstCaps * out_caps)
{
GstStructure *structure;
gst_tensordec_transform (GstBaseTransform * trans,
GstBuffer * inbuf, GstBuffer * outbuf)
{
- GstTensorDec *self;
+ GstTensorDecoder *self;
GstFlowReturn res;
self = GST_TENSOR_DECODER_CAST (trans);
gst_tensordec_transform_caps (GstBaseTransform * trans,
GstPadDirection direction, GstCaps * caps, GstCaps * filter)
{
- GstTensorDec *self;
+ GstTensorDecoder *self;
GstCaps *result;
self = GST_TENSOR_DECODER_CAST (trans);
gst_tensordec_fixate_caps (GstBaseTransform * trans,
GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
{
- GstTensorDec *self;
+ GstTensorDecoder *self;
GstCaps *supposed;
GstCaps *result;
gst_tensordec_set_caps (GstBaseTransform * trans,
GstCaps * incaps, GstCaps * outcaps)
{
- GstTensorDec *self = GST_TENSOR_DECODER_CAST (trans);
+ GstTensorDecoder *self = GST_TENSOR_DECODER_CAST (trans);
silent_debug_caps (self, incaps, "from incaps");
silent_debug_caps (self, outcaps, "from outcaps");
GstPadDirection direction, GstCaps * caps, gsize size,
GstCaps * othercaps, gsize * othersize)
{
- GstTensorDec *self;
+ GstTensorDecoder *self;
if (direction == GST_PAD_SRC)
return FALSE;
*
*/
/**
- * @file tensordec.h
+ * @file gsttensor_decoder.h
* @date 26 Mar 2018
* @brief GStreamer plugin to convert tensors to media types
*
*
*/
-#ifndef __GST_TENSORDEC_H__
-#define __GST_TENSORDEC_H__
+#ifndef __GST_TENSOR_DECODER_H__
+#define __GST_TENSOR_DECODER_H__
#include <gst/gst.h>
#include <gst/base/gstbasetransform.h>
#define GST_TYPE_TENSOR_DECODER \
(gst_tensordec_get_type())
#define GST_TENSOR_DECODER(obj) \
- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_TENSOR_DECODER,GstTensorDec))
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_TENSOR_DECODER,GstTensorDecoder))
#define GST_TENSOR_DECODER_CLASS(klass) \
- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_TENSOR_DECODER,GstTensorDecClass))
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_TENSOR_DECODER,GstTensorDecoderClass))
#define GST_IS_TENSOR_DECODER(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_TENSOR_DECODER))
#define GST_IS_TENSOR_DECODER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_TENSOR_DECODER))
-#define GST_TENSOR_DECODER_CAST(obj) ((GstTensorDec *)(obj))
+#define GST_TENSOR_DECODER_CAST(obj) ((GstTensorDecoder *)(obj))
-typedef struct _GstTensorDec GstTensorDec;
-typedef struct _GstTensorDecClass GstTensorDecClass;
+typedef struct _GstTensorDecoder GstTensorDecoder;
+typedef struct _GstTensorDecoderClass GstTensorDecoderClass;
typedef struct
{
tensor_decoder_custom func;
/**
* @brief Internal data structure for tensordec instances.
*/
-struct _GstTensorDec
+struct _GstTensorDecoder
{
GstBaseTransform element; /**< This is the parent object */
};
/**
- * @brief GstTensorDecClass inherits GstBaseTransformClass.
+ * @brief GstTensorDecoderClass inherits GstBaseTransformClass.
*
* Referring another child (sibiling), GstVideoFilter (abstract class) and
* its child (concrete class) GstVideoConverter.
- * Note that GstTensorDecClass is a concrete class; thus we need to look at both.
+ * Note that GstTensorDecoderClass is a concrete class; thus we need to look at both.
*/
-struct _GstTensorDecClass
+struct _GstTensorDecoderClass
{
GstBaseTransformClass parent_class; /**< Inherits GstBaseTransformClass */
};
G_END_DECLS
-#endif /* __GST_TENSORDEC_H__ */
+#endif /* __GST_TENSOR_DECODER_H__ */
*
*/
/**
- * @file gsttensordemux.c
+ * @file gsttensor_demux.c
* @date 03 July 2018
* @brief GStreamer plugin to demux tensors (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
#include <glib.h>
#include <nnstreamer_util.h>
-#include "gsttensordemux.h"
+#include "gsttensor_demux.h"
GST_DEBUG_CATEGORY_STATIC (gst_tensor_demux_debug);
#define GST_CAT_DEFAULT gst_tensor_demux_debug
*
*/
/**
- * @file gsttensordemux.h
+ * @file gsttensor_demux.h
* @date 03 July 2018
* @brief GStreamer plugin to demux tensors (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
* Copyright (C) 2020 MyungJoo Ham <myungjoo.ham@samsung.com>
*/
/**
- * @file gsttensorif.c
+ * @file gsttensor_if.c
* @date 08 April 2020
* @brief GStreamer plugin to control flow based on tensor values
* @see https://github.com/nnstreamer/nnstreamer
#include <nnstreamer_subplugin.h>
#include <nnstreamer_util.h>
-#include "gsttensorif.h"
+#include "gsttensor_if.h"
/**
* @brief Macro for debug mode.
* Copyright (C) 2020 MyungJoo Ham <myungjoo.ham@samsung.com>
*/
/**
- * @file gsttensorif.h
+ * @file gsttensor_if.h
* @date 08 April 2020
* @brief GStreamer plugin to control flow based on tensor values
* @see https://github.com/nnstreamer/nnstreamer
*
*/
/**
- * @file gsttensormerge.c
+ * @file gsttensor_merge.c
* @date 03 July 2018
* @brief GStreamer plugin to merge tensors (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
#include <glib.h>
#include <nnstreamer_util.h>
-#include "gsttensormerge.h"
+#include "gsttensor_merge.h"
GST_DEBUG_CATEGORY_STATIC (gst_tensor_merge_debug);
#define GST_CAT_DEFAULT gst_tensor_merge_debug
*
*/
/**
- * @file gsttensormerge.h
+ * @file gsttensor_merge.h
* @date 03 July 2018
* @brief GStreamer plugin to merge tensors (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
*
*/
/**
- * @file gsttensormux.c
+ * @file gsttensor_mux.c
* @date 03 July 2018
* @brief GStreamer plugin to mux tensors (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
#include <glib.h>
#include <nnstreamer_util.h>
-#include "gsttensormux.h"
+#include "gsttensor_mux.h"
GST_DEBUG_CATEGORY_STATIC (gst_tensor_mux_debug);
#define GST_CAT_DEFAULT gst_tensor_mux_debug
*
*/
/**
- * @file gsttensormux.h
+ * @file gsttensor_mux.h
* @date 03 July 2018
* @brief GStreamer plugin to mux tensors (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
*/
/**
- * @file gsttensorrate.c
+ * @file gsttensor_rate.c
* @date 24 Sep 2020
* @brief GStreamer plugin to adjust tensor rate
* @see https://github.com/nnstreamer/nnstreamer
#include <nnstreamer_log.h>
#include <nnstreamer_util.h>
-#include "gsttensorrate.h"
+#include "gsttensor_rate.h"
/**
* @brief Macro for debug mode.
* Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
*/
/**
- * @file gsttensorrate.h
+ * @file gsttensor_rate.h
* @date 24 Sep 2020
* @brief GStreamer plugin to adjust tensor rate
* @see https://github.com/nnstreamer/nnstreamer
*
*/
/**
- * @file tensor_repo.c
+ * @file gsttensor_repo.c
* @date 17 Nov 2018
* @brief tensor repo file for NNStreamer, the GStreamer plugin for neural networks
* @see https://github.com/nnstreamer/nnstreamer
*
*/
-#include "tensor_repo.h"
+#include "gsttensor_repo.h"
#include <nnstreamer_util.h>
#ifndef DBG
*
*/
/**
- * @file tensor_repo.h
+ * @file gsttensor_repo.h
* @date 17 Nov 2018
* @brief tensor repo header file for NNStreamer, the GStreamer plugin for neural networks
* @see https://github.com/nnstreamer/nnstreamer
*
 * Set element to handle tensor repo
*
- * @file tensor_reposink.c
+ * @file gsttensor_reposink.c
* @date 19 Nov 2018
* @brief GStreamer plugin to handle tensor repository
* @see https://github.com/nnstreamer/nnstreamer
#endif
#include <nnstreamer_util.h>
-#include "tensor_repo.h"
-#include "tensor_reposink.h"
+#include "gsttensor_repo.h"
+#include "gsttensor_reposink.h"
GST_DEBUG_CATEGORY_STATIC (gst_tensor_reposink_debug);
#define GST_CAT_DEFAULT gst_tensor_reposink_debug
*/
/**
- * @file tensor_reposink.h
+ * @file gsttensor_reposink.h
* @date 19 Nov 2018
* @brief GStreamer plugin to handle tensor repository
* @see https://github.com/nnstreamer/nnstreamer
*
 * Pop element to handle tensor repo
*
- * @file tensor_reposrc.c
+ * @file gsttensor_reposrc.c
* @date 19 Nov 2018
* @brief GStreamer plugin to handle tensor repository
* @see https://github.com/nnstreamer/nnstreamer
#endif
#include <string.h>
-#include "tensor_repo.h"
-#include "tensor_reposrc.h"
+#include "gsttensor_repo.h"
+#include "gsttensor_reposrc.h"
GST_DEBUG_CATEGORY_STATIC (gst_tensor_reposrc_debug);
#define GST_CAT_DEFAULT gst_tensor_reposrc_debug
*/
/**
- * @file tensor_reposrc.h
+ * @file gsttensor_reposrc.h
* @date 19 Nov 2018
* @brief GStreamer plugin to handle tensor repository
* @see https://github.com/nnstreamer/nnstreamer
*
* Sink element to handle tensor stream
*
- * @file tensor_sink.c
+ * @file gsttensor_sink.c
* @date 15 June 2018
* @brief GStreamer plugin to handle tensor stream
* @see https://github.com/nnstreamer/nnstreamer
#include <config.h>
#endif
-#include "tensor_sink.h"
+#include "gsttensor_sink.h"
/**
* @brief Macro for debug mode.
*/
/**
- * @file tensor_sink.h
+ * @file gsttensor_sink.h
* @date 15 June 2018
* @brief GStreamer plugin to handle tensor stream
* @see https://github.com/nnstreamer/nnstreamer
/**
* Copyright (C) 2021 Samsung Electronics Co., Ltd.
*
- * @file tensor_sparse_dec.c
+ * @file gsttensor_sparsedec.c
* @date 27 Jul 2021
* @brief GStreamer element to decode sparse tensors into dense tensors
* @see https://github.com/nnstreamer/nnstreamer
#include <string.h>
#include <nnstreamer_util.h>
-#include "tensor_sparse_dec.h"
+#include "gsttensor_sparsedec.h"
/**
* @brief Macro for debug mode.
/**
* Copyright (C) 2021 Samsung Electronics Co., Ltd.
*
- * @file tensor_sparse_dec.h
+ * @file gsttensor_sparsedec.h
* @date 27 Jul 2021
* @brief GStreamer element to decode sparse tensors into dense tensors
* @see https://github.com/nnstreamer/nnstreamer
#include <gst/gst.h>
#include <tensor_common.h>
-#include "tensor_sparse_util.h"
+#include "gsttensor_sparseutil.h"
G_BEGIN_DECLS
/**
* Copyright (C) 2021 Samsung Electronics Co., Ltd.
*
- * @file tensor_sparse_enc.c
+ * @file gsttensor_sparseenc.c
* @date 27 Jul 2021
* @brief GStreamer element to encode dense tensors into sparse tensors
* @see https://github.com/nnstreamer/nnstreamer
#include <string.h>
#include <nnstreamer_util.h>
-#include "tensor_sparse_enc.h"
+#include "gsttensor_sparseenc.h"
/**
* @brief Macro for debug mode.
/**
* Copyright (C) 2021 Samsung Electronics Co., Ltd.
*
- * @file tensor_sparse_enc.h
+ * @file gsttensor_sparseenc.h
* @date 27 Jul 2021
* @brief GStreamer element to encode sparse tensors into dense tensors
* @see https://github.com/nnstreamer/nnstreamer
#include <gst/gst.h>
#include <tensor_common.h>
-#include "tensor_sparse_util.h"
+#include "gsttensor_sparseutil.h"
G_BEGIN_DECLS
* Copyright (C) 2021 Yongjoo Ahn <yongjoo1.ahn@samsung.com>
*/
/**
- * @file tensor_sparse_util.c
+ * @file gsttensor_sparseutil.c
* @date 27 Jul 2021
* @brief Util functions for tensor_sparse encoder and decoder.
* @see https://github.com/nnstreamer/nnstreamer
#include <string.h>
#include <tensor_common.h>
#include <tensor_data.h>
-#include "tensor_sparse_util.h"
+#include "gsttensor_sparseutil.h"
/**
* @brief Make dense tensor with input sparse tensor.
* Copyright (C) 2021 Yongjoo Ahn <yongjoo1.ahn@samsung.com>
*/
/**
- * @file tensor_sparse_util.h
+ * @file gsttensor_sparseutil.h
* @date 06 Jul 2021
* @brief Util functions for tensor_sparse encoder and decoder.
* @see https://github.com/nnstreamer/nnstreamer
*
*/
/**
- * @file gsttensorsplit.c
+ * @file gsttensor_split.c
* @date 27 Aug 2018
* @brief GStreamer plugin to split tensor (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
#include <gst/gst.h>
#include <glib.h>
-#include "gsttensorsplit.h"
+#include "gsttensor_split.h"
#include <tensor_common.h>
#include <nnstreamer_util.h>
*
*/
/**
- * @file gsttensorsplit.h
+ * @file gsttensor_split.h
* @date 27 Aug 2018
* @brief GStreamer plugin to split tensor (as a filter for other general neural network filters)
* @see https://github.com/nnstreamer/nnstreamer
*
*/
/**
- * @file tensor_src_iio.c
+ * @file gsttensor_srciio.c
* @date 27 Feb 2019
* @brief GStreamer plugin to capture sensor data as tensor(s)
* @see http://github.com/nnstreamer/nnstreamer
#include <errno.h>
#include <nnstreamer_util.h>
-#include "tensor_src_iio.h"
+#include "gsttensor_srciio.h"
/**
* @brief Macro for debug mode.
*/
/**
- * @file tensor_src_iio.h
+ * @file gsttensor_srciio.h
* @date 26 Feb 2019
* @brief GStreamer plugin to support linux IIO as tensor(s)
* @see https://github.com/nnstreamer/nnstreamer
*
*/
/**
- * @file tensor_transform.c
+ * @file gsttensor_transform.c
* @date 10 Jul 2018
* @brief GStreamer plugin to transform tensor dimension or type
* @see https://github.com/nnstreamer/nnstreamer
#include <math.h>
#include <nnstreamer_log.h>
#include <nnstreamer_util.h>
-#include "tensor_transform.h"
+#include "gsttensor_transform.h"
#ifdef HAVE_ORC
-#include "transform-orc.h"
+#include "nnstreamer-orc.h"
#endif
/**
*
*/
/**
- * @file tensor_transform.h
+ * @file gsttensor_transform.h
* @date 10 Jul 2018
* @brief GStreamer plugin to transform tensor dimension or type
* @see https://github.com/nnstreamer/nnstreamer
--- /dev/null
+tensor_element_sources = [
+ 'gsttensor_aggregator.c',
+ 'gsttensor_converter.c',
+ 'gsttensor_crop.c',
+ 'gsttensor_decoder.c',
+ 'gsttensor_demux.c',
+ 'gsttensor_if.c',
+ 'gsttensor_merge.c',
+ 'gsttensor_mux.c',
+ 'gsttensor_rate.c',
+ 'gsttensor_repo.c',
+ 'gsttensor_reposink.c',
+ 'gsttensor_reposrc.c',
+ 'gsttensor_sink.c',
+ 'gsttensor_sparsedec.c',
+ 'gsttensor_sparseenc.c',
+ 'gsttensor_sparseutil.c',
+ 'gsttensor_split.c',
+]
+
+# gsttensorsrc
+tensor_src_sources = []
+gst18_dep = dependency('gstreamer-' + gst_api_verision, version : '>=1.8', required : false)
+if gst18_dep.found()
+ tensor_src_sources += 'gsttensor_srciio.c'
+else
+ message('tensor_src_iio requires GStreamer >= 1.8. Skipping it')
+endif
+
+foreach s : tensor_src_sources
+ if build_platform != 'macos'
+ nnstreamer_sources += join_paths(meson.current_source_dir(), s)
+ endif
+endforeach
+
+# gsttensortransform
+if orcc_support_is_available
+ # tensor_transform does not need to generate -dist files.
+ orcsrc = 'nnstreamer-orc'
+
+ orc_h = custom_target(orcsrc + '.h',
+ input: orcsrc + '.orc',
+ output: orcsrc + '.h',
+ command: orcc_support_orcc_args + ['--header', '-o', '@OUTPUT@', '@INPUT@'])
+ orc_c_org = custom_target(orcsrc + '.c.in',
+ input: orcsrc + '.orc',
+ output: orcsrc + '.c.in',
+ command: orcc_support_orcc_args + ['--implementation', '-o', '@OUTPUT@', '@INPUT@'])
+
+ # ORC generated codes incur a few warnings. Ignore warnings of generated files.
+ script = find_program('ignore_warning.sh')
+ orc_c = custom_target(orcsrc + '.c',
+ input: orc_c_org,
+ output: orcsrc + '.c',
+ command: [ script, '@INPUT@', '@OUTPUT@' ])
+
+ nnstreamer_sources += [orc_c, orc_h]
+ nnstreamer_internal_deps += declare_dependency(sources: orc_h)
+endif
+tensor_element_sources += 'gsttensor_transform.c'
+
+foreach s : tensor_element_sources
+ nnstreamer_sources += join_paths(meson.current_source_dir(), s)
+endforeach
# Add plugins
nnst_plugins = [
- 'tensor_aggregator',
- 'tensor_converter',
- 'tensor_crop',
- 'tensor_decoder',
- 'tensor_demux',
- 'tensor_merge',
- 'tensor_mux',
- 'tensor_sink',
- 'tensor_source',
- 'tensor_sparse',
- 'tensor_split',
- 'tensor_transform',
'tensor_filter',
- 'tensor_repo',
- 'tensor_if',
- 'tensor_rate',
- 'tensor_query'
+ 'tensor_query',
+ 'elements'
]
foreach p : nnst_plugins
#include <gst/gst.h>
-#include <tensor_aggregator/tensor_aggregator.h>
-#include <tensor_converter/tensor_converter.h>
-#include <tensor_crop/tensor_crop.h>
-#include <tensor_decoder/tensordec.h>
-#include <tensor_demux/gsttensordemux.h>
-#include <tensor_filter/tensor_filter.h>
-#include <tensor_merge/gsttensormerge.h>
-#include <tensor_mux/gsttensormux.h>
-#include <tensor_repo/tensor_reposink.h>
-#include <tensor_repo/tensor_reposrc.h>
-#include <tensor_sink/tensor_sink.h>
+#include <elements/gsttensor_aggregator.h>
+#include <elements/gsttensor_converter.h>
+#include <elements/gsttensor_crop.h>
+#include <elements/gsttensor_decoder.h>
+#include <elements/gsttensor_demux.h>
+#include <elements/gsttensor_if.h>
+#include <elements/gsttensor_merge.h>
+#include <elements/gsttensor_mux.h>
+#include <elements/gsttensor_rate.h>
+#include <elements/gsttensor_reposink.h>
+#include <elements/gsttensor_reposrc.h>
+#include <elements/gsttensor_sink.h>
+#include <elements/gsttensor_sparsedec.h>
+#include <elements/gsttensor_sparseenc.h>
+#include <elements/gsttensor_split.h>
+#include <elements/gsttensor_transform.h>
+
#if defined(__gnu_linux__) && !defined(__ANDROID__)
-#include <tensor_source/tensor_src_iio.h>
+#include <elements/gsttensor_srciio.h>
#endif /* __gnu_linux__ && !__ANDROID__ */
-#include <tensor_sparse/tensor_sparse_enc.h>
-#include <tensor_sparse/tensor_sparse_dec.h>
-#include <tensor_split/gsttensorsplit.h>
-#include <tensor_transform/tensor_transform.h>
-#include <tensor_if/gsttensorif.h>
-#include <tensor_rate/gsttensorrate.h>
+
+#include <tensor_filter/tensor_filter.h>
#include <tensor_query/tensor_query_serversrc.h>
#include <tensor_query/tensor_query_serversink.h>
#include <tensor_query/tensor_query_client.h>
+++ /dev/null
----
-title: tensor_aggregator
-...
-
-# NNStreamer::tensor\_aggregator
-
-## Supported features
-
-GstTensorAggregator is a plugin to aggregate the tensor using GstAdapter.
-
-This plugin handles the buffer with the unit **frame**.
-Each incoming or outgoing buffer is supposed a single tensor, which may contain one or multi frames.
-
-GstTensorAggregator gets the size of one frame with ```frames-in```, aggregates the frames, and pushes a buffer with ```frames-out``` frames.
-After pushing an outgoing buffer, GstTensorAggregator flushes the ```frames-flush``` frames.
-
-For example, GstTensorAggregator with the properties ```frames-in=3```, ```frames-out=4```, ```frames-flush=2```
-
-```
-Incoming buffer
---------------------------------------------------------------------
-| 1st buffer | 2nd buffer | 3rd buffer | 4th buffer |
---------------------------------------------------------------------
-| 01 | 02 | 03 | 04 | 05 | 06 | 07 | 08 | 09 | 10 | 11 | 12 |
---------------------------------------------------------------------
-Outgoing buffer
---------------------------------------------------------------------
-| 1st out-buffer |
---------------------------------------------------------------------
- flushed | 2nd out-buffer |
---------------------------------------------------------------------
- flushed | 3rd out-buffer |
---------------------------------------------------------------------
- flushed | 4th out-buffer |
---------------------------------------------------------------------
- flushed | 5th out-buffer |
---------------------------------------------------------------------
-```
-
-Please be informed that, to ensure the tensor configuration, you have to change the dimension if input and output frames are different. (See the property ```frames-dim```.)
-
-### Dis-aggregation
-
-With larger ```frames-in``` values and smaller ```frames-out``` values, the output stream may have more frames than its input stream: ```dis-aggregation```.
-For example, if a neural network model multiplies picture frames of a video stream, generating 120FPS from 30FPS video, aggregated in a single video (in tensor format) stream output, we can generate a 120FPS stream from a 30FPS (4 frames per buffer) stream. If the model generates 4 video (tensor format) streams with other/tensors, we may merge them first and apply aggregator for the same effect.
-
-## Sink Pads
-
-One "Always" sink pad exists. The capability of sink pad is ```other/tensor```.
-
-## Source Pads
-
-One "Always" source pad exists. The capability of source pad is ```other/tensor```.
-It does not support ```other/tensors``` because each frame (or a set of frames consisting a buffer) is supposed to be represented by a **single** tensor instance.
-
-## Properties
-
-- frames-in: The number of frames in incoming buffer. (Default 1)
-
- GstTensorAggregator itself cannot get the number of frames in buffer.
- This plugin calculates the size of one frame with this property.
-
-- frames-out: The number of frames in outgoing buffer. (Default 1)
-
- GstTensorAggregator calculates the size of outgoing frames and pushes a buffer to source pad.
-
-- frames-flush: The number of frames to flush. (Default 0)
-
- GstTensorAggregator flushes the bytes (```frames-flush``` frames) in GstAdapter after pushing a buffer.
- If set 0 (default value), all outgoing frames will be flushed.
-
-- frames-dim: The dimension index of frames in tensor. (Default value is (NNS_TENSOR_RANK_LIMIT - 1))
-
- If ```frames-in``` and ```frames-out``` are different, GstTensorAggregator has to change the dimension of tensor.
- With this property, GstTensorAggregator changes the out-caps.
-
- If set this value in 0 ~ (NNS_TENSOR_RANK_LIMIT - 2) and ```concat``` is true, GstTensorAggregator will concatenate the output buffer.
-
-- concat: The flag to concatenate output buffer. (Default true)
-
- If ```concat``` is true and ```frames-out``` is larger than 1, GstTensorAggregator will concatenate the output buffer with the axis ```frames-dim```.
-
-### Properties for debugging
-
-- silent: Enable/disable debugging messages.
-
-## Usage Examples
-
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_aggregator frames-out=10 frames-flush=5 frames-dim=3 ! tensor_sink
-```
-
-GstTensorAggregator receives a buffer with 1 frame (dimension 3:640:480:1), pushes a buffer with 10 frames (dimension 3:640:480:10), and flushes 5 frames after pushing a buffer.
+++ /dev/null
-tensor_aggregator_sources = [
- 'tensor_aggregator.c'
-]
-
-foreach s : tensor_aggregator_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
----
-title: tensor_converter
-...
-
-# NNStreamer::tensor\_converter
-
-## Supported features
-
-- Video: direct conversion of video/x-raw / non-interlace(progressive) to [height][width][#Colorspace] tensor. (#Colorspace:width:height:frames-per-tensor)
- - Supported colorspaces: RGB (3), BGRx (4), Gray8 (1)
- - You may express ```frames-per-tensor``` to have multiple image frames in a tensor like audio and text as well.
- - If ```frames-per-tensor``` is not configured, the default value is 1.
- - Golden tests for such input
-- Audio: direct conversion of audio/x-raw with arbitrary numbers of channels and frames per tensor to [frames-per-tensor][channels] tensor. (channels:frames-per-tensor)
- - The number of frames per tensor is supposed to be configured manually by stream pipeline developer with the property of ```frames-per-tensor```.
- - If ```frames-per-tensor``` is not configured, the default value is 1.
-- Text: direct conversion of text/x-raw with UTF-8 to [frames-per-tensor][input-dim] tensor. (input-dim:frames-per-tensor)
- - The number of frames per tensor is supposed to be configured manually by stream pipeline developer with the property of ```frames-per-tensor```.
- - If ```frames-per-tensor``` is not configured, the default value is 1.
- - The size of a text frame should be configured by developer with the property ```input-dim```. Because the dimension of tensor is the key metadata of a tensor stream pipeline, we need to fix the value before actually looking at the actual stream data.
-- Octet stream: direct conversion of application/octet-stream.
- - Octet stream to static tensor: You should set ```input-type``` and ```input-dim``` to describe tensor(s) information of outgoing buffer.
- If setting multiple tensors, converter will divide incoming buffer and set multiple memory chunks in outgoing buffer.
-
- e.g, converting 10 bytes of octet stream to 2 static tensors:
-
- ```... ! application/octet-stream ! tensor_converter input-dim=2:1:1:1,2:1:1:1 input-type=int32,int8 ! ...```
- - Octet stream to flexible tensor: With a caps filter (```other/tensors,format=flexible```), tensor-converter generates flexible tensor.
- In this case, you don't need to denote ```input-type``` and ```input-dim``` in pipeline description.
- Converter sets the dimension with buffer size (size:1:1:1) and type uint8, and appends this information (tensor-meta) in the memory of outgoing buffer.
-
- e.g, converting octet stream to flexible tensor:
-
- ```... ! application/octet-stream ! tensor_converter ! other/tensors,format=flexible ! ...```
- - Media to octet stream: If you need to convert media to octet stream, use [capssetter](https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-good/html/gst-plugins-good-plugins-capssetter.html).
- This element can update the caps of incoming buffer using it's properties. After updating the mimetye of media stream, it can be converted to tensor stream with tensor_converter.
-
- e.g., converting jpeg to flexible tensor:
-
- ```... ! jpegenc ! capssetter caps="application/octet-stream" replace=true join=false ! tensor_converter ! other/tensors,format=flexible ! ...```
- - Only single frame. ```frames-per-tensor``` should be 1 (default value).
-- Flexible tensor: conversion to static tensor stream.
- - You can convert mime type (flexible to static) if incoming tensor has fixed data format and size.
- - With ```input-type``` and ```input-dim```, converter will set the output capability on src pad.
-- Serialized data: conversion to static tensor stream.
- - Supported serialization format: Protocol Buffers, Flatbuffers and Flexbuffers.
- - The converter gets input capability from the peer pad of the sink pad, or you can specify the capability.
- - You don't need to specify the option because the sub-plugin is registered using the capability.
-
-## Planned features
-
-From higher priority
-- Support other color spaces (IUV, BGGR, ...)
-
-## Sink Pads
-
-One "Always" sink pad exists. The capability of sink pad is ```video/x-raw```, ```audio/x-raw```, ```text/x-raw```, ```application/octet-stream```, and ```other/tensors-flexible```.
-
-If you require another pad caps to convert media stream to tensor(s), you can implement new sub-plugin or register custom converter.
-
-## Source Pads
-
-One "Always" source pad exists. The capability of source pad is ```other/tensor```, ```other/tensors```, and ```other/tensors-flexible```.
-
-Note that, only octet-stream in the default capabilities of sink pad supports configuring multiple tensors in outgoing buffer.
-When incoming media type is video, audio, or text, each frame (or a set of frames consisting a buffer) is supposed to be represented by a **single** tensor instance and it will have ```other/tensor``` capability.
-
-## Performance Characteristics
-
-- Video
- - Unless it is RGB with ```width % 4 > 0``` or Gray8 with ```width % 4 > 0```, there are no memcpy or data modification processes. It only converts meta data in such cases.
- - Otherwise, there will be one memcpy for each frame.
-- Audio
- - TBD.
-- Text
- - TBD.
-
-## Properties
-
-- frames-per-tensor: The number of incoming media frames that will be contained in a single instance of tensors. With the value > 1, you can put multiple frames in a single tensor.
-
-### Properties for debugging
-
-- silent: Enable/disable debugging messages.
-
-## Usage Examples
-
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_sink
-```
-
-### flatbuffers to tensors stream
-Convert to flatbuffers using tensor decoder and then convert back to tensors stream.
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_decoder mode=flatbuf ! tensor_converter ! tensor_sink
-```
-
-### protocol buffers to tensors stream
-Convert to protocol buffers using tensor decoder and then convert back to tensors stream.
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_decoder mode=protobuf ! tensor_converter ! tensor_sink
-```
-
-### flexbuffers to tensors stream
-Convert to flexbuffers using tensor decoder and then convert back to tensors stream.
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_decoder mode=flexbuf ! tensor_converter ! tensor_sink
-```
-
-## Custom converter
-If you want to convert any media type to tensors, you can use custom mode of the tensor converter.
-
-### Code mode
-This is an example of a callback type custom mode.
-```
-// Define custom callback function
-GstBuffer * tensor_converter_custom_cb (GstBuffer *in_buf, void *data, GstTensorsConfig *config) {
- // Write a code to convert any media type to tensors.
-}
-...
-// Register custom callback function
-nnstreamer_converter_custom_register ("tconv", tensor_converter_custom_cb, NULL);
-...
-// Use the custom tensor converter in a pipeline.
-// E.g., Pipeline of " ... (any media stream) ! tensor_converter mode=custom-code:tconv ! (tensors)... "
-...
-// After everything is done.
-nnstreamer_converter_custom_unregister ("tconv");
-```
-
-### Script mode
-* Note: Currently only Python is supported.
- - If you want to use FlatBuffers Python in Tizen, install package `flatbuffers-python`. It also includes a Flexbuffers Python.
- - If you want to use Flatbuffers Python in Ubuntu, install package using pip `pip install flatbuffers`. It also includes a Flexbuffers Python.
-
-This is an example of a python script.
-```
-# @file custom_converter_example.py
-import numpy as np
-import nnstreamer_python as nns
-## @brief User-defined custom converter
-class CustomConverter(object):
- def convert (self, input_array):
- ## Write a code to convert any media type to tensors.
- return (tensors_info, out_array, rate_n, rate_d)
-```
-Example pipeline
-```
-... (any media stream) ! tensor_converter mode=custom-script:custom_converter_example.py ! (tensors) ...
-```
+++ /dev/null
-tensor_converter_sources = [
- 'tensor_converter.c'
-]
-
-foreach s : tensor_converter_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_crop_sources = [
- 'tensor_crop.c'
-]
-
-foreach s : tensor_crop_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
----
-title: tensor_decoder
-...
-
-# NNStreamer::tensor\_decoder
-
-## Supported features
-
-With given properties from users or pipeline developers, support the following conversions. The list is not exclusive and we may need to implement more features later.
-
-| Mode | Main property (input tensor semantics) | Additional & mandatory property | Output |
-| -| - | - | - |
-| directvideo | other/tensors | N/A | video/x-raw |
-| bounding_boxes | Bounding boxes (other/tensor) | File path to labels, decoding schems, out dim, in dim | video/x-raw |
-| image_labeling | Image label (other/tensor) | File path to labels | text/x-raw |
-| image_segment | segmentaion info | expected model | video/x-raw |
-| pose_estimation | pose info | out dim, in dim, File path to labels, mode | video/x-raw |
-| flatbuf | other/tensors | N/A | flatbuffers |
-| protobuf | other/tensors | N/A | protocol buffers |
-| flexbuf | other/tensors | N/A | flexbuffers |
-| | ... more features coming ... | | |
-
-
-## Sink Pads
-
-One always sink pad.
-
-- other/tensors (current)
-
-## Source Pads
-
-One always source pad.
-
-- video/x-raw
-- text/x-raw
-- flatbuffers
-- protocol buffers
-- flexbuffers
-
-## Performance Characteristics
-
-TBD.
-
-## Properties
-
-- output-type: Integer denoting VIDEO, AUDIO, or TEXT.
- - Experimental: we need to update this. Current "output-type" is not satisfactory.
-
-The following properties are suggested and planned.
-- (update) output-type: String denoting "bounding-boxes", "image-label", "bounding-boxes-with-label", ...
-- additional-file-1: String denoting the file path to the **first** data file for decoding; e.g., label list text file for image labeling.
-- additional-file-2: **second** data file if the corresponding output-type requires two or more.
-- additional-file-N: ... **N'th** data file if the corresponding output-type requires N or more.
-
-
-## Properties for debugging
-
-- silent: disable or enable debugging messages
-
-## Usage Examples
-
-```
-$ gst-launch somevideosrc_with_xraw ! tee name=t ! queue ! tensor_converter ! tensor_filter SOME_OPTION ! tensor_decoder output-type=image-label additional-file-1=/tmp/labels.txt ! txt. t. ! queue ! textoverlay name=txt ! autovideosink
-```
-Note: not tested. not sure if the syntax is correct with ```txt. t. !```. Regard the above as pseudo code.
-
-### tensor stream to flatbuffers
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_decoder mode=flatbuf ! fakesink
-```
-
-### tensor stream to protocol buffers
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_decoder mode=protobuf ! fakesink
-```
-
-### tensor stream to flexbuffers
-```
-$ gst-launch videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_decoder mode=flexbuf ! fakesink
-```
-
-## Custom decoder
-If you want to convert tensors to any media type, You can use custom mode of the tensor decoder.
-### code mode
-This is an example of a callback type custom mode.
-```
-// Define custom callback function
-int tensor_decoder_custom_cb (const GstTensorMemory *input,
-const GstTensorsConfig *config, void *data, GstBuffer *out_buf) {
- // Write a code to convert tensors to any media type.
-}
-
-...
-// Register custom callback function
-nnstreamer_decoder_custom_register ("tdec", tensor_decoder_custom_cb, NULL);
-...
-// Use the custom tensor decoder in a pipeline.
-// E.g., Pipeline of " ... (tensors) ! tensor_decoder mode=custom-code option1=tdec ! (any media stream)... "
-...
-// After everything is done.
-nnstreamer_decoder_custom_unregister ("tdec");
-```
-
-### script mode
-* Note: Currently only Python is supported.
- - If you want to use FlatBuffers Python in Tizen, install package `flatbuffers-python`. It also includes a Flexbuffers Python.
- - If you want to use Flatbuffers Python in Ubuntu, install package using pip `pip install flatbuffers`. It also includes a Flexbuffers Python.
-
-This is an example of a python script.
-```
-# @file custom_decoder_example.py
-## @brief User-defined custom decoder
-class CustomDecoder(object):
-## @breif Python callback: getOutCaps
- def getOutCaps (self):
- # Write capability of the media type.
- return bytes('@CAPS_STRING@', 'UTF-8')
-
-## @breif Python callback: decode
- def decode (self, raw_data, in_info, rate_n, rate_d):
- # return decoded raw data as `bytes` type.
- return data
-
-```
-Example pipeline
-```
-... (tensors) ! tensor_decoder mode=python3 option1=custom_decoder_example.py ! (any media stream) ...
-```
+++ /dev/null
-tensor_decoder_sources = [
- 'tensordec.c'
-]
-
-foreach s : tensor_decoder_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_demux_sources = [
- 'gsttensordemux.c'
-]
-
-foreach s : tensor_demux_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_if_sources = [
- 'gsttensorif.c'
-]
-
-foreach s : tensor_if_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_merge_sources = [
- 'gsttensormerge.c'
-]
-
-foreach s : tensor_merge_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_mux_sources = [
- 'gsttensormux.c'
-]
-
-foreach s : tensor_mux_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_rate_sources = [
- 'gsttensorrate.c'
-]
-
-foreach s : tensor_rate_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_repo_sources = [
- 'tensor_repo.c',
- 'tensor_reposink.c',
- 'tensor_reposrc.c'
-]
-
-foreach s : tensor_repo_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
----
-title: tensor_sink
-...
-
-# NNStreamer::tensor\_sink
-
-## Supported features
-
-GstTensorSink is a sink plugin for making an application to get a buffer of tensor (or tensors).
-
-GstTensorSink emits a signal when receiving a buffer from up-stream element.
-An application can connect a signal ```new-data```, then will get the buffer of tensor.
-
-## Sink Pads
-
-One "Always" sink pad exists. The capability of sink pad is ```other/tensor``` and ```other/tensors```.
-
-## Signals
-
-- new-data: Signal to get the buffer from GstTensorSink.
-
-- stream-start: Optional. An application can use this signal to detect the start of a new stream, instead of the message ```GST_MESSAGE_STREAM_START``` from pipeline.
-
-- eos: Optional. An application can use this signal to detect the EOS (end-of-stream), instead of the message ```GST_MESSAGE_EOS``` from pipeline.
-
-## Properties
-
-- signal-rate: New data signals per second (Default 0 for unlimited, MAX 500)
-
- If ```signal-rate``` is larger than 0, GstTensorSink calculates the time to emit a signal with this property.
-
- If set 0 (default value), all the received buffers will be passed to the application.
-
- Please note that this property does not guarantee the periodic signals.
- This means if GstTensorSink cannot get the buffers in time, it will pass all the buffers. (working like default 0)
-
-- emit-signal: Flag to emit the signals for new data, stream start, and eos. (Default true)
-
-### Properties for debugging
-
-- silent: Enable/disable debugging messages.
-
-## Usage Examples
-
-```
-$ gst-launch-1.0 videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_sink
-```
-
-For more details, see the [examples](https://github.com/nnstreamer/nnstreamer/tree/master/nnstreamer_example/example_sink) to handle the buffer from GstTensorSink.
+++ /dev/null
-tensor_sink_sources = [
- 'tensor_sink.c'
-]
-
-foreach s : tensor_sink_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
----
-title: tensor_source
-...
-
-# NNStreamer::tensor_source
-
-## Motivation
-
-Allow non Gstreamer standard input sources to provide other/tensor or other/tensors stream.
-
-Such sources include, but not limited to:
-
-- Output of general external applications including instances of neural network frameworks and models not using NNStreamer suite (TBD).
-- Output of non gstreamer compatible sensors, such as Linux IIO devices: [Industrial I/O, 01.org](https://01.org/linuxgraphics/gfx-docs/drm/driver-api/iio/index.html). We may need to get streams from thermostats, light detectors, IMUs, or signals from GPIO pins.
-- Output of LIDAR and RADAR in case we do not have V4L2 interfaces for them (TBD).
-
-
-## Output Format (src_pad)
-
-other/tensor or other/tensors
+++ /dev/null
-tensor_src_sources = [
-]
-
-gst18_dep = dependency('gstreamer-' + gst_api_verision, version : '>=1.8', required : false)
-if gst18_dep.found()
- tensor_src_sources += 'tensor_src_iio.c'
-else
- message('tensor_src_iio requires GStreamer >= 1.8. Skipping it')
-endif
-
-foreach s : tensor_src_sources
- if build_platform != 'macos'
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
- endif
-endforeach
+++ /dev/null
-tensor_sparse_sources = [
- 'tensor_sparse_util.c',
- 'tensor_sparse_enc.c',
- 'tensor_sparse_dec.c'
-]
-
-foreach s : tensor_sparse_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
-tensor_split_sources = [
- 'gsttensorsplit.c'
-]
-
-foreach s : tensor_split_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
+++ /dev/null
----
-title: tensor_transform
-...
-
-# NNStreamer::tensor_transform
-
-## Supported Features
-
-- Transformation the shape, data values (arithmetics or normalization), or data type of ```other/tensor``` stream.
-- If possible, the tensor_transform element exploits [ORC: Optimized inner Loop Runtime Compiler](https://gitlab.freedesktop.org/gstreamer/orc) to accelerate the supported operations.
-- Aggregate multiple operators into a single transform instance for performance optimization.
- - E.g., ```tensor_transform mode=typecast option=uint8 ! tensor_transform mode=arithmetic option=mul:4 ! tensor_transform mode=arithmetic option=add:25 can be optimized by tensor_transform mode=arithmetic option=typecast:uint8,mul:8,add:25```
-
-## Planned Features
-
-- TBD
-
-## Pads
-
-- SINK
-
- - One always sink pad named 'sink'
- - other/tensor
-
-- SRC
-
- - One always source pad named 'src'
- - other/tensor
-
-## Properties
-
-- mode (readable, writable): Mode used for transforming tensor
- - Enum "gtt_mode_type" Default: -1, "unknown"
- - (0): dimchg
- - A mode for changing tensor dimensions
- - An option should be provided as option=FROM_DIM:TO_DIM (with a regex, ^([0-3]):([0-3])$, where NNS_TENSOR_RANK_LIMIT is 4).
- - Example: Move 1st dim to 2nd dim (i.e., [a][H][W][C] ==> [a][C][H][W])
-
- ```bash
- ... ! tensor_converter ! tensor_transform mode=dimchg option=0:2 ! ...
- ```
-
- - (1): typecast
- - A mode for casting data type of tensor
- - An option should be provided as option=TARGET_TYPE (with a regex, ^[u]?int(8|16|32|64)$|^float(32|64)$)
- - Example: Cast the data type of upstream tensor to uint8
-
- ```bash
- ... ! tensor_converter ! tensor_transform mode=typecast option=uint8 ! ...
- ```
-
- - (2): arithmetic
- - A mode for arithmetic operations with tensor
- - An option should be provided as option=[typecast:TYPE,][per-channel:(false|true@DIM),]add|mul|div:NUMBER[@CH_IDX]..., ...
- - Example 1: Element-wise add 25 and multiply 4
-
- ```bash
- ... ! tensor_converter ! tensor_transform mode=arithmetic option=add:25,mul:4 ! ...
- ```
-
- - Example 2: Cast the data type of upstream tensor to float32 and element-wise subtract 25
-
- ```bash
- ... ! tensor_converter ! tensor_transform mode=arithmetic option=typecast:float32,add:-25 ! ...
- ```
-
- - For "per-channel", DIM means the dimension which should be viewed as channel and CH_IDX means the idx of channel the given operation should be applied to. When CH_IDX is not given, the operation is applied to all channels.
- - Example 3: Add 255 only for 1-th channel when 0-th dim is channel (for RGB image, add 255 for G channel)
-
- ```bash
- ... ! video/x-raw,format=RGB ! tensor_converter ! tensor_transform mode=arithmetic option=per-channel:true@0,add:255@1 ! ...
- ```
-
- - (3): transpose
- - A mode for transposing shape of tensor
- - An option should be provided as D1':D2':D3':D4 (fixed to 3)
- - Example: 640:480:3:1 ==> 3:480:640:1
-
- ```bash
- ... ! tensor_converter input-dim=640:480:3:1 ! tensor_transform mode=transpose option=2:1:0:3 ! ...
- ```
-
- - (4): stand
- - A mode for statistical standardization or normalization of tensor
- - An option should be provided as option=(default|dc-average)[:TYPE] where `default` for statistical standardization and `dc-average` to remove DC offset (average value). `TYPE` denotes output data type.
- - Example: Remove DC offset, output type to float32
-
- ```bash
- ... ! tensor_converter ! tensor_transform mode=stand option=dc-average:float32 ! ...
- ```
-
-- acceleration (readable, writable): A flat indicating whether to enable ```orc``` acceleration
-
-## Properties for debugging
-
-- silent: disable or enable debugging messages
+++ /dev/null
-tensor_transform_sources = [
- 'tensor_transform.c'
-]
-
-if orcc_support_is_available
- # tensor_transform does not need to generate -dist files.
- orcsrc = 'transform-orc'
-
- orc_h = custom_target(orcsrc + '.h',
- input: orcsrc + '.orc',
- output: orcsrc + '.h',
- command: orcc_support_orcc_args + ['--header', '-o', '@OUTPUT@', '@INPUT@'])
- orc_c_org = custom_target(orcsrc + '.c.in',
- input: orcsrc + '.orc',
- output: orcsrc + '.c.in',
- command: orcc_support_orcc_args + ['--implementation', '-o', '@OUTPUT@', '@INPUT@'])
-
-## ORC generated codes incur a few warnings. Ignore warnings of generated files.
- script = find_program('ignore_warning.sh')
- orc_c = custom_target(orcsrc + '.c',
- input: orc_c_org,
- output: orcsrc + '.c',
- command: [ script, '@INPUT@', '@OUTPUT@' ])
-
- nnstreamer_sources += [orc_c, orc_h]
- nnstreamer_internal_deps += declare_dependency(sources: orc_h)
-endif
-
-foreach s : tensor_transform_sources
- nnstreamer_sources += join_paths(meson.current_source_dir(), s)
-endforeach
#include <gst/gst.h>
#include <tensor_common.h>
#include <unittest_util.h>
-#include "../gst/nnstreamer/tensor_if/gsttensorif.h"
+#include "../gst/nnstreamer/elements/gsttensor_if.h"
#define TEST_TIMEOUT_MS (20000U)
#include <unistd.h>
#include "../unittest_util.h"
-#include "../gst/nnstreamer/tensor_sparse/tensor_sparse_util.h"
-#include "../gst/nnstreamer/tensor_transform/tensor_transform.h"
+#include "../gst/nnstreamer/elements/gsttensor_sparseutil.h"
+#include "../gst/nnstreamer/elements/gsttensor_transform.h"
#ifdef ENABLE_TENSORFLOW_LITE
#define TEST_REQUIRE_TFLITE(Case, Name) TEST (Case, Name)
}
#ifdef HAVE_ORC
-#include "transform-orc.h"
+#include "nnstreamer-orc.h"
/**
* @brief Test for tensor_transform orc functions (add constant value)