gstreamer-controller-1.0
gstreamer-video-1.0
gstreamer-audio-1.0
+ gstreamer-app-1.0
glib-2.0
)
g_return_val_if_fail (name != NULL, _NNS_MEDIA_END);
- /** @todo Support other types */
if (g_str_has_prefix (name, "video/")) {
return _NNS_VIDEO;
} else if (g_str_has_prefix (name, "audio/")) {
return _NNS_AUDIO;
+ } else if (g_str_has_prefix (name, "text/")) {
+ return _NNS_STRING;
}
/** unknown, or not-supported type */
{
g_return_if_fail (v_info != NULL);
+ /**
+ * Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-video-raw.html
+ */
v_info->format = GST_VIDEO_FORMAT_UNKNOWN;
v_info->w = 0;
v_info->h = 0;
{
g_return_if_fail (a_info != NULL);
+ /**
+ * Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-audio-raw.html
+ */
a_info->format = GST_AUDIO_FORMAT_UNKNOWN;
a_info->ch = 0;
a_info->rate = 0;
}
/**
+ * @brief Initialize the text info structure
+ * @param t_info text info structure to be initialized
+ */
+void
+gst_tensor_text_info_init (GstTensorTextInfo * t_info)
+{
+  g_return_if_fail (t_info != NULL);
+
+  /**
+   * Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-text-raw.html
+   */
+  t_info->format = 0; /** 0 denotes unknown/unsupported; 1 denotes utf8 */
+}
+
+/**
* @brief Set video info to configure tensor
* @param v_info video info structure to be filled
* @param structure caps structure
}
/**
+ * @brief Set text info to configure tensor
+ * @param t_info text info structure to be filled
+ * @param structure caps structure
+ */
+void
+gst_tensor_text_info_from_structure (GstTensorTextInfo * t_info,
+    const GstStructure * structure)
+{
+  const gchar *format;
+
+  g_return_if_fail (t_info != NULL);
+  g_return_if_fail (structure != NULL);
+
+  gst_tensor_text_info_init (t_info);
+
+  /** format stays 0 (unknown) unless the caps explicitly declare utf8 */
+  format = gst_structure_get_string (structure, "format");
+  if (format) {
+    if (g_str_equal (format, "utf8")) {
+      t_info->format = 1;
+    }
+  }
+}
+
+/**
* @brief Set the video info structure from tensor config
* @param v_info video info structure to be filled
* @param config tensor config structure to be interpreted
}
/**
+ * @brief Set the text info structure from tensor config
+ * @param t_info text info structure to be filled
+ * @param config tensor config structure to be interpreted
+ * @return TRUE if supported format
+ */
+gboolean
+gst_tensor_text_info_from_config (GstTensorTextInfo * t_info,
+    const GstTensorConfig * config)
+{
+  g_return_val_if_fail (config != NULL, FALSE);
+  g_return_val_if_fail (t_info != NULL, FALSE);
+
+  gst_tensor_text_info_init (t_info);
+
+  /** reject configs that do not describe a text stream */
+  g_return_val_if_fail (config->tensor_media_type == _NNS_STRING, FALSE);
+
+  t_info->format = config->tensor_media_format;
+
+  /** FALSE when the stored media format is still unknown (0) */
+  return (t_info->format != 0);
+}
+
+/**
* @brief Initialize the tensor config info structure
* @param config tensor config structure to be initialized
*/
config->rate_n = -1;
config->rate_d = -1;
- config->frame_size = 0;
config->tensor_media_type = _NNS_MEDIA_END;
config->tensor_media_format = 0;
}
* @return TRUE if configured
*/
gboolean
-gst_tensor_config_validate (GstTensorConfig * config)
+gst_tensor_config_validate (const GstTensorConfig * config)
{
guint i;
return FALSE;
}
- if (config->frame_size == 0 || config->tensor_media_format == 0) {
- return FALSE;
- }
-
switch (config->tensor_media_type) {
case _NNS_VIDEO:
case _NNS_AUDIO:
+ case _NNS_STRING:
break;
default:
+ /** unsupported type */
return FALSE;
}
+ if (config->tensor_media_format == 0) {
+ return FALSE;
+ }
+
return TRUE;
}
}
if (c1->rank == c2->rank && c1->type == c2->type &&
- c1->rate_n == c2->rate_n && c1->rate_d == c2->rate_d &&
- c1->frame_size == c2->frame_size) {
+ c1->rate_n == c2->rate_n && c1->rate_d == c2->rate_d) {
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
if (c1->dimension[i] != c2->dimension[i]) {
return FALSE;
gst_structure_get_fraction (structure, "framerate", &config->rate_n,
&config->rate_d);
- if (config->type != _NNS_END) {
- config->frame_size =
- tensor_element_size[config->type] *
- get_tensor_element_count (config->dimension);
- }
-
/** @todo we cannot get media type from caps */
- if (config->rank == 2) {
- config->tensor_media_type = _NNS_AUDIO;
- } else if (config->rank == 3) {
- config->tensor_media_type = _NNS_VIDEO;
+ switch (config->rank) {
+ case 1:
+ config->tensor_media_type = _NNS_STRING;
+ break;
+ case 2:
+ config->tensor_media_type = _NNS_AUDIO;
+ break;
+ case 3:
+ config->tensor_media_type = _NNS_VIDEO;
+ break;
+ default:
+ config->tensor_media_type = _NNS_MEDIA_END;
+ break;
}
/** @todo we cannot get media format from caps */
config->tensor_media_format = GST_AUDIO_FORMAT_UNKNOWN;
break;
}
+ } else if (config->tensor_media_type == _NNS_STRING) {
+ if (config->type == _NNS_INT8) {
+ /** utf8 */
+ config->tensor_media_format = 1;
+ }
}
return TRUE;
m_type = gst_tensor_media_type_from_structure (structure);
- if (m_type == _NNS_VIDEO) {
- GstTensorVideoInfo v_info;
+ switch (m_type) {
+ case _NNS_VIDEO:
+ {
+ GstTensorVideoInfo v_info;
+
+ gst_tensor_video_info_from_structure (&v_info, structure);
+ gst_tensor_config_from_video_info (config, &v_info);
+ break;
+ }
+ case _NNS_AUDIO:
+ {
+ GstTensorAudioInfo a_info;
- gst_tensor_video_info_from_structure (&v_info, structure);
- gst_tensor_config_from_video_info (config, &v_info);
- } else if (m_type == _NNS_AUDIO) {
- GstTensorAudioInfo a_info;
+ gst_tensor_audio_info_from_structure (&a_info, structure);
+ gst_tensor_config_from_audio_info (config, &a_info);
+ break;
+ }
+ case _NNS_STRING:
+ {
+ GstTensorTextInfo t_info;
- gst_tensor_audio_info_from_structure (&a_info, structure);
- gst_tensor_config_from_audio_info (config, &a_info);
- } else {
- /** @todo Support other types */
- err_print ("Unsupported type %d\n", m_type);
- return FALSE;
+ gst_tensor_text_info_from_structure (&t_info, structure);
+ gst_tensor_config_from_text_info (config, &t_info);
+ break;
+ }
+ default:
+ err_print ("Unsupported type %d\n", m_type);
+ return FALSE;
}
return TRUE;
* @brief Set the tensor config structure from video info
* @param config tensor config structure to be filled
* @param v_info video info structure to be interpreted
- * @return TRUE if ok
+ * @return TRUE if supported format
*/
gboolean
gst_tensor_config_from_video_info (GstTensorConfig * config,
* A 4-D uint8 or float32 Tensor of shape [batch_size, height, width, channels]
* where channels is 1, 3, or 4.
*/
- gboolean res = TRUE;
-
g_return_val_if_fail (config != NULL, FALSE);
g_return_val_if_fail (v_info != NULL, FALSE);
default:
/** unsupported format */
err_print ("Unsupported format = %d\n", v_info->format);
- res = FALSE;
break;
}
config->rate_n = v_info->fn;
config->rate_d = v_info->fd;
- if (config->type != _NNS_END) {
- config->frame_size =
- tensor_element_size[config->type] *
- get_tensor_element_count (config->dimension);
- }
-
config->tensor_media_type = _NNS_VIDEO;
config->tensor_media_format = v_info->format;
- return res;
+ return (config->type != _NNS_END);
}
/**
* @brief Set the tensor config structure from audio info
* @param config tensor config structure to be filled
* @param a_info audio info structure to be interpreted
- * @return TRUE if ok
+ * @return TRUE if supported format
*/
gboolean
gst_tensor_config_from_audio_info (GstTensorConfig * config,
* A 3-D float32 Tensor of shape [batch_size, frames, channels]
* or a 2-D float32 Tensor of shape [batch_size, frames].
*/
- gboolean res = TRUE;
-
g_return_val_if_fail (config != NULL, FALSE);
g_return_val_if_fail (a_info != NULL, FALSE);
default:
/** unsupported format */
err_print ("Unsupported format = %d\n", a_info->format);
- res = FALSE;
break;
}
config->rate_d = 1;
}
- if (config->type != _NNS_END) {
- config->frame_size =
- tensor_element_size[config->type] *
- get_tensor_element_count (config->dimension);
- }
-
config->tensor_media_type = _NNS_AUDIO;
config->tensor_media_format = a_info->format;
- return res;
+ return (config->type != _NNS_END);
+}
+
+/**
+ * @brief Set the tensor config structure from text info
+ * @param config tensor config structure to be filled
+ * @param t_info text info structure to be interpreted
+ * @return TRUE if supported format
+ */
+gboolean
+gst_tensor_config_from_text_info (GstTensorConfig * config,
+    const GstTensorTextInfo * t_info)
+{
+  /**
+   * Refer: https://www.tensorflow.org/api_docs/python/tf/summary/text
+   * A string-type Tensor
+   */
+  g_return_val_if_fail (config != NULL, FALSE);
+  g_return_val_if_fail (t_info != NULL, FALSE);
+
+  gst_tensor_config_init (config);
+
+  config->rank = 1;
+
+  switch (t_info->format) {
+    case 1:
+      /** utf8 maps to a byte (int8) tensor */
+      config->type = _NNS_INT8;
+      break;
+    default:
+      /** unsupported format; config->type stays _NNS_END so we return FALSE */
+      err_print ("Unsupported format = %d\n", t_info->format);
+      break;
+  }
+
+  /**
+   * Placeholder dimensions: text length is not fixed by caps, so all
+   * dimensions are set to 1 here.
+   * NOTE(review): confirm downstream elements derive the real length
+   * from the buffer size rather than from these dimensions.
+   */
+  config->dimension[0] = 1;
+  config->dimension[1] = 1;
+  config->dimension[2] = 1;
+  config->dimension[3] = 1;
+
+  /** cannot get the framerate for text type */
+  config->rate_n = config->rate_d = 1;
+
+  config->tensor_media_type = _NNS_STRING;
+  config->tensor_media_format = t_info->format;
+  return (config->type != _NNS_END);
+}
/**
gst_tensor_caps_from_config (const GstTensorConfig * config)
{
GstCaps *caps;
- GstStaticCaps raw_caps = GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT);
g_return_val_if_fail (config != NULL, NULL);
- caps = gst_static_caps_get (&raw_caps);
- caps = gst_caps_make_writable (caps);
+ caps = gst_caps_from_string (GST_TENSOR_CAP_DEFAULT);
if (config->rank > 0) {
gst_caps_set_simple (caps, "rank", G_TYPE_INT, config->rank, NULL);
{
GstTensorVideoInfo v_info;
GstCaps *caps;
- GstStaticCaps raw_caps = GST_STATIC_CAPS (GST_TENSOR_VIDEO_CAPS_STR);
g_return_val_if_fail (config != NULL, NULL);
- caps = gst_static_caps_get (&raw_caps);
- caps = gst_caps_make_writable (caps);
+ caps = gst_caps_from_string (GST_TENSOR_VIDEO_CAPS_STR);
gst_tensor_video_info_from_config (&v_info, config);
{
GstTensorAudioInfo a_info;
GstCaps *caps;
- GstStaticCaps raw_caps = GST_STATIC_CAPS (GST_TENSOR_AUDIO_CAPS_STR);
g_return_val_if_fail (config != NULL, NULL);
- caps = gst_static_caps_get (&raw_caps);
- caps = gst_caps_make_writable (caps);
+ caps = gst_caps_from_string (GST_TENSOR_AUDIO_CAPS_STR);
gst_tensor_audio_info_from_config (&a_info, config);
}
/**
+ * @brief Get text caps from tensor config
+ * @param config tensor config info
+ * @return caps for given config
+ */
+static GstCaps *
+gst_tensor_text_caps_from_config (const GstTensorConfig * config)
+{
+  GstTensorTextInfo t_info;
+
+  g_return_val_if_fail (config != NULL, NULL);
+
+  gst_tensor_text_info_from_config (&t_info, config);
+
+  /**
+   * Only utf8 (format 1) is supported; refuse to build text caps otherwise.
+   * (The previous check `t_info.format != 1` was inverted: it returned NULL
+   * exactly for the supported utf8 case and built caps for unknown formats.)
+   */
+  g_return_val_if_fail (t_info.format == 1, NULL);
+
+  return gst_caps_from_string (GST_TENSOR_TEXT_CAPS_STR);
+}
+
+/**
* @brief Get media caps from tensor config
* @param config tensor config info
* @return caps for given config
case _NNS_AUDIO:
caps = gst_tensor_audio_caps_from_config (config);
break;
+ case _NNS_STRING:
+ caps = gst_tensor_text_caps_from_config (config);
+ break;
default:
- /** @todo Support other types */
err_print ("Unsupported type %d\n", config->tensor_media_type);
break;
}
*/
#define DEFAULT_FRAMES_PER_BUFFER 0
-#define SINK_CAPS \
- GST_STATIC_CAPS (GST_TENSOR_VIDEO_CAPS_STR "; " GST_TENSOR_AUDIO_CAPS_STR)
-
-#define SRC_CAPS \
- GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT)
-
/**
* @brief The capabilities of the inputs
*/
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
- SINK_CAPS);
+ GST_STATIC_CAPS (GST_TENSOR_MEDIA_CAPS_STR));
/**
* @brief The capabilities of the outputs
static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
- SRC_CAPS);
+ GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT));
#define gst_tensor_converter_parent_class parent_class
G_DEFINE_TYPE (GstTensorConverter, gst_tensor_converter,
}
/**
+ * @brief Validate newly configured tensor metadata
+ * @param self "this" pointer
+ * @param config newly configured tensor metadata
+ */
+static gboolean
+gst_tensor_converter_validate_config (GstTensorConverter * self,
+    const GstTensorConfig * config)
+{
+  g_return_val_if_fail (self != NULL, FALSE);
+  g_return_val_if_fail (config != NULL, FALSE);
+
+  /** once configured, a new caps event must not contradict the old metadata */
+  if (self->tensor_configured &&
+      !gst_tensor_converter_check_consistency (self, config)) {
+    /** mismatched to old metadata */
+    return FALSE;
+  }
+
+  if (!gst_tensor_config_validate (config)) {
+    /** not fully configured */
+    return FALSE;
+  }
+
+  return TRUE;
+}
+
+/**
+ * @brief Get output frame size
+ * @param self "this" pointer
+ * @param in_size received buffer size
+ */
+static gsize
+gst_tensor_converter_get_frame_size (GstTensorConverter * self, gsize in_size)
+{
+  GstTensorConfig *config;
+  gsize out_size = 0;
+
+  g_assert (self->tensor_configured);
+  config = &self->tensor_config;
+
+  /**
+   * @todo Do we need to aggregate the buffers to meet tensor dimension?
+   * video : supposed 1 frame per buffer.
+   * audio : we can get samples in buffer, but it would not be equal to tensor dimension.
+   * text : just passes the size of bytearray.
+   */
+  switch (config->tensor_media_type) {
+    case _NNS_VIDEO:
+      /** fixed size: element size times element count of the configured dimension */
+      out_size =
+          tensor_element_size[config->type] *
+          get_tensor_element_count (config->dimension);
+      break;
+    case _NNS_AUDIO:
+      /**
+       * samples in buffer = size / GST_AUDIO_INFO_BPF (&self->in_info.audio)
+       */
+      out_size = in_size;
+      break;
+    case _NNS_STRING:
+      /** variable length: pass the received byte-array size through */
+      out_size = in_size;
+      break;
+    default:
+      /** unsupported type; 0 signals the error to the caller */
+      break;
+  }
+
+  return out_size;
+}
+
+/**
* @brief Parse structure and return tensor caps
* @param self "this" pointer
* @param structure structure to be interpreted
return FALSE;
}
- if (self->tensor_configured &&
- !gst_tensor_converter_check_consistency (self, &config)) {
- /** mismatched to old metadata */
+ if (!gst_tensor_converter_validate_config (self, &config)) {
return FALSE;
}
return FALSE;
}
- if (self->tensor_configured &&
- !gst_tensor_converter_check_consistency (self, &config)) {
- /** mismatched to old metadata */
+ if (!gst_tensor_converter_validate_config (self, &config)) {
return FALSE;
}
}
/**
+ * @brief Configure tensor metadata for text (internal static function)
+ * @param self "this" pointer to be configured.
+ * @param caps the sink cap.
+ * @return FALSE if error. TRUE if ok.
+ */
+static gboolean
+gst_tensor_converter_configure_for_text (GstTensorConverter * self,
+    const GstCaps * caps)
+{
+  GstStructure *structure;
+  GstTensorConfig config;
+  GstTensorTextInfo t_info;
+
+  /** parse text info (format) from the first caps structure */
+  structure = gst_caps_get_structure (caps, 0);
+  gst_tensor_text_info_from_structure (&t_info, structure);
+
+  if (!gst_tensor_config_from_text_info (&config, &t_info)) {
+    /** unsupported format */
+    return FALSE;
+  }
+
+  /** checks consistency with previous config and completeness */
+  if (!gst_tensor_converter_validate_config (self, &config)) {
+    return FALSE;
+  }
+
+  self->tensor_config = config;
+  return TRUE;
+}
+
+/**
* @brief Configure tensor metadata from sink caps (internal static function)
* @param self "this" pointer to be configured.
* @param caps the sink cap.
m_type = gst_tensor_media_type_from_caps (caps);
- /** @todo Support other types */
switch (m_type) {
case _NNS_VIDEO:
if (!gst_tensor_converter_configure_for_video (self, caps)) {
return FALSE;
}
break;
-
case _NNS_AUDIO:
if (!gst_tensor_converter_configure_for_audio (self, caps)) {
return FALSE;
}
break;
-
+ case _NNS_STRING:
+ if (!gst_tensor_converter_configure_for_text (self, caps)) {
+ return FALSE;
+ }
+ break;
default:
err_print ("Unsupported type %d\n", m_type);
return FALSE;
g_assert (self->tensor_configured);
config = &self->tensor_config;
- block_size = config->frame_size;
- g_assert (gst_buffer_get_size (outbuf) >= block_size);
g_assert (gst_buffer_map (inbuf, &src_info, GST_MAP_READ));
g_assert (gst_buffer_map (outbuf, &dest_info, GST_MAP_WRITE));
+ block_size = gst_tensor_converter_get_frame_size (self, src_info.size);
+ g_assert (gst_buffer_get_size (outbuf) >= block_size);
+
srcptr = src_info.data;
destptr = dest_info.data;
switch (config->tensor_media_type) {
case _NNS_VIDEO:
case _NNS_AUDIO:
+ case _NNS_STRING:
res = gst_tensor_converter_copy_buffer (self, inbuf, outbuf);
break;
-
- case _NNS_STRING:
default:
err_print ("Unsupported Media Type (%d)\n", config->tensor_media_type);
goto unknown_type;
break;
case _NNS_AUDIO:
+ case _NNS_STRING:
break;
- case _NNS_STRING:
default:
err_print ("Unsupported Media Type (%d)\n", config->tensor_media_type);
goto unknown_type;
self = GST_TENSOR_CONVERTER_CAST (trans);
+ silent_debug ("Direction = %d\n", direction);
silent_debug_caps (caps, "from");
silent_debug_caps (filter, "filter");
GstStructure *s = gst_caps_get_structure (caps, 0);
result = gst_tensor_converter_caps_from_structure (self, s);
} else if (direction == GST_PAD_SRC) {
- GstStaticCaps raw_sink_caps = SINK_CAPS;
- result = gst_static_caps_get (&raw_sink_caps);
+ result = gst_caps_from_string (GST_TENSOR_MEDIA_CAPS_STR);
} else {
- silent_debug ("Direction = %d\n", direction);
g_assert (0);
return NULL;
}
self = GST_TENSOR_CONVERTER_CAST (trans);
g_assert (self->tensor_configured);
- *othersize = self->tensor_config.frame_size;
+ *othersize = gst_tensor_converter_get_frame_size (self, size);
- return TRUE;
+ return (*othersize > 0);
}
/**
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT)
- );
+ GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT));
/**
* @brief The capabilities of the outputs
static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS ("ANY")
- );
+ GST_STATIC_CAPS ("ANY"));
#define gst_tensordec_parent_class parent_class
G_DEFINE_TYPE (GstTensorDec, gst_tensordec, GST_TYPE_BASE_TRANSFORM);
}
if (result == NULL) {
- /** raw caps for video, audio */
- GstStaticCaps caps_video = GST_STATIC_CAPS (GST_TENSOR_VIDEO_CAPS_STR);
- GstStaticCaps caps_audio = GST_STATIC_CAPS (GST_TENSOR_AUDIO_CAPS_STR);
-
- result = gst_caps_merge (gst_static_caps_get (&caps_video),
- gst_static_caps_get (&caps_audio));
- result = gst_caps_simplify (result);
+ /** raw caps for supported media types */
+ result = gst_caps_from_string (GST_TENSOR_MEDIA_CAPS_STR);
}
return result;
break;
}
case _NNS_AUDIO:
+ case _NNS_STRING:
break;
default:
err_print ("Unsupported type %d\n", config.tensor_media_type);
switch (config->tensor_media_type) {
case _NNS_VIDEO:
case _NNS_AUDIO:
+ case _NNS_STRING:
res = gst_tensordec_copy_buffer (self, inbuf, outbuf);
break;
- case _NNS_STRING:
default:
err_print ("Unsupported Media Type (%d)\n", config->tensor_media_type);
goto unknown_type;
}
break;
case _NNS_AUDIO:
- break;
case _NNS_STRING:
+ break;
default:
err_print ("Unsupported Media Type (%d)\n", config->tensor_media_type);
goto unknown_type;
self = GST_TENSORDEC_CAST (trans);
+ silent_debug ("Direction = %d\n", direction);
silent_debug_caps (caps, "from");
silent_debug_caps (filter, "filter");
* Currently video/x-raw and audio/x-raw supported.
*/
if (direction == GST_PAD_SINK) {
+ /** caps from media */
GstStructure *s = gst_caps_get_structure (caps, 0);
result = gst_tensordec_media_caps_from_structure (self, s);
} else if (direction == GST_PAD_SRC) {
/** caps from tensor */
- GstStaticCaps raw_caps = GST_STATIC_CAPS (GST_TENSOR_CAP_DEFAULT);
- result = gst_static_caps_get (&raw_caps);
+ result = gst_caps_from_string (GST_TENSOR_CAP_DEFAULT);
} else {
- silent_debug ("Direction = %d\n", direction);
g_assert (0);
return NULL;
}
*othersize = offset * config->dimension[2] * config->dimension[3];
} else {
- *othersize = self->tensor_config.frame_size;
+ *othersize = size;
}
return TRUE;
GST_AUDIO_CAPS_MAKE ("{ S8, U8, S16LE, S16BE, U16LE, U16BE }") \
", layout = (string) interleaved"
+#define GST_TENSOR_TEXT_CAPS_STR \
+ "text/x-raw, format = (string) utf8"
+
+/**
+ * @brief Caps string for supported types
+ * @todo Support other types
+ */
+#define GST_TENSOR_MEDIA_CAPS_STR \
+ GST_TENSOR_VIDEO_CAPS_STR "; " \
+ GST_TENSOR_AUDIO_CAPS_STR "; " \
+ GST_TENSOR_TEXT_CAPS_STR
+
/** @todo I'm not sure if the range is to be 1, 65535 or larger */
#define GST_TENSOR_RANK_RANGE "(int) [ 1, 4 ]"
#define GST_TENSOR_DIM_RANGE "(int) [ 1, 65535 ]"
{
_NNS_VIDEO = 0, /**< supposedly video/x-raw */
_NNS_AUDIO, /**< supposedly audio/x-raw */
- _NNS_STRING, /**< Not Supported Yet */
+ _NNS_STRING, /**< supposedly text/x-raw */
_NNS_MEDIA_END, /**< End Marker */
} media_type;
} GstTensorAudioInfo;
/**
+ * @brief Internal data structure for text info to configure tensor.
+ */
+typedef struct
+{
+ gint format; /**< text format (0:unknown, 1:utf8) */
+} GstTensorTextInfo;
+
+/**
* @brief Internal data structure for configured tensor info.
*/
typedef struct
tensor_dim dimension; /**< Dimensions. We support up to 4th ranks. */
gint rate_n; /**< framerate is in fraction, which is numerator/denominator */
gint rate_d; /**< framerate is in fraction, which is numerator/denominator */
- gsize frame_size; /**< Size of a single tensor frame in # bytes */
media_type tensor_media_type; /**< Denotes the input media stream type */
gint tensor_media_format; /**< Denotes the input media stream format */
} GstTensorConfig;
gst_tensor_audio_info_init (GstTensorAudioInfo * a_info);
/**
+ * @brief Initialize the text info structure
+ * @param t_info text info structure to be initialized
+ */
+extern void
+gst_tensor_text_info_init (GstTensorTextInfo * t_info);
+
+/**
* @brief Set video info to configure tensor
* @param v_info video info structure to be filled
* @param structure caps structure
const GstStructure * structure);
/**
+ * @brief Set text info to configure tensor
+ * @param t_info text info structure to be filled
+ * @param structure caps structure
+ */
+extern void
+gst_tensor_text_info_from_structure (GstTensorTextInfo * t_info,
+ const GstStructure * structure);
+
+/**
* @brief Set the video info structure from tensor config
* @param v_info video info structure to be filled
* @param config tensor config structure to be interpreted
const GstTensorConfig * config);
/**
+ * @brief Set the text info structure from tensor config
+ * @param t_info text info structure to be filled
+ * @param config tensor config structure to be interpreted
+ * @return TRUE if supported format
+ */
+extern gboolean
+gst_tensor_text_info_from_config (GstTensorTextInfo * t_info,
+ const GstTensorConfig * config);
+
+/**
* @brief Initialize the tensor config info structure
* @param config tensor config structure to be initialized
*/
* @return TRUE if configured
*/
extern gboolean
-gst_tensor_config_validate (GstTensorConfig * config);
+gst_tensor_config_validate (const GstTensorConfig * config);
/**
* @brief Compare tensor config info
* @brief Set the tensor config structure from video info
* @param config tensor config structure to be filled
* @param v_info video info structure to be interpreted
- * @return TRUE if ok
+ * @return TRUE if supported format
*/
extern gboolean
gst_tensor_config_from_video_info (GstTensorConfig * config,
* @brief Set the tensor config structure from audio info
* @param config tensor config structure to be filled
* @param a_info audio info structure to be interpreted
- * @return TRUE if ok
+ * @return TRUE if supported format
*/
extern gboolean
gst_tensor_config_from_audio_info (GstTensorConfig * config,
const GstTensorAudioInfo * a_info);
/**
+ * @brief Set the tensor config structure from text info
+ * @param config tensor config structure to be filled
+ * @param t_info text info structure to be interpreted
+ * @return TRUE if supported format
+ */
+extern gboolean
+gst_tensor_config_from_text_info (GstTensorConfig * config,
+ const GstTensorTextInfo * t_info);
+
+/**
* @brief Get tensor caps from tensor config
* @param config tensor config info
* @return caps for given config
* @bug No known bugs.
*/
+#include <string.h>
#include <gtest/gtest.h>
#include <gst/gst.h>
+#include <gst/app/gstappsrc.h>
/**
* @brief Macro for debug mode.
} TestStatus;
/**
+ * @brief Test type.
+ */
+typedef enum
+{
+ TEST_TYPE_VIDEO, /**< pipeline for video */
+ TEST_TYPE_AUDIO, /**< pipeline for audio */
+ TEST_TYPE_TEXT, /**< pipeline for text */
+ TEST_TYPE_TENSORS, /**< pipeline for tensors with tensormux */
+ TEST_TYPE_NEGO_FAILED, /**< pipeline to test caps negotiation */
+} TestType;
+
+/**
* @brief Test options.
*/
typedef struct
{
guint num_buffers; /**< count of buffers */
- gboolean convert_tensor; /**< true to set tensor converter */
- gboolean caps_tensors; /**< true to test caps other/tensors */
+ TestType test_type; /**< test pipeline */
} TestOption;
/**
g_test_data.end = FALSE;
g_test_data.caps_name = NULL;
- _print_log ("option num_buffers[%d] convert_tensor[%d] caps_tensors[%d]",
- option.num_buffers, option.convert_tensor, option.caps_tensors);
+ _print_log ("option num_buffers[%d] test_type[%d]",
+ option.num_buffers, option.test_type);
g_test_data.loop = g_main_loop_new (NULL, FALSE);
_check_cond_err (g_test_data.loop != NULL);
- if (option.caps_tensors) {
- /** other/tensors with tensormux */
- str_pipeline =
- g_strdup_printf
- ("tensormux name=mux ! tensor_sink name=test_sink "
- "videotestsrc num-buffers=%d ! video/x-raw,width=160,height=120,format=RGB,framerate=(fraction)30/1 ! tensor_converter ! mux.sink_0 "
- "videotestsrc num-buffers=%d ! video/x-raw,width=160,height=120,format=RGB,framerate=(fraction)30/1 ! tensor_converter ! mux.sink_1 ",
- option.num_buffers, option.num_buffers);
- } else {
- str_pipeline =
- g_strdup_printf
- ("videotestsrc num-buffers=%d ! video/x-raw,width=160,height=120,format=RGB,framerate=(fraction)30/1 ! "
- "%s ! tensor_sink name=test_sink", option.num_buffers,
- option.convert_tensor ? "tensor_converter" : "videoconvert");
+ switch (option.test_type) {
+ case TEST_TYPE_VIDEO:
+ /** video 160x120 */
+ str_pipeline =
+ g_strdup_printf
+ ("videotestsrc num-buffers=%d ! video/x-raw,width=160,height=120,format=RGB,framerate=(fraction)30/1 ! "
+ "tensor_converter ! tensor_sink name=test_sink", option.num_buffers);
+ break;
+ case TEST_TYPE_AUDIO:
+ /** audio sample rate 16000 (16 bits, signed, little endian) */
+ str_pipeline =
+ g_strdup_printf
+ ("audiotestsrc num-buffers=%d ! audio/x-raw,format=S16LE,rate=16000 ! "
+ "tensor_converter ! tensor_sink name=test_sink", option.num_buffers);
+ break;
+ case TEST_TYPE_TEXT:
+ str_pipeline =
+ g_strdup_printf
+ ("appsrc name=appsrc caps=text/x-raw,format=utf8 ! "
+ "tensor_converter ! tensor_sink name=test_sink");
+ break;
+ case TEST_TYPE_TENSORS:
+ /** other/tensors with tensormux */
+ str_pipeline =
+ g_strdup_printf
+ ("tensormux name=mux ! tensor_sink name=test_sink "
+ "videotestsrc num-buffers=%d ! video/x-raw,width=160,height=120,format=RGB,framerate=(fraction)30/1 ! tensor_converter ! mux.sink_0 "
+ "videotestsrc num-buffers=%d ! video/x-raw,width=160,height=120,format=RGB,framerate=(fraction)30/1 ! tensor_converter ! mux.sink_1 ",
+ option.num_buffers, option.num_buffers);
+ break;
+ case TEST_TYPE_NEGO_FAILED:
+ /** caps negotiation failed */
+ str_pipeline =
+ g_strdup_printf
+ ("videotestsrc num-buffers=%d ! video/x-raw,width=160,height=120,format=RGB,framerate=(fraction)30/1 ! "
+ "videoconvert ! tensor_sink name=test_sink", option.num_buffers);
+ break;
+ default:
+ goto error;
}
+
g_test_data.pipeline = gst_parse_launch (str_pipeline, NULL);
g_free (str_pipeline);
_check_cond_err (g_test_data.pipeline != NULL);
gst_bin_get_by_name (GST_BIN (g_test_data.pipeline), "test_sink");
_check_cond_err (g_test_data.sink != NULL);
+ if (DBG) {
+ /** print logs */
+ g_object_set (g_test_data.sink, "silent", (gboolean) FALSE, NULL);
+ }
+
g_test_data.status = TEST_INIT;
return TRUE;
gboolean emit, res_emit;
gboolean sync, res_sync;
gboolean qos, res_qos;
- TestOption option = { 1, TRUE, FALSE };
+ TestOption option = { 1, TEST_TYPE_VIDEO };
ASSERT_TRUE (_setup_pipeline (option));
/** default silent is TRUE */
g_object_get (g_test_data.sink, "silent", &silent, NULL);
- EXPECT_EQ (silent, TRUE);
+ EXPECT_EQ (silent, (DBG) ? FALSE : TRUE);
g_object_set (g_test_data.sink, "silent", !silent, NULL);
g_object_get (g_test_data.sink, "silent", &res_silent, NULL);
{
const guint num_buffers = 10;
gulong handle_id;
- TestOption option = { num_buffers, TRUE, FALSE };
+ TestOption option = { num_buffers, TEST_TYPE_VIDEO };
ASSERT_TRUE (_setup_pipeline (option));
- if (DBG) {
- /** print logs */
- g_object_set (g_test_data.sink, "silent", (gboolean) FALSE, NULL);
- }
-
/** tensor sink signals */
handle_id = g_signal_connect (g_test_data.sink, "new-data",
(GCallback) _new_data_cb, NULL);
{
const guint num_buffers = 10;
gulong handle_id;
- TestOption option = { num_buffers, TRUE, FALSE };
+ TestOption option = { num_buffers, TEST_TYPE_VIDEO };
ASSERT_TRUE (_setup_pipeline (option));
- if (DBG) {
- /** print logs */
- g_object_set (g_test_data.sink, "silent", (gboolean) FALSE, NULL);
- }
-
/** set signal-rate */
g_object_set (g_test_data.sink, "signal-rate", (guint) 15, NULL);
const guint num_buffers = 5;
gulong handle_id;
gint unknown = -1;
- TestOption option = { num_buffers, TRUE, FALSE };
+ TestOption option = { num_buffers, TEST_TYPE_VIDEO };
ASSERT_TRUE (_setup_pipeline (option));
- if (DBG) {
- /** print logs */
- g_object_set (g_test_data.sink, "silent", (gboolean) FALSE, NULL);
- }
-
/** try to set/get unknown property */
g_object_set (g_test_data.sink, "unknown-prop", 1, NULL);
g_object_get (g_test_data.sink, "unknown-prop", &unknown, NULL);
{
const guint num_buffers = 5;
gulong handle_id;
- TestOption option = { num_buffers, FALSE, FALSE };
+ TestOption option = { num_buffers, TEST_TYPE_NEGO_FAILED };
/** failed : cannot link videoconvert and tensor_sink */
ASSERT_TRUE (_setup_pipeline (option));
- if (DBG) {
- /** print logs */
- g_object_set (g_test_data.sink, "silent", (gboolean) FALSE, NULL);
- }
-
/** signal for new data */
handle_id = g_signal_connect (g_test_data.sink, "new-data",
(GCallback) _new_data_cb, NULL);
{
const guint num_buffers = 5;
gulong handle_id;
- TestOption option = { num_buffers, TRUE, TRUE };
+ TestOption option = { num_buffers, TEST_TYPE_TENSORS };
ASSERT_TRUE (_setup_pipeline (option));
- if (DBG) {
- /** print logs */
- g_object_set (g_test_data.sink, "silent", (gboolean) FALSE, NULL);
- }
-
/** signal for new data */
handle_id = g_signal_connect (g_test_data.sink, "new-data",
(GCallback) _new_data_cb, NULL);
}
/**
+ * @brief Test for audio stream.
+ */
+TEST (tensor_sink_test, audio_stream)
+{
+  const guint num_buffers = 10;
+  gulong handle_id;
+  TestOption option = { num_buffers, TEST_TYPE_AUDIO };
+
+  ASSERT_TRUE (_setup_pipeline (option));
+
+  /** signal for new data */
+  handle_id = g_signal_connect (g_test_data.sink, "new-data",
+      (GCallback) _new_data_cb, NULL);
+  EXPECT_TRUE (handle_id > 0);
+
+  _print_log ("start pipeline to test audio stream");
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_PLAYING);
+  g_main_loop_run (g_test_data.loop);
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_NULL);
+
+  /** check eos message */
+  EXPECT_EQ (g_test_data.status, TEST_EOS);
+
+  /** check received buffers */
+  EXPECT_EQ (g_test_data.received, num_buffers);
+
+  /** check caps name: audio stream must have been converted to other/tensor */
+  EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor"));
+
+  _free_test_data ();
+}
+
+/**
+ * @brief Test for text stream.
+ */
+TEST (tensor_sink_test, text_stream)
+{
+  const guint num_buffers = 10;
+  gulong handle_id;
+  guint i;
+  GstElement *appsrc;
+  TestOption option = { num_buffers, TEST_TYPE_TEXT };
+
+  ASSERT_TRUE (_setup_pipeline (option));
+
+  appsrc = gst_bin_get_by_name (GST_BIN (g_test_data.pipeline), "appsrc");
+
+  /** signal for new data */
+  handle_id = g_signal_connect (g_test_data.sink, "new-data",
+      (GCallback) _new_data_cb, NULL);
+  EXPECT_TRUE (handle_id > 0);
+
+  _print_log ("start pipeline to test text stream");
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_PLAYING);
+
+  /** push num_buffers text buffers by hand through appsrc */
+  for (i = 0; i < num_buffers; i++) {
+    /** 5 bytes: "test" plus the terminating NUL written by strcpy */
+    GstBuffer *buf = gst_buffer_new_allocate (NULL, 5, NULL);
+    GstMapInfo info;
+
+    gst_buffer_map (buf, &info, GST_MAP_WRITE);
+    strcpy ((gchar *) info.data, "test");
+    gst_buffer_unmap (buf, &info);
+
+    /** synthetic timestamps, 20 ms apart */
+    GST_BUFFER_PTS (buf) = (i + 1) * 20 * GST_MSECOND;
+    GST_BUFFER_DTS (buf) = GST_BUFFER_PTS (buf);
+
+    /** appsrc takes ownership of buf */
+    EXPECT_EQ (gst_app_src_push_buffer (GST_APP_SRC (appsrc), buf),
+        GST_FLOW_OK);
+  }
+
+  EXPECT_EQ (gst_app_src_end_of_stream (GST_APP_SRC (appsrc)), GST_FLOW_OK);
+
+  g_main_loop_run (g_test_data.loop);
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_NULL);
+
+  /** check eos message */
+  EXPECT_EQ (g_test_data.status, TEST_EOS);
+
+  /** check received buffers */
+  EXPECT_EQ (g_test_data.received, num_buffers);
+
+  /** check caps name: text stream must have been converted to other/tensor */
+  EXPECT_TRUE (g_str_equal (g_test_data.caps_name, "other/tensor"));
+
+  _free_test_data ();
+}
+
+/**
* @brief Main function for unit test.
*/
int