ogg->push_byte_length = length;
GST_DEBUG_OBJECT (ogg,
"File byte length %" G_GINT64_FORMAT, ogg->push_byte_length);
+ } else {
+ GST_DEBUG_OBJECT (ogg, "File byte length unknown, assuming live");
+ ogg->push_disable_seeking = TRUE;
+ return TRUE;
}
- format = GST_FORMAT_TIME;
- res = gst_pad_query_duration (peer, &format, &length);
+ res = gst_pad_query_duration (peer, GST_FORMAT_TIME, &length);
gst_object_unref (peer);
if (res && length >= 0) {
ogg->push_time_length = length;
ogg->push_byte_length = -1;
ogg->push_time_length = GST_CLOCK_TIME_NONE;
ogg->push_time_offset = GST_CLOCK_TIME_NONE;
- ogg->push_disable_seeking = FALSE;
ogg->push_state = PUSH_PLAYING;
- GstFormat format = GST_FORMAT_BYTES;
+
+ ogg->push_disable_seeking = FALSE;
+ if (!ogg->pullmode) {
+ GstPad *peer;
+ if ((peer = gst_pad_get_peer (ogg->sinkpad)) != NULL) {
- if (!gst_pad_query_duration (peer, &format, &length) || length <= 0) {
+ gint64 length = -1;
++ if (!gst_pad_query_duration (peer, GST_FORMAT_BYTES, &length)
++ || length <= 0) {
+ GST_DEBUG_OBJECT (ogg,
+ "Unable to determine stream size, assuming live, seeking disabled");
+ ogg->push_disable_seeking = TRUE;
+ }
+ }
+ }
+
GST_PUSH_UNLOCK (ogg);
gst_segment_init (&ogg->segment, GST_FORMAT_TIME);
break;
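
Editor's sketch (not part of this patch): the live-stream detection added above reduces to a single upstream query; a standalone helper showing the same pattern, assuming the 0.11/1.0 query API used in this hunk (probe_upstream_size is hypothetical):

#include <gst/gst.h>

/* A live source typically cannot answer a BYTES duration query,
 * so a failed or non-positive reply means "assume live, no seeking". */
static gboolean
probe_upstream_size (GstPad * sinkpad)
{
  GstPad *peer;
  gint64 length = -1;
  gboolean seekable = FALSE;

  if ((peer = gst_pad_get_peer (sinkpad)) != NULL) {
    if (gst_pad_query_duration (peer, GST_FORMAT_BYTES, &length)
        && length > 0)
      seekable = TRUE;
    gst_object_unref (peer);
  }
  return seekable;
}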
static char *theora_enc_get_supported_formats (void);
+ static void theora_timefifo_free (GstTheoraEnc * enc);
+ static GstFlowReturn
+ theora_enc_encode_and_push (GstTheoraEnc * enc, ogg_packet op,
+ GstBuffer * buffer);
+
static void
-gst_theora_enc_base_init (gpointer g_class)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
-
- gst_element_class_add_static_pad_template (element_class,
- &theora_enc_src_factory);
- gst_element_class_add_static_pad_template (element_class,
- &theora_enc_sink_factory);
- gst_element_class_set_details_simple (element_class,
- "Theora video encoder", "Codec/Encoder/Video",
- "encode raw YUV video to a theora stream",
- "Wim Taymans <wim@fluendo.com>");
-}
-
-static void
gst_theora_enc_class_init (GstTheoraEncClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
"Single pass or first/second pass", GST_TYPE_MULTIPASS_MODE,
THEORA_DEF_MULTIPASS_MODE,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_DUP_ON_GAP,
+ g_param_spec_boolean ("dup-on-gap", "Create DUP frame on GAP flag",
+ "Allow codec to handle frames with GAP flag as duplicates "
+ "of previous frame. "
+ "This is good to work with variable frame rate stabilized "
+ "by videorate element. It will add variable latency with maximal "
+ "size of keyframe distance, this way it is a bad idea "
+ "to use with live streams.",
+ THEORA_DEF_DUP_ON_GAP,
+ (GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- caps_string = g_strdup_printf ("video/x-raw-yuv, "
- "format = (fourcc) { %s }, "
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&theora_enc_src_factory));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&theora_enc_sink_factory));
+ gst_element_class_set_details_simple (gstelement_class,
+ "Theora video encoder", "Codec/Encoder/Video",
+ "encode raw YUV video to a theora stream",
+ "Wim Taymans <wim@fluendo.com>");
+
+ caps_string = g_strdup_printf ("video/x-raw, "
+ "format = (string) { %s }, "
"framerate = (fraction) [1/MAX, MAX], "
"width = (int) [ 1, MAX ], " "height = (int) [ 1, MAX ]",
theora_enc_get_supported_formats ());
return TRUE;
}
+ /**
+ * g_slice_free() cannot be passed to g_queue_foreach() directly,
+ * so we wrap it in a helper bound to the GstClockTime slice size.
+ */
+ static void
+ theora_free_gstclocktime (gpointer mem)
+ {
+ g_slice_free (GstClockTime, mem);
+ }
+
+ static void
+ theora_timefifo_in (GstTheoraEnc * enc, const GstClockTime * timestamp)
+ {
+ GstClockTime *ptr;
+
+ if (!enc->t_queue)
+ enc->t_queue = g_queue_new ();
+
+ g_assert (enc->t_queue != NULL);
+
+ ptr = g_slice_new (GstClockTime);
+ *ptr = *timestamp;
+
+ g_queue_push_head (enc->t_queue, ptr);
+ }
+
+ static GstClockTime
+ theora_timefifo_out (GstTheoraEnc * enc)
+ {
+ GstClockTime ret, *ptr;
+
+ g_assert (enc->t_queue != NULL);
+
+ ptr = g_queue_pop_tail (enc->t_queue);
+ g_assert (ptr != NULL);
+
+ ret = *ptr;
+ theora_free_gstclocktime (ptr);
+
+ return ret;
+ }
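
Editor's sketch: the two helpers above form a plain FIFO (push to the queue head, pop from the tail), so timestamps come back out oldest first. A hypothetical usage example, assuming it lives in theoraenc.c next to the helpers (timefifo_example is not part of this patch):

static void
timefifo_example (GstTheoraEnc * enc)
{
  GstClockTime t0 = 0, t1 = GST_SECOND;

  theora_timefifo_in (enc, &t0);
  theora_timefifo_in (enc, &t1);

  g_assert (theora_timefifo_out (enc) == t0);   /* oldest comes out first */
  g_assert (theora_timefifo_out (enc) == t1);
}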
+
+ /**
+ * theora_timefifo_truncate - truncate the timestamp queue.
+ * After encoding a frame we should be left with only one buffer for
+ * the next round, and the count of timestamps should match. If it is
+ * less, something has gone really wrong. If it is more, the encoder
+ * decided to return fewer frames than we ordered.
+ * TODO: for now we simply drop the extra timestamps. A better
+ * solution would probably be to recover the missing frames from the
+ * last buffer, using the recovered timestamps.
+ */
+ static void
+ theora_timefifo_truncate (GstTheoraEnc * enc)
+ {
+ if (enc->dup_on_gap) {
+ guint length;
+ g_assert (enc->t_queue != NULL);
+ length = g_queue_get_length (enc->t_queue);
+
+ if (length > 1) {
+ /* having more than one timestamp left over is not good either */
+ GST_DEBUG_OBJECT (enc, "Dropping %u timestamps", length - 1);
+ while (g_queue_get_length (enc->t_queue) > 1) {
+ theora_timefifo_out (enc);
+ }
+ }
+ }
+ }
+
+ static void
+ theora_timefifo_free (GstTheoraEnc * enc)
+ {
+ if (enc->t_queue) {
+ if (g_queue_get_length (enc->t_queue))
+ g_queue_foreach (enc->t_queue, (GFunc) theora_free_gstclocktime, NULL);
+ g_queue_free (enc->t_queue);
+ enc->t_queue = NULL;
+ }
+ /* prevbuf makes no sense without timestamps,
+ * so clear it too. */
+ if (enc->prevbuf) {
+ gst_buffer_unref (enc->prevbuf);
+ enc->prevbuf = NULL;
+ }
+ }
+
+ static void
+ theora_update_prevbuf (GstTheoraEnc * enc, GstBuffer * buffer)
+ {
+ if (enc->prevbuf) {
+ gst_buffer_unref (enc->prevbuf);
+ enc->prevbuf = NULL;
+ }
+ enc->prevbuf = gst_buffer_ref (buffer);
+ }
+
+ /**
+ * theora_enc_encode_and_push - encode a buffer, or the queued previous buffer
+ * buffer - the buffer to encode. If NULL, only the queued buffers are
+ * encoded, producing duplicate frames as needed.
+ */
+
static GstFlowReturn
theora_enc_encode_and_push (GstTheoraEnc * enc, ogg_packet op,
- GstClockTime timestamp, GstClockTime running_time,
- GstClockTime duration, GstBuffer * buffer)
+ GstBuffer * buffer)
{
GstFlowReturn ret;
++ GstVideoFrame frame;
th_ycbcr_buffer ycbcr;
gint res;
- GstVideoFrame frame;
- gst_video_frame_map (&frame, &enc->vinfo, buffer, GST_MAP_READ);
- theora_enc_init_buffer (ycbcr, &frame);
+ if (enc->dup_on_gap) {
+ guint t_queue_length;
+
+ if (enc->t_queue)
+ t_queue_length = g_queue_get_length (enc->t_queue);
+ else
+ t_queue_length = 0;
+
+ if (buffer) {
+ GstClockTime timestamp = GST_BUFFER_TIMESTAMP (buffer);
+
+ /* videorate can easily create 200 duplicate frames in one shot.
+ * In that case th_encode_ctl will just return TH_EINVAL and we
+ * will generate only one frame as a result. To be more bulletproof,
+ * make sure we never queue more dup frames than the keyframe
+ * interval.
+ */
+ if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_GAP) &&
+ enc->keyframe_force > t_queue_length) {
+ GST_DEBUG_OBJECT (enc, "Got GAP frame, queue as duplicate.");
+
+ theora_timefifo_in (enc, &timestamp);
+ gst_buffer_unref (buffer);
+ return GST_FLOW_OK;
+ } else {
+ theora_timefifo_in (enc, &timestamp);
+ /* We need one frame of delay to keep the frames in the correct
+ * order. The first time we get a buffer, prevbuf is still empty
+ * and nothing else needs to be done here.
+ */
+ if (!enc->prevbuf) {
+ theora_update_prevbuf (enc, buffer);
+ gst_buffer_unref (buffer);
+ return GST_FLOW_OK;
+ } else {
+ theora_update_prevbuf (enc, buffer);
+ /* a timestamp was queued above, so the cached t_queue_length is stale */
+ t_queue_length++;
+
+ if (t_queue_length > 2) {
+ /* t_queue_length now accounts for two real buffers, the current
+ * and the previous one; all others are timestamps of duplicates. */
+ t_queue_length -= 2;
+ res = th_encode_ctl (enc->encoder, TH_ENCCTL_SET_DUP_COUNT,
+ &t_queue_length, sizeof (t_queue_length));
+ if (res < 0)
+ GST_WARNING_OBJECT (enc, "Failed marking dups for last frame");
+ }
+ }
+ }
+ } else {
+ /* if there is no buffer, we probably got EOS or a discontinuity.
+ * We need to encode everything that is left in the queue.
+ */
+ GST_DEBUG_OBJECT (enc, "Encode collected buffers.");
+ if (t_queue_length > 1) {
+ t_queue_length--;
+ res = th_encode_ctl (enc->encoder, TH_ENCCTL_SET_DUP_COUNT,
+ &t_queue_length, sizeof (t_queue_length));
+ if (res < 0)
+ GST_WARNING_OBJECT (enc, "Failed marking dups for last frame.");
+ } else {
+ GST_DEBUG_OBJECT (enc, "Prevbuffer is empty. Nothing to encode.");
+ return GST_FLOW_OK;
+ }
+ }
- theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (enc->prevbuf));
- } else
- theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (buffer));
++ gst_video_frame_map (&frame, &enc->vinfo, enc->prevbuf, GST_MAP_READ);
++ theora_enc_init_buffer (ycbcr, &frame);
++ } else {
++ gst_video_frame_map (&frame, &enc->vinfo, buffer, GST_MAP_READ);
++ theora_enc_init_buffer (ycbcr, &frame);
++ }
- if (theora_enc_is_discontinuous (enc, running_time, duration)) {
+ /* buffer is optional here, so check for it first */
+ if (enc->current_discont && buffer) {
+ GstClockTime timestamp = GST_BUFFER_TIMESTAMP (buffer);
+ GstClockTime running_time =
+ gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
theora_enc_reset (enc);
enc->granulepos_offset =
- gst_util_uint64_scale (running_time, enc->fps_n,
- GST_SECOND * enc->fps_d);
+ gst_util_uint64_scale (running_time, enc->vinfo.fps_n,
+ GST_SECOND * enc->vinfo.fps_d);
enc->timestamp_offset = running_time;
enc->next_ts = 0;
enc->next_discont = TRUE;
ret = GST_FLOW_OK;
while (th_encode_packetout (enc->encoder, 0, &op)) {
- GstClockTime next_time;
+ GstClockTime next_time, duration;
+ GstClockTime timestamp = 0;
+ GST_DEBUG_OBJECT (enc, "encoded. granule:%" G_GINT64_FORMAT ", packet:%p, "
+ "bytes:%ld", op.granulepos, op.packet, op.bytes);
next_time = th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;
+ duration = next_time - enc->next_ts;
+
+ if (enc->dup_on_gap && !enc->current_discont)
+ timestamp = theora_timefifo_out (enc);
+ else
+ timestamp = GST_BUFFER_TIMESTAMP (buffer);
- ret =
- theora_push_packet (enc, &op, timestamp, enc->next_ts,
- next_time - enc->next_ts);
+ ret = theora_push_packet (enc, &op, timestamp, enc->next_ts, duration);
enc->next_ts = next_time;
- if (ret != GST_FLOW_OK)
+ if (ret != GST_FLOW_OK) {
+ theora_timefifo_truncate (enc);
goto data_push;
+ }
}
- gst_buffer_unref (buffer);
+ theora_timefifo_truncate (enc);
+done:
+ gst_video_frame_unmap (&frame);
+ if (buffer)
+ gst_buffer_unref (buffer);
+ enc->current_discont = FALSE;
return ret;
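
Editor's sketch: condensed, the per-buffer decision that drives the dup-on-gap path above is a single predicate (should_queue_as_dup is a hypothetical helper, not part of this patch):

/* Queue an incoming GAP frame as a duplicate only while the number of
 * already-queued duplicates stays under the keyframe interval. */
static gboolean
should_queue_as_dup (GstTheoraEnc * enc, GstBuffer * buffer, guint queued)
{
  return GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_GAP)
      && enc->keyframe_force > queued;
}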
dec->priv->error_count = 0;
gst_audio_decoder_clear_queues (dec);
- gst_audio_info_clear (&dec->priv->ctx.info);
+ gst_audio_info_init (&dec->priv->ctx.info);
memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx));
+ dec->priv->ctx.max_errors = GST_AUDIO_DECODER_MAX_ERRORS;
if (dec->priv->taglist) {
gst_tag_list_free (dec->priv->taglist);
GST_DEBUG ("Adding inner ifd: %x", tag_map[i].exif_tag);
gst_exif_writer_write_tag_header (&writer, tag_map[i].exif_tag,
EXIF_TYPE_LONG, 1,
- gst_byte_writer_get_size (&writer.datawriter), FALSE);
+ gst_byte_writer_get_size (&writer.datawriter), NULL);
- gst_byte_writer_put_data (&writer.datawriter,
- GST_BUFFER_DATA (inner_ifd), GST_BUFFER_SIZE (inner_ifd));
+
+ data = gst_buffer_map (inner_ifd, &size, NULL, GST_MAP_READ);
+ gst_byte_writer_put_data (&writer.datawriter, data, size);
+ gst_buffer_unmap (inner_ifd, data, size);
gst_buffer_unref (inner_ifd);
}
continue;
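
Editor's sketch of the transitional 0.11 map/unmap pattern used in the exif hunk above (size out-parameter, pointer-based unmap); the final 1.0 API later replaced this with GstMapInfo-based gst_buffer_map(). read_buffer_bytes is hypothetical:

#include <gst/gst.h>

static void
read_buffer_bytes (GstBuffer * buf)
{
  gsize size;
  guint8 *data;

  data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
  /* ... consume size bytes starting at data ... */
  gst_buffer_unmap (buf, data, size);
}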
/* now wait for the collected to be finished and mark a new
* segment. After we have the lock, no collect function is running and no
* new collect function will be called for as long as we're flushing. */
- GST_OBJECT_LOCK (adder->collect);
+ GST_COLLECT_PADS2_STREAM_LOCK (adder->collect);
+ adder->segment.rate = rate;
if (curtype == GST_SEEK_TYPE_SET)
- adder->segment_start = cur;
+ adder->segment.start = cur;
else
- adder->segment_start = 0;
+ adder->segment.start = 0;
if (endtype == GST_SEEK_TYPE_SET)
- adder->segment_end = end;
+ adder->segment.stop = end;
else
- adder->segment_end = GST_CLOCK_TIME_NONE;
+ adder->segment.stop = GST_CLOCK_TIME_NONE;
if (flush) {
/* Yes, we need to call _set_flushing again *WHEN* the streaming threads
* have stopped so that the cookie gets properly updated. */
GST_EVENT_TYPE_NAME (event));
switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+ ret = gst_adder_setcaps (adder, pad, caps);
+ gst_event_unref (event);
+
+ goto beach;
+ }
case GST_EVENT_FLUSH_STOP:
- /* we received a flush-stop. The collect_event function will push the
- * event past our element. We simply forward all flush-stop events, even
- * when no flush-stop was pending, this is required because collectpads
- * does not provide an API to handle-but-not-forward the flush-stop.
- * We unset the pending flush-stop flag so that we don't send anymore
- * flush-stop from the collect function later.
+ /* we received a flush-stop. The collect_event function will call the
+ * gst_adder_event function we have set on the GstCollectPads2, so we
+ * have control over whether the event is sent past our element.
+ * We will only forward it when flush_stop_pending is set, and we will
+ * unset it then.
*/
- GST_OBJECT_LOCK (adder->collect);
+ GST_COLLECT_PADS2_STREAM_LOCK (adder->collect);
g_atomic_int_set (&adder->new_segment_pending, TRUE);
- g_atomic_int_set (&adder->flush_stop_pending, FALSE);
/* Clear pending tags */
if (adder->pending_events) {
g_list_foreach (adder->pending_events, (GFunc) gst_event_unref, NULL);
g_list_free (adder->pending_events);
adder->pending_events = NULL;
}
- GST_OBJECT_UNLOCK (adder->collect);
+ GST_COLLECT_PADS2_STREAM_UNLOCK (adder->collect);
break;
case GST_EVENT_TAG:
- GST_OBJECT_LOCK (adder->collect);
+ GST_COLLECT_PADS2_STREAM_LOCK (adder->collect);
/* collect tags here so we can push them out when we collect data */
adder->pending_events = g_list_append (adder->pending_events, event);
- GST_OBJECT_UNLOCK (adder->collect);
+ GST_COLLECT_PADS2_STREAM_UNLOCK (adder->collect);
goto beach;
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
if (g_atomic_int_compare_and_exchange (&adder->wait_for_new_segment,
TRUE, FALSE)) {
/* make sure we push a new segment, to inform about new basetime
break;
}
- /* now GstCollectPads can take care of the rest, e.g. EOS */
+ /* now GstCollectPads2 can take care of the rest, e.g. EOS */
- ret = adder->collect_event (pad, event);
+ ret = adder->collect_event (pad, parent, event);
beach:
- gst_object_unref (adder);
return ret;
}
GST_DEBUG_OBJECT (adder, "request new pad %s", name);
g_free (name);
- gst_pad_set_getcaps_function (newpad,
- GST_DEBUG_FUNCPTR (gst_adder_sink_getcaps));
- gst_pad_set_setcaps_function (newpad, GST_DEBUG_FUNCPTR (gst_adder_setcaps));
+ gst_pad_set_query_function (newpad, GST_DEBUG_FUNCPTR (gst_adder_sink_query));
- gst_collect_pads_add_pad (adder->collect, newpad, sizeof (GstCollectData),
- NULL);
+ gst_collect_pads2_add_pad (adder->collect, newpad, sizeof (GstCollectData2));
/* FIXME: hacked way to override/extend the event function of
- * GstCollectPads; because it sets its own event function giving the
+ * GstCollectPads2; because it sets its own event function giving the
* element no access to events */
adder->collect_event = (GstPadEventFunction) GST_PAD_EVENTFUNC (newpad);
gst_pad_set_event_function (newpad, GST_DEBUG_FUNCPTR (gst_adder_sink_event));
gst_element_remove_pad (element, pad);
}
- static GstBuffer *
- gst_adder_do_clip (GstCollectPads * pads, GstCollectData * data,
- GstBuffer * buffer, gpointer user_data)
+ static GstFlowReturn
+ gst_adder_do_clip (GstCollectPads2 * pads, GstCollectData2 * data,
+ GstBuffer * buffer, GstBuffer ** out, gpointer user_data)
{
GstAdder *adder = GST_ADDER (user_data);
+ gint rate, bpf;
- /* in 0.10 the application might need to seek on newly added source-branches
- * to make it send a newsegment, that is hard to sync and so the segment might
- * not be initialized. Check this here to not trigger the assertion
- */
- if (data->segment.format != GST_FORMAT_UNDEFINED) {
- buffer = gst_audio_buffer_clip (buffer, &data->segment, adder->rate,
- adder->bps);
- }
+ rate = GST_AUDIO_INFO_RATE (&adder->info);
+ bpf = GST_AUDIO_INFO_BPF (&adder->info);
+
+ buffer = gst_audio_buffer_clip (buffer, &data->segment, rate, bpf);
- return buffer;
+ *out = buffer;
+ return GST_FLOW_OK;
}
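
Editor's sketch: for the clip callback above to run, it has to be registered on the collect pads. How that hookup typically looks, assuming the GstCollectPads2 API of this development cycle (adder_init_collect is hypothetical):

#include <gst/base/gstcollectpads2.h>

static void
adder_init_collect (GstAdder * adder)
{
  adder->collect = gst_collect_pads2_new ();
  /* run gst_adder_do_clip on each buffer before it is collected */
  gst_collect_pads2_set_clip_function (adder->collect,
      GST_DEBUG_FUNCPTR (gst_adder_do_clip), adder);
}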
static GstFlowReturn
if (outsize == 0)
goto eos;
+ rate = GST_AUDIO_INFO_RATE (&adder->info);
+ bps = GST_AUDIO_INFO_BPS (&adder->info);
+ bpf = GST_AUDIO_INFO_BPF (&adder->info);
+
GST_LOG_OBJECT (adder,
- "starting to cycle through channels, %d bytes available (bps = %d)",
- outsize, adder->bps);
+ "starting to cycle through channels, %d bytes available (bps = %d, bpf = %d)",
+ outsize, bps, bpf);
for (collected = pads->data; collected; collected = next) {
- GstCollectData *collect_data;
+ GstCollectData2 *collect_data;
GstBuffer *inbuf;
gboolean is_gap;
adder->flush_stop_pending = FALSE;
adder->new_segment_pending = TRUE;
adder->wait_for_new_segment = FALSE;
- adder->segment_start = 0;
- adder->segment_end = GST_CLOCK_TIME_NONE;
- adder->segment_rate = 1.0;
- gst_segment_init (&adder->segment, GST_FORMAT_UNDEFINED);
+ gst_segment_init (&adder->segment, GST_FORMAT_TIME);
- gst_collect_pads_start (adder->collect);
+ gst_collect_pads2_start (adder->collect);
break;
case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
break;
#define __GST_ADDER_H__
#include <gst/gst.h>
- #include <gst/base/gstcollectpads.h>
+ #include <gst/base/gstcollectpads2.h>
+#include <gst/audio/audio.h>
G_BEGIN_DECLS
return NULL;
}
- u = gst_structure_id_empty_new (topology_structure_name);
+ u = gst_structure_new_id_empty (topology_structure_name);
/* Now at the last element */
- if (chain->elements && (chain->endpad || chain->deadend)) {
+ if ((chain->elements || !chain->active_group) &&
+ (chain->endpad || chain->deadend)) {
- s = gst_structure_id_empty_new (topology_structure_name);
+ s = gst_structure_new_id_empty (topology_structure_name);
gst_structure_id_set (u, topology_caps, GST_TYPE_CAPS, chain->endcaps,
NULL);
}
static gboolean
- _has_subtitle_encoding_property (GstElement * element)
+ _setup_parser (GstSubtitleOverlay * self)
{
- GParamSpec *pspec;
+ GstPad *video_peer;
+
+ /* Try to get the latest video framerate */
+ video_peer = gst_pad_get_peer (self->video_sinkpad);
+ if (video_peer) {
+ GstCaps *video_caps;
+ gint fps_n, fps_d;
+
- video_caps = gst_pad_get_negotiated_caps (video_peer);
++ video_caps = gst_pad_get_current_caps (video_peer);
+ if (!video_caps) {
- video_caps = gst_pad_get_caps_reffed (video_peer);
++ video_caps = gst_pad_query_caps (video_peer, NULL);
+ if (!gst_caps_is_fixed (video_caps)) {
+ gst_caps_unref (video_caps);
+ video_caps = NULL;
+ }
+ }
+
- if (video_caps
- && gst_video_parse_caps_framerate (video_caps, &fps_n, &fps_d)) {
- if (self->fps_n != fps_n || self->fps_d != fps_d) {
++ if (video_caps) {
++ GstStructure *st = gst_caps_get_structure (video_caps, 0);
++ if (gst_structure_get_fraction (st, "framerate", &fps_n, &fps_d)) {
+ GST_DEBUG_OBJECT (self, "New video fps: %d/%d", fps_n, fps_d);
+ self->fps_n = fps_n;
+ self->fps_d = fps_d;
+ }
+ }
+
+ if (video_caps)
+ gst_caps_unref (video_caps);
+ gst_object_unref (video_peer);
+ }
+
+ if (_has_property_with_type (G_OBJECT (self->parser), "subtitle-encoding",
+ G_TYPE_STRING))
+ g_object_set (self->parser, "subtitle-encoding", self->encoding, NULL);
+
+ /* Try to set video fps on the parser */
+ gst_subtitle_overlay_set_fps (self);
- pspec =
- g_object_class_find_property (G_OBJECT_GET_CLASS (element),
- "subtitle-encoding");
- return (pspec && pspec->value_type == G_TYPE_STRING);
+
+ return TRUE;
}
static gboolean
- _has_font_desc_property (GstElement * element)
+ _setup_renderer (GstSubtitleOverlay * self, GstElement * renderer)
{
- GParamSpec *pspec;
+ GstElementFactory *factory = gst_element_get_factory (renderer);
+ const gchar *name =
+ gst_plugin_feature_get_name (GST_PLUGIN_FEATURE_CAST (factory));
+
+ if (strcmp (name, "textoverlay") == 0) {
+ /* Set some textoverlay specific properties */
+ g_object_set (G_OBJECT (renderer),
+ "halign", "center", "valign", "bottom", "wait-text", FALSE, NULL);
+ if (self->font_desc)
+ g_object_set (G_OBJECT (renderer), "font-desc", self->font_desc, NULL);
+ self->silent_property = "silent";
+ self->silent_property_invert = FALSE;
+ } else {
+ self->silent_property =
+ _get_silent_property (renderer, &self->silent_property_invert);
+ if (_has_property_with_type (G_OBJECT (renderer), "subtitle-encoding",
+ G_TYPE_STRING))
+ g_object_set (renderer, "subtitle-encoding", self->encoding, NULL);
+ if (_has_property_with_type (G_OBJECT (renderer), "font-desc",
+ G_TYPE_STRING))
+ g_object_set (renderer, "font-desc", self->font_desc, NULL);
+ }
+
+ return TRUE;
+ }
+
+ /* subtitle_src==NULL means: use subtitle_sink ghostpad */
+ static gboolean
+ _link_renderer (GstSubtitleOverlay * self, GstElement * renderer,
+ GstPad * subtitle_src)
+ {
+ GstPad *sink, *src;
+ gboolean is_video, is_hw;
+
+ is_video = _is_video_pad (self->video_sinkpad, &is_hw);
+
+ if (is_video) {
+ gboolean render_is_hw;
+
+ /* First check that renderer also supports the video format */
+ sink = _get_video_pad (renderer);
+ if (G_UNLIKELY (!sink)) {
+ GST_WARNING_OBJECT (self, "Can't get video sink from renderer");
+ return FALSE;
+ }
+
+ if (is_video != _is_video_pad (sink, &render_is_hw) ||
+ is_hw != render_is_hw) {
+ GST_DEBUG_OBJECT (self, "Renderer doesn't support %s video",
+ is_hw ? "surface" : "raw");
+ gst_object_unref (sink);
+ return FALSE;
+ }
+ gst_object_unref (sink);
+
+ if (!is_hw) {
+ /* First link everything internally */
+ if (G_UNLIKELY (!_create_element (self, &self->post_colorspace,
+ COLORSPACE, NULL, "post-colorspace", FALSE))) {
+ return FALSE;
+ }
+ src = gst_element_get_static_pad (renderer, "src");
+ if (G_UNLIKELY (!src)) {
+ GST_WARNING_OBJECT (self, "Can't get src pad from renderer");
+ return FALSE;
+ }
+
+ sink = gst_element_get_static_pad (self->post_colorspace, "sink");
+ if (G_UNLIKELY (!sink)) {
+ GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE);
+ gst_object_unref (src);
+ return FALSE;
+ }
+
+ if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) {
+ GST_WARNING_OBJECT (self, "Can't link renderer with " COLORSPACE);
+ gst_object_unref (src);
+ gst_object_unref (sink);
+ return FALSE;
+ }
+ gst_object_unref (src);
+ gst_object_unref (sink);
+
+ if (G_UNLIKELY (!_create_element (self, &self->pre_colorspace,
+ COLORSPACE, NULL, "pre-colorspace", FALSE))) {
+ return FALSE;
+ }
+
+ sink = _get_video_pad (renderer);
+ if (G_UNLIKELY (!sink)) {
+ GST_WARNING_OBJECT (self, "Can't get video sink from renderer");
+ return FALSE;
+ }
+
+ src = gst_element_get_static_pad (self->pre_colorspace, "src");
+ if (G_UNLIKELY (!src)) {
+ GST_WARNING_OBJECT (self, "Can't get srcpad from " COLORSPACE);
+ gst_object_unref (sink);
+ return FALSE;
+ }
+
+ if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) {
+ GST_WARNING_OBJECT (self, "Can't link " COLORSPACE " to renderer");
+ gst_object_unref (src);
+ gst_object_unref (sink);
+ return FALSE;
+ }
+ gst_object_unref (src);
+ gst_object_unref (sink);
+
+ /* Set src ghostpad target */
+ src = gst_element_get_static_pad (self->post_colorspace, "src");
+ if (G_UNLIKELY (!src)) {
+ GST_WARNING_OBJECT (self, "Can't get src pad from " COLORSPACE);
+ return FALSE;
+ }
+ } else {
+ /* Set src ghostpad target in the hardware-accelerated case */
+
+ src = gst_element_get_static_pad (renderer, "src");
+ if (G_UNLIKELY (!src)) {
+ GST_WARNING_OBJECT (self, "Can't get src pad from renderer");
+ return FALSE;
+ }
+ }
+ } else { /* No video pad */
+ GstCaps *allowed_caps, *video_caps = NULL;
+ GstPad *video_peer;
+ gboolean can_intersect = FALSE;
+
+ video_peer = gst_pad_get_peer (self->video_sinkpad);
+ if (video_peer) {
- video_caps = gst_pad_get_negotiated_caps (video_peer);
++ video_caps = gst_pad_get_current_caps (video_peer);
+ if (!video_caps) {
- video_caps = gst_pad_get_caps_reffed (video_peer);
++ video_caps = gst_pad_query_caps (video_peer, NULL);
+ }
+ gst_object_unref (video_peer);
+ }
- pspec =
- g_object_class_find_property (G_OBJECT_GET_CLASS (element), "font-desc");
- return (pspec && pspec->value_type == G_TYPE_STRING);
+ sink = _get_video_pad (renderer);
+ if (G_UNLIKELY (!sink)) {
+ GST_WARNING_OBJECT (self, "Can't get video sink from renderer");
+ return FALSE;
+ }
- allowed_caps = gst_pad_get_caps_reffed (sink);
++ allowed_caps = gst_pad_query_caps (sink, NULL);
+ gst_object_unref (sink);
+
+ if (allowed_caps && video_caps)
+ can_intersect = gst_caps_can_intersect (allowed_caps, video_caps);
+
+ if (allowed_caps)
+ gst_caps_unref (allowed_caps);
+
+ if (video_caps)
+ gst_caps_unref (video_caps);
+
+ if (G_UNLIKELY (!can_intersect)) {
+ GST_WARNING_OBJECT (self, "Renderer with custom caps is not "
+ "compatible with video stream");
+ return FALSE;
+ }
+
+ src = gst_element_get_static_pad (renderer, "src");
+ if (G_UNLIKELY (!src)) {
+ GST_WARNING_OBJECT (self, "Can't get src pad from renderer");
+ return FALSE;
+ }
+ }
+
+ if (G_UNLIKELY (!gst_ghost_pad_set_target (GST_GHOST_PAD_CAST
+ (self->srcpad), src))) {
+ GST_WARNING_OBJECT (self, "Can't set srcpad target");
+ gst_object_unref (src);
+ return FALSE;
+ }
+ gst_object_unref (src);
+
+ /* Set the sink ghostpad targets */
+ if (self->pre_colorspace) {
+ sink = gst_element_get_static_pad (self->pre_colorspace, "sink");
+ if (G_UNLIKELY (!sink)) {
+ GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE);
+ return FALSE;
+ }
+ } else {
+ sink = _get_video_pad (renderer);
+ if (G_UNLIKELY (!sink)) {
+ GST_WARNING_OBJECT (self, "Can't get sink pad from %" GST_PTR_FORMAT,
+ renderer);
+ return FALSE;
+ }
+ }
+
+ if (G_UNLIKELY (!gst_ghost_pad_set_target (GST_GHOST_PAD_CAST
+ (self->video_sinkpad), sink))) {
+ GST_WARNING_OBJECT (self, "Can't set video sinkpad target");
+ gst_object_unref (sink);
+ return FALSE;
+ }
+ gst_object_unref (sink);
+
+ sink = _get_sub_pad (renderer);
+ if (G_UNLIKELY (!sink)) {
+ GST_WARNING_OBJECT (self, "Failed to get subpad");
+ return FALSE;
+ }
+
+ if (subtitle_src) {
+ if (G_UNLIKELY (gst_pad_link (subtitle_src, sink) != GST_PAD_LINK_OK)) {
+ GST_WARNING_OBJECT (self, "Failed to link subtitle srcpad with renderer");
+ gst_object_unref (sink);
+ return FALSE;
+ }
+ } else {
+ if (G_UNLIKELY (!gst_ghost_pad_set_target (GST_GHOST_PAD_CAST
+ (self->subtitle_sinkpad), sink))) {
+ GST_WARNING_OBJECT (self, "Failed to set subtitle sink target");
+ gst_object_unref (sink);
+ return FALSE;
+ }
+ }
+ gst_object_unref (sink);
+
+ return TRUE;
}
-static void
-_pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data)
+static GstPadProbeReturn
+_pad_blocked_cb (GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
GstSubtitleOverlay *self = GST_SUBTITLE_OVERLAY_CAST (user_data);
GstCaps *subcaps;
"parser", FALSE))))
continue;
- element = is_renderer ? self->renderer : self->parser;
-
- is_video = _is_video_pad (self->video_sinkpad, &is_hw);
- /* If this is a parser, create textoverlay and link video and the parser to it
- * Else link the renderer to the output colorspace */
if (!is_renderer) {
- GstElement *overlay;
- GstPad *video_peer;
-
- /* Try to get the latest video framerate */
- video_peer = gst_pad_get_peer (self->video_sinkpad);
- if (video_peer) {
- GstCaps *video_caps;
-
- video_caps = gst_pad_get_current_caps (video_peer);
- if (!video_caps) {
- video_caps = gst_pad_query_caps (video_peer, NULL);
- if (!gst_caps_is_fixed (video_caps)) {
- gst_caps_unref (video_caps);
- video_caps = NULL;
- }
- }
-
- if (video_caps) {
- GstVideoInfo info;
+ GstCaps *parser_caps;
+ GList *overlay_factories, *k;
- if (gst_video_info_from_caps (&info, video_caps)) {
- if (self->fps_n != info.fps_n || self->fps_d != info.fps_d) {
- GST_DEBUG_OBJECT (self, "New video fps: %d/%d", info.fps_n,
- info.fps_d);
- self->fps_n = info.fps_n;
- self->fps_d = info.fps_d;
- }
- }
- }
-
- if (video_caps)
- gst_caps_unref (video_caps);
- gst_object_unref (video_peer);
- }
-
- if (_has_subtitle_encoding_property (self->parser))
- g_object_set (self->parser, "subtitle-encoding", self->encoding, NULL);
-
- /* Try to set video fps on the parser */
- gst_subtitle_overlay_set_fps (self);
-
- /* First link everything internally */
- if (G_UNLIKELY (!_create_element (self, &self->overlay, "textoverlay",
- NULL, "overlay", FALSE))) {
+ if (!_setup_parser (self))
continue;
- }
- overlay = self->overlay;
- self->silent_property = "silent";
- self->silent_property_invert = FALSE;
- /* Set some properties */
- g_object_set (G_OBJECT (overlay),
- "halign", "center", "valign", "bottom", "wait-text", FALSE, NULL);
- if (self->font_desc)
- g_object_set (G_OBJECT (overlay), "font-desc", self->font_desc, NULL);
+ /* Find our factories */
+ src = gst_element_get_static_pad (self->parser, "src");
- parser_caps = gst_pad_get_caps_reffed (src);
++ parser_caps = gst_pad_query_caps (src, NULL);
+ gst_object_unref (src);
- src = gst_element_get_static_pad (element, "src");
- if (G_UNLIKELY (!src)) {
- continue;
- }
+ g_assert (parser_caps != NULL);
- sink = gst_element_get_static_pad (overlay, "text_sink");
- if (G_UNLIKELY (!sink)) {
- GST_WARNING_OBJECT (self, "Can't get text sink from textoverlay");
- gst_object_unref (src);
- continue;
- }
+ g_mutex_lock (self->factories_lock);
+ gst_subtitle_overlay_update_factory_list (self);
+ GST_DEBUG_OBJECT (self,
+ "Searching overlay factories for caps %" GST_PTR_FORMAT, parser_caps);
+ overlay_factories =
+ gst_subtitle_overlay_get_factories_for_caps (self->factories,
+ parser_caps);
+ g_mutex_unlock (self->factories_lock);
- if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) {
- GST_WARNING_OBJECT (self, "Can't link parser to textoverlay");
- gst_object_unref (sink);
- gst_object_unref (src);
+ if (!overlay_factories) {
+ GST_WARNING_OBJECT (self,
+ "Found no suitable overlay factories for caps %" GST_PTR_FORMAT,
+ parser_caps);
+ gst_caps_unref (parser_caps);
continue;
}
- gst_object_unref (sink);
- gst_object_unref (src);
-
- /* If we are working with video/x-surface, we do not add
- * colorspace conversion elements */
- if (is_video && !is_hw) {
- if (G_UNLIKELY (!_create_element (self, &self->post_colorspace,
- COLORSPACE, NULL, "post-colorspace", FALSE))) {
- continue;
- }
-
- src = gst_element_get_static_pad (overlay, "src");
- if (G_UNLIKELY (!src)) {
- GST_WARNING_OBJECT (self, "Can't get src pad from overlay");
- continue;
- }
-
- sink = gst_element_get_static_pad (self->post_colorspace, "sink");
- if (G_UNLIKELY (!sink)) {
- GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE);
- gst_object_unref (src);
- continue;
- }
+ gst_caps_unref (parser_caps);
- if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) {
- GST_WARNING_OBJECT (self, "Can't link overlay with " COLORSPACE);
- gst_object_unref (src);
- gst_object_unref (sink);
- continue;
- }
- gst_object_unref (src);
- gst_object_unref (sink);
+ /* Sort the factories by rank */
+ overlay_factories =
+ g_list_sort (overlay_factories, (GCompareFunc) _sort_by_ranks);
- if (G_UNLIKELY (!_create_element (self, &self->pre_colorspace,
- "identity", NULL, "pre-colorspace", FALSE))) {
- continue;
- }
+ for (k = overlay_factories; k; k = k->next) {
+ GstElementFactory *overlay_factory = k->data;
- sink = gst_element_get_static_pad (overlay, "video_sink");
- if (G_UNLIKELY (!sink)) {
- GST_WARNING_OBJECT (self, "Can't get video sink from textoverlay");
- continue;
- }
+ GST_DEBUG_OBJECT (self, "Trying overlay factory '%s'",
+ GST_STR_NULL (gst_plugin_feature_get_name (GST_PLUGIN_FEATURE_CAST
+ (overlay_factory))));
- src = gst_element_get_static_pad (self->pre_colorspace, "src");
- if (G_UNLIKELY (!src)) {
- GST_WARNING_OBJECT (self, "Can't get srcpad from " COLORSPACE);
- gst_object_unref (sink);
- continue;
- }
+ /* Try this factory and link it, otherwise unlink everything
+ * again and remove the overlay. Up to this point only the
+ * parser was instantiated and set up; nothing was linked.
+ */
- if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) {
- GST_WARNING_OBJECT (self, "Can't link " COLORSPACE " to textoverlay");
- gst_object_unref (src);
- gst_object_unref (sink);
- continue;
- }
- gst_object_unref (src);
- gst_object_unref (sink);
+ gst_ghost_pad_set_target (GST_GHOST_PAD_CAST (self->srcpad), NULL);
+ gst_ghost_pad_set_target (GST_GHOST_PAD_CAST (self->video_sinkpad),
+ NULL);
+ gst_ghost_pad_set_target (GST_GHOST_PAD_CAST (self->subtitle_sinkpad),
+ NULL);
+ self->silent_property = NULL;
+ _remove_element (self, &self->post_colorspace);
+ _remove_element (self, &self->overlay);
+ _remove_element (self, &self->pre_colorspace);
- /* Set src ghostpad target */
- src = gst_element_get_static_pad (self->post_colorspace, "src");
- if (G_UNLIKELY (!src)) {
- GST_WARNING_OBJECT (self, "Can't get src pad from " COLORSPACE);
+ if (!_create_element (self, &self->overlay, NULL, overlay_factory,
+ "overlay", FALSE))
continue;
- }
- if (G_UNLIKELY (!gst_ghost_pad_set_target (GST_GHOST_PAD_CAST
- (self->srcpad), src))) {
- GST_WARNING_OBJECT (self, "Can't set srcpad target");
- gst_object_unref (src);
+ if (!_setup_renderer (self, self->overlay))
continue;
- }
- gst_object_unref (src);
- } else if (is_hw) {
- GST_DEBUG_OBJECT (self,
- "Is Hardware, not adding colorspace converters, ");
- /* Set src ghostpad target */
- src = gst_element_get_static_pad (self->overlay, "src");
- if (G_UNLIKELY (!src)) {
- GST_WARNING_OBJECT (self, "Can't get src pad from textoverlay");
- continue;
- }
- if (G_UNLIKELY (!gst_ghost_pad_set_target (GST_GHOST_PAD_CAST
- (self->srcpad), src))) {
- GST_WARNING_OBJECT (self, "Can't set srcpad target");
+ src = gst_element_get_static_pad (self->parser, "src");
+ if (!_link_renderer (self, self->overlay, src)) {
gst_object_unref (src);
continue;
}
self->subtitle_error = TRUE;
_setup_passthrough (self);
do_async_done (self);
- } else {
- GST_DEBUG_OBJECT (self, "Everything worked, unblocking pads");
- unblock_video (self);
- unblock_subtitle (self);
- do_async_done (self);
+ goto out;
}
- GstEvent *event1, *event2;
+ /* Send segments to the renderer if necessary. These are not sent
+ * outside this element because of the proxy pad event handler */
+ if (self->video_segment.format != GST_FORMAT_UNDEFINED) {
- _generate_update_newsegment_event (&self->video_segment, &event1, &event2);
- GST_DEBUG_OBJECT (self,
- "Pushing video accumulate newsegment event: %" GST_PTR_FORMAT,
- event1->structure);
++ GstEvent *event1;
+ GstPad *sink;
+
+ if (self->pre_colorspace) {
+ sink = gst_element_get_static_pad (self->pre_colorspace, "sink");
+ } else {
+ sink = _get_video_pad ((self->renderer) ? self->renderer : self->overlay);
+ }
+
- "Pushing video update newsegment event: %" GST_PTR_FORMAT,
- event2->structure);
++ _generate_update_segment_event (&self->video_segment, &event1);
+ GST_DEBUG_OBJECT (self,
- gst_pad_send_event (sink, event2);
++ "Pushing video update segment event: %" GST_PTR_FORMAT,
++ gst_event_get_structure (event1));
+ gst_pad_send_event (sink, event1);
- GstEvent *event1, *event2;
+ gst_object_unref (sink);
+ }
+
+ if (self->subtitle_segment.format != GST_FORMAT_UNDEFINED) {
- _generate_update_newsegment_event (&self->subtitle_segment, &event1,
- &event2);
++ GstEvent *event1;
+ GstPad *sink;
+
+ if (self->renderer)
+ sink = _get_sub_pad (self->renderer);
+ else
+ sink = gst_element_get_static_pad (self->parser, "sink");
+
- "Pushing subtitle accumulate newsegment event: %" GST_PTR_FORMAT,
- event1->structure);
- GST_DEBUG_OBJECT (self,
- "Pushing subtitle update newsegment event: %" GST_PTR_FORMAT,
- event2->structure);
++ _generate_update_segment_event (&self->subtitle_segment, &event1);
+ GST_DEBUG_OBJECT (self,
- gst_pad_send_event (sink, event2);
++ "Pushing subtitle update segment event: %" GST_PTR_FORMAT,
++ gst_event_get_structure (event1));
+ gst_pad_send_event (sink, event1);
- gst_pad_set_blocked_async_full (self->video_block_pad, FALSE,
- _pad_blocked_cb, self, NULL);
- gst_pad_set_blocked_async_full (self->subtitle_block_pad, FALSE,
- _pad_blocked_cb, self, NULL);
+ gst_object_unref (sink);
+ }
+
+ GST_DEBUG_OBJECT (self, "Everything worked, unblocking pads");
++ unblock_video (self);
++ unblock_subtitle (self);
+ do_async_done (self);
+
out:
if (factories)
gst_plugin_feature_list_free (factories);
s = gst_event_get_structure (event);
if (s && gst_structure_id_has_field (s, _subtitle_overlay_event_marker_id)) {
- GST_DEBUG_OBJECT (ghostpad, "Dropping event with marker: %" GST_PTR_FORMAT,
- event);
+ GST_DEBUG_OBJECT (ghostpad,
- "Dropping event with marker: %" GST_PTR_FORMAT, event->structure);
++ "Dropping event with marker: %" GST_PTR_FORMAT,
++ gst_event_get_structure (event));
gst_event_unref (event);
event = NULL;
ret = TRUE;
}
static gboolean
-gst_subtitle_overlay_video_sink_event (GstPad * pad, GstEvent * event)
+gst_subtitle_overlay_video_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event)
{
- GstSubtitleOverlay *self = GST_SUBTITLE_OVERLAY (gst_pad_get_parent (pad));
+ GstSubtitleOverlay *self = GST_SUBTITLE_OVERLAY (parent);
gboolean ret;
- if (GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) {
- GST_DEBUG_OBJECT (pad,
- "Resetting video segment because of flush-stop event");
- gst_segment_init (&self->video_segment, GST_FORMAT_UNDEFINED);
- self->fps_n = self->fps_d = 0;
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_FLUSH_STOP:
+ {
+ GST_DEBUG_OBJECT (pad,
+ "Resetting video segment because of flush-stop event");
+ gst_segment_init (&self->video_segment, GST_FORMAT_UNDEFINED);
+ self->fps_n = self->fps_d = 0;
+ break;
+ }
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+ ret = gst_subtitle_overlay_video_sink_setcaps (self, caps);
+ if (!ret)
+ goto done;
+ break;
+ }
+ default:
+ break;
}
- ret = gst_proxy_pad_event_default (pad, gst_event_ref (event));
-
- if (GST_EVENT_TYPE (event) == GST_EVENT_NEWSEGMENT) {
- gboolean update;
- gdouble rate, applied_rate;
- GstFormat format;
- gint64 start, stop, position;
+ ret = gst_proxy_pad_event_default (pad, parent, gst_event_ref (event));
- GST_DEBUG_OBJECT (pad, "Newsegment event: %" GST_PTR_FORMAT,
- event->structure);
- gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
- &format, &start, &stop, &position);
-
- if (format != GST_FORMAT_TIME) {
- GST_ERROR_OBJECT (pad, "Newsegment event in non-time format: %s",
- gst_format_get_name (format));
- gst_event_unref (event);
- gst_object_unref (self);
- return FALSE;
- }
+ if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
+ GST_DEBUG_OBJECT (pad, "segment event: %" GST_PTR_FORMAT, event);
+ gst_event_copy_segment (event, &self->video_segment);
- GST_DEBUG_OBJECT (pad, "Old video segment: %" GST_SEGMENT_FORMAT,
- &self->video_segment);
- gst_segment_set_newsegment_full (&self->video_segment, update, rate,
- applied_rate, format, start, stop, position);
- GST_DEBUG_OBJECT (pad, "New video segment: %" GST_SEGMENT_FORMAT,
- &self->video_segment);
+ if (self->video_segment.format != GST_FORMAT_TIME)
+ goto invalid_format;
}
+done:
gst_event_unref (event);
- gst_object_unref (self);
+
return ret;
- GST_ERROR_OBJECT (pad, "Newsegment event in non-time format: %s",
+
+ /* ERRORS */
+invalid_format:
+ {
++ GST_ERROR_OBJECT (pad, "Segment event in non-time format: %s",
+ gst_format_get_name (self->video_segment.format));
+ ret = FALSE;
+ goto done;
+ }
}
static GstFlowReturn
{
GstCaps *caps;
gchar *string;
+ GstAudioFormat fmt;
+
++ GST_DEBUG ("channels:%d, endianness:%d, width:%d, depth:%d, signedness:%d",
++ channels, endianness, width, depth, signedness);
++
+ fmt = gst_audio_format_build_integer (signedness, endianness, width, depth);
- string = g_strdup_printf ("audio/x-raw-int, "
+ string = g_strdup_printf ("audio/x-raw, "
+ "format = (string) %s, "
"rate = (int) 44100, "
- "channels = (int) %d, "
- "endianness = (int) %s, "
- "width = (int) %d, "
- "depth = (int) %d, "
- "signed = (boolean) %s ",
- channels, endianness, width, depth, signedness ? "true" : "false");
+ "channels = (int) %d", gst_audio_format_to_string (fmt), channels);
GST_DEBUG ("creating caps from %s", string);
caps = gst_caps_from_string (string);
g_free (string);
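
Editor's sketch: the printf round-trip above could equally be expressed with gst_caps_new_simple, assuming the 0.11 audio library API used in this hunk (make_int_audio_caps is hypothetical):

#include <gst/audio/audio.h>

static GstCaps *
make_int_audio_caps (gboolean sign, gint endianness, gint width,
    gint depth, gint channels)
{
  GstAudioFormat fmt =
      gst_audio_format_build_integer (sign, endianness, width, depth);

  return gst_caps_new_simple ("audio/x-raw",
      "format", G_TYPE_STRING, gst_audio_format_to_string (fmt),
      "rate", G_TYPE_INT, 44100,
      "channels", G_TYPE_INT, channels, NULL);
}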
/* there shouldn't be any errors */
fail_if (gst_bus_poll (GST_ELEMENT_BUS (pipe), GST_MESSAGE_ERROR, 0) != NULL);
++ GST_DEBUG ("Resetting pipeline");
++
/* reset */
- gst_element_set_state (pipe, GST_STATE_NULL);
+ gst_element_set_state (pipe, GST_STATE_READY);
sink = gst_bin_get_by_name (GST_BIN (pipe), "sink");
gst_bin_remove (GST_BIN (pipe), sink);
};
/* test data */
- buf = gst_buffer_new ();
-
+ j = 0;
i = 0;
- while (test_data[i].xmp_data) {
- gsize len;
-
- GST_DEBUG ("trying test-data %u", i);
-
- text = g_strconcat (xmp_header, test_data[i].xmp_data, xmp_footer, NULL);
-
- buf = gst_buffer_new ();
- len = strlen (text) + 1;
- gst_buffer_take_memory (buf, -1,
- gst_memory_new_wrapped (0, text, NULL, len, 0, len));
-
- list = gst_tag_list_from_xmp_buffer (buf);
- if (test_data[i].result_size >= 0) {
- fail_unless (list != NULL);
-
- result_size = gst_structure_n_fields ((GstStructure *) list);
- fail_unless (result_size == test_data[i].result_size);
-
- /* check the taglist content */
- switch (test_data[i].result_test) {
- case 0:
- ASSERT_TAG_LIST_HAS_STRING (list, "description", "test");
- break;
- default:
- break;
+ while (xmp_footers[j]) {
+ while (test_data[i].xmp_data) {
++ gsize len;
++
+ GST_DEBUG ("trying test-data %u", i);
+
+ text =
+ g_strconcat (xmp_header, test_data[i].xmp_data, xmp_footers[j], NULL);
- GST_BUFFER_DATA (buf) = (guint8 *) text;
- GST_BUFFER_SIZE (buf) = strlen (text) + 1;
++
++ buf = gst_buffer_new ();
++ len = strlen (text) + 1;
++ gst_buffer_take_memory (buf, -1,
++ gst_memory_new_wrapped (0, text, NULL, len, 0, len));
+
+ list = gst_tag_list_from_xmp_buffer (buf);
+ if (test_data[i].result_size >= 0) {
+ fail_unless (list != NULL);
+
+ result_size = gst_structure_n_fields ((GstStructure *) list);
+ fail_unless (result_size == test_data[i].result_size);
+
+ /* check the taglist content */
+ switch (test_data[i].result_test) {
+ case 0:
+ ASSERT_TAG_LIST_HAS_STRING (list, "description", "test");
+ break;
+ default:
+ break;
+ }
}
- }
- if (list)
- gst_tag_list_free (list);
+ if (list)
+ gst_tag_list_free (list);
- gst_buffer_unref (buf);
- g_free (text);
- i++;
++ gst_buffer_unref (buf);
+ g_free (text);
+ i++;
+ }
+ j++;
}
-
- gst_buffer_unref (buf);
}
GST_END_TEST;
fourcc = GST_MAKE_FOURCC (s[0], s[1], s[2], s[3]);
fmt = gst_video_format_from_fourcc (fourcc);
-- if (fmt == GST_VIDEO_FORMAT_UNKNOWN)
++ if (fmt == GST_VIDEO_FORMAT_UNKNOWN) {
++ GST_DEBUG ("Unknown format %s, skipping tests", fourcc_list[i].fourcc);
continue;
++ }
+
+ vf_info = gst_video_format_get_info (fmt);
+ fail_unless (vf_info != NULL);
+
+ fail_unless_equals_int (GST_VIDEO_FORMAT_INFO_FORMAT (vf_info), fmt);
- GST_INFO ("Fourcc %s, packed=%", fourcc_list[i].fourcc,
+ GST_INFO ("Fourcc %s, packed=%d", fourcc_list[i].fourcc,
gst_video_format_is_packed (fmt));
- fail_unless (gst_video_format_is_yuv (fmt));
+ fail_unless (GST_VIDEO_FORMAT_INFO_IS_YUV (vf_info));
/* use any non-NULL pointer so we can compare against NULL */
{
GST_DEBUG ("testing caps: %" GST_PTR_FORMAT, caps);
- fail_unless (gst_video_format_parse_caps (caps, &fmt, &w, &h));
- fail_unless_equals_int (fmt, formats[i].fmt);
- fail_unless_equals_int (w, 2 * (i + 1));
- fail_unless_equals_int (h, i + 1);
+ gst_video_info_init (&vinfo);
+ fail_unless (gst_video_info_from_caps (&vinfo, caps));
+ fail_unless_equals_int (GST_VIDEO_INFO_FORMAT (&vinfo), formats[i].fmt);
+ fail_unless_equals_int (GST_VIDEO_INFO_WIDTH (&vinfo), 2 * (i + 1));
+ fail_unless_equals_int (GST_VIDEO_INFO_HEIGHT (&vinfo), i + 1);
/* make sure they're serialised back correctly */
- caps2 = gst_video_format_new_caps (fmt, w, h, 15, 1, 1, 1);
+ caps2 = gst_video_info_to_caps (&vinfo);
fail_unless (caps != NULL);
-- fail_unless (gst_caps_is_equal (caps, caps2));
++ fail_unless (gst_caps_is_equal (caps, caps2),
++ "caps [%" GST_PTR_FORMAT "] not equal to caps2 [%" GST_PTR_FORMAT "]",
++ caps, caps2);
gst_caps_unref (caps);
gst_caps_unref (caps2);
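
Editor's sketch: the caps comparison in the test above relies on GstVideoInfo round-tripping cleanly through caps; a minimal version of that round-trip, assuming the 0.11 video library API (check_video_info_roundtrip is hypothetical):

#include <gst/video/video.h>

static void
check_video_info_roundtrip (void)
{
  GstVideoInfo info;
  GstCaps *caps;

  gst_video_info_init (&info);
  gst_video_info_set_format (&info, GST_VIDEO_FORMAT_I420, 320, 240);

  /* serialise to caps and parse back; both directions must agree */
  caps = gst_video_info_to_caps (&info);
  g_assert (gst_video_info_from_caps (&info, caps));
  gst_caps_unref (caps);
}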