From d05c805b16fd16d7bba346e25533137f082116a2 Mon Sep 17 00:00:00 2001 From: Iago Toral Date: Thu, 6 Aug 2009 15:28:00 +0200 Subject: [PATCH] Added a draft implementation of gstbaseaudiodecoder --- gst-libs/gst/audio/gstbaseaudiodecoder.c | 1138 ++++++------------------------ gst-libs/gst/audio/gstbaseaudiodecoder.h | 9 + 2 files changed, 206 insertions(+), 941 deletions(-) diff --git a/gst-libs/gst/audio/gstbaseaudiodecoder.c b/gst-libs/gst/audio/gstbaseaudiodecoder.c index 8e942bf..56808f6 100644 --- a/gst-libs/gst/audio/gstbaseaudiodecoder.c +++ b/gst-libs/gst/audio/gstbaseaudiodecoder.c @@ -31,18 +31,15 @@ GST_DEBUG_CATEGORY_EXTERN (baseaudio_debug); static void gst_base_audio_decoder_finalize (GObject * object); -static gboolean gst_base_audio_decoder_sink_setcaps (GstPad * pad, - GstCaps * caps); -static gboolean gst_base_audio_decoder_sink_event (GstPad * pad, - GstEvent * event); static gboolean gst_base_audio_decoder_src_event (GstPad * pad, GstEvent * event); static GstFlowReturn gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buf); static gboolean gst_base_audio_decoder_sink_query (GstPad * pad, GstQuery * query); -static GstStateChangeReturn gst_base_audio_decoder_change_state (GstElement * - element, GstStateChange transition); +static gboolean gst_base_audio_decoder_sink_convert (GstPad * pad, + GstFormat src_format, gint64 src_value, GstFormat * dest_format, + gint64 * dest_value); static const GstQueryType *gst_base_audio_decoder_get_query_types (GstPad * pad); static gboolean gst_base_audio_decoder_src_query (GstPad * pad, @@ -53,16 +50,6 @@ static gboolean gst_base_audio_decoder_src_convert (GstPad * pad, static void gst_base_audio_decoder_reset (GstBaseAudioDecoder * base_audio_decoder); -static guint64 -gst_base_audio_decoder_get_timestamp (GstBaseAudioDecoder * base_audio_decoder, - int picture_number); -static guint64 -gst_base_audio_decoder_get_field_timestamp (GstBaseAudioDecoder * - base_audio_decoder, int field_offset); -static GstAudioFrame *gst_base_audio_decoder_new_frame (GstBaseAudioDecoder * - base_audio_decoder); -static void gst_base_audio_decoder_free_frame (GstAudioFrame * frame); - GST_BOILERPLATE (GstBaseAudioDecoder, gst_base_audio_decoder, GstBaseAudioCodec, GST_TYPE_BASE_AUDIO_CODEC); @@ -76,15 +63,11 @@ static void gst_base_audio_decoder_class_init (GstBaseAudioDecoderClass * klass) { GObjectClass *gobject_class; - GstElementClass *gstelement_class; gobject_class = G_OBJECT_CLASS (klass); - gstelement_class = GST_ELEMENT_CLASS (klass); gobject_class->finalize = gst_base_audio_decoder_finalize; - gstelement_class->change_state = gst_base_audio_decoder_change_state; - parent_class = g_type_class_peek_parent (klass); } @@ -99,8 +82,6 @@ gst_base_audio_decoder_init (GstBaseAudioDecoder * base_audio_decoder, pad = GST_BASE_AUDIO_CODEC_SINK_PAD (base_audio_decoder); gst_pad_set_chain_function (pad, gst_base_audio_decoder_chain); - gst_pad_set_event_function (pad, gst_base_audio_decoder_sink_event); - gst_pad_set_setcaps_function (pad, gst_base_audio_decoder_sink_setcaps); gst_pad_set_query_function (pad, gst_base_audio_decoder_sink_query); pad = GST_BASE_AUDIO_CODEC_SRC_PAD (base_audio_decoder); @@ -108,339 +89,219 @@ gst_base_audio_decoder_init (GstBaseAudioDecoder * base_audio_decoder, gst_pad_set_event_function (pad, gst_base_audio_decoder_src_event); gst_pad_set_query_type_function (pad, gst_base_audio_decoder_get_query_types); gst_pad_set_query_function (pad, gst_base_audio_decoder_src_query); - - base_audio_decoder->input_adapter = 
gst_adapter_new (); - base_audio_decoder->output_adapter = gst_adapter_new (); - - gst_segment_init (&base_audio_decoder->state.segment, GST_FORMAT_TIME); - gst_base_audio_decoder_reset (base_audio_decoder); - - base_audio_decoder->current_frame = - gst_base_audio_decoder_new_frame (base_audio_decoder); - - base_audio_decoder->sink_clipping = TRUE; -} - -static gboolean -gst_base_audio_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) -{ - GstBaseAudioDecoder *base_audio_decoder; - GstBaseAudioDecoderClass *base_audio_decoder_class; - GstStructure *structure; - const GValue *codec_data; - - base_audio_decoder = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - base_audio_decoder_class = - GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); - - GST_DEBUG ("setcaps %" GST_PTR_FORMAT, caps); - - if (base_audio_decoder->codec_data) { - gst_buffer_unref (base_audio_decoder->codec_data); - base_audio_decoder->codec_data = NULL; - } - - structure = gst_caps_get_structure (caps, 0); - - codec_data = gst_structure_get_value (structure, "codec_data"); - if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER) { - base_audio_decoder->codec_data = gst_value_get_buffer (codec_data); - } - - if (base_audio_decoder_class->start) { - base_audio_decoder_class->start (base_audio_decoder); - } - - g_object_unref (base_audio_decoder); - - return TRUE; } static void gst_base_audio_decoder_finalize (GObject * object) { GstBaseAudioDecoder *base_audio_decoder; - GstBaseAudioDecoderClass *base_audio_decoder_class; g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (object)); + + GST_DEBUG_OBJECT (object, "finalize"); + base_audio_decoder = GST_BASE_AUDIO_DECODER (object); - base_audio_decoder_class = GST_BASE_AUDIO_DECODER_GET_CLASS (object); gst_base_audio_decoder_reset (base_audio_decoder); - GST_DEBUG_OBJECT (object, "finalize"); - G_OBJECT_CLASS (parent_class)->finalize (object); } +/* FIXME: implement */ static gboolean -gst_base_audio_decoder_sink_event (GstPad * pad, GstEvent * event) +gst_base_audio_decoder_src_convert (GstPad * pad, GstFormat src_format, + gint64 src_value, GstFormat * dest_format, gint64 * dest_value) { - GstBaseAudioDecoder *base_audio_decoder; - GstBaseAudioDecoderClass *base_audio_decoder_class; - gboolean ret = FALSE; + return TRUE; +} - base_audio_decoder = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - base_audio_decoder_class = - GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); +#ifndef GST_DISABLE_INDEX +static gboolean +gst_base_audio_decoder_index_seek (GstBaseAudioDecoder *base_audio_decoder, + GstIndex *index, GstPad * pad, GstEvent * event) +{ + GstIndexEntry *entry; + gdouble rate; + GstFormat format; + GstSeekFlags flags; + GstSeekType cur_type, stop_type; + gint64 cur, stop; + gint index_id; + GstBaseAudioCodec *base_audio_codec; + + base_audio_codec = GST_BASE_AUDIO_CODEC (base_audio_decoder); + + gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, + &stop_type, &stop); + + gst_index_get_writer_id (index, GST_OBJECT (base_audio_decoder), &index_id); + entry = gst_index_get_assoc_entry (index, index_id, + GST_INDEX_LOOKUP_BEFORE, GST_ASSOCIATION_FLAG_KEY_UNIT, format, cur); + + if (entry && gst_pad_is_linked (base_audio_codec->sinkpad)) { + const GstFormat *peer_formats, *try_formats; + + /* since we know the exact byteoffset of the frame, + make sure to seek on bytes first */ + const GstFormat try_all_formats[] = { + GST_FORMAT_BYTES, + GST_FORMAT_TIME, + 0 + }; + + try_formats = try_all_formats; + +#if 0 + /* FIXE ME */ + 
peer_formats = + gst_pad_get_formats (GST_PAD_PEER (base_audio_codec->sinkpad)); +#else + peer_formats = try_all_formats; +#endif - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_EOS: - { - GstAudioFrame *frame; - - frame = g_malloc0 (sizeof (GstAudioFrame)); - frame->presentation_frame_number = - base_audio_decoder->presentation_frame_number; - frame->presentation_duration = 0; - base_audio_decoder->presentation_frame_number++; - - base_audio_decoder->frames = - g_list_append (base_audio_decoder->frames, frame); - if (base_audio_decoder_class->finish) { - base_audio_decoder_class->finish (base_audio_decoder, frame); + while (gst_formats_contains (peer_formats, *try_formats)) { + gint64 value; + + if (gst_index_entry_assoc_map (entry, *try_formats, &value)) { + GstEvent *seek_event; + + GST_DEBUG_OBJECT (base_audio_decoder, + "index %s %" G_GINT64_FORMAT + " -> %s %" G_GINT64_FORMAT, + gst_format_get_details (format)->nick, + cur, + gst_format_get_details (*try_formats)->nick, + value); + + seek_event = gst_event_new_seek (rate, *try_formats, flags, + cur_type, value, stop_type, stop); + + if (gst_pad_push_event (base_audio_codec->sinkpad, seek_event)) { + return TRUE; + } } - ret = - gst_pad_push_event (GST_BASE_AUDIO_CODEC_SRC_PAD (base_audio_decoder), - event); + try_formats++; } - break; - case GST_EVENT_NEWSEGMENT: - { - gboolean update; - double rate; - double applied_rate; - GstFormat format; - gint64 start; - gint64 stop; - gint64 position; - - gst_event_parse_new_segment_full (event, &update, &rate, - &applied_rate, &format, &start, &stop, &position); - - if (format != GST_FORMAT_TIME) - goto newseg_wrong_format; - - GST_DEBUG ("new segment %lld %lld", start, position); - - gst_segment_set_newsegment_full (&base_audio_decoder->state.segment, - update, rate, applied_rate, format, start, stop, position); - - ret = - gst_pad_push_event (GST_BASE_AUDIO_CODEC_SRC_PAD (base_audio_decoder), - event); - } - break; - default: - /* FIXME this changes the order of events */ - ret = - gst_pad_push_event (GST_BASE_AUDIO_CODEC_SRC_PAD (base_audio_decoder), - event); - break; } -done: - gst_object_unref (base_audio_decoder); - return ret; - -newseg_wrong_format: - { - GST_DEBUG_OBJECT (base_audio_decoder, "received non TIME newsegment"); - gst_event_unref (event); - goto done; - } + return FALSE; } +#endif static gboolean -gst_base_audio_decoder_src_event (GstPad * pad, GstEvent * event) -{ - GstBaseAudioDecoder *base_audio_decoder; - gboolean res = FALSE; - - base_audio_decoder = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_SEEK: - { - GstFormat format, tformat; - gdouble rate; - GstEvent *real_seek; - GstSeekFlags flags; - GstSeekType cur_type, stop_type; - gint64 cur, stop; - gint64 tcur, tstop; - - gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, - &cur, &stop_type, &stop); - gst_event_unref (event); - - tformat = GST_FORMAT_TIME; - res = - gst_base_audio_decoder_src_convert (pad, format, cur, &tformat, - &tcur); - if (!res) - goto convert_error; - res = - gst_base_audio_decoder_src_convert (pad, format, stop, &tformat, - &tstop); - if (!res) - goto convert_error; - - real_seek = gst_event_new_seek (rate, GST_FORMAT_TIME, - flags, cur_type, tcur, stop_type, tstop); - - res = - gst_pad_push_event (GST_BASE_AUDIO_CODEC_SINK_PAD - (base_audio_decoder), real_seek); - - break; - } - case GST_EVENT_QOS: - { - gdouble proportion; - GstClockTimeDiff diff; - GstClockTime timestamp; - - gst_event_parse_qos (event, 
&proportion, &diff, &timestamp);
-
-      GST_OBJECT_LOCK (base_audio_decoder);
-      base_audio_decoder->proportion = proportion;
-      base_audio_decoder->earliest_time = timestamp + diff;
-      GST_OBJECT_UNLOCK (base_audio_decoder);
+gst_base_audio_decoder_normal_seek (GstBaseAudioDecoder *base_audio_decoder,
+    GstPad *pad, GstEvent *event)
+{
+  gdouble rate;
+  GstFormat format, conv;
+  GstSeekFlags flags;
+  GstSeekType cur_type, stop_type;
+  gint64 cur, stop;
+  gint64 time_cur, bytes_cur;
+  gint64 time_stop, bytes_stop;
+  gboolean res;
+  GstEvent *peer_event;
+  GstBaseAudioDecoderClass *base_audio_decoder_class;
+  GstBaseAudioCodec *base_audio_codec;
-      GST_DEBUG_OBJECT (base_audio_decoder,
-          "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT ", %g",
-          GST_TIME_ARGS (timestamp), diff, proportion);
+  base_audio_codec = GST_BASE_AUDIO_CODEC (base_audio_decoder);
+  base_audio_decoder_class =
+      GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder);
-      res =
-          gst_pad_push_event (GST_BASE_AUDIO_CODEC_SINK_PAD
-          (base_audio_decoder), event);
-      break;
-    }
-    default:
-      res =
-          gst_pad_push_event (GST_BASE_AUDIO_CODEC_SINK_PAD
-          (base_audio_decoder), event);
-      break;
+  gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur,
+      &stop_type, &stop);
+
+  res = FALSE;
+
+  /* Try to seek in time */
+  conv = GST_FORMAT_TIME;
+  if (!gst_base_audio_decoder_src_convert (pad, format, cur, &conv, &time_cur))
+    goto convert_failed;
+  if (!gst_base_audio_decoder_src_convert (pad, format, stop, &conv, &time_stop))
+    goto convert_failed;
+
+  GST_DEBUG ("seek to time %" GST_TIME_FORMAT "-%" GST_TIME_FORMAT,
+      GST_TIME_ARGS (time_cur), GST_TIME_ARGS (time_stop));
+
+  peer_event = gst_event_new_seek (rate, GST_FORMAT_TIME, flags,
+      cur_type, time_cur, stop_type, time_stop);
+
+  res = gst_pad_push_event (base_audio_codec->sinkpad, peer_event);
+
+  /* Try seek in bytes if seek in time failed */
+  if (!res) {
+    conv = GST_FORMAT_BYTES;
+    if (!gst_base_audio_decoder_sink_convert (pad, GST_FORMAT_TIME, time_cur,
+        &conv, &bytes_cur))
+      goto convert_failed;
+    if (!gst_base_audio_decoder_sink_convert (pad, GST_FORMAT_TIME, time_stop,
+        &conv, &bytes_stop))
+      goto convert_failed;
+
+    peer_event =
+        gst_event_new_seek (rate, GST_FORMAT_BYTES, flags, cur_type, bytes_cur,
+        stop_type, bytes_stop);
+
+    res = gst_pad_push_event (base_audio_codec->sinkpad, peer_event);
   }
-done:
-  gst_object_unref (base_audio_decoder);
+  return res;
-convert_error:
-  GST_DEBUG_OBJECT (base_audio_decoder, "could not convert format");
-  goto done;
+  /* ERRORS */
+  convert_failed:
+  {
+    GST_DEBUG_OBJECT (base_audio_decoder, "failed to convert format %u", format);
+    return FALSE;
+  }
 }
-
-#if 0
 static gboolean
-gst_base_audio_decoder_sink_convert (GstPad * pad,
-    GstFormat src_format, gint64 src_value,
-    GstFormat * dest_format, gint64 * dest_value)
+gst_base_audio_decoder_seek (GstBaseAudioDecoder *base_audio_decoder,
+    GstPad *pad, GstEvent *event)
 {
-  gboolean res = TRUE;
-  GstBaseAudioDecoder *enc;
-
-  if (src_format == *dest_format) {
-    *dest_value = src_value;
-    return TRUE;
-  }
-
-  enc = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad));
-
-  /* FIXME: check if we are in a decoding state */
+  gboolean res;
-  switch (src_format) {
-    case GST_FORMAT_BYTES:
-      switch (*dest_format) {
-#if 0
-        case GST_FORMAT_DEFAULT:
-          *dest_value = gst_util_uint64_scale_int (src_value, 1,
-              enc->bytes_per_picture);
-          break;
+#ifndef GST_DISABLE_INDEX
+  GstIndex *index = gst_element_get_index (GST_ELEMENT (base_audio_decoder));
+  if (index) {
+    res = 
gst_base_audio_decoder_index_seek (base_audio_decoder, + index, pad, event); + gst_object_unref (index); + } else #endif - case GST_FORMAT_TIME: - /* seems like a rather silly conversion, implement me if you like */ - default: - res = FALSE; - } - break; - case GST_FORMAT_DEFAULT: - switch (*dest_format) { - case GST_FORMAT_TIME: - *dest_value = gst_util_uint64_scale (src_value, - GST_SECOND * enc->fps_d, enc->fps_n); - break; -#if 0 - case GST_FORMAT_BYTES: - *dest_value = gst_util_uint64_scale_int (src_value, - enc->bytes_per_picture, 1); - break; -#endif - default: - res = FALSE; - } - break; - default: - res = FALSE; - break; - } + res = gst_base_audio_decoder_normal_seek (base_audio_decoder, pad, event); + + return res; } -#endif static gboolean -gst_base_audio_decoder_src_convert (GstPad * pad, - GstFormat src_format, gint64 src_value, - GstFormat * dest_format, gint64 * dest_value) +gst_base_audio_decoder_src_event (GstPad * pad, GstEvent * event) { - gboolean res = TRUE; - GstBaseAudioDecoder *enc; + gboolean res; + GstBaseAudioDecoder *base_audio_decoder; + GstBaseAudioCodec *base_audio_codec; + GstBaseAudioDecoderClass *base_audio_decoder_class; + + base_audio_decoder = GST_BASE_AUDIO_DECODER (GST_PAD_PARENT (pad)); + base_audio_codec = GST_BASE_AUDIO_CODEC (base_audio_decoder); + base_audio_decoder_class = + GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); - if (src_format == *dest_format) { - *dest_value = src_value; - return TRUE; + switch (GST_EVENT_TYPE (event)) { + case GST_EVENT_SEEK:{ + gst_event_ref (event); + res = gst_pad_push_event (base_audio_codec->sinkpad, event); + if (!res) { + res = gst_base_audio_decoder_seek (base_audio_decoder, pad, event); + } + gst_event_unref (event); + break; } - - enc = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - - /* FIXME: check if we are in a encoding state */ - - GST_DEBUG ("src convert"); - switch (src_format) { -#if 0 - case GST_FORMAT_DEFAULT: - switch (*dest_format) { - case GST_FORMAT_TIME: - *dest_value = gst_util_uint64_scale (granulepos_to_frame (src_value), - enc->fps_d * GST_SECOND, enc->fps_n); - break; - default: - res = FALSE; - } - break; - case GST_FORMAT_TIME: - switch (*dest_format) { - case GST_FORMAT_DEFAULT: - { - *dest_value = gst_util_uint64_scale (src_value, - enc->fps_n, enc->fps_d * GST_SECOND); - break; - } - default: - res = FALSE; - break; - } - break; -#endif - default: - res = FALSE; - break; + default: + res = gst_pad_push_event (base_audio_codec->sinkpad, event); + break; } - - gst_object_unref (enc); - return res; } @@ -463,8 +324,7 @@ gst_base_audio_decoder_src_query (GstPad * pad, GstQuery * query) enc = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - switch GST_QUERY_TYPE - (query) { + switch GST_QUERY_TYPE (query) { case GST_QUERY_CONVERT: { GstFormat src_fmt, dest_fmt; @@ -491,216 +351,73 @@ error: return res; } +/* FIXME: implement */ static gboolean -gst_base_audio_decoder_sink_query (GstPad * pad, GstQuery * query) +gst_base_audio_decoder_sink_convert (GstPad * pad, GstFormat src_format, + gint64 src_value, GstFormat * dest_format, gint64 * dest_value) { - GstBaseAudioDecoder *base_audio_decoder; - gboolean res = FALSE; - - base_audio_decoder = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - - GST_DEBUG_OBJECT (base_audio_decoder, "sink query fps=%d/%d", - base_audio_decoder->state.fps_n, base_audio_decoder->state.fps_d); - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_CONVERT: - { - GstFormat src_fmt, dest_fmt; - gint64 src_val, dest_val; - - 
gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - res = gst_base_audio_rawaudio_convert (&base_audio_decoder->state, - src_fmt, src_val, &dest_fmt, &dest_val); - if (!res) - goto error; - gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); - break; - } - default: - res = gst_pad_query_default (pad, query); - break; - } -done: - gst_object_unref (base_audio_decoder); - - return res; -error: - GST_DEBUG_OBJECT (base_audio_decoder, "query failed"); - goto done; + return TRUE; } - -#if 0 +/* FIXME: implement */ static gboolean -gst_pad_is_negotiated (GstPad * pad) +gst_base_audio_decoder_sink_query (GstPad * pad, GstQuery * query) { - GstCaps *caps; - - g_return_val_if_fail (pad != NULL, FALSE); - - caps = gst_pad_get_negotiated_caps (pad); - if (caps) { - gst_caps_unref (caps); - return TRUE; - } - - return FALSE; + return TRUE; } -#endif static void gst_base_audio_decoder_reset (GstBaseAudioDecoder * base_audio_decoder) { - GstBaseAudioDecoderClass *base_audio_decoder_class; - GList *g; + GstBaseAudioCodecClass *base_audio_codec_class; + GstBaseAudioCodec *base_audio_codec; - base_audio_decoder_class = - GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); + base_audio_codec = GST_BASE_AUDIO_CODEC (base_audio_decoder); + base_audio_codec_class = GST_BASE_AUDIO_CODEC_GET_CLASS (base_audio_codec); GST_DEBUG ("reset"); - base_audio_decoder->started = FALSE; - - base_audio_decoder->discont = TRUE; - base_audio_decoder->have_sync = FALSE; - - base_audio_decoder->timestamp_offset = GST_CLOCK_TIME_NONE; - base_audio_decoder->system_frame_number = 0; - base_audio_decoder->presentation_frame_number = 0; - base_audio_decoder->last_sink_timestamp = GST_CLOCK_TIME_NONE; - base_audio_decoder->last_sink_offset_end = GST_CLOCK_TIME_NONE; - base_audio_decoder->base_picture_number = 0; - base_audio_decoder->last_timestamp = GST_CLOCK_TIME_NONE; - - base_audio_decoder->offset = 0; - - if (base_audio_decoder->caps) { - gst_caps_unref (base_audio_decoder->caps); - base_audio_decoder->caps = NULL; - } - - if (base_audio_decoder->current_frame) { - gst_base_audio_decoder_free_frame (base_audio_decoder->current_frame); - base_audio_decoder->current_frame = NULL; - } - - base_audio_decoder->have_src_caps = FALSE; - - for (g = g_list_first (base_audio_decoder->frames); g; g = g_list_next (g)) { - GstAudioFrame *frame = g->data; - gst_base_audio_decoder_free_frame (frame); + if (base_audio_codec_class->reset) { + base_audio_codec_class->reset (base_audio_codec); } - g_list_free (base_audio_decoder->frames); - base_audio_decoder->frames = NULL; - - if (base_audio_decoder_class->reset) { - base_audio_decoder_class->reset (base_audio_decoder); - } -} - -static GstBuffer * -gst_adapter_get_buffer (GstAdapter * adapter) -{ - return gst_buffer_ref (GST_BUFFER (adapter->buflist->data)); - } static GstFlowReturn gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buf) { GstBaseAudioDecoder *base_audio_decoder; - GstBaseAudioDecoderClass *klass; - GstBuffer *buffer; + GstBaseAudioCodec *base_audio_codec; + GstBaseAudioDecoderClass *base_audio_decoder_class; + GstBaseAudioCodecClass *base_audio_codec_class; GstFlowReturn ret; GST_DEBUG ("chain %lld", GST_BUFFER_TIMESTAMP (buf)); -#if 0 - /* requiring the pad to be negotiated makes it impossible to use - * oggdemux or filesrc ! 
decoder */ - if (!gst_pad_is_negotiated (pad)) { - GST_DEBUG ("not negotiated"); - return GST_FLOW_NOT_NEGOTIATED; - } -#endif - base_audio_decoder = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); + base_audio_codec = GST_BASE_AUDIO_CODEC (base_audio_decoder); + base_audio_decoder_class = GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); + base_audio_codec_class = GST_BASE_AUDIO_CODEC_GET_CLASS (base_audio_decoder); GST_DEBUG_OBJECT (base_audio_decoder, "chain"); if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { GST_DEBUG_OBJECT (base_audio_decoder, "received DISCONT buffer"); - if (base_audio_decoder->started) { + if (base_audio_codec->started) { gst_base_audio_decoder_reset (base_audio_decoder); } } - if (!base_audio_decoder->started) { - klass->start (base_audio_decoder); - base_audio_decoder->started = TRUE; + if (!base_audio_codec->started) { + base_audio_codec_class->start (base_audio_codec); + base_audio_codec->started = TRUE; } - if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) { - GST_DEBUG ("timestamp %lld offset %lld", GST_BUFFER_TIMESTAMP (buf), - base_audio_decoder->offset); - base_audio_decoder->last_sink_timestamp = GST_BUFFER_TIMESTAMP (buf); - } - if (GST_BUFFER_OFFSET_END (buf) != -1) { - GST_DEBUG ("gp %lld", GST_BUFFER_OFFSET_END (buf)); - base_audio_decoder->last_sink_offset_end = GST_BUFFER_OFFSET_END (buf); - } base_audio_decoder->offset += GST_BUFFER_SIZE (buf); -#if 0 - if (base_audio_decoder->timestamp_offset == GST_CLOCK_TIME_NONE && - GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) { - GST_DEBUG ("got new offset %lld", GST_BUFFER_TIMESTAMP (buf)); - base_audio_decoder->timestamp_offset = GST_BUFFER_TIMESTAMP (buf); - } -#endif - - if (base_audio_decoder->current_frame == NULL) { - base_audio_decoder->current_frame = - gst_base_audio_decoder_new_frame (base_audio_decoder); - } - - gst_adapter_push (base_audio_decoder->input_adapter, buf); - - if (!base_audio_decoder->have_sync) { - int n, m; - - GST_DEBUG ("no sync, scanning"); - - n = gst_adapter_available (base_audio_decoder->input_adapter); - m = klass->scan_for_sync (base_audio_decoder, FALSE, 0, n); - - if (m >= n) { - g_warning ("subclass scanned past end %d >= %d", m, n); - } - - gst_adapter_flush (base_audio_decoder->input_adapter, m); - - if (m < n) { - GST_DEBUG ("found possible sync after %d bytes (of %d)", m, n); - - /* this is only "maybe" sync */ - base_audio_decoder->have_sync = TRUE; - } - - if (!base_audio_decoder->have_sync) { - gst_object_unref (base_audio_decoder); - return GST_FLOW_OK; - } - } - - /* FIXME: use gst_adapter_prev_timestamp() here instead? 
*/ - buffer = gst_adapter_get_buffer (base_audio_decoder->input_adapter); - - base_audio_decoder->buffer_timestamp = GST_BUFFER_TIMESTAMP (buffer); - gst_buffer_unref (buffer); + gst_adapter_push (base_audio_codec->input_adapter, buf); do { - ret = klass->parse_data (base_audio_decoder, FALSE); + ret = base_audio_decoder_class->parse_data (base_audio_decoder); } while (ret == GST_FLOW_OK); if (ret == GST_BASE_AUDIO_DECODER_FLOW_NEED_DATA) { @@ -711,464 +428,3 @@ gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buf) gst_object_unref (base_audio_decoder); return ret; } - -static GstStateChangeReturn -gst_base_audio_decoder_change_state (GstElement * element, - GstStateChange transition) -{ - GstBaseAudioDecoder *base_audio_decoder; - GstBaseAudioDecoderClass *base_audio_decoder_class; - GstStateChangeReturn ret; - - base_audio_decoder = GST_BASE_AUDIO_DECODER (element); - base_audio_decoder_class = GST_BASE_AUDIO_DECODER_GET_CLASS (element); - - switch (transition) { - default: - break; - } - - ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); - - switch (transition) { - case GST_STATE_CHANGE_PAUSED_TO_READY: - if (base_audio_decoder_class->stop) { - base_audio_decoder_class->stop (base_audio_decoder); - } - break; - default: - break; - } - - return ret; -} - -static void -gst_base_audio_decoder_free_frame (GstAudioFrame * frame) -{ - g_return_if_fail (frame != NULL); - - if (frame->sink_buffer) { - gst_buffer_unref (frame->sink_buffer); - } -#if 0 - if (frame->src_buffer) { - gst_buffer_unref (frame->src_buffer); - } -#endif - - g_free (frame); -} - -static GstAudioFrame * -gst_base_audio_decoder_new_frame (GstBaseAudioDecoder * base_audio_decoder) -{ - GstAudioFrame *frame; - - frame = g_malloc0 (sizeof (GstAudioFrame)); - - frame->system_frame_number = base_audio_decoder->system_frame_number; - base_audio_decoder->system_frame_number++; - - frame->decode_frame_number = frame->system_frame_number - - base_audio_decoder->reorder_depth; - - frame->decode_timestamp = -1; - frame->presentation_timestamp = -1; - frame->presentation_duration = -1; - frame->n_fields = 2; - - return frame; -} - -GstFlowReturn -gst_base_audio_decoder_finish_frame (GstBaseAudioDecoder * base_audio_decoder, - GstAudioFrame * frame) -{ - GstBaseAudioDecoderClass *base_audio_decoder_class; - GstBuffer *src_buffer; - - GST_DEBUG ("finish frame"); - - base_audio_decoder_class = - GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); - - GST_DEBUG ("finish frame sync=%d pts=%lld", frame->is_sync_point, - frame->presentation_timestamp); - - if (frame->is_sync_point) { - if (GST_CLOCK_TIME_IS_VALID (frame->presentation_timestamp)) { - if (frame->presentation_timestamp != base_audio_decoder->timestamp_offset) { - GST_DEBUG ("sync timestamp %lld diff %lld", - frame->presentation_timestamp, - frame->presentation_timestamp - - base_audio_decoder->state.segment.start); - base_audio_decoder->timestamp_offset = frame->presentation_timestamp; - base_audio_decoder->field_index = 0; - } else { - /* This case is for one initial timestamp and no others, e.g., - * filesrc ! decoder ! xvimagesink */ - GST_WARNING ("sync timestamp didn't change, ignoring"); - frame->presentation_timestamp = GST_CLOCK_TIME_NONE; - } - } else { - GST_WARNING ("sync point doesn't have timestamp"); - if (GST_CLOCK_TIME_IS_VALID (base_audio_decoder->timestamp_offset)) { - GST_ERROR ("No base timestamp. 
Assuming frames start at 0"); - base_audio_decoder->timestamp_offset = 0; - base_audio_decoder->field_index = 0; - } - } - } - frame->field_index = base_audio_decoder->field_index; - base_audio_decoder->field_index += frame->n_fields; - - if (frame->presentation_timestamp == GST_CLOCK_TIME_NONE) { - frame->presentation_timestamp = - gst_base_audio_decoder_get_field_timestamp (base_audio_decoder, - frame->field_index); - frame->presentation_duration = GST_CLOCK_TIME_NONE; - frame->decode_timestamp = - gst_base_audio_decoder_get_timestamp (base_audio_decoder, - frame->decode_frame_number); - } - if (frame->presentation_duration == GST_CLOCK_TIME_NONE) { - frame->presentation_duration = - gst_base_audio_decoder_get_field_timestamp (base_audio_decoder, - frame->field_index + frame->n_fields) - frame->presentation_timestamp; - } - - if (GST_CLOCK_TIME_IS_VALID (base_audio_decoder->last_timestamp)) { - if (frame->presentation_timestamp < base_audio_decoder->last_timestamp) { - GST_WARNING ("decreasing timestamp (%lld < %lld)", - frame->presentation_timestamp, base_audio_decoder->last_timestamp); - } - } - base_audio_decoder->last_timestamp = frame->presentation_timestamp; - - GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT); - if (base_audio_decoder->state.interlaced) { -#ifndef GST_AUDIO_BUFFER_TFF -#define GST_AUDIO_BUFFER_TFF (GST_MINI_OBJECT_FLAG_LAST << 5) -#endif -#ifndef GST_AUDIO_BUFFER_RFF -#define GST_AUDIO_BUFFER_RFF (GST_MINI_OBJECT_FLAG_LAST << 6) -#endif -#ifndef GST_AUDIO_BUFFER_ONEFIELD -#define GST_AUDIO_BUFFER_ONEFIELD (GST_MINI_OBJECT_FLAG_LAST << 7) -#endif - int tff = base_audio_decoder->state.top_field_first; - - if (frame->field_index & 1) { - tff ^= 1; - } - if (tff) { - GST_BUFFER_FLAG_SET (frame->src_buffer, GST_AUDIO_BUFFER_TFF); - } else { - GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_AUDIO_BUFFER_TFF); - } - GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_AUDIO_BUFFER_RFF); - GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_AUDIO_BUFFER_ONEFIELD); - if (frame->n_fields == 3) { - GST_BUFFER_FLAG_SET (frame->src_buffer, GST_AUDIO_BUFFER_RFF); - } else if (frame->n_fields == 1) { - GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_AUDIO_BUFFER_ONEFIELD); - } - } - - GST_BUFFER_TIMESTAMP (frame->src_buffer) = frame->presentation_timestamp; - GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration; - GST_BUFFER_OFFSET (frame->src_buffer) = -1; - GST_BUFFER_OFFSET_END (frame->src_buffer) = -1; - - GST_DEBUG ("pushing frame %lld", frame->presentation_timestamp); - - base_audio_decoder->frames = - g_list_remove (base_audio_decoder->frames, frame); - - gst_base_audio_decoder_set_src_caps (base_audio_decoder); - - src_buffer = frame->src_buffer; - frame->src_buffer = NULL; - - gst_base_audio_decoder_free_frame (frame); - - if (base_audio_decoder->sink_clipping) { - gint64 start = GST_BUFFER_TIMESTAMP (src_buffer); - gint64 stop = GST_BUFFER_TIMESTAMP (src_buffer) + - GST_BUFFER_DURATION (src_buffer); - - if (gst_segment_clip (&base_audio_decoder->state.segment, GST_FORMAT_TIME, - start, stop, &start, &stop)) { - GST_BUFFER_TIMESTAMP (src_buffer) = start; - GST_BUFFER_DURATION (src_buffer) = stop - start; - } else { - GST_DEBUG ("dropping buffer outside segment"); - gst_buffer_unref (src_buffer); - return GST_FLOW_OK; - } - } - - return gst_pad_push (GST_BASE_AUDIO_CODEC_SRC_PAD (base_audio_decoder), - src_buffer); -} - -int -gst_base_audio_decoder_get_height (GstBaseAudioDecoder * base_audio_decoder) -{ - return base_audio_decoder->state.height; -} 
- -int -gst_base_audio_decoder_get_width (GstBaseAudioDecoder * base_audio_decoder) -{ - return base_audio_decoder->state.width; -} - -GstFlowReturn -gst_base_audio_decoder_end_of_stream (GstBaseAudioDecoder * base_audio_decoder, - GstBuffer * buffer) -{ - - if (base_audio_decoder->frames) { - GST_DEBUG ("EOS with frames left over"); - } - - return gst_pad_push (GST_BASE_AUDIO_CODEC_SRC_PAD (base_audio_decoder), - buffer); -} - -void -gst_base_audio_decoder_add_to_frame (GstBaseAudioDecoder * base_audio_decoder, - int n_bytes) -{ - GstBuffer *buf; - - GST_DEBUG ("add to frame"); - -#if 0 - if (gst_adapter_available (base_audio_decoder->output_adapter) == 0) { - GstBuffer *buffer; - - buffer = - gst_adapter_get_orig_buffer_at_offset - (base_audio_decoder->input_adapter, 0); - if (buffer) { - base_audio_decoder->current_frame->presentation_timestamp = - GST_BUFFER_TIMESTAMP (buffer); - gst_buffer_unref (buffer); - } - } -#endif - - if (n_bytes == 0) - return; - - buf = gst_adapter_take_buffer (base_audio_decoder->input_adapter, n_bytes); - - gst_adapter_push (base_audio_decoder->output_adapter, buf); -} - -static guint64 -gst_base_audio_decoder_get_timestamp (GstBaseAudioDecoder * base_audio_decoder, - int picture_number) -{ - if (base_audio_decoder->state.fps_d == 0) { - return -1; - } - if (picture_number < base_audio_decoder->base_picture_number) { - return base_audio_decoder->timestamp_offset - - (gint64) gst_util_uint64_scale (base_audio_decoder->base_picture_number - - picture_number, base_audio_decoder->state.fps_d * GST_SECOND, - base_audio_decoder->state.fps_n); - } else { - return base_audio_decoder->timestamp_offset + - gst_util_uint64_scale (picture_number - - base_audio_decoder->base_picture_number, - base_audio_decoder->state.fps_d * GST_SECOND, - base_audio_decoder->state.fps_n); - } -} - -static guint64 -gst_base_audio_decoder_get_field_timestamp (GstBaseAudioDecoder * - base_audio_decoder, int field_offset) -{ - if (base_audio_decoder->state.fps_d == 0) { - return GST_CLOCK_TIME_NONE; - } - if (field_offset < 0) { - GST_WARNING ("field offset < 0"); - return GST_CLOCK_TIME_NONE; - } - return base_audio_decoder->timestamp_offset + - gst_util_uint64_scale (field_offset, - base_audio_decoder->state.fps_d * GST_SECOND, - base_audio_decoder->state.fps_n * 2); -} - - -GstFlowReturn -gst_base_audio_decoder_have_frame (GstBaseAudioDecoder * base_audio_decoder) -{ - GstAudioFrame *frame = base_audio_decoder->current_frame; - GstBuffer *buffer; - GstBaseAudioDecoderClass *base_audio_decoder_class; - GstFlowReturn ret = GST_FLOW_OK; - int n_available; - - GST_DEBUG ("have_frame"); - - base_audio_decoder_class = - GST_BASE_AUDIO_DECODER_GET_CLASS (base_audio_decoder); - - n_available = gst_adapter_available (base_audio_decoder->output_adapter); - if (n_available) { - buffer = gst_adapter_take_buffer (base_audio_decoder->output_adapter, - n_available); - } else { - buffer = gst_buffer_new_and_alloc (0); - } - - frame->distance_from_sync = base_audio_decoder->distance_from_sync; - base_audio_decoder->distance_from_sync++; - -#if 0 - if (frame->presentation_timestamp == GST_CLOCK_TIME_NONE) { - frame->presentation_timestamp = - gst_base_audio_decoder_get_timestamp (base_audio_decoder, - frame->presentation_frame_number); - frame->presentation_duration = - gst_base_audio_decoder_get_timestamp (base_audio_decoder, - frame->presentation_frame_number + 1) - frame->presentation_timestamp; - frame->decode_timestamp = - gst_base_audio_decoder_get_timestamp (base_audio_decoder, - 
frame->decode_frame_number); - } -#endif - -#if 0 - GST_BUFFER_TIMESTAMP (buffer) = frame->presentation_timestamp; - GST_BUFFER_DURATION (buffer) = frame->presentation_duration; - if (frame->decode_frame_number < 0) { - GST_BUFFER_OFFSET (buffer) = 0; - } else { - GST_BUFFER_OFFSET (buffer) = frame->decode_timestamp; - } - GST_BUFFER_OFFSET_END (buffer) = GST_CLOCK_TIME_NONE; -#endif - - GST_DEBUG ("pts %" GST_TIME_FORMAT, - GST_TIME_ARGS (frame->presentation_timestamp)); - GST_DEBUG ("dts %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->decode_timestamp)); - GST_DEBUG ("dist %d", frame->distance_from_sync); - - if (frame->is_sync_point) { - GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); - } else { - GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT); - } - if (base_audio_decoder->discont) { - GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT); - base_audio_decoder->discont = FALSE; - } - - frame->sink_buffer = buffer; - - base_audio_decoder->frames = g_list_append (base_audio_decoder->frames, - frame); - - /* do something with frame */ - ret = base_audio_decoder_class->handle_frame (base_audio_decoder, frame); - if (!GST_FLOW_IS_SUCCESS (ret)) { - GST_DEBUG ("flow error!"); - } - - /* create new frame */ - base_audio_decoder->current_frame = - gst_base_audio_decoder_new_frame (base_audio_decoder); - - return ret; -} - -GstAudioState * -gst_base_audio_decoder_get_state (GstBaseAudioDecoder * base_audio_decoder) -{ - return &base_audio_decoder->state; - -} - -void -gst_base_audio_decoder_set_state (GstBaseAudioDecoder * base_audio_decoder, - GstAudioState * state) -{ - memcpy (&base_audio_decoder->state, state, sizeof (*state)); - -} - -void -gst_base_audio_decoder_lost_sync (GstBaseAudioDecoder * base_audio_decoder) -{ - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (base_audio_decoder)); - - GST_DEBUG ("lost_sync"); - - if (gst_adapter_available (base_audio_decoder->input_adapter) >= 1) { - gst_adapter_flush (base_audio_decoder->input_adapter, 1); - } - - base_audio_decoder->have_sync = FALSE; -} - -void -gst_base_audio_decoder_set_sync_point (GstBaseAudioDecoder * base_audio_decoder) -{ - GST_DEBUG ("set_sync_point"); - - base_audio_decoder->current_frame->is_sync_point = TRUE; - base_audio_decoder->distance_from_sync = 0; - - base_audio_decoder->current_frame->presentation_timestamp = - base_audio_decoder->last_sink_timestamp; - - -} - -GstAudioFrame * -gst_base_audio_decoder_get_frame (GstBaseAudioDecoder * base_audio_decoder, - int frame_number) -{ - GList *g; - - for (g = g_list_first (base_audio_decoder->frames); g; g = g_list_next (g)) { - GstAudioFrame *frame = g->data; - - if (frame->system_frame_number == frame_number) { - return frame; - } - } - - return NULL; -} - -void -gst_base_audio_decoder_set_src_caps (GstBaseAudioDecoder * base_audio_decoder) -{ - GstCaps *caps; - GstAudioState *state = &base_audio_decoder->state; - - if (base_audio_decoder->have_src_caps) - return; - - caps = gst_audio_format_new_caps (state->format, - state->width, state->height, - state->fps_n, state->fps_d, state->par_n, state->par_d); - gst_caps_set_simple (caps, "interlaced", - G_TYPE_BOOLEAN, state->interlaced, NULL); - - GST_DEBUG ("setting caps %" GST_PTR_FORMAT, caps); - - gst_pad_set_caps (GST_BASE_AUDIO_CODEC_SRC_PAD (base_audio_decoder), caps); - - base_audio_decoder->have_src_caps = TRUE; -} diff --git a/gst-libs/gst/audio/gstbaseaudiodecoder.h b/gst-libs/gst/audio/gstbaseaudiodecoder.h index cd9676c..6150bbd 100644 --- a/gst-libs/gst/audio/gstbaseaudiodecoder.h +++ 
b/gst-libs/gst/audio/gstbaseaudiodecoder.h @@ -43,6 +43,13 @@ G_BEGIN_DECLS #define GST_IS_BASE_AUDIO_DECODER_CLASS(obj) \ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_DECODER)) +/** + * GST_BASE_AUDIO_DECODER_FLOW_NEED_DATA: + * + * Custom GstFlowReturn value indicating that more data is needed. + */ +#define GST_BASE_AUDIO_DECODER_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS + typedef struct _GstBaseAudioDecoder GstBaseAudioDecoder; typedef struct _GstBaseAudioDecoderClass GstBaseAudioDecoderClass; @@ -51,12 +58,14 @@ struct _GstBaseAudioDecoder GstBaseAudioCodec base_audio_codec; /*< private >*/ + guint64 offset; }; struct _GstBaseAudioDecoderClass { GstBaseAudioCodecClass base_audio_codec_class; + GstFlowReturn (*parse_data) (GstBaseAudioDecoder *decoder); }; GType gst_base_audio_decoder_get_type (void); -- 2.7.4
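
Usage sketch (not part of the patch): one way a subclass might implement the parse_data vfunc and the GST_BASE_AUDIO_DECODER_FLOW_NEED_DATA contract introduced above. The my_dec_* names, the fixed frame size and the include paths are hypothetical; the only assumptions taken from the patch itself are that the input adapter lives in GstBaseAudioCodec (as used by the chain function) and that parse_data is called in a loop while it returns GST_FLOW_OK.

#include <gst/base/gstadapter.h>             /* assumed include paths */
#include <gst/audio/gstbaseaudiodecoder.h>

#define MY_DEC_FRAME_SIZE 512   /* hypothetical fixed frame size in bytes */

/* Pull one encoded frame out of the base class adapter, or ask for more data. */
static GstFlowReturn
my_dec_parse_data (GstBaseAudioDecoder * dec)
{
  GstBaseAudioCodec *codec = GST_BASE_AUDIO_CODEC (dec);
  GstBuffer *frame;

  if (gst_adapter_available (codec->input_adapter) < MY_DEC_FRAME_SIZE)
    return GST_BASE_AUDIO_DECODER_FLOW_NEED_DATA;

  frame = gst_adapter_take_buffer (codec->input_adapter, MY_DEC_FRAME_SIZE);

  /* A real decoder would decode the frame here and push the decoded audio
   * on the source pad; this placeholder just discards the data. */
  gst_buffer_unref (frame);

  return GST_FLOW_OK;
}

A subclass would install this on its GstBaseAudioDecoderClass vtable in class_init; returning GST_BASE_AUDIO_DECODER_FLOW_NEED_DATA is how it tells the chain loop above to stop parsing and wait for the next input buffer, which the chain function checks for explicitly.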