/* GStreamer
* Copyright (C) 2004 Benjamin Otte <in7y118@public.uni-hamburg.de>
+ * Copyright (c) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ * Author : Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
GST_STATIC_CAPS ("video/x-theora")
);
-GST_BOILERPLATE (GstTheoraDec, gst_theora_dec, GstElement, GST_TYPE_ELEMENT);
+GST_BOILERPLATE (GstTheoraDec, gst_theora_dec, GstVideoDecoder,
+ GST_TYPE_VIDEO_DECODER);
static void theora_dec_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static void theora_dec_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
-static gboolean theora_dec_sink_event (GstPad * pad, GstEvent * event);
-static gboolean theora_dec_setcaps (GstPad * pad, GstCaps * caps);
-static GstFlowReturn theora_dec_chain (GstPad * pad, GstBuffer * buffer);
-static GstStateChangeReturn theora_dec_change_state (GstElement * element,
- GstStateChange transition);
-static gboolean theora_dec_src_event (GstPad * pad, GstEvent * event);
-static gboolean theora_dec_src_query (GstPad * pad, GstQuery * query);
-static gboolean theora_dec_src_convert (GstPad * pad,
- GstFormat src_format, gint64 src_value,
- GstFormat * dest_format, gint64 * dest_value);
-
-#if 0
-static const GstFormat *theora_get_formats (GstPad * pad);
-#endif
-#if 0
-static const GstEventMask *theora_get_event_masks (GstPad * pad);
-#endif
-static const GstQueryType *theora_get_query_types (GstPad * pad);
+static gboolean theora_dec_start (GstVideoDecoder * decoder);
+static gboolean theora_dec_stop (GstVideoDecoder * decoder);
+static gboolean theora_dec_set_format (GstVideoDecoder * decoder,
+ GstVideoCodecState * state);
+static gboolean theora_dec_reset (GstVideoDecoder * decoder, gboolean hard);
+static GstFlowReturn theora_dec_parse (GstVideoDecoder * decoder,
+ GstVideoCodecFrame * frame, GstAdapter * adapter, gboolean at_eos);
+static GstFlowReturn theora_dec_handle_frame (GstVideoDecoder * decoder,
+ GstVideoCodecFrame * frame);
+
+static GstFlowReturn theora_dec_decode_buffer (GstTheoraDec * dec,
+ GstBuffer * buf, GstVideoCodecFrame * frame);
static void
{
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
- gst_element_class_add_static_pad_template (element_class,
- &theora_dec_src_factory);
- gst_element_class_add_static_pad_template (element_class,
- &theora_dec_sink_factory);
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&theora_dec_src_factory));
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&theora_dec_sink_factory));
gst_element_class_set_details_simple (element_class,
"Theora video decoder", "Codec/Decoder/Video",
"decode raw theora streams to raw YUV video",
gst_theora_dec_class_init (GstTheoraDecClass * klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
- GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
+ GstVideoDecoderClass *video_decoder_class = GST_VIDEO_DECODER_CLASS (klass);
gobject_class->set_property = theora_dec_set_property;
gobject_class->get_property = theora_dec_get_property;
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
}
- gstelement_class->change_state = theora_dec_change_state;
+ video_decoder_class->start = GST_DEBUG_FUNCPTR (theora_dec_start);
+ video_decoder_class->stop = GST_DEBUG_FUNCPTR (theora_dec_stop);
+ video_decoder_class->reset = GST_DEBUG_FUNCPTR (theora_dec_reset);
+ video_decoder_class->set_format = GST_DEBUG_FUNCPTR (theora_dec_set_format);
+ video_decoder_class->parse = GST_DEBUG_FUNCPTR (theora_dec_parse);
+ video_decoder_class->handle_frame =
+ GST_DEBUG_FUNCPTR (theora_dec_handle_frame);
GST_DEBUG_CATEGORY_INIT (theoradec_debug, "theoradec", 0, "Theora decoder");
}
static void
gst_theora_dec_init (GstTheoraDec * dec, GstTheoraDecClass * g_class)
{
  /* Seed every user-tunable property with its compile-time default;
   * the telemetry flags drive libtheora's debug visualisations. */
  dec->telemetry_bits = THEORA_DEF_TELEMETRY_BITS;
  dec->telemetry_qi = THEORA_DEF_TELEMETRY_QI;
  dec->telemetry_mbmode = THEORA_DEF_TELEMETRY_MBMODE;
  dec->telemetry_mv = THEORA_DEF_TELEMETRY_MV;
  dec->crop = THEORA_DEF_CROP;

  /* input is packetized,
   * but is not marked that way so data gets parsed and keyframes marked */
}
/* Clear per-stream decoding state.  After this the decoder refuses to
 * produce output until the next keyframe arrives, which is the safe
 * resume point after a flush/seek or a fresh start. */
static void
gst_theora_dec_reset (GstTheoraDec * dec)
{
  dec->need_keyframe = TRUE;
}
-#endif
-
-static const GstQueryType *
-theora_get_query_types (GstPad * pad)
-{
- static const GstQueryType theora_src_query_types[] = {
- GST_QUERY_POSITION,
- GST_QUERY_DURATION,
- GST_QUERY_CONVERT,
- 0
- };
-
- return theora_src_query_types;
-}
-
static gboolean
-theora_dec_src_convert (GstPad * pad,
- GstFormat src_format, gint64 src_value,
- GstFormat * dest_format, gint64 * dest_value)
+theora_dec_start (GstVideoDecoder * decoder)
{
- gboolean res = TRUE;
- GstTheoraDec *dec;
- guint64 scale = 1;
-
- if (src_format == *dest_format) {
- *dest_value = src_value;
- return TRUE;
- }
+ GstTheoraDec *dec = GST_THEORA_DEC (decoder);
- dec = GST_THEORA_DEC (gst_pad_get_parent (pad));
-
- /* we need the info part before we can done something */
- if (!dec->have_header)
- goto no_header;
-
- switch (src_format) {
- case GST_FORMAT_BYTES:
- switch (*dest_format) {
- case GST_FORMAT_DEFAULT:
- *dest_value = gst_util_uint64_scale_int (src_value, 8,
- dec->info.pic_height * dec->info.pic_width * dec->output_bpp);
- break;
- case GST_FORMAT_TIME:
- /* seems like a rather silly conversion, implement me if you like */
- default:
- res = FALSE;
- }
- break;
- case GST_FORMAT_TIME:
- switch (*dest_format) {
- case GST_FORMAT_BYTES:
- scale =
- dec->output_bpp * (dec->info.pic_width * dec->info.pic_height) /
- 8;
- case GST_FORMAT_DEFAULT:
- *dest_value = scale * gst_util_uint64_scale (src_value,
- dec->info.fps_numerator, dec->info.fps_denominator * GST_SECOND);
- break;
- default:
- res = FALSE;
- }
- break;
- case GST_FORMAT_DEFAULT:
- switch (*dest_format) {
- case GST_FORMAT_TIME:
- *dest_value = gst_util_uint64_scale (src_value,
- GST_SECOND * dec->info.fps_denominator, dec->info.fps_numerator);
- break;
- case GST_FORMAT_BYTES:
- *dest_value = gst_util_uint64_scale_int (src_value,
- dec->output_bpp * dec->info.pic_width * dec->info.pic_height, 8);
- break;
- default:
- res = FALSE;
- }
- break;
- default:
- res = FALSE;
- }
-done:
- gst_object_unref (dec);
- return res;
+ GST_DEBUG_OBJECT (dec, "start");
+ th_info_clear (&dec->info);
+ th_comment_clear (&dec->comment);
+ GST_DEBUG_OBJECT (dec, "Setting have_header to FALSE");
+ dec->have_header = FALSE;
+ gst_theora_dec_reset (dec);
- /* ERRORS */
-no_header:
- {
- GST_DEBUG_OBJECT (dec, "no header yet, cannot convert");
- res = FALSE;
- goto done;
- }
+ return TRUE;
}
-#if 0
static gboolean
-theora_dec_sink_convert (GstPad * pad,
- GstFormat src_format, gint64 src_value,
- GstFormat * dest_format, gint64 * dest_value)
+theora_dec_stop (GstVideoDecoder * decoder)
{
- gboolean res = TRUE;
- GstTheoraDec *dec;
-
- if (src_format == *dest_format) {
- *dest_value = src_value;
- return TRUE;
- }
-
- dec = GST_THEORA_DEC (gst_pad_get_parent (pad));
-
- /* we need the info part before we can done something */
- if (!dec->have_header)
- goto no_header;
-
- switch (src_format) {
- case GST_FORMAT_DEFAULT:
- switch (*dest_format) {
- case GST_FORMAT_TIME:
- *dest_value = _theora_granule_start_time (dec, src_value);
- break;
- default:
- res = FALSE;
- }
- break;
- case GST_FORMAT_TIME:
- switch (*dest_format) {
- case GST_FORMAT_DEFAULT:
- {
- guint rest;
-
- /* framecount */
- *dest_value = gst_util_uint64_scale (src_value,
- dec->info.fps_numerator, GST_SECOND * dec->info.fps_denominator);
-
- /* funny way of calculating granulepos in theora */
- rest = *dest_value / dec->info.keyframe_granule_shift;
- *dest_value -= rest;
- *dest_value <<= dec->granule_shift;
- *dest_value += rest;
- break;
- }
- default:
- res = FALSE;
- break;
- }
- break;
- default:
- res = FALSE;
+ GstTheoraDec *dec = GST_THEORA_DEC (decoder);
+
+ GST_DEBUG_OBJECT (dec, "stop");
+ th_info_clear (&dec->info);
+ th_comment_clear (&dec->comment);
+ th_setup_free (dec->setup);
+ dec->setup = NULL;
+ th_decode_free (dec->decoder);
+ dec->decoder = NULL;
+ gst_theora_dec_reset (dec);
+ if (dec->tags) {
+ gst_tag_list_free (dec->tags);
+ dec->tags = NULL;
}
-done:
- gst_object_unref (dec);
- return res;
- /* ERRORS */
-no_header:
- {
- GST_DEBUG_OBJECT (dec, "no header yet, cannot convert");
- res = FALSE;
- goto done;
- }
+ return TRUE;
}
-#endif
/* GstVideoDecoder::reset vfunc, invoked by the base class on flush/seek.
 * Simply re-arms the wait-for-keyframe state.
 * FIXME : Do we want to handle hard resets differently ? */
static gboolean
theora_dec_reset (GstVideoDecoder * bdec, gboolean hard)
{
  gst_theora_dec_reset (GST_THEORA_DEC (bdec));
  return TRUE;
}
-static gboolean
-theora_dec_src_event (GstPad * pad, GstEvent * event)
+static GstFlowReturn
+theora_dec_parse (GstVideoDecoder * decoder,
+ GstVideoCodecFrame * frame, GstAdapter * adapter, gboolean at_eos)
{
- gboolean res = TRUE;
- GstTheoraDec *dec;
+ gint av;
+ const guint8 *data;
- dec = GST_THEORA_DEC (gst_pad_get_parent (pad));
-
- switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_SEEK:
- {
- GstFormat format, tformat;
- gdouble rate;
- GstEvent *real_seek;
- GstSeekFlags flags;
- GstSeekType cur_type, stop_type;
- gint64 cur, stop;
- gint64 tcur, tstop;
- guint32 seqnum;
-
- gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur,
- &stop_type, &stop);
- seqnum = gst_event_get_seqnum (event);
- gst_event_unref (event);
-
- /* we have to ask our peer to seek to time here as we know
- * nothing about how to generate a granulepos from the src
- * formats or anything.
- *
- * First bring the requested format to time
- */
- tformat = GST_FORMAT_TIME;
- if (!(res = theora_dec_src_convert (pad, format, cur, &tformat, &tcur)))
- goto convert_error;
- if (!(res = theora_dec_src_convert (pad, format, stop, &tformat, &tstop)))
- goto convert_error;
-
- /* then seek with time on the peer */
- real_seek = gst_event_new_seek (rate, GST_FORMAT_TIME,
- flags, cur_type, tcur, stop_type, tstop);
- gst_event_set_seqnum (real_seek, seqnum);
-
- res = gst_pad_push_event (dec->sinkpad, real_seek);
- break;
- }
- case GST_EVENT_QOS:
- {
- gdouble proportion;
- GstClockTimeDiff diff;
- GstClockTime timestamp;
-
- gst_event_parse_qos (event, &proportion, &diff, ×tamp);
-
- /* we cannot randomly skip frame decoding since we don't have
- * B frames. we can however use the timestamp and diff to not
- * push late frames. This would at least save us the time to
- * crop/memcpy the data. */
- GST_OBJECT_LOCK (dec);
- dec->proportion = proportion;
- dec->earliest_time = timestamp + diff;
- GST_OBJECT_UNLOCK (dec);
-
- GST_DEBUG_OBJECT (dec, "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT,
- GST_TIME_ARGS (timestamp), diff);
-
- res = gst_pad_push_event (dec->sinkpad, event);
- break;
- }
- default:
- res = gst_pad_push_event (dec->sinkpad, event);
- break;
- }
-done:
- gst_object_unref (dec);
+ av = gst_adapter_available (adapter);
- return res;
+ data = gst_adapter_peek (adapter, 1);
+ /* check for keyframe; must not be header packet */
+ if (!(data[0] & 0x80) && (data[0] & 0x40) == 0)
+ GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
- /* ERRORS */
-convert_error:
- {
- GST_DEBUG_OBJECT (dec, "could not convert format");
- goto done;
- }
+ /* and pass along all */
+ gst_video_decoder_add_to_frame (decoder, av);
+ return gst_video_decoder_have_frame (decoder);
}
-static gboolean
-theora_dec_sink_event (GstPad * pad, GstEvent * event)
-{
- gboolean ret = FALSE;
- GstTheoraDec *dec;
-
- dec = GST_THEORA_DEC (gst_pad_get_parent (pad));
-
- GST_LOG_OBJECT (dec, "handling event");
- switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_FLUSH_START:
- ret = gst_pad_push_event (dec->srcpad, event);
- break;
- case GST_EVENT_FLUSH_STOP:
- gst_theora_dec_reset (dec);
- ret = gst_pad_push_event (dec->srcpad, event);
- break;
- case GST_EVENT_EOS:
- ret = gst_pad_push_event (dec->srcpad, event);
- break;
- case GST_EVENT_NEWSEGMENT:
- {
- gboolean update;
- GstFormat format;
- gdouble rate, arate;
- gint64 start, stop, time;
-
- gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format,
- &start, &stop, &time);
-
- /* we need TIME format */
- if (format != GST_FORMAT_TIME)
- goto newseg_wrong_format;
-
- GST_DEBUG_OBJECT (dec,
- "newsegment: update %d, rate %g, arate %g, start %" GST_TIME_FORMAT
- ", stop %" GST_TIME_FORMAT ", time %" GST_TIME_FORMAT,
- update, rate, arate, GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
- GST_TIME_ARGS (time));
-
- /* now configure the values */
- gst_segment_set_newsegment_full (&dec->segment, update,
- rate, arate, format, start, stop, time);
- dec->seqnum = gst_event_get_seqnum (event);
-
- /* We don't forward this unless/until the decoder is initialised */
- if (dec->have_header) {
- ret = gst_pad_push_event (dec->srcpad, event);
- } else {
- dec->pendingevents = g_list_append (dec->pendingevents, event);
- ret = TRUE;
- }
- break;
- }
- case GST_EVENT_TAG:
- {
- if (dec->have_header)
- /* and forward */
- ret = gst_pad_push_event (dec->srcpad, event);
- else {
- /* store it to send once we're initialized */
- dec->pendingevents = g_list_append (dec->pendingevents, event);
- ret = TRUE;
- }
- break;
- }
- default:
- ret = gst_pad_push_event (dec->srcpad, event);
- break;
- }
-done:
- gst_object_unref (dec);
-
- return ret;
-
- /* ERRORS */
-newseg_wrong_format:
- {
- GST_DEBUG_OBJECT (dec, "received non TIME newsegment");
- gst_event_unref (event);
- goto done;
- }
-}
/* GstVideoDecoder::set_format vfunc: caps (and optional codec_data) have
 * been negotiated on the sink side.  Keeps a reference to the input state
 * and, when codec_data is present, splits it into the individual Theora
 * header packets and feeds each one to the decoder. */
static gboolean
theora_dec_set_format (GstVideoDecoder * bdec, GstVideoCodecState * state)
{
  GstTheoraDec *dec;

  dec = GST_THEORA_DEC (bdec);

  /* Keep a copy of the input state */
  if (dec->input_state)
    gst_video_codec_state_unref (dec->input_state);
  dec->input_state = gst_video_codec_state_ref (state);

  /* FIXME : Interesting, we always accept any kind of caps ? */
  if (state->codec_data) {
    GstBuffer *buffer;
    guint8 *data;
    guint size;
    guint offset;

    buffer = state->codec_data;

    offset = 0;
    size = GST_BUFFER_SIZE (buffer);
    data = GST_BUFFER_DATA (buffer);

    /* codec_data is a sequence of packets, each prefixed with a 16-bit
     * big-endian length (as this loop parses it); walk them one by one.
     * A trailing fragment of <= 2 bytes cannot hold a payload, hence the
     * "size > 2" condition. */
    while (size > 2) {
      guint psize;
      GstBuffer *buf;

      psize = (data[0] << 8) | data[1];
      /* skip header */
      data += 2;
      size -= 2;
      offset += 2;

      /* make sure we don't read too much */
      psize = MIN (psize, size);

      /* sub-buffer shares the parent's data, no copy */
      buf = gst_buffer_create_sub (buffer, offset, psize);

      /* first buffer is a discont buffer */
      if (offset == 2)
        GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);

      /* now feed it to the decoder we can ignore the error
       * NOTE(review): frame is NULL here — confirm the decode path
       * tolerates a NULL frame for header packets */
      theora_dec_decode_buffer (dec, buf, NULL);
      gst_buffer_unref (buf);

      /* skip the data */
      size -= psize;
      data += psize;
      offset += psize;
    }
  }

  GST_DEBUG_OBJECT (dec, "Done");

  return TRUE;
}
GST_TAG_NOMINAL_BITRATE, dec->info.target_bitrate, NULL);
}
+ if (dec->tags)
+ gst_tag_list_free (dec->tags);
dec->tags = list;
return GST_FLOW_OK;
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec, ogg_packet * packet)
{
- GstCaps *caps;
gint par_num, par_den;
GstFlowReturn ret = GST_FLOW_OK;
- GList *walk;
- guint32 fourcc;
+ GstVideoCodecState *state;
+ GstVideoFormat fmt;
+ GstVideoInfo *info = &dec->input_state->info;
GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
dec->info.fps_numerator, dec->info.fps_denominator,
* the info.aspect_* values reflect PAR;
* 0:x and x:0 are allowed and can be interpreted as 1:1.
*/
- if (dec->have_par) {
- /* we had a par on the sink caps, override the encoded par */
- GST_DEBUG_OBJECT (dec, "overriding with input PAR");
- par_num = dec->par_num;
- par_den = dec->par_den;
- } else {
- /* take encoded par */
+ par_num = GST_VIDEO_INFO_PAR_N (info);
+ par_den = GST_VIDEO_INFO_PAR_D (info);
+
+ /* If we have a default PAR, see if the decoder specified a different one */
+ if (par_num == 1 && par_den == 1 &&
+ (dec->info.aspect_numerator != 0 && dec->info.aspect_denominator != 0)) {
par_num = dec->info.aspect_numerator;
par_den = dec->info.aspect_denominator;
}
- if (par_num == 0 || par_den == 0) {
- par_num = par_den = 1;
- }
/* theora has:
*
* width/height : dimension of the encoded frame
dec->info.pic_width, dec->info.pic_height,
dec->info.pic_x, dec->info.pic_y);
- if (dec->info.pixel_fmt == TH_PF_420) {
- dec->output_bpp = 12; /* Average bits per pixel. */
- fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
- } else if (dec->info.pixel_fmt == TH_PF_422) {
- dec->output_bpp = 16;
- fourcc = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
- } else if (dec->info.pixel_fmt == TH_PF_444) {
- dec->output_bpp = 24;
- fourcc = GST_MAKE_FOURCC ('Y', '4', '4', '4');
- } else {
- GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
- return GST_FLOW_ERROR;
+ switch (dec->info.pixel_fmt) {
+ case TH_PF_420:
+ fmt = GST_VIDEO_FORMAT_I420;
+ break;
+ case TH_PF_422:
+ fmt = GST_VIDEO_FORMAT_Y42B;
+ break;
+ case TH_PF_444:
+ fmt = GST_VIDEO_FORMAT_Y444;
+ break;
+ default:
+ goto unsupported_format;
}
if (dec->crop) {
- dec->width = dec->info.pic_width;
- dec->height = dec->info.pic_height;
+ GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
+ GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;
dec->offset_x = dec->info.pic_x;
dec->offset_y = dec->info.pic_y;
/* Ensure correct offsets in chroma for formats that need it
* so no need to handle them ourselves. */
if (dec->offset_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
dec->offset_x--;
- dec->width++;
+ GST_VIDEO_INFO_WIDTH (info)++;
}
if (dec->offset_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
dec->offset_y--;
- dec->height++;
+ GST_VIDEO_INFO_HEIGHT (info)++;
}
} else {
/* no cropping, use the encoded dimensions */
- dec->width = dec->info.frame_width;
- dec->height = dec->info.frame_height;
+ GST_VIDEO_INFO_WIDTH (info) = dec->info.frame_width;
+ GST_VIDEO_INFO_HEIGHT (info) = dec->info.frame_height;
dec->offset_x = 0;
dec->offset_y = 0;
}
GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
- dec->width, dec->height, dec->offset_x, dec->offset_y);
+ info->width, info->height, dec->offset_x, dec->offset_y);
/* done */
dec->decoder = th_decode_alloc (&dec->info, dec->setup);
GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
}
- caps = gst_caps_new_simple ("video/x-raw-yuv",
- "format", GST_TYPE_FOURCC, fourcc,
- "framerate", GST_TYPE_FRACTION,
- dec->info.fps_numerator, dec->info.fps_denominator,
- "pixel-aspect-ratio", GST_TYPE_FRACTION, par_num, par_den,
- "width", G_TYPE_INT, dec->width, "height", G_TYPE_INT, dec->height,
- "color-matrix", G_TYPE_STRING, "sdtv",
- "chroma-site", G_TYPE_STRING, "jpeg", NULL);
- gst_pad_set_caps (dec->srcpad, caps);
- gst_caps_unref (caps);
+ /* Create the output state */
+ dec->output_state = state =
+ gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
+ info->width, info->height, dec->input_state);
- dec->have_header = TRUE;
+ /* FIXME : Do we still need to set fps/par now that we pass the reference input stream ? */
+ state->info.fps_n = dec->info.fps_numerator;
+ state->info.fps_d = dec->info.fps_denominator;
+ state->info.par_n = par_num;
+ state->info.par_d = par_den;
- if (dec->pendingevents) {
- for (walk = dec->pendingevents; walk; walk = g_list_next (walk))
- gst_pad_push_event (dec->srcpad, GST_EVENT_CAST (walk->data));
- g_list_free (dec->pendingevents);
- dec->pendingevents = NULL;
- }
+ state->info.chroma_site = GST_VIDEO_CHROMA_SITE_JPEG;
+ /* FIXME : Need to specify SDTV color-matrix ... once it's handled
+ * with the backported GstVideoInfo */
+
+ dec->have_header = TRUE;
+ /* FIXME : Put this on the next outgoing frame */
+ /* FIXME : */
if (dec->tags) {
- gst_element_found_tags_for_pad (GST_ELEMENT_CAST (dec), dec->srcpad,
- dec->tags);
+ gst_element_found_tags_for_pad (GST_ELEMENT_CAST (dec),
+ GST_VIDEO_DECODER_SRC_PAD (dec), dec->tags);
dec->tags = NULL;
}
return ret;
+
+ /* ERRORS */
+unsupported_format:
+ {
+ GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
+ return GST_FLOW_ERROR;
+ }
}
static GstFlowReturn
}
}
-/* returns TRUE if buffer is within segment, else FALSE.
- * if Buffer is on segment border, it's timestamp and duration will be clipped */
-static gboolean
-clip_buffer (GstTheoraDec * dec, GstBuffer * buf)
-{
- gboolean res = TRUE;
- GstClockTime in_ts, in_dur, stop;
- gint64 cstart, cstop;
-
- in_ts = GST_BUFFER_TIMESTAMP (buf);
- in_dur = GST_BUFFER_DURATION (buf);
-
- GST_LOG_OBJECT (dec,
- "timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
- GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur));
-
- /* can't clip without TIME segment */
- if (dec->segment.format != GST_FORMAT_TIME)
- goto beach;
-
- /* we need a start time */
- if (!GST_CLOCK_TIME_IS_VALID (in_ts))
- goto beach;
-
- /* generate valid stop, if duration unknown, we have unknown stop */
- stop =
- GST_CLOCK_TIME_IS_VALID (in_dur) ? (in_ts + in_dur) : GST_CLOCK_TIME_NONE;
-
- /* now clip */
- if (!(res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME,
- in_ts, stop, &cstart, &cstop)))
- goto beach;
-
- /* update timestamp and possibly duration if the clipped stop time is
- * valid */
- GST_BUFFER_TIMESTAMP (buf) = cstart;
- if (GST_CLOCK_TIME_IS_VALID (cstop))
- GST_BUFFER_DURATION (buf) = cstop - cstart;
-
-beach:
- GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
- return res;
-}
-
-static GstFlowReturn
-theora_dec_push_forward (GstTheoraDec * dec, GstBuffer * buf)
-{
- GstFlowReturn result = GST_FLOW_OK;
-
- if (clip_buffer (dec, buf)) {
- if (dec->discont) {
- GST_LOG_OBJECT (dec, "setting DISCONT");
- GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
- dec->discont = FALSE;
- }
- result = gst_pad_push (dec->srcpad, buf);
- } else {
- gst_buffer_unref (buf);
- }
-
- return result;
-}
-
-static GstFlowReturn
-theora_dec_push_reverse (GstTheoraDec * dec, GstBuffer * buf)
-{
- GstFlowReturn result = GST_FLOW_OK;
-
- dec->queued = g_list_prepend (dec->queued, buf);
-
- return result;
-}
-
/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
-theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf, GstBuffer ** out)
+theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf,
+ GstVideoCodecFrame * frame)
{
+ GstVideoDecoder *decoder = GST_VIDEO_DECODER (dec);
+ GstVideoInfo *info;
gint width, height, stride;
GstFlowReturn result;
int i, plane;
- GstVideoFormat format;
guint8 *dest, *src;
+ GstBuffer *out;
- switch (dec->info.pixel_fmt) {
- case TH_PF_444:
- format = GST_VIDEO_FORMAT_Y444;
- break;
- case TH_PF_420:
- format = GST_VIDEO_FORMAT_I420;
- break;
- case TH_PF_422:
- format = GST_VIDEO_FORMAT_Y42B;
- break;
- default:
- g_assert_not_reached ();
- }
+ result = gst_video_decoder_alloc_output_frame (decoder, frame);
- result =
- gst_pad_alloc_buffer_and_set_caps (dec->srcpad, GST_BUFFER_OFFSET_NONE,
- gst_video_format_get_size (format, dec->width, dec->height),
- GST_PAD_CAPS (dec->srcpad), out);
if (G_UNLIKELY (result != GST_FLOW_OK)) {
GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
gst_flow_get_name (result));
return result;
}
+ out = frame->output_buffer;
+ info = &dec->output_state->info;
+
+ /* FIXME : Use GstVideoInfo */
for (plane = 0; plane < 3; plane++) {
- width = gst_video_format_get_component_width (format, plane, dec->width);
- height = gst_video_format_get_component_height (format, plane, dec->height);
- stride = gst_video_format_get_row_stride (format, plane, dec->width);
+ width = GST_VIDEO_INFO_COMP_WIDTH (info, plane);
+ height = GST_VIDEO_INFO_COMP_HEIGHT (info, plane);
+ stride = GST_VIDEO_INFO_COMP_STRIDE (info, plane);
- dest =
- GST_BUFFER_DATA (*out) + gst_video_format_get_component_offset (format,
- plane, dec->width, dec->height);
+ dest = GST_BUFFER_DATA (out) + GST_VIDEO_INFO_COMP_OFFSET (info, plane);
src = buf[plane].data;
- src += ((height == dec->height) ? dec->offset_y : dec->offset_y / 2)
+ src +=
+ ((height ==
+ GST_VIDEO_INFO_HEIGHT (info)) ? dec->offset_y : dec->offset_y / 2)
* buf[plane].stride;
- src += (width == dec->width) ? dec->offset_x : dec->offset_x / 2;
+ src +=
+ (width ==
+ GST_VIDEO_INFO_WIDTH (info)) ? dec->offset_x : dec->offset_x / 2;
for (i = 0; i < height; i++) {
memcpy (dest, src, width);
static GstFlowReturn
theora_handle_data_packet (GstTheoraDec * dec, ogg_packet * packet,
- GstClockTime outtime, GstClockTime outdur)
+ GstVideoCodecFrame * frame)
{
/* normal data packet */
th_ycbcr_buffer buf;
- GstBuffer *out;
gboolean keyframe;
GstFlowReturn result;
ogg_int64_t gp;
if (G_UNLIKELY (!dec->have_header))
goto not_initialized;
- /* get timestamp and durations */
- if (outtime == -1)
- outtime = dec->last_timestamp;
- if (outdur == -1)
- outdur = gst_util_uint64_scale_int (GST_SECOND, dec->info.fps_denominator,
- dec->info.fps_numerator);
-
- /* calculate expected next timestamp */
- if (outtime != -1 && outdur != -1)
- dec->last_timestamp = outtime + outdur;
-
/* the second most significant bit of the first data byte is cleared
* for keyframes. We can only check it if it's not a zero-length packet. */
keyframe = packet->bytes && ((packet->packet[0] & 0x40) == 0);
if (G_UNLIKELY (th_decode_packetin (dec->decoder, packet, &gp) < 0))
goto decode_error;
- if (outtime != -1) {
- gboolean need_skip;
- GstClockTime running_time;
- GstClockTime earliest_time;
- gdouble proportion;
-
- /* qos needs to be done on running time */
- running_time = gst_segment_to_running_time (&dec->segment, GST_FORMAT_TIME,
- outtime);
-
- GST_OBJECT_LOCK (dec);
- proportion = dec->proportion;
- earliest_time = dec->earliest_time;
- /* check for QoS, don't perform the last steps of getting and
- * pushing the buffers that are known to be late. */
- need_skip = earliest_time != -1 && running_time <= earliest_time;
- GST_OBJECT_UNLOCK (dec);
-
- if (need_skip) {
- GstMessage *qos_msg;
- guint64 stream_time;
- gint64 jitter;
-
- GST_DEBUG_OBJECT (dec, "skipping decoding: qostime %"
- GST_TIME_FORMAT " <= %" GST_TIME_FORMAT,
- GST_TIME_ARGS (running_time), GST_TIME_ARGS (earliest_time));
-
- dec->dropped++;
-
- stream_time =
- gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, outtime);
- jitter = GST_CLOCK_DIFF (running_time, earliest_time);
-
- qos_msg =
- gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, running_time,
- stream_time, outtime, outdur);
- gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
- gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
- dec->processed, dec->dropped);
- gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
-
- goto dropping_qos;
- }
- }
+ if (frame &&
+ (gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (dec),
+ frame) < 0))
+ goto dropping_qos;
/* this does postprocessing and set up the decoded frame
* pointers in our yuv variable */
|| (buf[0].height != dec->info.frame_height)))
goto wrong_dimensions;
- result = theora_handle_image (dec, buf, &out);
+ result = theora_handle_image (dec, buf, frame);
if (result != GST_FLOW_OK)
return result;
- GST_BUFFER_OFFSET (out) = dec->frame_nr;
- if (dec->frame_nr != -1)
- dec->frame_nr++;
- GST_BUFFER_OFFSET_END (out) = dec->frame_nr;
-
- GST_BUFFER_TIMESTAMP (out) = outtime;
- GST_BUFFER_DURATION (out) = outdur;
-
- dec->processed++;
-
- if (dec->segment.rate >= 0.0)
- result = theora_dec_push_forward (dec, out);
- else
- result = theora_dec_push_reverse (dec, out);
-
return result;
/* ERRORS */
dropping:
{
GST_WARNING_OBJECT (dec, "dropping frame because we need a keyframe");
- dec->discont = TRUE;
- return GST_FLOW_OK;
+ return GST_VIDEO_DECODER_FLOW_NEED_DATA;
}
dropping_qos:
{
- if (dec->frame_nr != -1)
- dec->frame_nr++;
- dec->discont = TRUE;
GST_WARNING_OBJECT (dec, "dropping frame because of QoS");
- return GST_FLOW_OK;
+ return GST_VIDEO_DECODER_FLOW_NEED_DATA;
}
decode_error:
{
}
static GstFlowReturn
-theora_dec_decode_buffer (GstTheoraDec * dec, GstBuffer * buf)
+theora_dec_decode_buffer (GstTheoraDec * dec, GstBuffer * buf,
+ GstVideoCodecFrame * frame)
{
ogg_packet packet;
GstFlowReturn result = GST_FLOW_OK;
- GstClockTime timestamp, duration;
/* make ogg_packet out of the buffer */
packet.packet = GST_BUFFER_DATA (buf);
GST_LOG_OBJECT (dec, "decode buffer of size %ld", packet.bytes);
- /* save last seem timestamp for interpolating the next timestamps using the
- * framerate when we need to */
- timestamp = GST_BUFFER_TIMESTAMP (buf);
- duration = GST_BUFFER_DURATION (buf);
-
- GST_DEBUG_OBJECT (dec, "header=%02x, outtime=%" GST_TIME_FORMAT,
- packet.bytes ? packet.packet[0] : -1, GST_TIME_ARGS (timestamp));
+ GST_DEBUG_OBJECT (dec, "header=%02x", packet.bytes ? packet.packet[0] : -1);
/* switch depending on packet type. A zero byte packet is always a data
* packet; we don't dereference it in that case. */
goto done;
}
result = theora_handle_header_packet (dec, &packet);
+ /* header packets are not meant to be displayed */
+ /* FIXME : This is a temporary hack. The proper fix would be to
+ * not call _finish_frame() for these types of packets */
+ GST_VIDEO_CODEC_FRAME_FLAG_SET (frame,
+ GST_VIDEO_CODEC_FRAME_FLAG_DECODE_ONLY);
} else {
- result = theora_handle_data_packet (dec, &packet, timestamp, duration);
+ result = theora_handle_data_packet (dec, &packet, frame);
}
done:
return result;
}
-/* For reverse playback we use a technique that can be used for
- * any keyframe based video codec.
- *
- * Input:
- * Buffer decoding order: 7 8 9 4 5 6 1 2 3 EOS
- * Keyframe flag: K K
- * Discont flag: D D D
- *
- * - Each Discont marks a discont in the decoding order.
- * - The keyframes mark where we can start decoding.
- *
- * First we prepend incomming buffers to the gather queue, whenever we receive
- * a discont, we flush out the gather queue.
- *
- * The above data will be accumulated in the gather queue like this:
- *
- * gather queue: 9 8 7
- * D
- *
- * Whe buffer 4 is received (with a DISCONT), we flush the gather queue like
- * this:
- *
- * while (gather)
- * take head of queue and prepend to decode queue.
- * if we copied a keyframe, decode the decode queue.
- *
- * After we flushed the gather queue, we add 4 to the (now empty) gather queue.
- * We get the following situation:
- *
- * gather queue: 4
- * decode queue: 7 8 9
- *
- * After we received 5 (Keyframe) and 6:
- *
- * gather queue: 6 5 4
- * decode queue: 7 8 9
- *
- * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
- *
- * Copy head of the gather queue (6) to decode queue:
- *
- * gather queue: 5 4
- * decode queue: 6 7 8 9
- *
- * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
- * can start decoding.
- *
- * gather queue: 4
- * decode queue: 5 6 7 8 9
- *
- * Decode frames in decode queue, store raw decoded data in output queue, we
- * can take the head of the decode queue and prepend the decoded result in the
- * output queue:
- *
- * gather queue: 4
- * decode queue:
- * output queue: 9 8 7 6 5
- *
- * Now output all the frames in the output queue, picking a frame from the
- * head of the queue.
- *
- * Copy head of the gather queue (4) to decode queue, we flushed the gather
- * queue and can now store input buffer in the gather queue:
- *
- * gather queue: 1
- * decode queue: 4
- *
- * When we receive EOS, the queue looks like:
- *
- * gather queue: 3 2 1
- * decode queue: 4
- *
- * Fill decode queue, first keyframe we copy is 2:
- *
- * gather queue: 1
- * decode queue: 2 3 4
- *
- * Decoded output:
- *
- * gather queue: 1
- * decode queue:
- * output queue: 4 3 2
- *
- * Leftover buffer 1 cannot be decoded and must be discarded.
- */
-static GstFlowReturn
-theora_dec_flush_decode (GstTheoraDec * dec)
-{
- GstFlowReturn res = GST_FLOW_OK;
-
- while (dec->decode) {
- GstBuffer *buf = GST_BUFFER_CAST (dec->decode->data);
-
- GST_DEBUG_OBJECT (dec, "decoding buffer %p, ts %" GST_TIME_FORMAT,
- buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
-
- /* decode buffer, prepend to output queue */
- res = theora_dec_decode_buffer (dec, buf);
-
- /* don't need it anymore now */
- gst_buffer_unref (buf);
-
- dec->decode = g_list_delete_link (dec->decode, dec->decode);
- }
- while (dec->queued) {
- GstBuffer *buf = GST_BUFFER_CAST (dec->queued->data);
-
- /* iterate output queue an push downstream */
- res = gst_pad_push (dec->srcpad, buf);
-
- dec->queued = g_list_delete_link (dec->queued, dec->queued);
- }
-
- return res;
-}
-
-static GstFlowReturn
-theora_dec_chain_reverse (GstTheoraDec * dec, gboolean discont, GstBuffer * buf)
-{
- GstFlowReturn res = GST_FLOW_OK;
-
- /* if we have a discont, move buffers to the decode list */
- if (G_UNLIKELY (discont)) {
- GST_DEBUG_OBJECT (dec, "received discont,gathering buffers");
- while (dec->gather) {
- GstBuffer *gbuf;
- guint8 *data;
-
- gbuf = GST_BUFFER_CAST (dec->gather->data);
- /* remove from the gather list */
- dec->gather = g_list_delete_link (dec->gather, dec->gather);
- /* copy to decode queue */
- dec->decode = g_list_prepend (dec->decode, gbuf);
-
- /* if we copied a keyframe, flush and decode the decode queue */
- data = GST_BUFFER_DATA (gbuf);
- if (data && (data[0] & 0x40) == 0) {
- GST_DEBUG_OBJECT (dec, "copied keyframe");
- res = theora_dec_flush_decode (dec);
- }
- }
- }
-
- /* add buffer to gather queue */
- GST_DEBUG_OBJECT (dec, "gathering buffer %p, size %u", buf,
- GST_BUFFER_SIZE (buf));
- dec->gather = g_list_prepend (dec->gather, buf);
-
- return res;
-}
-
-static GstFlowReturn
-theora_dec_chain_forward (GstTheoraDec * dec, gboolean discont,
- GstBuffer * buffer)
-{
- GstFlowReturn result;
-
- result = theora_dec_decode_buffer (dec, buffer);
-
- gst_buffer_unref (buffer);
-
- return result;
-}
-
static GstFlowReturn
-theora_dec_chain (GstPad * pad, GstBuffer * buf)
+theora_dec_handle_frame (GstVideoDecoder * bdec, GstVideoCodecFrame * frame)
{
GstTheoraDec *dec;
GstFlowReturn res;
- gboolean discont;
-
- dec = GST_THEORA_DEC (gst_pad_get_parent (pad));
- /* peel of DISCONT flag */
- discont = GST_BUFFER_IS_DISCONT (buf);
+ dec = GST_THEORA_DEC (bdec);
- /* resync on DISCONT */
- if (G_UNLIKELY (discont)) {
- GST_DEBUG_OBJECT (dec, "received DISCONT buffer");
- dec->need_keyframe = TRUE;
- dec->last_timestamp = -1;
- dec->discont = TRUE;
- }
-
- if (dec->segment.rate > 0.0)
- res = theora_dec_chain_forward (dec, discont, buf);
- else
- res = theora_dec_chain_reverse (dec, discont, buf);
-
- gst_object_unref (dec);
+ res = theora_dec_decode_buffer (dec, frame->input_buffer, frame);
+ if (res == GST_FLOW_OK)
+ res = gst_video_decoder_finish_frame (bdec, frame);
return res;
}
-static GstStateChangeReturn
-theora_dec_change_state (GstElement * element, GstStateChange transition)
-{
- GstTheoraDec *dec = GST_THEORA_DEC (element);
- GstStateChangeReturn ret;
-
- switch (transition) {
- case GST_STATE_CHANGE_NULL_TO_READY:
- break;
- case GST_STATE_CHANGE_READY_TO_PAUSED:
- th_info_clear (&dec->info);
- th_comment_clear (&dec->comment);
- GST_DEBUG_OBJECT (dec, "Setting have_header to FALSE in READY->PAUSED");
- dec->have_header = FALSE;
- dec->have_par = FALSE;
- gst_theora_dec_reset (dec);
- break;
- case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
- break;
- default:
- break;
- }
-
- ret = parent_class->change_state (element, transition);
-
- switch (transition) {
- case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
- break;
- case GST_STATE_CHANGE_PAUSED_TO_READY:
- th_info_clear (&dec->info);
- th_comment_clear (&dec->comment);
- th_setup_free (dec->setup);
- dec->setup = NULL;
- th_decode_free (dec->decoder);
- dec->decoder = NULL;
- gst_theora_dec_reset (dec);
- break;
- case GST_STATE_CHANGE_READY_TO_NULL:
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void
theora_dec_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
break;
}
}
+
+gboolean
+gst_theora_dec_register (GstPlugin * plugin)
+{
+ return gst_element_register (plugin, "theoradec",
+ GST_RANK_PRIMARY, GST_TYPE_THEORA_DEC);
+}
/* GStreamer
* Copyright (C) 2004 Wim Taymans <wim@fluendo.com>
+ * Copyright (c) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ * Author : Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
#define THEORA_DEF_RATE_BUFFER 0
#define THEORA_DEF_MULTIPASS_CACHE_FILE NULL
#define THEORA_DEF_MULTIPASS_MODE MULTIPASS_MODE_SINGLE_PASS
-#define THEORA_DEF_DUP_ON_GAP FALSE
enum
{
PROP_0,
PROP_CAP_UNDERFLOW,
PROP_RATE_BUFFER,
PROP_MULTIPASS_CACHE_FILE,
- PROP_MULTIPASS_MODE,
- PROP_DUP_ON_GAP
+ PROP_MULTIPASS_MODE
/* FILL ME */
};
theoraenc->info.fps_numerator);
}
-/* Generate a dummy encoder context for use in th_encode_ctl queries
- Release with th_encode_free()
- This and the next routine from theora/examples/libtheora_info.c */
-static th_enc_ctx *
-dummy_encode_ctx (void)
-{
- th_enc_ctx *ctx;
- th_info info;
-
- /* set the minimal video parameters */
- th_info_init (&info);
- info.frame_width = 320;
- info.frame_height = 240;
- info.fps_numerator = 1;
- info.fps_denominator = 1;
-
- /* allocate and initialize a context object */
- ctx = th_encode_alloc (&info);
- if (!ctx)
- GST_WARNING ("Failed to allocate dummy encoder context.");
-
- /* clear the info struct */
- th_info_clear (&info);
-
- return ctx;
-}
-
-/* Query the current and maximum values for the 'speed level' setting.
- This can be used to ask the encoder to trade off encoding quality
- vs. performance cost, for example to adapt to realtime constraints. */
-static int
-check_speed_level (th_enc_ctx * ctx, int *current, int *max)
-{
- int ret;
-
- /* query the current speed level */
- ret = th_encode_ctl (ctx, TH_ENCCTL_GET_SPLEVEL, current, sizeof (int));
- if (ret) {
- GST_WARNING ("Error %d getting current speed level.", ret);
- return ret;
- }
- /* query the maximum speed level, which varies by encoder version */
- ret = th_encode_ctl (ctx, TH_ENCCTL_GET_SPLEVEL_MAX, max, sizeof (int));
- if (ret) {
- GST_WARNING ("Error %d getting maximum speed level.", ret);
- return ret;
- }
-
- return 0;
-}
-
static GstStaticPadTemplate theora_enc_sink_factory =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_STATIC_CAPS ("video/x-theora")
);
-static GstCaps *theora_enc_src_caps;
-
-static void
-_do_init (GType object_type)
-{
- const GInterfaceInfo preset_interface_info = {
- NULL, /* interface_init */
- NULL, /* interface_finalize */
- NULL /* interface_data */
- };
+GST_BOILERPLATE (GstTheoraEnc, gst_theora_enc, GstVideoEncoder,
+ GST_TYPE_VIDEO_ENCODER);
- g_type_add_interface_static (object_type, GST_TYPE_PRESET,
- &preset_interface_info);
-}
-
-GST_BOILERPLATE_FULL (GstTheoraEnc, gst_theora_enc, GstElement,
- GST_TYPE_ELEMENT, _do_init);
+static gboolean theora_enc_start (GstVideoEncoder * enc);
+static gboolean theora_enc_stop (GstVideoEncoder * enc);
+static gboolean theora_enc_set_format (GstVideoEncoder * enc,
+ GstVideoCodecState * state);
+static GstFlowReturn theora_enc_handle_frame (GstVideoEncoder * enc,
+ GstVideoCodecFrame * frame);
+static GstFlowReturn theora_enc_pre_push (GstVideoEncoder * benc,
+ GstVideoCodecFrame * frame);
+static GstFlowReturn theora_enc_finish (GstVideoEncoder * enc);
-static gboolean theora_enc_sink_event (GstPad * pad, GstEvent * event);
-static gboolean theora_enc_src_event (GstPad * pad, GstEvent * event);
-static GstFlowReturn theora_enc_chain (GstPad * pad, GstBuffer * buffer);
-static GstStateChangeReturn theora_enc_change_state (GstElement * element,
- GstStateChange transition);
static GstCaps *theora_enc_sink_getcaps (GstPad * pad);
-static gboolean theora_enc_sink_setcaps (GstPad * pad, GstCaps * caps);
static void theora_enc_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static void theora_enc_set_property (GObject * object, guint prop_id,
static gboolean theora_enc_write_multipass_cache (GstTheoraEnc * enc,
gboolean begin, gboolean eos);
-static char *theora_enc_get_supported_formats (void);
-
-static void theora_timefifo_free (GstTheoraEnc * enc);
-static GstFlowReturn
-theora_enc_encode_and_push (GstTheoraEnc * enc, ogg_packet op,
- GstBuffer * buffer);
-
static void
gst_theora_enc_base_init (gpointer g_class)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
- gst_element_class_add_static_pad_template (element_class,
- &theora_enc_src_factory);
- gst_element_class_add_static_pad_template (element_class,
- &theora_enc_sink_factory);
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&theora_enc_src_factory));
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&theora_enc_sink_factory));
gst_element_class_set_details_simple (element_class,
"Theora video encoder", "Codec/Encoder/Video",
"encode raw YUV video to a theora stream",
gst_theora_enc_class_init (GstTheoraEncClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
- GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
- char *caps_string;
-
- /* query runtime encoder properties */
- th_enc_ctx *th_ctx;
- int default_speed_level = THEORA_DEF_SPEEDLEVEL;
- int max_speed_level = default_speed_level;
-
- GST_DEBUG_CATEGORY_INIT (theoraenc_debug, "theoraenc", 0, "Theora encoder");
-
- th_ctx = dummy_encode_ctx ();
- if (th_ctx) {
- if (check_speed_level (th_ctx, &default_speed_level, &max_speed_level))
- GST_WARNING
- ("Failed to determine settings for the speed-level property.");
- th_encode_free (th_ctx);
- }
+ GstVideoEncoderClass *gstvideo_encoder_class =
+ GST_VIDEO_ENCODER_CLASS (klass);
gobject_class->set_property = theora_enc_set_property;
gobject_class->get_property = theora_enc_get_property;
gobject_class->finalize = theora_enc_finalize;
+ gstvideo_encoder_class->start = GST_DEBUG_FUNCPTR (theora_enc_start);
+ gstvideo_encoder_class->stop = GST_DEBUG_FUNCPTR (theora_enc_stop);
+ gstvideo_encoder_class->set_format =
+ GST_DEBUG_FUNCPTR (theora_enc_set_format);
+ gstvideo_encoder_class->handle_frame =
+ GST_DEBUG_FUNCPTR (theora_enc_handle_frame);
+ gstvideo_encoder_class->pre_push = GST_DEBUG_FUNCPTR (theora_enc_pre_push);
+ gstvideo_encoder_class->finish = GST_DEBUG_FUNCPTR (theora_enc_finish);
+
g_object_class_install_property (gobject_class, PROP_CENTER,
g_param_spec_boolean ("center", "Center",
"ignored and kept for API compat only", TRUE,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_SPEEDLEVEL,
g_param_spec_int ("speed-level", "Speed level",
- "Controls the amount of analysis performed when encoding."
- " Higher values trade compression quality for speed."
- " This property requires libtheora version >= 1.0"
- ", and the maximum value may vary based on encoder version.",
- 0, max_speed_level, default_speed_level,
- (GParamFlags) G_PARAM_READWRITE | G_PARAM_CONSTRUCT |
- G_PARAM_STATIC_STRINGS));
+ "Controls the amount of motion vector searching done while "
+ "encoding. This property requires libtheora version >= 1.0",
+ 0, 3, THEORA_DEF_SPEEDLEVEL,
+ (GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_VP3_COMPATIBLE,
g_param_spec_boolean ("vp3-compatible", "VP3 Compatible",
- "Disables non-VP3 compatible features",
+ "Disables non-VP3 compatible features."
+ " This property requires libtheora version >= 1.1",
THEORA_DEF_VP3_COMPATIBLE,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_DROP_FRAMES,
- g_param_spec_boolean ("drop-frames", "Drop Frames",
- "Allow or disallow frame dropping",
+ g_param_spec_boolean ("drop-frames", "Drop Frames",
+ "Allow or disallow frame dropping."
+ " This property requires libtheora version >= 1.1",
THEORA_DEF_DROP_FRAMES,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_CAP_OVERFLOW,
- g_param_spec_boolean ("cap-overflow", "Cap Overflow",
- "Enable capping of bit reservoir overflows",
+ g_param_spec_boolean ("cap-overflow", "Cap Overflow",
+ "Enable capping of bit reservoir overflows."
+ " This property requires libtheora version >= 1.1",
THEORA_DEF_CAP_OVERFLOW,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_CAP_UNDERFLOW,
- g_param_spec_boolean ("cap-underflow", "Cap Underflow",
- "Enable capping of bit reservoir underflows",
+ g_param_spec_boolean ("cap-underflow", "Cap Underflow",
+ "Enable capping of bit reservoir underflows."
+ " This property requires libtheora version >= 1.1",
THEORA_DEF_CAP_UNDERFLOW,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_RATE_BUFFER,
g_param_spec_int ("rate-buffer", "Rate Control Buffer",
"Sets the size of the rate control buffer, in units of frames. "
"The default value of 0 instructs the encoder to automatically "
- "select an appropriate value",
+ "select an appropriate value."
+ " This property requires libtheora version >= 1.1",
0, 1000, THEORA_DEF_RATE_BUFFER,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_MULTIPASS_CACHE_FILE,
"Single pass or first/second pass", GST_TYPE_MULTIPASS_MODE,
THEORA_DEF_MULTIPASS_MODE,
(GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- g_object_class_install_property (gobject_class, PROP_DUP_ON_GAP,
- g_param_spec_boolean ("dup-on-gap", "Create DUP frame on GAP flag",
- "Allow codec to handle frames with GAP flag as duplicates "
- "of previous frame. "
- "This is good to work with variable frame rate stabilized "
- "by videorate element. It will add variable latency with maximal "
- "size of keyframe distance, this way it is a bad idea "
- "to use with live streams.",
- THEORA_DEF_DUP_ON_GAP,
- (GParamFlags) G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- caps_string = g_strdup_printf ("video/x-raw-yuv, "
- "format = (fourcc) { %s }, "
- "framerate = (fraction) [1/MAX, MAX], "
- "width = (int) [ 1, MAX ], " "height = (int) [ 1, MAX ]",
- theora_enc_get_supported_formats ());
- theora_enc_src_caps = gst_caps_from_string (caps_string);
- g_free (caps_string);
- gstelement_class->change_state = theora_enc_change_state;
+ GST_DEBUG_CATEGORY_INIT (theoraenc_debug, "theoraenc", 0, "Theora encoder");
}
static void
gst_theora_enc_init (GstTheoraEnc * enc, GstTheoraEncClass * g_class)
{
- enc->sinkpad =
- gst_pad_new_from_static_template (&theora_enc_sink_factory, "sink");
- gst_pad_set_chain_function (enc->sinkpad, theora_enc_chain);
- gst_pad_set_event_function (enc->sinkpad, theora_enc_sink_event);
- gst_pad_set_getcaps_function (enc->sinkpad, theora_enc_sink_getcaps);
- gst_pad_set_setcaps_function (enc->sinkpad, theora_enc_sink_setcaps);
- gst_element_add_pad (GST_ELEMENT (enc), enc->sinkpad);
-
- enc->srcpad =
- gst_pad_new_from_static_template (&theora_enc_src_factory, "src");
- gst_pad_set_event_function (enc->srcpad, theora_enc_src_event);
- gst_pad_use_fixed_caps (enc->srcpad);
- gst_element_add_pad (GST_ELEMENT (enc), enc->srcpad);
-
- gst_segment_init (&enc->segment, GST_FORMAT_UNDEFINED);
+ gst_pad_set_getcaps_function (GST_VIDEO_ENCODER_SINK_PAD (enc),
+ GST_DEBUG_FUNCPTR (theora_enc_sink_getcaps));
enc->video_bitrate = THEORA_DEF_BITRATE;
enc->video_quality = THEORA_DEF_QUALITY;
enc->keyframe_freq = THEORA_DEF_KEYFRAME_FREQ;
enc->keyframe_force = THEORA_DEF_KEYFRAME_FREQ_FORCE;
- enc->expected_ts = GST_CLOCK_TIME_NONE;
-
- /* enc->speed_level is set to the libtheora default by the constructor */
+ enc->speed_level = THEORA_DEF_SPEEDLEVEL;
enc->vp3_compatible = THEORA_DEF_VP3_COMPATIBLE;
enc->drop_frames = THEORA_DEF_DROP_FRAMES;
enc->cap_overflow = THEORA_DEF_CAP_OVERFLOW;
enc->cap_underflow = THEORA_DEF_CAP_UNDERFLOW;
enc->rate_buffer = THEORA_DEF_RATE_BUFFER;
- enc->dup_on_gap = THEORA_DEF_DUP_ON_GAP;
enc->multipass_mode = THEORA_DEF_MULTIPASS_MODE;
enc->multipass_cache_file = THEORA_DEF_MULTIPASS_CACHE_FILE;
theora_enc_clear_multipass_cache (enc);
+ if (enc->input_state)
+ gst_video_codec_state_unref (enc->input_state);
+
G_OBJECT_CLASS (parent_class)->finalize (object);
}
GST_OBJECT_LOCK (enc);
enc->info.target_bitrate = enc->video_bitrate;
- if (enc->quality_changed) {
- enc->info.quality = enc->video_quality;
- } else {
- if (enc->video_bitrate == 0) {
- enc->info.quality = enc->video_quality;
- }
- }
+ enc->info.quality = enc->video_quality;
enc->bitrate_changed = FALSE;
enc->quality_changed = FALSE;
GST_OBJECT_UNLOCK (enc);
theora_enc_write_multipass_cache (enc, TRUE, FALSE);
}
-static void
-theora_enc_clear (GstTheoraEnc * enc)
+static gboolean
+theora_enc_start (GstVideoEncoder * benc)
{
+ GstTheoraEnc *enc;
+
+ GST_DEBUG_OBJECT (benc, "start: init theora");
+ enc = GST_THEORA_ENC (benc);
+
+ th_info_init (&enc->info);
+ th_comment_init (&enc->comment);
enc->packetno = 0;
- enc->bytes_out = 0;
- enc->granulepos_offset = 0;
- enc->timestamp_offset = 0;
- theora_timefifo_free (enc);
+ if (enc->multipass_mode >= MULTIPASS_MODE_FIRST_PASS) {
+ GError *err = NULL;
+
+ if (!enc->multipass_cache_file) {
+ GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
+ return FALSE;
+ }
+ enc->multipass_cache_fd =
+ g_io_channel_new_file (enc->multipass_cache_file,
+ (enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS ? "w" : "r"), &err);
+
+ if (enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS)
+ enc->multipass_cache_adapter = gst_adapter_new ();
- enc->next_ts = GST_CLOCK_TIME_NONE;
- enc->next_discont = FALSE;
- enc->expected_ts = GST_CLOCK_TIME_NONE;
+ if (!enc->multipass_cache_fd) {
+ GST_ELEMENT_ERROR (enc, RESOURCE, OPEN_READ, (NULL),
+ ("Failed to open multipass cache file: %s", err->message));
+ g_error_free (err);
+ return FALSE;
+ }
+
+ g_io_channel_set_encoding (enc->multipass_cache_fd, NULL, NULL);
+ }
+
+ return TRUE;
+}
+
+static gboolean
+theora_enc_stop (GstVideoEncoder * benc)
+{
+ GstTheoraEnc *enc;
+
+ GST_DEBUG_OBJECT (benc, "stop: clearing theora state");
+ enc = GST_THEORA_ENC (benc);
+
+ if (enc->encoder) {
+ th_encode_free (enc->encoder);
+ enc->encoder = NULL;
+ }
+ th_comment_clear (&enc->comment);
+ th_info_clear (&enc->info);
+
+ enc->initialised = FALSE;
+
+ return TRUE;
}
static char *
{
th_enc_ctx *encoder;
th_info info;
- static const struct
+ struct
{
th_pixel_fmt pixelformat;
- const char fourcc[5];
+ const char *fourcc;
} formats[] = {
{
TH_PF_420, "I420"}, {
static GstCaps *
theora_enc_sink_getcaps (GstPad * pad)
{
- GstTheoraEnc *encoder;
- GstPad *peer;
GstCaps *caps;
+ char *supported_formats, *caps_string;
- /* If we already have caps return them */
- if (GST_PAD_CAPS (pad))
- return gst_caps_ref (GST_PAD_CAPS (pad));
-
- encoder = GST_THEORA_ENC (gst_pad_get_parent (pad));
- if (!encoder)
+ supported_formats = theora_enc_get_supported_formats ();
+ if (!supported_formats) {
+ GST_WARNING ("no supported formats found. Encoder disabled?");
return gst_caps_new_empty ();
-
- peer = gst_pad_get_peer (encoder->srcpad);
- if (peer) {
- const GstCaps *templ_caps;
- GstCaps *peer_caps, *tmp_caps;
- GstStructure *s;
- guint i, n;
-
- peer_caps = gst_pad_get_caps (peer);
-
- /* Translate peercaps to YUV */
- peer_caps = gst_caps_make_writable (peer_caps);
- n = gst_caps_get_size (peer_caps);
- for (i = 0; i < n; i++) {
- s = gst_caps_get_structure (peer_caps, i);
-
- gst_structure_set_name (s, "video/x-raw-yuv");
- gst_structure_remove_field (s, "streamheader");
- }
-
- templ_caps = gst_pad_get_pad_template_caps (pad);
-
- tmp_caps = gst_caps_intersect (peer_caps, templ_caps);
- caps = gst_caps_intersect (tmp_caps, theora_enc_src_caps);
- gst_caps_unref (tmp_caps);
- gst_caps_unref (peer_caps);
- gst_object_unref (peer);
- peer = NULL;
- } else {
- caps = gst_caps_ref (theora_enc_src_caps);
}
- gst_object_unref (encoder);
+ caps_string = g_strdup_printf ("video/x-raw-yuv, "
+ "format = (fourcc) { %s }, "
+ "framerate = (fraction) [1/MAX, MAX], "
+ "width = (int) [ 1, MAX ], " "height = (int) [ 1, MAX ]",
+ supported_formats);
+ caps = gst_caps_from_string (caps_string);
+ g_free (caps_string);
+ g_free (supported_formats);
+ GST_DEBUG ("Supported caps: %" GST_PTR_FORMAT, caps);
return caps;
}
static gboolean
-theora_enc_sink_setcaps (GstPad * pad, GstCaps * caps)
+theora_enc_set_format (GstVideoEncoder * benc, GstVideoCodecState * state)
{
- GstStructure *structure = gst_caps_get_structure (caps, 0);
- GstTheoraEnc *enc = GST_THEORA_ENC (gst_pad_get_parent (pad));
- guint32 fourcc;
- const GValue *par;
- gint fps_n, fps_d;
-
- gst_structure_get_fourcc (structure, "format", &fourcc);
- gst_structure_get_int (structure, "width", &enc->width);
- gst_structure_get_int (structure, "height", &enc->height);
- gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d);
- par = gst_structure_get_value (structure, "pixel-aspect-ratio");
+ GstTheoraEnc *enc = GST_THEORA_ENC (benc);
+ GstVideoInfo *info = &state->info;
+
+ enc->width = GST_VIDEO_INFO_WIDTH (info);
+ enc->height = GST_VIDEO_INFO_HEIGHT (info);
th_info_clear (&enc->info);
th_info_init (&enc->info);
enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
enc->info.pic_width = enc->width;
enc->info.pic_height = enc->height;
- switch (fourcc) {
- case GST_MAKE_FOURCC ('I', '4', '2', '0'):
+ switch (GST_VIDEO_INFO_FORMAT (info)) {
+ case GST_VIDEO_FORMAT_I420:
enc->info.pixel_fmt = TH_PF_420;
break;
- case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
+ case GST_VIDEO_FORMAT_Y42B:
enc->info.pixel_fmt = TH_PF_422;
break;
- case GST_MAKE_FOURCC ('Y', '4', '4', '4'):
+ case GST_VIDEO_FORMAT_Y444:
enc->info.pixel_fmt = TH_PF_444;
break;
default:
g_assert_not_reached ();
}
- enc->info.fps_numerator = enc->fps_n = fps_n;
- enc->info.fps_denominator = enc->fps_d = fps_d;
- if (par) {
- enc->info.aspect_numerator = gst_value_get_fraction_numerator (par);
- enc->par_n = gst_value_get_fraction_numerator (par);
- enc->info.aspect_denominator = gst_value_get_fraction_denominator (par);
- enc->par_d = gst_value_get_fraction_denominator (par);
- } else {
- /* setting them to 0 indicates that the decoder can chose a good aspect
- * ratio, defaulting to 1/1 */
- enc->info.aspect_numerator = 0;
- enc->par_n = 1;
- enc->info.aspect_denominator = 0;
- enc->par_d = 1;
- }
+ enc->info.fps_numerator = enc->fps_n = GST_VIDEO_INFO_FPS_N (info);
+ enc->info.fps_denominator = enc->fps_d = GST_VIDEO_INFO_FPS_D (info);
+ enc->info.aspect_numerator = GST_VIDEO_INFO_PAR_N (info);
+ enc->info.aspect_denominator = GST_VIDEO_INFO_PAR_D (info);
enc->info.colorspace = TH_CS_UNSPECIFIED;
+ /* Save input state */
+ if (enc->input_state)
+ gst_video_codec_state_unref (enc->input_state);
+ enc->input_state = gst_video_codec_state_ref (state);
+
/* as done in theora */
enc->info.keyframe_granule_shift = _ilog (enc->keyframe_force - 1);
GST_DEBUG_OBJECT (enc,
theora_enc_reset (enc);
enc->initialised = TRUE;
- gst_object_unref (enc);
-
return TRUE;
}
-static guint64
-granulepos_add (guint64 granulepos, guint64 addend, gint shift)
+static GstFlowReturn
+theora_enc_pre_push (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
- guint64 iframe, pframe;
-
- iframe = granulepos >> shift;
- pframe = granulepos - (iframe << shift);
- iframe += addend;
+ GstTheoraEnc *enc = GST_THEORA_ENC (benc);
+ guint64 pfn;
- return (iframe << shift) + pframe;
+ /* see ext/ogg/README; OFFSET_END takes "our" granulepos, OFFSET its
+ * time representation */
+ /* granulepos from sync frame */
+ pfn = frame->presentation_frame_number - frame->distance_from_sync;
+ /* correct to correspond to linear running time */
+ pfn -= enc->pfn_offset;
+ pfn += enc->granulepos_offset + 1;
+ /* granulepos */
+ GST_BUFFER_OFFSET_END (frame->output_buffer) =
+ (pfn << enc->info.keyframe_granule_shift) + frame->distance_from_sync;
+ GST_BUFFER_OFFSET (frame->output_buffer) = granulepos_to_timestamp (enc,
+ GST_BUFFER_OFFSET_END (frame->output_buffer));
+
+ return GST_FLOW_OK;
}
-/* prepare a buffer for transmission by passing data through libtheora */
static GstFlowReturn
-theora_buffer_from_packet (GstTheoraEnc * enc, ogg_packet * packet,
- GstClockTime timestamp, GstClockTime running_time,
- GstClockTime duration, GstBuffer ** buffer)
+theora_push_packet (GstTheoraEnc * enc, ogg_packet * packet)
{
+ GstVideoEncoder *benc;
GstBuffer *buf;
- GstFlowReturn ret = GST_FLOW_OK;
+ GstFlowReturn ret;
+ GstVideoCodecFrame *frame;
+
+ benc = GST_VIDEO_ENCODER (enc);
buf = gst_buffer_new_and_alloc (packet->bytes);
if (!buf) {
}
memcpy (GST_BUFFER_DATA (buf), packet->packet, packet->bytes);
- gst_buffer_set_caps (buf, GST_PAD_CAPS (enc->srcpad));
- /* see ext/ogg/README; OFFSET_END takes "our" granulepos, OFFSET its
- * time representation */
- GST_BUFFER_OFFSET_END (buf) =
- granulepos_add (packet->granulepos, enc->granulepos_offset,
- enc->info.keyframe_granule_shift);
- GST_BUFFER_OFFSET (buf) = granulepos_to_timestamp (enc,
- GST_BUFFER_OFFSET_END (buf));
-
- GST_BUFFER_TIMESTAMP (buf) = timestamp;
- GST_BUFFER_DURATION (buf) = duration;
-
- if (enc->next_discont) {
- GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
- enc->next_discont = FALSE;
- }
- /* th_packet_iskeyframe returns positive for keyframes */
- if (th_packet_iskeyframe (packet) > 0) {
- GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
+ frame = gst_video_encoder_get_oldest_frame (benc);
+ frame->output_buffer = buf;
+
+ /* the second most significant bit of the first data byte is cleared
+ * for keyframes */
+ if (packet->bytes > 0 && (packet->packet[0] & 0x40) == 0) {
+ GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
} else {
- GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
+ GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);
}
enc->packetno++;
-done:
- *buffer = buf;
- return ret;
-}
-
-/* push out the buffer and do internal bookkeeping */
-static GstFlowReturn
-theora_push_buffer (GstTheoraEnc * enc, GstBuffer * buffer)
-{
- GstFlowReturn ret;
-
- enc->bytes_out += GST_BUFFER_SIZE (buffer);
-
- ret = gst_pad_push (enc->srcpad, buffer);
-
- return ret;
-}
-
-static GstFlowReturn
-theora_push_packet (GstTheoraEnc * enc, ogg_packet * packet,
- GstClockTime timestamp, GstClockTime running_time, GstClockTime duration)
-{
- GstBuffer *buf;
- GstFlowReturn ret;
-
- ret =
- theora_buffer_from_packet (enc, packet, timestamp, running_time, duration,
- &buf);
- if (ret == GST_FLOW_OK)
- ret = theora_push_buffer (enc, buf);
+ ret = gst_video_encoder_finish_frame (benc, frame);
+done:
return ret;
}
static GstCaps *
-theora_set_header_on_caps (GstCaps * caps, GSList * buffers)
+theora_set_header_on_caps (GstCaps * caps, GList * buffers)
{
GstStructure *structure;
GValue array = { 0 };
GValue value = { 0 };
GstBuffer *buffer;
- GSList *walk;
+ GList *walk;
caps = gst_caps_make_writable (caps);
structure = gst_caps_get_structure (caps, 0);
}
static void
-theora_enc_force_keyframe (GstTheoraEnc * enc)
-{
- GstClockTime next_ts;
-
- /* make sure timestamps increment after resetting the decoder */
- next_ts = enc->next_ts + enc->timestamp_offset;
-
- theora_enc_reset (enc);
- enc->granulepos_offset =
- gst_util_uint64_scale (next_ts, enc->fps_n, GST_SECOND * enc->fps_d);
- enc->timestamp_offset = next_ts;
- enc->next_ts = 0;
-}
-
-static gboolean
-theora_enc_sink_event (GstPad * pad, GstEvent * event)
-{
- GstTheoraEnc *enc;
- ogg_packet op;
- gboolean res;
-
- enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));
-
- switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_NEWSEGMENT:
- {
- gboolean update;
- gdouble rate, applied_rate;
- GstFormat format;
- gint64 start, stop, time;
-
- gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
- &format, &start, &stop, &time);
-
- gst_segment_set_newsegment_full (&enc->segment, update, rate,
- applied_rate, format, start, stop, time);
-
- res = gst_pad_push_event (enc->srcpad, event);
- break;
- }
- case GST_EVENT_EOS:
- if (enc->initialised) {
- /* clear all standing buffers */
- if (enc->dup_on_gap)
- theora_enc_encode_and_push (enc, op, NULL);
- /* push last packet with eos flag, should not be called */
- while (th_encode_packetout (enc->encoder, 1, &op)) {
- GstClockTime next_time =
- th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;
-
- theora_push_packet (enc, &op, GST_CLOCK_TIME_NONE, enc->next_ts,
- next_time - enc->next_ts);
- enc->next_ts = next_time;
- }
- }
- if (enc->initialised && enc->multipass_cache_fd
- && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS)
- theora_enc_write_multipass_cache (enc, TRUE, TRUE);
-
- theora_enc_clear_multipass_cache (enc);
-
- res = gst_pad_push_event (enc->srcpad, event);
- break;
- case GST_EVENT_FLUSH_STOP:
- gst_segment_init (&enc->segment, GST_FORMAT_UNDEFINED);
- res = gst_pad_push_event (enc->srcpad, event);
- theora_timefifo_free (enc);
- break;
- case GST_EVENT_CUSTOM_DOWNSTREAM:
- {
- const GstStructure *s;
-
- s = gst_event_get_structure (event);
-
- if (gst_structure_has_name (s, "GstForceKeyUnit"))
- theora_enc_force_keyframe (enc);
- res = gst_pad_push_event (enc->srcpad, event);
- break;
- }
- default:
- res = gst_pad_push_event (enc->srcpad, event);
- break;
- }
- return res;
-}
-
-static gboolean
-theora_enc_src_event (GstPad * pad, GstEvent * event)
-{
- GstTheoraEnc *enc;
- gboolean res = TRUE;
-
- enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));
-
- switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_CUSTOM_UPSTREAM:
- {
- const GstStructure *s;
-
- s = gst_event_get_structure (event);
-
- if (gst_structure_has_name (s, "GstForceKeyUnit")) {
- GST_OBJECT_LOCK (enc);
- enc->force_keyframe = TRUE;
- GST_OBJECT_UNLOCK (enc);
- /* consume the event */
- res = TRUE;
- gst_event_unref (event);
- } else {
- res = gst_pad_push_event (enc->sinkpad, event);
- }
- break;
- }
- default:
- res = gst_pad_push_event (enc->sinkpad, event);
- break;
- }
-
- return res;
-}
-
-static gboolean
-theora_enc_is_discontinuous (GstTheoraEnc * enc, GstClockTime timestamp,
- GstClockTime duration)
-{
- GstClockTimeDiff max_diff;
- gboolean ret = FALSE;
-
- /* Allow 3/4 a frame off */
- max_diff = (enc->info.fps_denominator * GST_SECOND * 3) /
- (enc->info.fps_numerator * 4);
-
- if (timestamp != GST_CLOCK_TIME_NONE
- && enc->expected_ts != GST_CLOCK_TIME_NONE) {
- if ((GstClockTimeDiff) (timestamp - enc->expected_ts) > max_diff) {
- GST_DEBUG_OBJECT (enc, "Incoming TS %" GST_TIME_FORMAT
- " exceeds expected value %" GST_TIME_FORMAT
- " by too much, marking discontinuity",
- GST_TIME_ARGS (timestamp), GST_TIME_ARGS (enc->expected_ts));
- ret = TRUE;
- }
- }
-
- if (GST_CLOCK_TIME_IS_VALID (duration))
- enc->expected_ts = timestamp + duration;
- else
- enc->expected_ts = GST_CLOCK_TIME_NONE;
-
- return ret;
-}
-
-static void
theora_enc_init_buffer (th_ycbcr_buffer buf, th_info * info, guint8 * data)
{
GstVideoFormat format;
{
GstBuffer *cache_buf;
const guint8 *cache_data;
- gsize bytes_read = 0;
- gint bytes_consumed = 0;
+ gsize bytes_read = 0, bytes_consumed = 0;
GIOStatus stat = G_IO_STATUS_NORMAL;
gboolean done = FALSE;
}
- if (stat == G_IO_STATUS_ERROR || bytes_read < 0) {
+ if (stat == G_IO_STATUS_ERROR || bytes_read < 0 || bytes_written < 0) {
if (begin) {
if (eos)
GST_ELEMENT_WARNING (enc, RESOURCE, WRITE, (NULL),
return TRUE;
}
-/**
- * g_slice_free can't be used with g_queue_foreach.
- * so we create new function with predefined GstClockTime size.
- */
-static void
-theora_free_gstclocktime (gpointer mem)
-{
- g_slice_free (GstClockTime, mem);
-}
-
-static void
-theora_timefifo_in (GstTheoraEnc * enc, const GstClockTime * timestamp)
-{
- GstClockTime *ptr;
-
- if (!enc->t_queue)
- enc->t_queue = g_queue_new ();
-
- g_assert (enc->t_queue != NULL);
-
- ptr = g_slice_new (GstClockTime);
- *ptr = *timestamp;
-
- g_queue_push_head (enc->t_queue, ptr);
-}
-
-static GstClockTime
-theora_timefifo_out (GstTheoraEnc * enc)
-{
- GstClockTime ret, *ptr;
-
- g_assert (enc->t_queue != NULL);
-
- ptr = g_queue_pop_tail (enc->t_queue);
- g_assert (ptr != NULL);
-
- ret = *ptr;
- theora_free_gstclocktime (ptr);
-
- return ret;
-}
-
-/**
- * theora_timefifo_truncate - truncate the timestamp queue.
- * After frame encoding we should have only one buffer for next time.
- * The count of timestamps should be the same. If it is less,
- * some thing really bad has happened. If it is bigger, encoder
- * decided to return less then we ordered.
- * TODO: for now we will just drop this timestamps. The better solution
- * probably will be to recovery frames by recovery timestamps with
- * last buffer.
- */
static void
-theora_timefifo_truncate (GstTheoraEnc * enc)
+theora_enc_reset_ts (GstTheoraEnc * enc, GstClockTime running_time, gint pfn)
{
- if (enc->dup_on_gap) {
- guint length;
- g_assert (enc->t_queue != NULL);
- length = g_queue_get_length (enc->t_queue);
-
- if (length > 1) {
- /* it is also not good if we have more then 1. */
- GST_DEBUG_OBJECT (enc, "Dropping %u time stamps", length - 1);
- while (g_queue_get_length (enc->t_queue) > 1) {
- theora_timefifo_out (enc);
- }
- }
- }
-}
-
-static void
-theora_timefifo_free (GstTheoraEnc * enc)
-{
- if (enc->t_queue) {
- if (g_queue_get_length (enc->t_queue))
- g_queue_foreach (enc->t_queue, (GFunc) theora_free_gstclocktime, NULL);
- g_queue_free (enc->t_queue);
- enc->t_queue = NULL;
- }
- /* prevbuf makes no sense without timestamps,
- * so clear it too. */
- if (enc->prevbuf) {
- gst_buffer_unref (enc->prevbuf);
- enc->prevbuf = NULL;
- }
-
-}
-
-static void
-theora_update_prevbuf (GstTheoraEnc * enc, GstBuffer * buffer)
-{
- if (enc->prevbuf) {
- gst_buffer_unref (enc->prevbuf);
- enc->prevbuf = NULL;
- }
- enc->prevbuf = gst_buffer_ref (buffer);
+ enc->granulepos_offset =
+ gst_util_uint64_scale (running_time, enc->fps_n, GST_SECOND * enc->fps_d);
+ enc->timestamp_offset = running_time;
+ enc->pfn_offset = pfn;
}
-/**
- * theora_enc_encode_and_push - encode buffer or queued previous buffer
- * buffer - buffer to encode. If set to NULL it should encode only
- * queued buffers and produce dups if needed.
- */
-
-static GstFlowReturn
-theora_enc_encode_and_push (GstTheoraEnc * enc, ogg_packet op,
- GstBuffer * buffer)
+static GstBuffer *
+theora_enc_buffer_from_header_packet (GstTheoraEnc * enc, ogg_packet * packet)
{
- GstFlowReturn ret;
- th_ycbcr_buffer ycbcr;
- gint res;
-
- if (enc->dup_on_gap) {
- guint t_queue_length;
-
- if (enc->t_queue)
- t_queue_length = g_queue_get_length (enc->t_queue);
- else
- t_queue_length = 0;
-
- if (buffer) {
- GstClockTime timestamp = GST_BUFFER_TIMESTAMP (buffer);
-
- /* videorate can easy create 200 dup frames in one shot.
- * In this case th_encode_ctl will just return TH_EINVAL
- * and we will generate only one frame as result.
- * To make us more bullet proof, make sure we have no
- * more dup frames than keyframe interval.
- */
- if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_GAP) &&
- enc->keyframe_force > t_queue_length) {
- GST_DEBUG_OBJECT (enc, "Got GAP frame, queue as duplicate.");
-
- theora_timefifo_in (enc, ×tamp);
- gst_buffer_unref (buffer);
- return GST_FLOW_OK;
- } else {
- theora_timefifo_in (enc, ×tamp);
- /* We should have one frame delay to create correct frame order.
- * First time we got buffer, prevbuf should be empty. Nothing else
- * should be done here.
- */
- if (!enc->prevbuf) {
- theora_update_prevbuf (enc, buffer);
- gst_buffer_unref (buffer);
- return GST_FLOW_OK;
- } else {
- theora_update_prevbuf (enc, buffer);
- /* after theora_update_prevbuf t_queue_length was changed */
- t_queue_length++;
-
- if (t_queue_length > 2) {
- /* now in t_queue_length should be two real buffers: current and
- * previous. All others are timestamps of duplicate frames. */
- t_queue_length -= 2;
- res = th_encode_ctl (enc->encoder, TH_ENCCTL_SET_DUP_COUNT,
- &t_queue_length, sizeof (t_queue_length));
- if (res < 0)
- GST_WARNING_OBJECT (enc, "Failed marking dups for last frame");
- }
- }
- }
- } else {
- /* if there is no buffer, then probably we got EOS or discontinuous.
- * We need to encode every thing what was left in the queue
- */
- GST_DEBUG_OBJECT (enc, "Encode collected buffers.");
- if (t_queue_length > 1) {
- t_queue_length--;
- res = th_encode_ctl (enc->encoder, TH_ENCCTL_SET_DUP_COUNT,
- &t_queue_length, sizeof (t_queue_length));
- if (res < 0)
- GST_WARNING_OBJECT (enc, "Failed marking dups for last frame.");
- } else {
- GST_DEBUG_OBJECT (enc, "Prevbuffer is empty. Nothing to encode.");
- return GST_FLOW_OK;
- }
- }
- theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (enc->prevbuf));
- } else
- theora_enc_init_buffer (ycbcr, &enc->info, GST_BUFFER_DATA (buffer));
-
- /* check for buffer, it can be optional */
- if (enc->current_discont && buffer) {
- GstClockTime timestamp = GST_BUFFER_TIMESTAMP (buffer);
- GstClockTime running_time =
- gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
- theora_enc_reset (enc);
- enc->granulepos_offset =
- gst_util_uint64_scale (running_time, enc->fps_n,
- GST_SECOND * enc->fps_d);
- enc->timestamp_offset = running_time;
- enc->next_ts = 0;
- enc->next_discont = TRUE;
- }
-
- if (enc->multipass_cache_fd
- && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
- if (!theora_enc_read_multipass_cache (enc)) {
- ret = GST_FLOW_ERROR;
- goto multipass_read_failed;
- }
- }
-#ifdef TH_ENCCTL_SET_DUPLICATE_FLAG
- if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_GAP)) {
- th_encode_ctl (enc->encoder, TH_ENCCTL_SET_DUPLICATE_FLAG, NULL, 0);
- }
-#endif
-
- res = th_encode_ycbcr_in (enc->encoder, ycbcr);
- /* none of the failure cases can happen here */
- g_assert (res == 0);
-
- if (enc->multipass_cache_fd
- && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
- if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
- ret = GST_FLOW_ERROR;
- goto multipass_write_failed;
- }
- }
-
- ret = GST_FLOW_OK;
- while (th_encode_packetout (enc->encoder, 0, &op)) {
- GstClockTime next_time, duration;
- GstClockTime timestamp = 0;
- GST_DEBUG_OBJECT (enc, "encoded. granule:%" G_GINT64_FORMAT ", packet:%p, "
- "bytes:%ld", (gint64) op.granulepos, op.packet, op.bytes);
-
- next_time = th_granule_time (enc->encoder, op.granulepos) * GST_SECOND;
- duration = next_time - enc->next_ts;
-
- if (enc->dup_on_gap && !enc->current_discont)
- timestamp = theora_timefifo_out (enc);
- else
- timestamp = GST_BUFFER_TIMESTAMP (buffer);
-
- ret = theora_push_packet (enc, &op, timestamp, enc->next_ts, duration);
-
- enc->next_ts = next_time;
- if (ret != GST_FLOW_OK) {
- theora_timefifo_truncate (enc);
- goto data_push;
- }
- }
-
- theora_timefifo_truncate (enc);
- if (buffer)
- gst_buffer_unref (buffer);
- enc->current_discont = FALSE;
-
- return ret;
-
- /* ERRORS */
-multipass_read_failed:
- {
- gst_buffer_unref (buffer);
- return ret;
- }
-multipass_write_failed:
- {
- gst_buffer_unref (buffer);
- return ret;
- }
-data_push:
- {
- gst_buffer_unref (buffer);
- return ret;
- }
+ GstBuffer *outbuf;
+
+ outbuf = gst_buffer_new_and_alloc (packet->bytes);
+ memcpy (GST_BUFFER_DATA (outbuf), packet->packet, packet->bytes);
+ GST_BUFFER_OFFSET (outbuf) = 0;
+ GST_BUFFER_OFFSET_END (outbuf) = 0;
+ GST_BUFFER_TIMESTAMP (outbuf) = GST_CLOCK_TIME_NONE;
+ GST_BUFFER_DURATION (outbuf) = GST_CLOCK_TIME_NONE;
+
+ GST_DEBUG ("created header packet buffer, %d bytes",
+ GST_BUFFER_SIZE (outbuf));
+ return outbuf;
}
static GstFlowReturn
-theora_enc_chain (GstPad * pad, GstBuffer * buffer)
+theora_enc_handle_frame (GstVideoEncoder * benc, GstVideoCodecFrame * frame)
{
GstTheoraEnc *enc;
ogg_packet op;
- GstClockTime timestamp, duration, running_time;
+ GstClockTime timestamp, running_time;
GstFlowReturn ret;
gboolean force_keyframe;
- enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));
+ enc = GST_THEORA_ENC (benc);
/* we keep track of two timelines.
- * - The timestamps from the incoming buffers, which we copy to the outgoing
+ * - The timestamps from the incoming buffers, which we copy to the outgoing
* encoded buffers as-is. We need to do this as we simply forward the
* newsegment events.
* - The running_time of the buffers, which we use to construct the granulepos
* in the packets.
*/
- timestamp = GST_BUFFER_TIMESTAMP (buffer);
- duration = GST_BUFFER_DURATION (buffer);
+ timestamp = frame->pts;
+ /* incoming buffers are clipped, so this should be positive */
running_time =
- gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);
- if ((gint64) running_time < 0) {
- GST_DEBUG_OBJECT (enc, "Dropping buffer, timestamp: %" GST_TIME_FORMAT,
- GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
- gst_buffer_unref (buffer);
- return GST_FLOW_OK;
- }
+ gst_segment_to_running_time (&GST_VIDEO_ENCODER_INPUT_SEGMENT (enc),
+ GST_FORMAT_TIME, timestamp);
+ g_return_val_if_fail (running_time >= 0 || timestamp < 0, GST_FLOW_ERROR);
GST_OBJECT_LOCK (enc);
if (enc->bitrate_changed) {
}
/* see if we need to schedule a keyframe */
- force_keyframe = enc->force_keyframe;
- enc->force_keyframe = FALSE;
+ force_keyframe = GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame);
GST_OBJECT_UNLOCK (enc);
- if (force_keyframe) {
- GstClockTime stream_time;
- GstStructure *s;
-
- stream_time = gst_segment_to_stream_time (&enc->segment,
- GST_FORMAT_TIME, timestamp);
-
- s = gst_structure_new ("GstForceKeyUnit",
- "timestamp", G_TYPE_UINT64, timestamp,
- "stream-time", G_TYPE_UINT64, stream_time,
- "running-time", G_TYPE_UINT64, running_time, NULL);
-
- theora_enc_force_keyframe (enc);
-
- gst_pad_push_event (enc->srcpad,
- gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM, s));
- }
-
- /* make sure we copy the discont flag to the next outgoing buffer when it's
- * set on the incoming buffer */
- if (GST_BUFFER_IS_DISCONT (buffer)) {
- enc->next_discont = TRUE;
- }
-
if (enc->packetno == 0) {
/* no packets written yet, setup headers */
GstCaps *caps;
GstBuffer *buf;
- GSList *buffers = NULL;
+ GList *buffers = NULL;
int result;
+ GstVideoCodecState *state;
enc->granulepos_offset = 0;
enc->timestamp_offset = 0;
while ((result =
th_encode_flushheader (enc->encoder, &enc->comment, &op)) > 0) {
- ret =
- theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
- GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf);
- if (ret != GST_FLOW_OK) {
- goto header_buffer_alloc;
- }
- buffers = g_slist_prepend (buffers, buf);
+ buf = theora_enc_buffer_from_header_packet (enc, &op);
+ buffers = g_list_prepend (buffers, buf);
}
if (result < 0) {
- g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
- g_slist_free (buffers);
+ g_list_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
+ g_list_free (buffers);
goto encoder_disabled;
}
- buffers = g_slist_reverse (buffers);
+ buffers = g_list_reverse (buffers);
/* mark buffers and put on caps */
- caps = gst_caps_new_simple ("video/x-theora",
- "width", G_TYPE_INT, enc->width,
- "height", G_TYPE_INT, enc->height,
- "framerate", GST_TYPE_FRACTION, enc->fps_n, enc->fps_d,
- "pixel-aspect-ratio", GST_TYPE_FRACTION, enc->par_n, enc->par_d, NULL);
+ caps = gst_caps_new_simple ("video/x-theora", NULL);
caps = theora_set_header_on_caps (caps, buffers);
- GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
- gst_pad_set_caps (enc->srcpad, caps);
+ state = gst_video_encoder_set_output_state (benc, caps, enc->input_state);
- g_slist_foreach (buffers, (GFunc) gst_buffer_set_caps, caps);
+ GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, state->caps);
- gst_caps_unref (caps);
+ gst_video_codec_state_unref (state);
- /* push out the header buffers */
- while (buffers) {
- buf = buffers->data;
- buffers = g_slist_delete_link (buffers, buffers);
- if ((ret = theora_push_buffer (enc, buf)) != GST_FLOW_OK) {
- g_slist_foreach (buffers, (GFunc) gst_buffer_unref, NULL);
- g_slist_free (buffers);
- goto header_push;
- }
- }
+ gst_video_encoder_set_headers (benc, buffers);
- enc->granulepos_offset =
- gst_util_uint64_scale (running_time, enc->fps_n,
- GST_SECOND * enc->fps_d);
- enc->timestamp_offset = running_time;
- enc->next_ts = 0;
+ theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
}
- enc->current_discont = theora_enc_is_discontinuous (enc,
- running_time, duration);
+ {
+ th_ycbcr_buffer ycbcr;
+ gint res;
- /* empty queue if discontinuous */
- if (enc->current_discont && enc->dup_on_gap)
- theora_enc_encode_and_push (enc, op, NULL);
+ theora_enc_init_buffer (ycbcr, &enc->info,
+ GST_BUFFER_DATA (frame->input_buffer));
- ret = theora_enc_encode_and_push (enc, op, buffer);
+ if (force_keyframe) {
+ theora_enc_reset (enc);
+ theora_enc_reset_ts (enc, running_time, frame->presentation_frame_number);
+ }
+
+ if (enc->multipass_cache_fd
+ && enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS) {
+ if (!theora_enc_read_multipass_cache (enc)) {
+ ret = GST_FLOW_ERROR;
+ goto multipass_read_failed;
+ }
+ }
+
+ res = th_encode_ycbcr_in (enc->encoder, ycbcr);
+ /* none of the failure cases can happen here */
+ g_assert (res == 0);
+
+ if (enc->multipass_cache_fd
+ && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS) {
+ if (!theora_enc_write_multipass_cache (enc, FALSE, FALSE)) {
+ ret = GST_FLOW_ERROR;
+ goto multipass_write_failed;
+ }
+ }
+
+ ret = GST_FLOW_OK;
+ while (th_encode_packetout (enc->encoder, 0, &op)) {
+ ret = theora_push_packet (enc, &op);
+ if (ret != GST_FLOW_OK)
+ goto data_push;
+ }
+ }
return ret;
/* ERRORS */
-header_buffer_alloc:
+multipass_read_failed:
{
- gst_buffer_unref (buffer);
return ret;
}
-header_push:
+multipass_write_failed:
+ {
+ return ret;
+ }
+data_push:
{
- gst_buffer_unref (buffer);
return ret;
}
encoder_disabled:
{
GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
("libtheora has been compiled with the encoder disabled"));
- gst_buffer_unref (buffer);
return GST_FLOW_ERROR;
}
}
-static GstStateChangeReturn
-theora_enc_change_state (GstElement * element, GstStateChange transition)
+static gboolean
+theora_enc_finish (GstVideoEncoder * benc)
{
GstTheoraEnc *enc;
- GstStateChangeReturn ret;
- th_enc_ctx *th_ctx;
-
- enc = GST_THEORA_ENC (element);
-
- switch (transition) {
- case GST_STATE_CHANGE_NULL_TO_READY:
- th_ctx = dummy_encode_ctx ();
- if (!th_ctx) {
- GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
- ("libtheora has been compiled with the encoder disabled"));
- return GST_STATE_CHANGE_FAILURE;
- }
- th_encode_free (th_ctx);
- break;
- case GST_STATE_CHANGE_READY_TO_PAUSED:
- GST_DEBUG_OBJECT (enc, "READY->PAUSED Initing theora state");
- th_info_init (&enc->info);
- th_comment_init (&enc->comment);
- enc->packetno = 0;
- enc->force_keyframe = FALSE;
-
- if (enc->multipass_mode >= MULTIPASS_MODE_FIRST_PASS) {
- GError *err = NULL;
-
- if (!enc->multipass_cache_file) {
- ret = GST_STATE_CHANGE_FAILURE;
- GST_ELEMENT_ERROR (enc, LIBRARY, SETTINGS, (NULL), (NULL));
- return ret;
- }
- enc->multipass_cache_fd =
- g_io_channel_new_file (enc->multipass_cache_file,
- (enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS ? "w" : "r"),
- &err);
-
- if (enc->multipass_mode == MULTIPASS_MODE_SECOND_PASS)
- enc->multipass_cache_adapter = gst_adapter_new ();
-
- if (!enc->multipass_cache_fd) {
- ret = GST_STATE_CHANGE_FAILURE;
- GST_ELEMENT_ERROR (enc, RESOURCE, OPEN_READ, (NULL),
- ("Failed to open multipass cache file: %s", err->message));
- g_error_free (err);
- return ret;
- }
-
- g_io_channel_set_encoding (enc->multipass_cache_fd, NULL, NULL);
- }
- break;
- case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
- break;
- default:
- break;
- }
-
- ret = parent_class->change_state (element, transition);
+ ogg_packet op;
- switch (transition) {
- case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
- break;
- case GST_STATE_CHANGE_PAUSED_TO_READY:
- GST_DEBUG_OBJECT (enc, "PAUSED->READY Clearing theora state");
- if (enc->encoder) {
- th_encode_free (enc->encoder);
- enc->encoder = NULL;
- }
- th_comment_clear (&enc->comment);
- th_info_clear (&enc->info);
+ enc = GST_THEORA_ENC (benc);
- theora_enc_clear (enc);
- enc->initialised = FALSE;
- break;
- case GST_STATE_CHANGE_READY_TO_NULL:
- break;
- default:
- break;
+ if (enc->initialised) {
+ /* drain any remaining packets, with the EOS flag set; normally none are pending */
+ while (th_encode_packetout (enc->encoder, 1, &op)) {
+ theora_push_packet (enc, &op);
+ }
}
+ if (enc->initialised && enc->multipass_cache_fd
+ && enc->multipass_mode == MULTIPASS_MODE_FIRST_PASS)
+ theora_enc_write_multipass_cache (enc, TRUE, TRUE);
- return ret;
+ theora_enc_clear_multipass_cache (enc);
+
+ return TRUE;
}
static void
case PROP_NOISE_SENSITIVITY:
case PROP_SHARPNESS:
/* kept for API compat, but ignored */
- GST_WARNING_OBJECT (object, "Obsolete property '%s' ignored",
- pspec->name);
break;
case PROP_BITRATE:
GST_OBJECT_LOCK (enc);
enc->video_bitrate = g_value_get_int (value) * 1000;
+ enc->video_quality = 0;
enc->bitrate_changed = TRUE;
GST_OBJECT_UNLOCK (enc);
break;
case PROP_QUALITY:
GST_OBJECT_LOCK (enc);
- if (GST_STATE (enc) >= GST_STATE_PAUSED && enc->video_bitrate > 0) {
+ if (GST_STATE (enc) >= GST_STATE_PAUSED && enc->video_quality == 0) {
GST_WARNING_OBJECT (object, "Can't change from bitrate to quality mode"
" while playing");
} else {
break;
case PROP_SPEEDLEVEL:
enc->speed_level = g_value_get_int (value);
- if (enc->encoder) {
- th_encode_ctl (enc->encoder, TH_ENCCTL_SET_SPLEVEL, &enc->speed_level,
- sizeof (enc->speed_level));
- }
break;
case PROP_VP3_COMPATIBLE:
enc->vp3_compatible = g_value_get_boolean (value);
case PROP_MULTIPASS_MODE:
enc->multipass_mode = g_value_get_enum (value);
break;
- case PROP_DUP_ON_GAP:
- enc->dup_on_gap = g_value_get_boolean (value);
- break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
case PROP_MULTIPASS_MODE:
g_value_set_enum (value, enc->multipass_mode);
break;
- case PROP_DUP_ON_GAP:
- g_value_set_boolean (value, enc->dup_on_gap);
- break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
+
+gboolean
+gst_theora_enc_register (GstPlugin * plugin)
+{
+ return gst_element_register (plugin, "theoraenc",
+ GST_RANK_PRIMARY, GST_TYPE_THEORA_ENC);
+}