AG_GST_DEFAULT_ELEMENTS
dnl *** plug-ins to include ***
- musepack musicbrainz nas neon ofa openal rsvg schro sdl smooth sndfile soundtouch spandsp timidity \
+dnl Non ported plugins (non-dependant, then dependant)
+dnl Make sure you have a space before and after all plugins
+GST_PLUGINS_NONPORTED=" adpcmdec adpcmenc aiff asfmux \
+ autoconvert camerabin cdxaparse coloreffects \
+ dccp debugutils dtmf faceoverlay festival \
+ fieldanalysis freeverb freeze frei0r gaudieffects geometrictransform h264parse \
+ hdvparse hls id3tag inter interlace ivfparse jpegformat jp2kdecimator \
+ kate liveadder legacyresample librfb mpegdemux mpegtsmux \
+ mpegpsmux mpegvideoparse mve mxf mythtv nsf nuvdemux \
+ patchdetect pcapparse pnm rawparse real removesilence rtpmux rtpvp8 scaletempo \
+ sdi segmentclip siren speed subenc stereo tta videofilters \
+ videomaxrate videomeasure videosignal vmnc \
+ decklink fbdev linsys shm vcd \
+ voaacenc apexsink bz2 cdaudio celt cog curl dc1394 dirac directfb dts resindvd \
+ gsettings gsm jp2k ladspa modplug mpeg2enc mplex mimic \
++ musepack musicbrainz nas neon ofa openal opencv rsvg schro sdl smooth sndfile soundtouch spandsp timidity \
+ wildmidi xvid apple_media "
+AC_SUBST(GST_PLUGINS_NONPORTED)
dnl these are all the gst plug-ins, compilable without additional libs
AG_GST_CHECK_PLUGIN(adpcmdec)
# CFLAGS and LDFLAGS for compiling scan program. Only needed if your app/lib
# contains GtkObjects/GObjects and you want to document signals and properties.
- GTKDOC_CFLAGS = -DGST_USE_UNSTABLE_API $(GST_PLUGINS_BAD_CFLAGS) $(GST_BAD_CFLAGS) $(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS)
-GTKDOC_CFLAGS = $(GST_PLUGINS_BAD_CFLAGS) $(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS)
++GTKDOC_CFLAGS = -DGST_USE_UNSTABLE_API $(GST_PLUGINS_BAD_CFLAGS) $(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS)
GTKDOC_LIBS = \
$(top_builddir)/gst-libs/gst/codecparsers/libgstcodecparsers-@GST_MAJORMINOR@.la \
$(top_builddir)/gst-libs/gst/basecamerabinsrc/libgstbasecamerabinsrc-@GST_MAJORMINOR@.la \
$(top_builddir)/gst-libs/gst/interfaces/libgstphotography-@GST_MAJORMINOR@.la \
$(top_builddir)/gst-libs/gst/signalprocessor/libgstsignalprocessor-@GST_MAJORMINOR@.la \
$(top_builddir)/gst-libs/gst/video/libgstbasevideo-@GST_MAJORMINOR@.la \
- $(GST_BASE_LIBS) $(GST_BAD_LIBS)
- $(GST_BASE_LIBS)
++ $(GST_BASE_LIBS)
GTKDOC_CC=$(LIBTOOL) --tag=CC --mode=compile $(CC)
GTKDOC_LD=$(LIBTOOL) --tag=CC --mode=link $(CC)
}
if (G_UNLIKELY ((ret_size = faacEncEncode (faac->handle, (gint32 *) data,
- size / faac->bps, GST_BUFFER_DATA (out_buf),
- GST_BUFFER_SIZE (out_buf))) < 0))
+ size / faac->bps, out_data, out_size)) < 0))
goto encode_failed;
- GST_LOG_OBJECT (faac, "encoder return: %d", ret_size);
- if (G_LIKELY (ret_size > 0)) {
- GST_BUFFER_SIZE (out_buf) = ret_size;
+ gst_buffer_unmap (in_buf, data, size);
+
+ GST_LOG_OBJECT (faac, "encoder return: %" G_GSIZE_FORMAT, ret_size);
+
+ if (ret_size > 0) {
+ gst_buffer_unmap (out_buf, out_data, ret_size);
ret = gst_audio_encoder_finish_frame (enc, out_buf, faac->samples);
} else {
+ gst_buffer_unmap (out_buf, out_data, 0);
gst_buffer_unref (out_buf);
+ /* re-create encoder after final flush */
+ if (!in_buf) {
+ GST_DEBUG_OBJECT (faac, "flushed; recreating encoder");
+ gst_faac_close_encoder (faac);
+ if (!gst_faac_open_encoder (faac))
+ ret = GST_FLOW_ERROR;
+ }
}
return ret;
GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS ("audio/x-raw-int, "
+ GST_STATIC_CAPS ("audio/x-raw, "
+ "format = (string) { " GST_AUDIO_NE (S16) " }, "
- "rate = (int) { 8000, 12000, 16000, 24000, 48000 }, "
+ "rate = (int) { 48000, 24000, 16000, 12000, 8000 }, "
- "channels = (int) [ 1, 8 ], "
- "endianness = (int) BYTE_ORDER, "
- "signed = (boolean) true, " "width = (int) 16, " "depth = (int) 16")
+ "channels = (int) [ 1, 8 ] ")
);
static GstStaticPadTemplate opus_dec_sink_factory =
static GstFlowReturn
gst_opus_dec_parse_header (GstOpusDec * dec, GstBuffer * buf)
{
- const guint8 *data = GST_BUFFER_DATA (buf);
+ const guint8 *data;
GstCaps *caps;
- GstStructure *s;
const GstAudioChannelPosition *pos = NULL;
g_return_val_if_fail (gst_opus_header_is_id_header (buf), GST_FLOW_ERROR);
GST_INFO_OBJECT (dec,
"Skipping %u samples (%u at 48000 Hz, %u left to skip)", skip,
scaled_skip, dec->pre_skip);
+ }
- if (gst_buffer_get_size (outbuf) == 0) {
- gst_buffer_unref (outbuf);
- outbuf = NULL;
- }
- if (GST_BUFFER_SIZE (outbuf) == 0) {
++ if (gst_buffer_get_size (outbuf) == 0) {
+ gst_buffer_unref (outbuf);
+ outbuf = NULL;
}
/* Apply gain */
static gboolean gst_opus_enc_sink_event (GstAudioEncoder * benc,
GstEvent * event);
-static GstCaps *gst_opus_enc_sink_getcaps (GstAudioEncoder * benc);
++static GstCaps *gst_opus_enc_sink_getcaps (GstAudioEncoder * benc,
++ GstCaps * filter);
static gboolean gst_opus_enc_setup (GstOpusEnc * enc);
static void gst_opus_enc_get_property (GObject * object, guint prop_id,
return FALSE;
}
-gst_opus_enc_sink_getcaps (GstAudioEncoder * benc)
+ static GstCaps *
- peercaps = gst_pad_peer_get_caps (GST_AUDIO_ENCODER_SRC_PAD (benc));
++gst_opus_enc_sink_getcaps (GstAudioEncoder * benc, GstCaps * filter)
+ {
+ GstOpusEnc *enc;
+ GstCaps *caps;
+ GstCaps *peercaps = NULL;
+ GstCaps *intersect = NULL;
+ guint i;
+ gboolean allow_multistream;
+
+ enc = GST_OPUS_ENC (benc);
+
+ GST_DEBUG_OBJECT (enc, "sink getcaps");
+
++ peercaps = gst_pad_peer_query_caps (GST_AUDIO_ENCODER_SRC_PAD (benc), filter);
+ if (!peercaps) {
+ GST_DEBUG_OBJECT (benc, "No peercaps, returning template sink caps");
+ return
+ gst_caps_copy (gst_pad_get_pad_template_caps
+ (GST_AUDIO_ENCODER_SINK_PAD (benc)));
+ }
+
+ intersect = gst_caps_intersect (peercaps,
+ gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (benc)));
+ gst_caps_unref (peercaps);
+
+ if (gst_caps_is_empty (intersect))
+ return intersect;
+
+ allow_multistream = FALSE;
+ for (i = 0; i < gst_caps_get_size (intersect); i++) {
+ GstStructure *s = gst_caps_get_structure (intersect, i);
+ gboolean multistream;
+ if (gst_structure_get_boolean (s, "multistream", &multistream)) {
+ if (multistream) {
+ allow_multistream = TRUE;
+ }
+ } else {
+ allow_multistream = TRUE;
+ }
+ }
+
+ gst_caps_unref (intersect);
+
+ caps =
+ gst_caps_copy (gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SINK_PAD
+ (benc)));
+ if (!allow_multistream) {
+ GValue range = { 0 };
+ g_value_init (&range, GST_TYPE_INT_RANGE);
+ gst_value_set_int_range (&range, 1, 2);
+ for (i = 0; i < gst_caps_get_size (caps); i++) {
+ GstStructure *s = gst_caps_get_structure (caps, i);
+ gst_structure_set_value (s, "channels", &range);
+ }
+ g_value_unset (&range);
+ }
+
++ if (filter) {
++ GstCaps *tmp = gst_caps_intersect_full (caps, filter,
++ GST_CAPS_INTERSECT_FIRST);
++ gst_caps_unref (caps);
++ caps = tmp;
++ }
++
+ GST_DEBUG_OBJECT (enc, "Returning caps: %" GST_PTR_FORMAT, caps);
+ return caps;
+ }
+
static GstFlowReturn
gst_opus_enc_encode (GstOpusEnc * enc, GstBuffer * buf)
{
gst_opus_header_create_caps_from_headers (GstCaps ** caps, GSList ** headers,
GstBuffer * buf1, GstBuffer * buf2)
{
+ int n_streams, family;
+ gboolean multistream;
++ guint8 *data;
++ gsize size;
+
g_return_if_fail (caps);
g_return_if_fail (headers && !*headers);
- g_return_if_fail (GST_BUFFER_SIZE (buf1) >= 19);
++ g_return_if_fail (gst_buffer_get_size (buf1) >= 19);
++
++ data = gst_buffer_map (buf1, &size, NULL, GST_MAP_READ);
+
+ /* work out the number of streams */
- family = GST_BUFFER_DATA (buf1)[18];
++ family = data[18];
+ if (family == 0) {
+ n_streams = 1;
+ } else {
+ /* only included in the header for family > 0 */
- g_return_if_fail (GST_BUFFER_SIZE (buf1) >= 20);
- n_streams = GST_BUFFER_DATA (buf1)[19];
++ if (size >= 20)
++ n_streams = data[19];
++ else {
++ g_warning ("family > 0 but header buffer size < 20");
++ gst_buffer_unmap (buf1, data, size);
++ return;
++ }
+ }
+
++ gst_buffer_unmap (buf1, data, size);
+
/* mark and put on caps */
- *caps = gst_caps_from_string ("audio/x-opus");
+ multistream = n_streams > 1;
+ *caps = gst_caps_new_simple ("audio/x-opus",
+ "multistream", G_TYPE_BOOLEAN, multistream, NULL);
*caps = _gst_caps_set_buffer_array (*caps, "streamheader", buf1, buf2, NULL);
*headers = g_slist_prepend (*headers, buf2);
--- /dev/null
-
-
+ /*
+ * Opus Depayloader Gst Element
+ *
+ * @author: Danilo Cesar Lemes de Paula <danilo.cesar@collabora.co.uk>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+ # include "config.h"
+ #endif
+
+ #include <string.h>
+ #include <stdlib.h>
+ #include <gst/rtp/gstrtpbuffer.h>
+ #include "gstrtpopusdepay.h"
+
+ GST_DEBUG_CATEGORY_STATIC (rtpopusdepay_debug);
+ #define GST_CAT_DEFAULT (rtpopusdepay_debug)
+
-static GstBuffer *gst_rtp_opus_depay_process (GstBaseRTPDepayload * depayload,
+ static GstStaticPadTemplate gst_rtp_opus_depay_sink_template =
+ GST_STATIC_PAD_TEMPLATE ("sink",
+ GST_PAD_SINK,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS ("application/x-rtp, "
+ "media = (string) \"audio\", "
+ "payload = (int) " GST_RTP_PAYLOAD_DYNAMIC_STRING ","
+ "clock-rate = (int) 48000, "
+ "encoding-name = (string) \"X-GST-OPUS-DRAFT-SPITTKA-00\"")
+ );
+
+ static GstStaticPadTemplate gst_rtp_opus_depay_src_template =
+ GST_STATIC_PAD_TEMPLATE ("src",
+ GST_PAD_SRC,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS ("audio/x-opus")
+ );
+
-static gboolean gst_rtp_opus_depay_setcaps (GstBaseRTPDepayload * depayload,
++static GstBuffer *gst_rtp_opus_depay_process (GstRTPBaseDepayload * depayload,
+ GstBuffer * buf);
-GST_BOILERPLATE (GstRTPOpusDepay, gst_rtp_opus_depay, GstBaseRTPDepayload,
- GST_TYPE_BASE_RTP_DEPAYLOAD);
++static gboolean gst_rtp_opus_depay_setcaps (GstRTPBaseDepayload * depayload,
+ GstCaps * caps);
+
-gst_rtp_opus_depay_base_init (gpointer klass)
++G_DEFINE_TYPE (GstRTPOpusDepay, gst_rtp_opus_depay,
++ GST_TYPE_RTP_BASE_DEPAYLOAD);
+
+ static void
- GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
++gst_rtp_opus_depay_class_init (GstRTPOpusDepayClass * klass)
+ {
-}
-
-static void
-gst_rtp_opus_depay_class_init (GstRTPOpusDepayClass * klass)
-{
- GstBaseRTPDepayloadClass *gstbasertpdepayload_class;
-
- gstbasertpdepayload_class = (GstBaseRTPDepayloadClass *) klass;
++ GstRTPBaseDepayloadClass *gstbasertpdepayload_class;
++ GstElementClass *element_class;
++
++ element_class = GST_ELEMENT_CLASS (klass);
++ gstbasertpdepayload_class = (GstRTPBaseDepayloadClass *) klass;
+
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&gst_rtp_opus_depay_src_template));
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&gst_rtp_opus_depay_sink_template));
+ gst_element_class_set_details_simple (element_class,
+ "RTP Opus packet depayloader", "Codec/Depayloader/Network/RTP",
+ "Extracts Opus audio from RTP packets",
+ "Danilo Cesar Lemes de Paula <danilo.cesar@collabora.co.uk>");
-gst_rtp_opus_depay_init (GstRTPOpusDepay * rtpopusdepay,
- GstRTPOpusDepayClass * klass)
+
+ gstbasertpdepayload_class->process = gst_rtp_opus_depay_process;
+ gstbasertpdepayload_class->set_caps = gst_rtp_opus_depay_setcaps;
+
+ GST_DEBUG_CATEGORY_INIT (rtpopusdepay_debug, "rtpopusdepay", 0,
+ "Opus RTP Depayloader");
+ }
+
+ static void
-gst_rtp_opus_depay_setcaps (GstBaseRTPDepayload * depayload, GstCaps * caps)
++gst_rtp_opus_depay_init (GstRTPOpusDepay * rtpopusdepay)
+ {
+
+ }
+
+ static gboolean
- srccaps = gst_caps_new_simple ("audio/x-opus", NULL);
- ret = gst_pad_set_caps (GST_BASE_RTP_DEPAYLOAD_SRCPAD (depayload), srccaps);
++gst_rtp_opus_depay_setcaps (GstRTPBaseDepayload * depayload, GstCaps * caps)
+ {
+ GstCaps *srccaps;
+ gboolean ret;
+
-gst_rtp_opus_depay_process (GstBaseRTPDepayload * depayload, GstBuffer * buf)
++ srccaps = gst_caps_new_empty_simple ("audio/x-opus");
++ ret = gst_pad_set_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload), srccaps);
+
+ GST_DEBUG_OBJECT (depayload,
+ "set caps on source: %" GST_PTR_FORMAT " (ret=%d)", srccaps, ret);
+ gst_caps_unref (srccaps);
+
+ depayload->clock_rate = 48000;
+
+ return ret;
+ }
+
+ static GstBuffer *
- outbuf = gst_rtp_buffer_get_payload_buffer (buf);
++gst_rtp_opus_depay_process (GstRTPBaseDepayload * depayload, GstBuffer * buf)
+ {
+ GstBuffer *outbuf;
++ GstRTPBuffer rtpbuf = { NULL, };
++
++ gst_rtp_buffer_map (buf, GST_MAP_READ, &rtpbuf);
++ outbuf = gst_rtp_buffer_get_payload_buffer (&rtpbuf);
++ gst_rtp_buffer_unmap (&rtpbuf);
+
+ return outbuf;
+ }
--- /dev/null
-#include <gst/rtp/gstbasertpdepayload.h>
+ /*
+ * Opus Depayloader Gst Element
+ *
+ * @author: Danilo Cesar Lemes de Paula <danilo.eu@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+ #ifndef __GST_RTP_OPUS_DEPAY_H__
+ #define __GST_RTP_OPUS_DEPAY_H__
+
+ #include <gst/gst.h>
- GstBaseRTPDepayload depayload;
++#include <gst/rtp/gstrtpbasedepayload.h>
+
+ G_BEGIN_DECLS typedef struct _GstRTPOpusDepay GstRTPOpusDepay;
+ typedef struct _GstRTPOpusDepayClass GstRTPOpusDepayClass;
+
+ #define GST_TYPE_RTP_OPUS_DEPAY \
+ (gst_rtp_opus_depay_get_type())
+ #define GST_RTP_OPUS_DEPAY(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_RTP_OPUS_DEPAY,GstRTPOpusDepay))
+ #define GST_RTP_OPUS_DEPAY_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_RTP_OPUS_DEPAY,GstRTPOpusDepayClass))
+ #define GST_IS_RTP_OPUS_DEPAY(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_RTP_OPUS_DEPAY))
+ #define GST_IS_RTP_OPUS_DEPAY_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_RTP_OPUS_DEPAY))
+
+
+ struct _GstRTPOpusDepay
+ {
- GstBaseRTPDepayloadClass parent_class;
++ GstRTPBaseDepayload depayload;
+
+ };
+
+ struct _GstRTPOpusDepayClass
+ {
++ GstRTPBaseDepayloadClass parent_class;
+ };
+
+ GType gst_rtp_opus_depay_get_type (void);
+
+ G_END_DECLS
+ #endif /* __GST_RTP_OPUS_DEPAY_H__ */
--- /dev/null
-static gboolean gst_rtp_opus_pay_setcaps (GstBaseRTPPayload * payload,
+ /*
+ * Opus Payloader Gst Element
+ *
+ * @author: Danilo Cesar Lemes de Paula <danilo.cesar@collabora.co.uk>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+ # include "config.h"
+ #endif
+
+ #include <string.h>
+
+ #include <gst/rtp/gstrtpbuffer.h>
+
+ #include "gstrtpopuspay.h"
+
+ GST_DEBUG_CATEGORY_STATIC (rtpopuspay_debug);
+ #define GST_CAT_DEFAULT (rtpopuspay_debug)
+
+
+ static GstStaticPadTemplate gst_rtp_opus_pay_sink_template =
+ GST_STATIC_PAD_TEMPLATE ("sink",
+ GST_PAD_SINK,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS ("audio/x-opus, multistream = (boolean) FALSE")
+ );
+
+ static GstStaticPadTemplate gst_rtp_opus_pay_src_template =
+ GST_STATIC_PAD_TEMPLATE ("src",
+ GST_PAD_SRC,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS ("application/x-rtp, "
+ "media = (string) \"audio\", "
+ "payload = (int) " GST_RTP_PAYLOAD_DYNAMIC_STRING ", "
+ "clock-rate = (int) 48000, "
+ "encoding-name = (string) \"X-GST-OPUS-DRAFT-SPITTKA-00\"")
+ );
+
-static GstFlowReturn gst_rtp_opus_pay_handle_buffer (GstBaseRTPPayload *
++static gboolean gst_rtp_opus_pay_setcaps (GstRTPBasePayload * payload,
+ GstCaps * caps);
-GST_BOILERPLATE (GstRtpOPUSPay, gst_rtp_opus_pay, GstBaseRTPPayload,
- GST_TYPE_BASE_RTP_PAYLOAD);
++static GstFlowReturn gst_rtp_opus_pay_handle_buffer (GstRTPBasePayload *
+ payload, GstBuffer * buffer);
+
-gst_rtp_opus_pay_base_init (gpointer klass)
++G_DEFINE_TYPE (GstRtpOPUSPay, gst_rtp_opus_pay, GST_TYPE_RTP_BASE_PAYLOAD);
+
+ static void
- GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
++gst_rtp_opus_pay_class_init (GstRtpOPUSPayClass * klass)
+ {
-}
-
-static void
-gst_rtp_opus_pay_class_init (GstRtpOPUSPayClass * klass)
-{
- GstBaseRTPPayloadClass *gstbasertppayload_class;
-
- gstbasertppayload_class = (GstBaseRTPPayloadClass *) klass;
-
- gstbasertppayload_class->set_caps = gst_rtp_opus_pay_setcaps;
- gstbasertppayload_class->handle_buffer = gst_rtp_opus_pay_handle_buffer;
++ GstRTPBasePayloadClass *gstbasertppayload_class;
++ GstElementClass *element_class;
++
++ gstbasertppayload_class = (GstRTPBasePayloadClass *) klass;
++ element_class = GST_ELEMENT_CLASS (klass);
++
++ gstbasertppayload_class->set_caps = gst_rtp_opus_pay_setcaps;
++ gstbasertppayload_class->handle_buffer = gst_rtp_opus_pay_handle_buffer;
+
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&gst_rtp_opus_pay_src_template));
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&gst_rtp_opus_pay_sink_template));
+
+ gst_element_class_set_details_simple (element_class,
+ "RTP Opus payloader",
+ "Codec/Payloader/Network/RTP",
+ "Puts Opus audio in RTP packets",
+ "Danilo Cesar Lemes de Paula <danilo.cesar@collabora.co.uk>");
-gst_rtp_opus_pay_init (GstRtpOPUSPay * rtpopuspay, GstRtpOPUSPayClass * klass)
+
+ GST_DEBUG_CATEGORY_INIT (rtpopuspay_debug, "rtpopuspay", 0,
+ "Opus RTP Payloader");
+ }
+
+ static void
-gst_rtp_opus_pay_setcaps (GstBaseRTPPayload * payload, GstCaps * caps)
++gst_rtp_opus_pay_init (GstRtpOPUSPay * rtpopuspay)
+ {
+ }
+
+ static gboolean
- gst_basertppayload_set_options (payload, "audio", FALSE,
++gst_rtp_opus_pay_setcaps (GstRTPBasePayload * payload, GstCaps * caps)
+ {
+ gboolean res;
+ gchar *capsstr;
+
+ capsstr = gst_caps_to_string (caps);
+
- gst_basertppayload_set_outcaps (payload, "caps", G_TYPE_STRING, capsstr,
++ gst_rtp_base_payload_set_options (payload, "audio", FALSE,
+ "X-GST-OPUS-DRAFT-SPITTKA-00", 48000);
+ res =
-gst_rtp_opus_pay_handle_buffer (GstBaseRTPPayload * basepayload,
++ gst_rtp_base_payload_set_outcaps (payload, "caps", G_TYPE_STRING, capsstr,
+ NULL);
+ g_free (capsstr);
+
+ return res;
+ }
+
+ static GstFlowReturn
- GstClockTime timestamp;
-
- guint size;
- guint8 *data;
- guint8 *payload;
-
- size = GST_BUFFER_SIZE (buffer);
- data = GST_BUFFER_DATA (buffer);
- timestamp = GST_BUFFER_TIMESTAMP (buffer);
-
- outbuf = gst_rtp_buffer_new_allocate (size, 0, 0);
- payload = gst_rtp_buffer_get_payload (outbuf);
-
- memcpy (payload, data, size);
-
- gst_rtp_buffer_set_marker (outbuf, FALSE);
- GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
-
- return gst_basertppayload_push (basepayload, outbuf);
++gst_rtp_opus_pay_handle_buffer (GstRTPBasePayload * basepayload,
+ GstBuffer * buffer)
+ {
++ GstRTPBuffer rtpbuf = { NULL, };
+ GstBuffer *outbuf;
++ gsize size;
++ gpointer *data;
++
++ /* Copy data and timestamp to a new output buffer
++ * FIXME : Don't we have a convenience function for this ? */
++ data = gst_buffer_map (buffer, &size, NULL, GST_MAP_READ);
++ outbuf = gst_rtp_buffer_new_copy_data (data, size);
++ GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (buffer);
++
++ /* Unmap and free input buffer */
++ gst_buffer_unmap (buffer, data, size);
++ gst_buffer_unref (buffer);
++
++ /* Remove marker from RTP buffer */
++ gst_rtp_buffer_map (outbuf, GST_MAP_WRITE, &rtpbuf);
++ gst_rtp_buffer_set_marker (&rtpbuf, FALSE);
++ gst_rtp_buffer_unmap (&rtpbuf);
++
++ /* Push out */
++ return gst_rtp_base_payload_push (basepayload, outbuf);
+ }
--- /dev/null
-#include <gst/rtp/gstbasertppayload.h>
+ /*
+ * Opus Payloader Gst Element
+ *
+ * @author: Danilo Cesar Lemes de Paula <danilo.eu@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+ #ifndef __GST_RTP_OPUS_PAY_H__
+ #define __GST_RTP_OPUS_PAY_H__
+
+ #include <gst/gst.h>
- GstBaseRTPPayload payload;
++#include <gst/rtp/gstrtpbasepayload.h>
+
+ G_BEGIN_DECLS
+
+ #define GST_TYPE_RTP_OPUS_PAY \
+ (gst_rtp_opus_pay_get_type())
+ #define GST_RTP_OPUS_PAY(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_RTP_OPUS_PAY,GstRtpOPUSPay))
+ #define GST_RTP_OPUS_PAY_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_RTP_OPUS_PAY,GstRtpOPUSPayClass))
+ #define GST_IS_RTP_OPUS_PAY(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_RTP_OPUS_PAY))
+ #define GST_IS_RTP_OPUS_PAY_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_RTP_OPUS_PAY))
+
+ typedef struct _GstRtpOPUSPay GstRtpOPUSPay;
+ typedef struct _GstRtpOPUSPayClass GstRtpOPUSPayClass;
+
+ struct _GstRtpOPUSPay
+ {
- GstBaseRTPPayloadClass parent_class;
++ GstRTPBasePayload payload;
+ };
+
+ struct _GstRtpOPUSPayClass
+ {
++ GstRTPBasePayloadClass parent_class;
+ };
+
+ GType gst_rtp_opus_pay_get_type (void);
+
+ G_END_DECLS
+
+ #endif /* __GST_RTP_OPUS_PAY_H__ */
static GstStateChangeReturn gst_base_video_codec_change_state (GstElement *
element, GstStateChange transition);
-GType
-gst_video_frame_get_type (void)
-{
- static volatile gsize type = 0;
+static GstElementClass *parent_class = NULL;
- if (g_once_init_enter (&type)) {
- GType _type;
+G_DEFINE_BOXED_TYPE (GstVideoFrameState, gst_video_frame_state,
+ (GBoxedCopyFunc) gst_video_frame_state_ref,
- (GBoxedFreeFunc) gst_video_frame_state_unref)
++ (GBoxedFreeFunc) gst_video_frame_state_unref);
- _type = g_boxed_type_register_static ("GstVideoFrame",
- (GBoxedCopyFunc) gst_video_frame_ref,
- (GBoxedFreeFunc) gst_video_frame_unref);
- g_once_init_leave (&type, _type);
- }
- return (GType) type;
-}
-
-GST_BOILERPLATE (GstBaseVideoCodec, gst_base_video_codec, GstElement,
- GST_TYPE_ELEMENT);
+/* NOTE (Edward): Do not use G_DEFINE_* because we need to have
+ * a GClassInitFunc called with the target class (which the macros
- * don't handle). */
- static void gst_base_video_codec_class_init (GstBaseVideoCodecClass *
- klass);
- static void gst_base_video_codec_init (GstBaseVideoCodec * dec,
++ * don't handle).
++ */
++static void gst_base_video_codec_class_init (GstBaseVideoCodecClass * klass);
++static void gst_base_video_codec_init (GstBaseVideoCodec * dec,
+ GstBaseVideoCodecClass * klass);
-static void
-gst_base_video_codec_base_init (gpointer g_class)
+GType
+gst_base_video_codec_get_type (void)
{
- GST_DEBUG_CATEGORY_INIT (basevideocodec_debug, "basevideocodec", 0,
- "Base Video Codec");
+ static volatile gsize base_video_codec_type = 0;
+ if (g_once_init_enter (&base_video_codec_type)) {
+ GType _type;
+ static const GTypeInfo base_video_codec_info = {
+ sizeof (GstBaseVideoCodecClass),
+ NULL,
+ NULL,
+ (GClassInitFunc) gst_base_video_codec_class_init,
+ NULL,
+ NULL,
+ sizeof (GstBaseVideoCodec),
+ 0,
+ (GInstanceInitFunc) gst_base_video_codec_init,
+ };
+
+ _type = g_type_register_static (GST_TYPE_ELEMENT,
+ "GstBaseVideoCodec", &base_video_codec_info, G_TYPE_FLAG_ABSTRACT);
+ g_once_init_leave (&base_video_codec_type, _type);
+ }
+ return base_video_codec_type;
}
static void
GST_DEBUG_CATEGORY (basevideoencoder_debug);
#define GST_CAT_DEFAULT basevideoencoder_debug
+ typedef struct _ForcedKeyUnitEvent ForcedKeyUnitEvent;
+ struct _ForcedKeyUnitEvent
+ {
+ GstClockTime running_time;
+ gboolean pending; /* TRUE if this was requested already */
+ gboolean all_headers;
+ guint count;
+ };
+
+ static void
+ forced_key_unit_event_free (ForcedKeyUnitEvent * evt)
+ {
+ g_slice_free (ForcedKeyUnitEvent, evt);
+ }
+
+ static ForcedKeyUnitEvent *
+ forced_key_unit_event_new (GstClockTime running_time, gboolean all_headers,
+ guint count)
+ {
+ ForcedKeyUnitEvent *evt = g_slice_new0 (ForcedKeyUnitEvent);
+
+ evt->running_time = running_time;
+ evt->all_headers = all_headers;
+ evt->count = count;
+
+ return evt;
+ }
+
static void gst_base_video_encoder_finalize (GObject * object);
-static gboolean gst_base_video_encoder_sink_setcaps (GstPad * pad,
- GstCaps * caps);
-static GstCaps *gst_base_video_encoder_sink_getcaps (GstPad * pad);
+static GstCaps *gst_base_video_encoder_sink_getcaps (GstPad * pad,
+ GstCaps * filter);
static gboolean gst_base_video_encoder_src_event (GstPad * pad,
- GstEvent * event);
+ GstObject * parent, GstEvent * event);
static gboolean gst_base_video_encoder_sink_event (GstPad * pad,
- GstEvent * event);
+ GstObject * parent, GstEvent * event);
+static gboolean gst_base_video_encoder_sink_query (GstPad * pad,
+ GstObject * parent, GstQuery * query);
static GstFlowReturn gst_base_video_encoder_chain (GstPad * pad,
- GstBuffer * buf);
+ GstObject * parent, GstBuffer * buf);
static GstStateChangeReturn gst_base_video_encoder_change_state (GstElement *
element, GstStateChange transition);
-static const GstQueryType *gst_base_video_encoder_get_query_types (GstPad *
- pad);
static gboolean gst_base_video_encoder_src_query (GstPad * pad,
- GstQuery * query);
+ GstObject * parent, GstQuery * query);
-static void
-_do_init (GType object_type)
-{
- const GInterfaceInfo preset_interface_info = {
- NULL, /* interface_init */
- NULL, /* interface_finalize */
- NULL /* interface_data */
- };
-
- g_type_add_interface_static (object_type, GST_TYPE_PRESET,
- &preset_interface_info);
-}
-
-GST_BOILERPLATE_FULL (GstBaseVideoEncoder, gst_base_video_encoder,
- GstBaseVideoCodec, GST_TYPE_BASE_VIDEO_CODEC, _do_init);
-
-static void
-gst_base_video_encoder_base_init (gpointer g_class)
-{
- GST_DEBUG_CATEGORY_INIT (basevideoencoder_debug, "basevideoencoder", 0,
- "Base Video Encoder");
-
-}
+#define gst_base_video_encoder_parent_class parent_class
+G_DEFINE_TYPE_WITH_CODE (GstBaseVideoEncoder, gst_base_video_encoder,
- GST_TYPE_BASE_VIDEO_CODEC, G_IMPLEMENT_INTERFACE (GST_TYPE_PRESET, NULL);
- );
++ GST_TYPE_BASE_VIDEO_CODEC, G_IMPLEMENT_INTERFACE (GST_TYPE_PRESET, NULL););
static void
gst_base_video_encoder_class_init (GstBaseVideoEncoderClass * klass)
static void
gst_base_video_encoder_finalize (GObject * object)
{
- GstBaseVideoEncoder *base_video_encoder;
-
++ GstBaseVideoEncoder *base_video_encoder = (GstBaseVideoEncoder *) object;
GST_DEBUG_OBJECT (object, "finalize");
- base_video_encoder = GST_BASE_VIDEO_ENCODER (object);
+ gst_buffer_replace (&base_video_encoder->headers, NULL);
+
G_OBJECT_CLASS (parent_class)->finalize (object);
}
break;
}
- base_video_encoder->a.at_eos = FALSE;
+ base_video_encoder->at_eos = FALSE;
- gst_segment_set_newsegment_full (&GST_BASE_VIDEO_CODEC
- (base_video_encoder)->segment, update, rate, applied_rate, format,
- start, stop, position);
+ gst_segment_copy_into (segment, &GST_BASE_VIDEO_CODEC
+ (base_video_encoder)->segment);
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder);
break;
}
(base_video_encoder)->segment, GST_FORMAT_TIME,
frame->presentation_timestamp);
+ /* re-use upstream event if any so it also conveys any additional
+ * info upstream arranged in there */
GST_OBJECT_LOCK (base_video_encoder);
- if (base_video_encoder->force_keyunit_event) {
- ev = base_video_encoder->force_keyunit_event;
- base_video_encoder->force_keyunit_event = NULL;
- } else {
- ev = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
- gst_structure_new_empty ("GstForceKeyUnit"));
+ for (l = base_video_encoder->force_key_unit; l; l = l->next) {
+ ForcedKeyUnitEvent *tmp = l->data;
+
+ /* Skip non-pending keyunits */
+ if (!tmp->pending)
+ continue;
+
+ /* Simple case, keyunit ASAP */
+ if (tmp->running_time == GST_CLOCK_TIME_NONE) {
+ fevt = tmp;
+ break;
+ }
+
+ /* Event for before this frame */
+ if (tmp->running_time <= running_time) {
+ fevt = tmp;
+ break;
+ }
+ }
+
+ if (fevt) {
+ base_video_encoder->force_key_unit =
+ g_list_remove (base_video_encoder->force_key_unit, fevt);
}
GST_OBJECT_UNLOCK (base_video_encoder);
- gst_structure_set (gst_event_writable_structure (ev),
- "timestamp", G_TYPE_UINT64, frame->presentation_timestamp,
- "stream-time", G_TYPE_UINT64, stream_time,
- "running-time", G_TYPE_UINT64, running_time, NULL);
+ if (fevt) {
+ stream_time =
+ gst_segment_to_stream_time (&GST_BASE_VIDEO_CODEC
+ (base_video_encoder)->segment, GST_FORMAT_TIME,
+ frame->presentation_timestamp);
- gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder), ev);
- }
+ ev = gst_video_event_new_downstream_force_key_unit
+ (frame->presentation_timestamp, stream_time, running_time,
+ fevt->all_headers, fevt->count);
- /* no buffer data means this frame is skipped/dropped */
- if (!frame->src_buffer) {
- GST_DEBUG_OBJECT (base_video_encoder, "skipping frame %" GST_TIME_FORMAT,
- GST_TIME_ARGS (frame->presentation_timestamp));
- goto done;
+ gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder),
+ ev);
+
+ if (fevt->all_headers) {
+ if (base_video_encoder->headers) {
+ headers = gst_buffer_ref (base_video_encoder->headers);
- headers = gst_buffer_make_metadata_writable (headers);
++ headers = gst_buffer_make_writable (headers);
+ }
+ }
+
+ GST_DEBUG_OBJECT (base_video_encoder,
+ "Forced key unit: running-time %" GST_TIME_FORMAT
+ ", all_headers %d, count %u",
+ GST_TIME_ARGS (running_time), fevt->all_headers, fevt->count);
+ forced_key_unit_event_free (fevt);
+ }
}
if (frame->is_sync_point) {
GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration;
GST_BUFFER_OFFSET (frame->src_buffer) = frame->decode_timestamp;
+ if (G_UNLIKELY (headers)) {
+ GST_BUFFER_TIMESTAMP (headers) = frame->presentation_timestamp;
+ GST_BUFFER_DURATION (headers) = 0;
+ GST_BUFFER_OFFSET (headers) = frame->decode_timestamp;
+ }
+
/* update rate estimate */
GST_BASE_VIDEO_CODEC (base_video_encoder)->bytes +=
- GST_BUFFER_SIZE (frame->src_buffer);
+ gst_buffer_get_size (frame->src_buffer);
if (GST_CLOCK_TIME_IS_VALID (frame->presentation_duration)) {
GST_BASE_VIDEO_CODEC (base_video_encoder)->time +=
frame->presentation_duration;
static gboolean gst_wave_scope_render (GstBaseAudioVisualizer * base,
GstBuffer * audio, GstBuffer * video);
-
-GST_BOILERPLATE (GstWaveScope, gst_wave_scope, GstBaseAudioVisualizer,
- GST_TYPE_BASE_AUDIO_VISUALIZER);
-
-static void
-gst_wave_scope_base_init (gpointer g_class)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
-
- gst_element_class_set_details_simple (element_class, "Waveform oscilloscope",
- "Visualization",
- "Simple waveform oscilloscope", "Stefan Kost <ensonic@users.sf.net>");
-
- gst_element_class_add_static_pad_template (element_class,
- &gst_wave_scope_src_template);
- gst_element_class_add_static_pad_template (element_class,
- &gst_wave_scope_sink_template);
-}
++#define gst_wave_scope_parent_class parent_class
+G_DEFINE_TYPE (GstWaveScope, gst_wave_scope, GST_TYPE_BASE_AUDIO_VISUALIZER);
static void
gst_wave_scope_class_init (GstWaveScopeClass * g_class)
break;
}
- return TRUE;
+ return GST_PAD_PROBE_OK;
}
-static gboolean
-gst_camera_bin_audio_src_data_probe (GstPad * pad, GstMiniObject * obj,
+static GstPadProbeReturn
- gst_camera_bin_audio_src_event_probe (GstPad * pad, GstPadProbeInfo * info,
++gst_camera_bin_audio_src_data_probe (GstPad * pad, GstPadProbeInfo * info,
gpointer data)
{
GstCameraBin2 *camera = data;
- gboolean ret = TRUE;
+ gboolean ret = GST_PAD_PROBE_OK;
- GstEvent *event = GST_EVENT (info->data);
- if (GST_EVENT_TYPE (event) == GST_EVENT_EOS) {
- /* we only let an EOS pass when the user is stopping a capture */
- if (camera->audio_drop_eos) {
- if (GST_IS_BUFFER (obj)) {
++ if (GST_IS_BUFFER (data)) {
+ if (G_UNLIKELY (camera->audio_send_newseg)) {
- GstBuffer *buf = GST_BUFFER_CAST (obj);
++ GstBuffer *buf = GST_BUFFER_CAST (data);
+ GstClockTime ts = GST_BUFFER_TIMESTAMP (buf);
+ GstPad *peer;
++ GstSegment segment;
+
+ if (!GST_CLOCK_TIME_IS_VALID (ts)) {
+ ts = 0;
+ }
+
+ peer = gst_pad_get_peer (pad);
+ g_return_val_if_fail (peer != NULL, TRUE);
+
- gst_pad_send_event (peer, gst_event_new_new_segment (FALSE, 1.0,
- GST_FORMAT_TIME, ts, -1, 0));
++ gst_segment_init (&segment, GST_FORMAT_TIME);
++ segment.start = ts;
++ gst_pad_send_event (peer, gst_event_new_segment (&segment));
+
+ gst_object_unref (peer);
+
+ camera->audio_send_newseg = FALSE;
+ }
+ } else {
- GstEvent *event = GST_EVENT_CAST (obj);
++ GstEvent *event = GST_EVENT_CAST (data);
+ if (GST_EVENT_TYPE (event) == GST_EVENT_EOS) {
+ /* we only let an EOS pass when the user is stopping a capture */
+ if (camera->audio_drop_eos) {
- ret = FALSE;
++ ret = GST_PAD_PROBE_DROP;
+ } else {
+ camera->audio_drop_eos = TRUE;
+ /* should already be false, but reinforce in case no buffers get
+ * pushed */
+ camera->audio_send_newseg = FALSE;
+ }
- } else if (GST_EVENT_TYPE (event) == GST_EVENT_NEWSEGMENT) {
- ret = FALSE;
++ } else if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
+ ret = GST_PAD_PROBE_DROP;
- } else {
- camera->audio_drop_eos = TRUE;
}
}
GstEncodingContainerProfile *prof;
GstCaps *caps;
- caps = gst_caps_new_simple ("application/ogg", NULL, NULL);
- caps = gst_caps_new_simple ("application/ogg", NULL);
++ caps = gst_caps_new_empty_simple ("application/ogg");
prof = gst_encoding_container_profile_new ("ogg", "theora+vorbis+ogg",
caps, NULL);
gst_caps_unref (caps);
- caps = gst_caps_new_simple ("video/x-theora", NULL, NULL);
- caps = gst_caps_new_simple ("video/x-theora", NULL);
++ caps = gst_caps_new_empty_simple ("video/x-theora");
if (!gst_encoding_container_profile_add_profile (prof,
(GstEncodingProfile *) gst_encoding_video_profile_new (caps,
NULL, NULL, 1))) {
}
gst_caps_unref (caps);
- caps = gst_caps_new_simple ("audio/x-vorbis", NULL, NULL);
- caps = gst_caps_new_simple ("audio/x-vorbis", NULL);
++ caps = gst_caps_new_empty_simple ("audio/x-vorbis");
if (!gst_encoding_container_profile_add_profile (prof,
(GstEncodingProfile *) gst_encoding_audio_profile_new (caps,
NULL, NULL, 1))) {
GstEncodingVideoProfile *vprof;
GstCaps *caps;
- caps = gst_caps_new_simple ("image/jpeg", NULL, NULL);
- caps = gst_caps_new_simple ("image/jpeg", NULL);
++ caps = gst_caps_new_empty_simple ("image/jpeg");
vprof = gst_encoding_video_profile_new (caps, NULL, NULL, 1);
gst_encoding_video_profile_set_variableframerate (vprof, TRUE);
srcpad = gst_element_get_static_pad (camera->audio_src, "src");
- /* 1) drop EOS for audiosrc elements that push them on state_changes
- * (basesrc does this)
- * 2) Fix newsegment events to have start time = first buffer ts */
- gst_pad_add_data_probe (srcpad,
- (GCallback) gst_camera_bin_audio_src_data_probe, camera);
+ /* drop EOS for audiosrc elements that push them on state_changes
+ * (basesrc does this) */
- gst_pad_add_probe (srcpad, GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM,
- gst_camera_bin_audio_src_event_probe, gst_object_ref (camera),
++ gst_pad_add_probe (srcpad, GST_PAD_PROBE_TYPE_DATA_DOWNSTREAM,
++ gst_camera_bin_audio_src_data_probe, gst_object_ref (camera),
+ gst_object_unref);
gst_object_unref (srcpad);
}
if (self->video_rec_status == GST_VIDEO_RECORDING_STATUS_DONE) {
/* NOP */
} else if (self->video_rec_status == GST_VIDEO_RECORDING_STATUS_STARTING) {
+ GstClockTime ts;
++ GstSegment segment;
+
GST_DEBUG_OBJECT (self, "Starting video recording");
self->video_rec_status = GST_VIDEO_RECORDING_STATUS_RUNNING;
- gst_pad_push_event (self->vidsrc, gst_event_new_new_segment (FALSE, 1.0,
- GST_FORMAT_TIME, ts, -1, 0));
+ ts = GST_BUFFER_TIMESTAMP (buffer);
+ if (!GST_CLOCK_TIME_IS_VALID (ts))
+ ts = 0;
++ gst_segment_init (&segment, GST_FORMAT_TIME);
++ segment.start = ts;
++ gst_pad_push_event (self->vidsrc, gst_event_new_segment (&segment));
+
/* post preview */
GST_DEBUG_OBJECT (self, "Posting preview for video");
gst_base_camera_src_post_preview (camerasrc, buffer);
enum
{
- ARG_0
+ PROP_AGGREGATE_GOPS = 1
};
+ #define DEFAULT_AGGREGATE_GOPS FALSE
+
static GstStaticPadTemplate mpegpsmux_sink_factory =
- GST_STATIC_PAD_TEMPLATE ("sink_%d",
+ GST_STATIC_PAD_TEMPLATE ("sink_%u",
GST_PAD_SINK,
GST_PAD_REQUEST,
GST_STATIC_CAPS ("video/mpeg, "
GValue * value, GParamSpec * pspec);
static gboolean gst_h264_parse_set_caps (GstBaseParse * parse, GstCaps * caps);
-static GstCaps *gst_h264_parse_get_caps (GstBaseParse * parse);
-static GstFlowReturn gst_h264_parse_chain (GstPad * pad, GstBuffer * buffer);
+static GstCaps *gst_h264_parse_get_caps (GstBaseParse * parse,
+ GstCaps * filter);
+static GstFlowReturn gst_h264_parse_chain (GstPad * pad, GstObject * parent,
+ GstBuffer * buffer);
+ static gboolean gst_h264_parse_event (GstBaseParse * parse, GstEvent * event);
+ static gboolean gst_h264_parse_src_event (GstBaseParse * parse,
+ GstEvent * event);
-static void
-gst_h264_parse_base_init (gpointer g_class)
-{
- GstElementClass *gstelement_class = GST_ELEMENT_CLASS (g_class);
-
- gst_element_class_add_static_pad_template (gstelement_class, &srctemplate);
- gst_element_class_add_static_pad_template (gstelement_class, &sinktemplate);
-
- gst_element_class_set_details_simple (gstelement_class, "H.264 parser",
- "Codec/Parser/Converter/Video",
- "Parses H.264 streams",
- "Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>");
-
- GST_DEBUG_CATEGORY_INIT (h264_parse_debug, "h264parse", 0, "h264 parser");
-}
-
static void
gst_h264_parse_class_init (GstH264ParseClass * klass)
{
GST_DEBUG_FUNCPTR (gst_h264_parse_pre_push_frame);
parse_class->set_sink_caps = GST_DEBUG_FUNCPTR (gst_h264_parse_set_caps);
parse_class->get_sink_caps = GST_DEBUG_FUNCPTR (gst_h264_parse_get_caps);
+ parse_class->event = GST_DEBUG_FUNCPTR (gst_h264_parse_event);
+ parse_class->src_event = GST_DEBUG_FUNCPTR (gst_h264_parse_src_event);
+
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&srctemplate));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&sinktemplate));
+
+ gst_element_class_set_details_simple (gstelement_class, "H.264 parser",
+ "Codec/Parser/Converter/Video",
+ "Parses H.264 streams",
+ "Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>");
}
static void
return res;
}
+ static gboolean
+ gst_h264_parse_event (GstBaseParse * parse, GstEvent * event)
+ {
+ gboolean handled = FALSE;
+ GstH264Parse *h264parse = GST_H264_PARSE (parse);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CUSTOM_DOWNSTREAM:
+ {
+ GstClockTime timestamp, stream_time, running_time;
+ gboolean all_headers;
+ guint count;
+
+ if (!gst_video_event_is_force_key_unit (event))
+ break;
+
+ gst_video_event_parse_downstream_force_key_unit (event,
+ &timestamp, &stream_time, &running_time, &all_headers, &count);
+
+ GST_INFO_OBJECT (h264parse, "received downstream force key unit event, "
+ "seqnum %d running_time %" GST_TIME_FORMAT " all_headers %d count %d",
+ gst_event_get_seqnum (event), GST_TIME_ARGS (running_time),
+ all_headers, count);
+ handled = TRUE;
+
+ if (h264parse->force_key_unit_event) {
+ GST_INFO_OBJECT (h264parse, "ignoring force key unit event "
+ "as one is already queued");
+ break;
+ }
+
+ h264parse->pending_key_unit_ts = running_time;
+ gst_event_replace (&h264parse->force_key_unit_event, event);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return handled;
+ }
+
+ static gboolean
+ gst_h264_parse_src_event (GstBaseParse * parse, GstEvent * event)
+ {
+ gboolean handled = FALSE;
+ GstH264Parse *h264parse = GST_H264_PARSE (parse);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CUSTOM_UPSTREAM:
+ {
+ GstClockTime running_time;
+ gboolean all_headers;
+ guint count;
+
+ if (!gst_video_event_is_force_key_unit (event))
+ break;
+
+ gst_video_event_parse_upstream_force_key_unit (event,
+ &running_time, &all_headers, &count);
+
+ GST_INFO_OBJECT (h264parse, "received upstream force-key-unit event, "
+ "seqnum %d running_time %" GST_TIME_FORMAT " all_headers %d count %d",
+ gst_event_get_seqnum (event), GST_TIME_ARGS (running_time),
+ all_headers, count);
+
+ if (!all_headers)
+ break;
+
+ h264parse->pending_key_unit_ts = running_time;
+ gst_event_replace (&h264parse->force_key_unit_event, event);
+ /* leave handled = FALSE so that the event gets propagated upstream */
+ break;
+ }
+ default:
+ break;
+ }
+
+ return handled;
+ }
+
static GstFlowReturn
-gst_h264_parse_chain (GstPad * pad, GstBuffer * buffer)
+gst_h264_parse_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
- GstH264Parse *h264parse = GST_H264_PARSE (GST_PAD_PARENT (pad));
+ GstH264Parse *h264parse = GST_H264_PARSE (parent);
if (h264parse->packetized && buffer) {
GstBuffer *sub;
--- /dev/null
-GST_BOILERPLATE (GstMpeg4VParse, gst_mpeg4vparse, GstBaseParse,
- GST_TYPE_BASE_PARSE);
+ /* GStreamer
+ * Copyright (C) <2008> Mindfruit B.V.
+ * @author Sjoerd Simons <sjoerd@luon.net>
+ * Copyright (C) <2007> Julien Moutte <julien@fluendo.com>
+ * Copyright (C) <2011> Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>
+ * Copyright (C) <2011> Nokia Corporation
+ * Copyright (C) <2011> Intel
+ * Copyright (C) <2011> Collabora Ltd.
+ * Copyright (C) <2011> Thibault Saunier <thibault.saunier@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+
+ #include <string.h>
+ #include <gst/base/gstbytereader.h>
+ #include <gst/pbutils/codec-utils.h>
+ #include <gst/video/video.h>
+
+ #include "gstmpeg4videoparse.h"
+
+ GST_DEBUG_CATEGORY (mpeg4v_parse_debug);
+ #define GST_CAT_DEFAULT mpeg4v_parse_debug
+
+ static GstStaticPadTemplate src_template =
+ GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS ("video/mpeg, "
+ "mpegversion = (int) 4, "
+ "parsed = (boolean) true, " "systemstream = (boolean) false")
+ );
+
+ static GstStaticPadTemplate sink_template =
+ GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS ("video/mpeg, "
+ "mpegversion = (int) 4, " "systemstream = (boolean) false")
+ );
+
+ /* Properties */
+ #define DEFAULT_PROP_DROP TRUE
+ #define DEFAULT_CONFIG_INTERVAL (0)
+
+ enum
+ {
+ PROP_0,
+ PROP_DROP,
+ PROP_CONFIG_INTERVAL,
+ PROP_LAST
+ };
+
-static GstCaps *gst_mpeg4vparse_get_caps (GstBaseParse * parse);
++#define gst_mpeg4vparse_parent_class parent_class
++G_DEFINE_TYPE (GstMpeg4VParse, gst_mpeg4vparse, GST_TYPE_BASE_PARSE);
+
+ static gboolean gst_mpeg4vparse_start (GstBaseParse * parse);
+ static gboolean gst_mpeg4vparse_stop (GstBaseParse * parse);
+ static gboolean gst_mpeg4vparse_check_valid_frame (GstBaseParse * parse,
+ GstBaseParseFrame * frame, guint * framesize, gint * skipsize);
+ static GstFlowReturn gst_mpeg4vparse_parse_frame (GstBaseParse * parse,
+ GstBaseParseFrame * frame);
+ static GstFlowReturn gst_mpeg4vparse_pre_push_frame (GstBaseParse * parse,
+ GstBaseParseFrame * frame);
+ static gboolean gst_mpeg4vparse_set_caps (GstBaseParse * parse, GstCaps * caps);
-static void
-gst_mpeg4vparse_base_init (gpointer klass)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
-
- gst_element_class_add_static_pad_template (element_class, &src_template);
- gst_element_class_add_static_pad_template (element_class, &sink_template);
-
- gst_element_class_set_details_simple (element_class,
- "MPEG 4 video elementary stream parser", "Codec/Parser/Video",
- "Parses MPEG-4 Part 2 elementary video streams",
- "Julien Moutte <julien@fluendo.com>");
-
- GST_DEBUG_CATEGORY_INIT (mpeg4v_parse_debug, "mpeg4videoparse", 0,
- "MPEG-4 video parser");
-}
-
++static GstCaps *gst_mpeg4vparse_get_caps (GstBaseParse * parse,
++ GstCaps * filter);
+
+ static void gst_mpeg4vparse_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec);
+ static void gst_mpeg4vparse_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec);
+ static gboolean gst_mpeg4vparse_event (GstBaseParse * parse, GstEvent * event);
+ static gboolean gst_mpeg4vparse_src_event (GstBaseParse * parse,
+ GstEvent * event);
+
-gst_mpeg4vparse_init (GstMpeg4VParse * parse, GstMpeg4VParseClass * g_class)
+ static void
+ gst_mpeg4vparse_set_property (GObject * object, guint property_id,
+ const GValue * value, GParamSpec * pspec)
+ {
+ GstMpeg4VParse *parse = GST_MPEG4VIDEO_PARSE (object);
+
+ switch (property_id) {
+ case PROP_DROP:
+ parse->drop = g_value_get_boolean (value);
+ break;
+ case PROP_CONFIG_INTERVAL:
+ parse->interval = g_value_get_uint (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
+ }
+ }
+
+ static void
+ gst_mpeg4vparse_get_property (GObject * object, guint property_id,
+ GValue * value, GParamSpec * pspec)
+ {
+ GstMpeg4VParse *parse = GST_MPEG4VIDEO_PARSE (object);
+
+ switch (property_id) {
+ case PROP_DROP:
+ g_value_set_boolean (value, parse->drop);
+ break;
+ case PROP_CONFIG_INTERVAL:
+ g_value_set_uint (value, parse->interval);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
+ }
+ }
+
+ static void
+ gst_mpeg4vparse_class_init (GstMpeg4VParseClass * klass)
+ {
+ GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
+ GstBaseParseClass *parse_class = GST_BASE_PARSE_CLASS (klass);
++ GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ gobject_class->set_property = gst_mpeg4vparse_set_property;
+ gobject_class->get_property = gst_mpeg4vparse_get_property;
+
+ g_object_class_install_property (gobject_class, PROP_DROP,
+ g_param_spec_boolean ("drop", "drop",
+ "Drop data untill valid configuration data is received either "
+ "in the stream or through caps", DEFAULT_PROP_DROP,
+ G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_CONFIG_INTERVAL,
+ g_param_spec_uint ("config-interval",
+ "Configuration Send Interval",
+ "Send Configuration Insertion Interval in seconds (configuration headers "
+ "will be multiplexed in the data stream when detected.) (0 = disabled)",
+ 0, 3600, DEFAULT_CONFIG_INTERVAL,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
++ gst_element_class_add_pad_template (element_class,
++ gst_static_pad_template_get (&src_template));
++ gst_element_class_add_pad_template (element_class,
++ gst_static_pad_template_get (&sink_template));
++
++ gst_element_class_set_details_simple (element_class,
++ "MPEG 4 video elementary stream parser", "Codec/Parser/Video",
++ "Parses MPEG-4 Part 2 elementary video streams",
++ "Julien Moutte <julien@fluendo.com>");
++
++ GST_DEBUG_CATEGORY_INIT (mpeg4v_parse_debug, "mpeg4videoparse", 0,
++ "MPEG-4 video parser");
++
+ /* Override BaseParse vfuncs */
+ parse_class->start = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_start);
+ parse_class->stop = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_stop);
+ parse_class->check_valid_frame =
+ GST_DEBUG_FUNCPTR (gst_mpeg4vparse_check_valid_frame);
+ parse_class->parse_frame = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_parse_frame);
+ parse_class->pre_push_frame =
+ GST_DEBUG_FUNCPTR (gst_mpeg4vparse_pre_push_frame);
+ parse_class->set_sink_caps = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_set_caps);
+ parse_class->get_sink_caps = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_get_caps);
+ parse_class->event = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_event);
+ parse_class->src_event = GST_DEBUG_FUNCPTR (gst_mpeg4vparse_src_event);
+ }
+
+ static void
- if (mp4vparse->config && size == GST_BUFFER_SIZE (mp4vparse->config) &&
- memcmp (GST_BUFFER_DATA (mp4vparse->config), data, size) == 0)
++gst_mpeg4vparse_init (GstMpeg4VParse * parse)
+ {
+ parse->interval = DEFAULT_CONFIG_INTERVAL;
+ parse->last_report = GST_CLOCK_TIME_NONE;
+ }
+
+ static void
+ gst_mpeg4vparse_reset_frame (GstMpeg4VParse * mp4vparse)
+ {
+ /* done parsing; reset state */
+ mp4vparse->last_sc = -1;
+ mp4vparse->vop_offset = -1;
+ mp4vparse->vo_found = FALSE;
+ mp4vparse->vol_offset = -1;
+ }
+
+ static void
+ gst_mpeg4vparse_reset (GstMpeg4VParse * mp4vparse)
+ {
+ gst_mpeg4vparse_reset_frame (mp4vparse);
+ mp4vparse->update_caps = TRUE;
+ mp4vparse->profile = NULL;
+ mp4vparse->level = NULL;
+ mp4vparse->pending_key_unit_ts = GST_CLOCK_TIME_NONE;
+ mp4vparse->force_key_unit_event = NULL;
+
+ gst_buffer_replace (&mp4vparse->config, NULL);
+ memset (&mp4vparse->vol, 0, sizeof (mp4vparse->vol));
+ }
+
+ static gboolean
+ gst_mpeg4vparse_start (GstBaseParse * parse)
+ {
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+
+ GST_DEBUG_OBJECT (parse, "start");
+
+ gst_mpeg4vparse_reset (mp4vparse);
+ /* at least this much for a valid frame */
+ gst_base_parse_set_min_frame_size (parse, 6);
+
+ return TRUE;
+ }
+
+ static gboolean
+ gst_mpeg4vparse_stop (GstBaseParse * parse)
+ {
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+
+ GST_DEBUG_OBJECT (parse, "stop");
+
+ gst_mpeg4vparse_reset (mp4vparse);
+
+ return TRUE;
+ }
+
+ static gboolean
+ gst_mpeg4vparse_process_config (GstMpeg4VParse * mp4vparse,
+ const guint8 * data, guint offset, gsize size)
+ {
+ /* only do stuff if something new */
- mp4vparse->config = gst_buffer_new_and_alloc (size);
-
- memcpy (GST_BUFFER_DATA (mp4vparse->config), data, size);
-
++ if (!gst_buffer_memcmp (mp4vparse->config, offset, data, size))
+ return TRUE;
+
+ if (mp4vparse->vol_offset < 0) {
+ GST_WARNING ("No video object Layer parsed in this frame, cannot accept "
+ "config");
+ return FALSE;
+ }
+
+ /* If the parsing fail, we accept the config only if we don't have
+ * any config yet. */
+ if (gst_mpeg4_parse_video_object_layer (&mp4vparse->vol,
+ NULL, data + mp4vparse->vol_offset,
+ size - mp4vparse->vol_offset) != GST_MPEG4_PARSER_OK &&
+ mp4vparse->config)
+ return FALSE;
+
+ GST_LOG_OBJECT (mp4vparse, "Width/Height: %u/%u, "
+ "time increment resolution: %u fixed time increment: %u",
+ mp4vparse->vol.width, mp4vparse->vol.height,
+ mp4vparse->vol.vop_time_increment_resolution,
+ mp4vparse->vol.fixed_vop_time_increment);
+
+
+ GST_LOG_OBJECT (mp4vparse, "accepting parsed config size %" G_GSSIZE_FORMAT,
+ size);
+
+ if (mp4vparse->config != NULL)
+ gst_buffer_unref (mp4vparse->config);
+
- guint8 *data = GST_BUFFER_DATA (frame->buffer);
- guint size = GST_BUFFER_SIZE (frame->buffer);
++ mp4vparse->config = gst_buffer_new_wrapped (g_memdup (data, size), size);
+
+ /* trigger src caps update */
+ mp4vparse->update_caps = TRUE;
+
+ return TRUE;
+ }
+
+ /* caller guarantees at least start code in @buf at @off */
+ static gboolean
+ gst_mpeg4vparse_process_sc (GstMpeg4VParse * mp4vparse, GstMpeg4Packet * packet,
+ gsize size)
+ {
+
+ GST_LOG_OBJECT (mp4vparse, "process startcode %x", packet->type);
+
+ /* if we found a VOP, next start code ends it,
+ * except for final VOS end sequence code included in last VOP-frame */
+ if (mp4vparse->vop_offset >= 0 &&
+ packet->type != GST_MPEG4_VISUAL_OBJ_SEQ_END) {
+ if (G_LIKELY (size > mp4vparse->vop_offset + 1)) {
+ mp4vparse->intra_frame =
+ ((packet->data[mp4vparse->vop_offset + 1] >> 6 & 0x3) == 0);
+ } else {
+ GST_WARNING_OBJECT (mp4vparse, "no data following VOP startcode");
+ mp4vparse->intra_frame = FALSE;
+ }
+ GST_LOG_OBJECT (mp4vparse, "ending frame of size %d, is intra %d",
+ packet->offset - 3, mp4vparse->intra_frame);
+ return TRUE;
+ }
+
+ switch (packet->type) {
+ case GST_MPEG4_VIDEO_OBJ_PLANE:
+ case GST_MPEG4_GROUP_OF_VOP:
+ {
+
+ if (packet->type == GST_MPEG4_VIDEO_OBJ_PLANE) {
+ GST_LOG_OBJECT (mp4vparse, "startcode is VOP");
+ mp4vparse->vop_offset = packet->offset;
+ } else {
+ GST_LOG_OBJECT (mp4vparse, "startcode is GOP");
+ }
+ /* parse config data ending here if proper startcodes found earlier;
+ * preferably start at VOS (visual object sequence),
+ * otherwise at VO (video object) */
+ if (mp4vparse->vo_found) {
+
+ /* Do not take the startcode into account */
+ gst_mpeg4vparse_process_config (mp4vparse,
+ packet->data, packet->offset, packet->offset - 3);
+
+ /* avoid accepting again for a VOP sc following a GOP sc */
+ mp4vparse->vo_found = FALSE;
+ }
+ break;
+ }
+ case GST_MPEG4_VISUAL_OBJ_SEQ_START:
+ GST_LOG_OBJECT (mp4vparse, "Visual Sequence Start");
+ mp4vparse->vo_found = TRUE;
+ mp4vparse->profile = gst_codec_utils_mpeg4video_get_profile (packet->data
+ + packet->offset + 1, packet->offset);
+ mp4vparse->level = gst_codec_utils_mpeg4video_get_level (packet->data
+ + packet->offset + 1, packet->offset);
+ break;
+ case GST_MPEG4_VISUAL_OBJ:
+ GST_LOG_OBJECT (mp4vparse, "Visual Object");
+ default:
+ if (packet->type >= GST_MPEG4_VIDEO_LAYER_FIRST &&
+ packet->type <= GST_MPEG4_VIDEO_LAYER_LAST) {
+
+ GST_LOG_OBJECT (mp4vparse, "Video Object Layer");
+
+ /* we keep track of the offset to parse later on */
+ if (mp4vparse->vol_offset < 0)
+ mp4vparse->vol_offset = packet->offset;
+
+ /* VO (video object) cases */
+ } else if (packet->type <= GST_MPEG4_VIDEO_OBJ_LAST) {
+ GST_LOG_OBJECT (mp4vparse, "Video object");
+ mp4vparse->vo_found = TRUE;
+ }
+ break;
+ }
+
+ /* at least need to have a VOP in a frame */
+ return FALSE;
+ }
+
+ /* FIXME move into baseparse, or anything equivalent;
+ * see https://bugzilla.gnome.org/show_bug.cgi?id=650093 */
+ #define GST_BASE_PARSE_FRAME_FLAG_PARSING 0x10000
+
+ static gboolean
+ gst_mpeg4vparse_check_valid_frame (GstBaseParse * parse,
+ GstBaseParseFrame * frame, guint * framesize, gint * skipsize)
+ {
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+ GstMpeg4Packet packet;
- gboolean ret;
++ guint8 *data = NULL;
++ gsize size;
+ gint off = 0;
- return FALSE;
++ gboolean ret = FALSE;
++
++ data = gst_buffer_map (frame->buffer, &size, NULL, GST_MAP_READ);
+
+ retry:
+ /* at least start code and subsequent byte */
+ if (G_UNLIKELY (size - off < 5))
- return FALSE;
++ goto out;
+
+ /* avoid stale cached parsing state */
+ if (!(frame->flags & GST_BASE_PARSE_FRAME_FLAG_PARSING)) {
+ GST_LOG_OBJECT (mp4vparse, "parsing new frame");
+ gst_mpeg4vparse_reset_frame (mp4vparse);
+ frame->flags |= GST_BASE_PARSE_FRAME_FLAG_PARSING;
+ } else {
+ GST_LOG_OBJECT (mp4vparse, "resuming frame parsing");
+ }
+
+ /* if already found a previous start code, e.g. start of frame, go for next */
+ if (mp4vparse->last_sc >= 0) {
+ off = mp4vparse->last_sc;
+ goto next;
+ }
+
+ /* didn't find anything that looks like a sync word, skip */
+ switch (gst_mpeg4_parse (&packet, TRUE, NULL, data, off, size)) {
+ case (GST_MPEG4_PARSER_NO_PACKET):
+ case (GST_MPEG4_PARSER_ERROR):
+ *skipsize = size - 3;
- return FALSE;
++ goto out;
+ default:
+ break;
+ }
+ off = packet.offset;
+
+ /* possible frame header, but not at offset 0? skip bytes before sync */
+ if (G_UNLIKELY (off > 3)) {
+ *skipsize = off - 3;
- return TRUE;
++ goto out;
+ }
+
+ switch (packet.type) {
+ case GST_MPEG4_GROUP_OF_VOP:
+ case GST_MPEG4_VISUAL_OBJ_SEQ_START:
+ case GST_MPEG4_VIDEO_OBJ_PLANE:
+ break;
+ default:
+ if (packet.type <= GST_MPEG4_VIDEO_OBJ_LAST)
+ break;
+ /* undesirable sc */
+ GST_LOG_OBJECT (mp4vparse, "start code is no VOS, VO, VOP or GOP");
+ goto retry;
+ }
+
+ /* found sc */
+ mp4vparse->last_sc = 0;
+
+ /* examine start code, which should not end frame at present */
+ gst_mpeg4vparse_process_sc (mp4vparse, &packet, size);
+
+ next:
+ GST_LOG_OBJECT (mp4vparse, "Looking for frame end");
+
+ /* start is fine as of now */
+ *skipsize = 0;
+ /* position a bit further than last sc */
+ off++;
+
+ /* so now we have start code at start of data; locate next packet */
+ switch (gst_mpeg4_parse (&packet, TRUE, NULL, data, off, size)) {
+ case (GST_MPEG4_PARSER_NO_PACKET_END):
+ ret = gst_mpeg4vparse_process_sc (mp4vparse, &packet, size);
+ if (ret)
+ break;
+ case (GST_MPEG4_PARSER_NO_PACKET):
+ case (GST_MPEG4_PARSER_ERROR):
+ /* if draining, take all */
+ if (GST_BASE_PARSE_DRAINING (parse)) {
+ *framesize = size;
- return FALSE;
++ ret = TRUE;
+ } else {
+ /* resume scan where we left it */
+ mp4vparse->last_sc = size - 3;
+ /* request best next available */
+ *framesize = G_MAXUINT;
- if (G_LIKELY (GST_PAD_CAPS (GST_BASE_PARSE_SRC_PAD (mp4vparse)) &&
+ }
++ goto out;
++ break;
+ default:
+ /* decide whether this startcode ends a frame */
+ ret = gst_mpeg4vparse_process_sc (mp4vparse, &packet, size);
+ break;
+ }
+
+ off = packet.offset;
+
+ if (ret) {
+ *framesize = off - 3;
+ } else {
+ goto next;
+ }
+
++out:
++ gst_buffer_unmap (frame->buffer, data, size);
+ return ret;
+ }
+
+ static void
+ gst_mpeg4vparse_update_src_caps (GstMpeg4VParse * mp4vparse)
+ {
+ GstCaps *caps = NULL;
+
+ GST_LOG_OBJECT (mp4vparse, "Updating caps");
+
+ /* only update if no src caps yet or explicitly triggered */
- caps = GST_PAD_CAPS (GST_BASE_PARSE_SINK_PAD (mp4vparse));
++ if (G_LIKELY (gst_pad_has_current_caps (GST_BASE_PARSE_SRC_PAD (mp4vparse)) &&
+ !mp4vparse->update_caps))
+ return;
+
+ /* carry over input caps as much as possible; override with our own stuff */
- caps = gst_caps_copy (caps);
++ caps = gst_pad_get_current_caps (GST_BASE_PARSE_SINK_PAD (mp4vparse));
+ if (caps) {
- if ((GST_BUFFER_SIZE (buffer) < GST_BUFFER_SIZE (mp4vparse->config)) ||
- memcmp (GST_BUFFER_DATA (buffer),
- GST_BUFFER_DATA (mp4vparse->config),
- GST_BUFFER_SIZE (mp4vparse->config))) {
++ GstCaps *tmp = gst_caps_copy (caps);
++ gst_caps_unref (caps);
++ caps = tmp;
+ } else {
+ caps = gst_caps_new_simple ("video/mpeg",
+ "mpegversion", G_TYPE_INT, 4, NULL);
+ }
+
+ gst_caps_set_simple (caps, "systemstream", G_TYPE_BOOLEAN, FALSE,
+ "parsed", G_TYPE_BOOLEAN, TRUE, NULL);
+
+ if (mp4vparse->profile && mp4vparse->level) {
+ gst_caps_set_simple (caps, "profile", G_TYPE_STRING, mp4vparse->profile,
+ "level", G_TYPE_STRING, mp4vparse->level, NULL);
+ }
+
+ if (mp4vparse->config != NULL) {
+ gst_caps_set_simple (caps, "codec_data",
+ GST_TYPE_BUFFER, mp4vparse->config, NULL);
+ }
+
+ if (mp4vparse->vol.width > 0 && mp4vparse->vol.height > 0) {
+ gst_caps_set_simple (caps, "width", G_TYPE_INT, mp4vparse->vol.width,
+ "height", G_TYPE_INT, mp4vparse->vol.height, NULL);
+ }
+
+ /* perhaps we have a framerate */
+ if (mp4vparse->vol.fixed_vop_time_increment != 0) {
+ gint fps_num = mp4vparse->vol.vop_time_increment_resolution;
+ gint fps_den = mp4vparse->vol.fixed_vop_time_increment;
+ GstClockTime latency = gst_util_uint64_scale (GST_SECOND, fps_den, fps_num);
+
+ gst_caps_set_simple (caps, "framerate",
+ GST_TYPE_FRACTION, fps_num, fps_den, NULL);
+ gst_base_parse_set_frame_rate (GST_BASE_PARSE (mp4vparse),
+ fps_num, fps_den, 0, 0);
+ gst_base_parse_set_latency (GST_BASE_PARSE (mp4vparse), latency, latency);
+ }
+
+ /* or pixel-aspect-ratio */
+ if (mp4vparse->vol.par_width > 0 && mp4vparse->vol.par_height > 0) {
+ gst_caps_set_simple (caps, "pixel-aspect-ratio",
+ GST_TYPE_FRACTION, mp4vparse->vol.par_width,
+ mp4vparse->vol.par_height, NULL);
+ }
+
+ gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (mp4vparse), caps);
+ gst_caps_unref (caps);
+ }
+
+ static GstFlowReturn
+ gst_mpeg4vparse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame)
+ {
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+ GstBuffer *buffer = frame->buffer;
+
+ gst_mpeg4vparse_update_src_caps (mp4vparse);
+
+ if (mp4vparse->intra_frame)
+ GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);
+ else
+ GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);
+
+ if (G_UNLIKELY (mp4vparse->drop && !mp4vparse->config)) {
+ GST_LOG_OBJECT (mp4vparse, "dropping frame as no config yet");
+ return GST_BASE_PARSE_FLOW_DROPPED;
+ } else
+ return GST_FLOW_OK;
+ }
+
+ static GstEvent *
+ check_pending_key_unit_event (GstEvent * pending_event, GstSegment * segment,
+ GstClockTime timestamp, guint flags, GstClockTime pending_key_unit_ts)
+ {
+ GstClockTime running_time, stream_time;
+ gboolean all_headers;
+ guint count;
+ GstEvent *event = NULL;
+
+ g_return_val_if_fail (segment != NULL, NULL);
+
+ if (pending_event == NULL)
+ goto out;
+
+ if (GST_CLOCK_TIME_IS_VALID (pending_key_unit_ts) &&
+ timestamp == GST_CLOCK_TIME_NONE)
+ goto out;
+
+ running_time = gst_segment_to_running_time (segment,
+ GST_FORMAT_TIME, timestamp);
+
+ GST_INFO ("now %" GST_TIME_FORMAT " wanted %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (running_time), GST_TIME_ARGS (pending_key_unit_ts));
+ if (GST_CLOCK_TIME_IS_VALID (pending_key_unit_ts) &&
+ running_time < pending_key_unit_ts)
+ goto out;
+
+ if (flags & GST_BUFFER_FLAG_DELTA_UNIT) {
+ GST_DEBUG ("pending force key unit, waiting for keyframe");
+ goto out;
+ }
+
+ stream_time = gst_segment_to_stream_time (segment,
+ GST_FORMAT_TIME, timestamp);
+
+ gst_video_event_parse_upstream_force_key_unit (pending_event,
+ NULL, &all_headers, &count);
+
+ event =
+ gst_video_event_new_downstream_force_key_unit (timestamp, stream_time,
+ running_time, all_headers, count);
+ gst_event_set_seqnum (event, gst_event_get_seqnum (pending_event));
+
+ out:
+ return event;
+ }
+
+ static void
+ gst_mpeg4vparse_prepare_key_unit (GstMpeg4VParse * parse, GstEvent * event)
+ {
+ GstClockTime running_time;
+ guint count;
+
+ parse->pending_key_unit_ts = GST_CLOCK_TIME_NONE;
+ gst_event_replace (&parse->force_key_unit_event, NULL);
+
+ gst_video_event_parse_downstream_force_key_unit (event,
+ NULL, NULL, &running_time, NULL, &count);
+
+ GST_INFO_OBJECT (parse, "pushing downstream force-key-unit event %d "
+ "%" GST_TIME_FORMAT " count %d", gst_event_get_seqnum (event),
+ GST_TIME_ARGS (running_time), count);
+ gst_pad_push_event (GST_BASE_PARSE_SRC_PAD (parse), event);
+ }
+
+
+ /* GstBaseParse::pre_push_frame vfunc: fires any due force-key-unit event,
+  * then re-inserts the stored config headers ahead of keyframes, either when
+  * forced or when mp4vparse->interval seconds have elapsed since the last
+  * insertion.  Duplicate configs (frame already starting with the config
+  * bytes) are skipped.
+  * NOTE(review): the ++ lines port this hunk to the 0.11 GstBuffer API
+  * (gst_buffer_map/unmap/memcmp/copy_into) -- verify the map/unmap
+  * signatures against the target GStreamer headers. */
+ static GstFlowReturn
+ gst_mpeg4vparse_pre_push_frame (GstBaseParse * parse, GstBaseParseFrame * frame)
+ {
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+ GstBuffer *buffer = frame->buffer;
+ gboolean push_codec = FALSE;
+ GstEvent *event = NULL;
+
+ if ((event = check_pending_key_unit_event (mp4vparse->force_key_unit_event,
+ &parse->segment, GST_BUFFER_TIMESTAMP (buffer),
+ GST_BUFFER_FLAGS (buffer), mp4vparse->pending_key_unit_ts))) {
+ gst_mpeg4vparse_prepare_key_unit (mp4vparse, event);
+ push_codec = TRUE;
+ }
+
+ /* periodic config sending */
+ if (mp4vparse->interval > 0 || push_codec) {
+ GstClockTime timestamp = GST_BUFFER_TIMESTAMP (buffer);
+ guint64 diff;
+
+ /* init */
+ if (!GST_CLOCK_TIME_IS_VALID (mp4vparse->last_report)) {
+ mp4vparse->last_report = timestamp;
+ }
+
+ if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
+ if (timestamp > mp4vparse->last_report)
+ diff = timestamp - mp4vparse->last_report;
+ else
+ diff = 0;
+
+ GST_LOG_OBJECT (mp4vparse,
+ "now %" GST_TIME_FORMAT ", last config %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (timestamp), GST_TIME_ARGS (mp4vparse->last_report));
+
+ GST_LOG_OBJECT (mp4vparse,
+ "interval since last config %" GST_TIME_FORMAT, GST_TIME_ARGS (diff));
+
+ if (GST_TIME_AS_SECONDS (diff) >= mp4vparse->interval || push_codec) {
++ guint8 *cdata;
++ gsize csize;
++ gboolean diffconf;
++
+ /* we need to send config now first */
+ GST_INFO_OBJECT (parse, "inserting config in stream");
++ cdata = gst_buffer_map (mp4vparse->config, &csize, NULL, GST_MAP_READ);
++ diffconf = (gst_buffer_get_size (buffer) < csize)
++ || gst_buffer_memcmp (buffer, 0, cdata, csize);
++ gst_buffer_unmap (mp4vparse->config, cdata, csize);
+
+ /* avoid inserting duplicate config */
- gst_buffer_copy_metadata (superbuf, buffer, GST_BUFFER_COPY_ALL);
++ if (diffconf) {
+ GstBuffer *superbuf;
+
+ /* insert header */
+ superbuf = gst_buffer_merge (mp4vparse->config, buffer);
- data = GST_BUFFER_DATA (buf);
- size = GST_BUFFER_SIZE (buf);
++ gst_buffer_copy_into (superbuf, buffer, GST_BUFFER_COPY_METADATA, 0,
++ csize);
+ gst_buffer_replace (&frame->buffer, superbuf);
+ gst_buffer_unref (superbuf);
+ } else {
+ GST_INFO_OBJECT (parse, "... but avoiding duplication");
+ }
+
+ /* -1 here is GST_CLOCK_TIME_NONE: only advance last_report for
+  * buffers carrying a valid timestamp */
+ if (G_UNLIKELY (timestamp != -1)) {
+ mp4vparse->last_report = timestamp;
+ }
+ }
+ }
+ }
+
+ return GST_FLOW_OK;
+ }
+
+ /* GstBaseParse::set_caps vfunc: if the sink caps carry codec_data, scan it
+  * for a video object layer (VOL) start code, remember its offset, and take
+  * the whole codec_data blob as the stream config.  Parse failures are
+  * deliberately ignored -- caps are accepted regardless. */
+ static gboolean
+ gst_mpeg4vparse_set_caps (GstBaseParse * parse, GstCaps * caps)
+ {
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+ GstStructure *s;
+ const GValue *value;
+ GstBuffer *buf;
+ guint8 *data;
+ gsize size;
+
+ GstMpeg4Packet packet;
+ GstMpeg4ParseResult res;
+
+ GST_DEBUG_OBJECT (parse, "setcaps called with %" GST_PTR_FORMAT, caps);
+
+ s = gst_caps_get_structure (caps, 0);
+
+ if ((value = gst_structure_get_value (s, "codec_data")) != NULL
+ && (buf = gst_value_get_buffer (value))) {
+ /* best possible parse attempt,
+ * src caps are based on sink caps so it will end up in there
+ * whether sucessful or not */
- gst_mpeg4vparse_process_config (mp4vparse, GST_BUFFER_DATA (buf),
- 3, GST_BUFFER_SIZE (buf));
++ data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
+ res = gst_mpeg4_parse (&packet, TRUE, NULL, data, 0, size);
+
+ while (res == GST_MPEG4_PARSER_OK || res == GST_MPEG4_PARSER_NO_PACKET_END) {
+
+ /* remember where the VOL header sits so later parsing can find it */
+ if (packet.type >= GST_MPEG4_VIDEO_LAYER_FIRST &&
+ packet.type <= GST_MPEG4_VIDEO_LAYER_LAST)
+ mp4vparse->vol_offset = packet.offset;
+
+ res = gst_mpeg4_parse (&packet, TRUE, NULL, data, packet.offset, size);
+ }
+
+ /* And take it as config */
-gst_mpeg4vparse_get_caps (GstBaseParse * parse)
++ gst_mpeg4vparse_process_config (mp4vparse, data, 3, size);
++ gst_buffer_unmap (buf, data, size);
+ }
+
+ /* let's not interfere and accept regardless of config parsing success */
+ return TRUE;
+ }
+
+
+ /* GstBaseParse get_caps vfunc (0.11 signature: takes an optional filter).
+  * Builds allowed caps from the downstream peer (with the "parsed" field
+  * stripped) intersected with the src template; falls back to the sink
+  * template caps when there is no peer.  The filter, when given, is
+  * intersected last. */
+ static GstCaps *
++gst_mpeg4vparse_get_caps (GstBaseParse * parse, GstCaps * filter)
+ {
+ GstCaps *peercaps;
+ GstCaps *res;
+
+ peercaps = gst_pad_get_allowed_caps (GST_BASE_PARSE_SRC_PAD (parse));
+ if (peercaps) {
+ guint i, n;
+
+ /* Remove the parsed field */
+ peercaps = gst_caps_make_writable (peercaps);
+ n = gst_caps_get_size (peercaps);
+ for (i = 0; i < n; i++) {
+ GstStructure *s = gst_caps_get_structure (peercaps, i);
+
+ gst_structure_remove_field (s, "parsed");
+ }
+
+ res =
+ gst_caps_intersect_full (peercaps,
+ gst_pad_get_pad_template_caps (GST_BASE_PARSE_SRC_PAD (parse)),
+ GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (peercaps);
+ } else {
+ /* NOTE(review): fallback copies the SINK pad template while the peer
+  * branch intersects against the SRC template -- confirm intentional */
+ res =
+ gst_caps_copy (gst_pad_get_pad_template_caps (GST_BASE_PARSE_SINK_PAD
+ (parse)));
+ }
+
++ if (filter) {
++ GstCaps *tmp = gst_caps_intersect_full (res, filter,
++ GST_CAPS_INTERSECT_FIRST);
++ gst_caps_unref (res);
++ res = tmp;
++ }
++
++
+ return res;
+ }
+
+ /* GstBaseParse sink-pad event vfunc: intercept downstream force-key-unit
+  * events and queue them (timestamp + event) so pre_push_frame can insert
+  * config headers at the next keyframe.  A second request arriving while
+  * one is queued is ignored.  Returns TRUE when the event was consumed. */
+ static gboolean
+ gst_mpeg4vparse_event (GstBaseParse * parse, GstEvent * event)
+ {
+ gboolean handled = FALSE;
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CUSTOM_DOWNSTREAM:
+ {
+ GstClockTime timestamp, stream_time, running_time;
+ gboolean all_headers;
+ guint count;
+
+ if (!gst_video_event_is_force_key_unit (event))
+ break;
+
+ /* fixed: "&timestamp" had been corrupted by HTML-entity mangling */
+ gst_video_event_parse_downstream_force_key_unit (event,
+ &timestamp, &stream_time, &running_time, &all_headers, &count);
+
+ GST_INFO_OBJECT (mp4vparse, "received downstream force key unit event, "
+ "seqnum %d running_time %" GST_TIME_FORMAT " all_headers %d count %d",
+ gst_event_get_seqnum (event), GST_TIME_ARGS (running_time),
+ all_headers, count);
+ handled = TRUE;
+
+ if (mp4vparse->force_key_unit_event) {
+ GST_INFO_OBJECT (mp4vparse, "ignoring force key unit event "
+ "as one is already queued");
+ break;
+ }
+
+ mp4vparse->pending_key_unit_ts = running_time;
+ gst_event_replace (&mp4vparse->force_key_unit_event, event);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return handled;
+ }
+
+ /* GstBaseParse src-pad event vfunc: note upstream force-key-unit requests
+  * that ask for all headers, queueing them for pre_push_frame, while still
+  * letting the event propagate upstream (handled stays FALSE). */
+ static gboolean
+ gst_mpeg4vparse_src_event (GstBaseParse * parse, GstEvent * event)
+ {
+ gboolean handled = FALSE;
+ GstMpeg4VParse *mp4vparse = GST_MPEG4VIDEO_PARSE (parse);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CUSTOM_UPSTREAM:
+ {
+ GstClockTime running_time;
+ gboolean all_headers;
+ guint count;
+
+ if (!gst_video_event_is_force_key_unit (event))
+ break;
+
+ gst_video_event_parse_upstream_force_key_unit (event,
+ &running_time, &all_headers, &count);
+
+ GST_INFO_OBJECT (mp4vparse, "received upstream force-key-unit event, "
+ "seqnum %d running_time %" GST_TIME_FORMAT " all_headers %d count %d",
+ gst_event_get_seqnum (event), GST_TIME_ARGS (running_time),
+ all_headers, count);
+
+ /* only config re-insertion requests (all_headers) concern this parser */
+ if (!all_headers)
+ break;
+
+ mp4vparse->pending_key_unit_ts = running_time;
+ gst_event_replace (&mp4vparse->force_key_unit_event, event);
+ /* leave handled = FALSE so that the event gets propagated upstream */
+ break;
+ }
+ default:
+ break;
+ }
+
+ return handled;
+ }
for (j = 0; j < gst_caps_get_size (caps); j++) {
GstCaps *in_caps, *out_caps;
GstStructure *s;
-- guint32 fourcc;
++ const gchar *fourcc;
in_caps = gst_caps_copy_nth (caps, i);
out_caps = gst_caps_copy_nth (caps, j);
/* FIXME remove if videotestsrc and video format handle these properly */
s = gst_caps_get_structure (in_caps, 0);
-- if (gst_structure_get_fourcc (s, "format", &fourcc)) {
-- if (fourcc == GST_MAKE_FOURCC ('Y', 'U', 'V', '9') ||
-- fourcc == GST_MAKE_FOURCC ('Y', 'V', 'U', '9') ||
-- fourcc == GST_MAKE_FOURCC ('v', '2', '1', '6')) {
++ if ((fourcc = gst_structure_get_string (s, "format"))) {
++ if (!strcmp (fourcc, "YUV9") ||
++ !strcmp (fourcc, "YVU9") || !strcmp (fourcc, "v216")) {
gst_caps_unref (in_caps);
gst_caps_unref (out_caps);
continue;