dnl initialize autoconf
dnl when going to/from release please set the nano (fourth number) right !
dnl releases only do Wall, cvs and prerelease does Werror too
-AC_INIT(GStreamer Libav, 0.10.12.1,
+AC_INIT(GStreamer Libav, 0.11.0.1,
http://bugzilla.gnome.org/enter_bug.cgi?product=GStreamer,
gst-ffmpeg)
dnl our libraries and install dirs use major.minor as a version
GST_MAJORMINOR=$PACKAGE_VERSION_MAJOR.$PACKAGE_VERSION_MINOR
dnl we override it here if we need to for the release candidate of new series
-GST_MAJORMINOR=0.10
+GST_MAJORMINOR=0.11
AC_SUBST(GST_MAJORMINOR)
AG_GST_LIBTOOL_PREPARE
AM_PROG_LIBTOOL
dnl *** required versions of GStreamer stuff ***
-GST_REQ=0.10.31
+GST_REQ=0.11.0
ORC_REQ=0.4.6
dnl *** autotools stuff ****
dnl prefer internal headers to already installed ones
dnl also add builddir include for enumtypes and marshal
dnl add GST_OPTION_CFLAGS, but overridable
+GST_CFLAGS="$GST_CFLAGS -DGST_USE_UNSTABLE_API"
GST_CFLAGS="-I\$(top_srcdir)/gst-libs -I\$(top_builddir)/gst-libs $GST_CFLAGS $GLIB_EXTRA_CFLAGS \$(GST_OPTION_CFLAGS)"
AC_SUBST(GST_CFLAGS)
AC_SUBST(GST_LIBS)
gstffmpegcfg.c \
gstffmpegdemux.c \
gstffmpegmux.c \
- gstffmpegdeinterlace.c \
- gstffmpegaudioresample.c
+ gstffmpegdeinterlace.c
+#\
+# gstffmpegaudioresample.c
# \
# gstffmpegscale.c
#endif
#if 0
gst_ffmpegcsp_register (plugin);
-#endif
gst_ffmpegaudioresample_register (plugin);
+#endif
av_register_protocol2 (&gstreamer_protocol, sizeof (URLProtocol));
av_register_protocol2 (&gstpipe_protocol, sizeof (URLProtocol));
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS
- ("audio/x-raw-int, endianness = (int) BYTE_ORDER, signed = (boolean) true, width = (int) 16, depth = (int) 16, channels = (int) { 1 , 2 }, rate = (int) [1, MAX ]")
+ ("audio/x-raw,"
+ "format = (string) " GST_AUDIO_NE (S16) ","
+ "channels = (int) { 1 , 2 }, rate = (int) [1, MAX ]")
);
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
GST_STATIC_CAPS
-    ("audio/x-raw-int, endianness = (int) BYTE_ORDER, signed = (boolean) true, width = (int) 16, depth = (int) 16, channels = (int) [ 1 , 6 ], rate = (int) [1, MAX ]")
+    ("audio/x-raw,"
+    "format = (string) " GST_AUDIO_NE (S16) ","
+    "channels = (int) [ 1 , 6 ], rate = (int) [1, MAX ]")
);
GST_BOILERPLATE (GstFFMpegAudioResample, gst_ffmpegaudioresample,
static GstCaps *gst_ffmpegaudioresample_transform_caps (GstBaseTransform *
trans, GstPadDirection direction, GstCaps * caps);
static gboolean gst_ffmpegaudioresample_transform_size (GstBaseTransform *
- trans, GstPadDirection direction, GstCaps * caps, guint size,
- GstCaps * othercaps, guint * othersize);
+ trans, GstPadDirection direction, GstCaps * caps, gsize size,
+ GstCaps * othercaps, gsize * othersize);
static gboolean gst_ffmpegaudioresample_get_unit_size (GstBaseTransform * trans,
- GstCaps * caps, guint * size);
+ GstCaps * caps, gsize * size);
static gboolean gst_ffmpegaudioresample_set_caps (GstBaseTransform * trans,
GstCaps * incaps, GstCaps * outcaps);
static GstFlowReturn gst_ffmpegaudioresample_transform (GstBaseTransform *
static gboolean
gst_ffmpegaudioresample_transform_size (GstBaseTransform * trans,
- GstPadDirection direction, GstCaps * caps, guint size, GstCaps * othercaps,
- guint * othersize)
+ GstPadDirection direction, GstCaps * caps, gsize size, GstCaps * othercaps,
+ gsize * othersize)
{
gint inrate, outrate;
gint inchanns, outchanns;
static gboolean
gst_ffmpegaudioresample_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
- guint * size)
+ gsize * size)
{
gint channels;
GstStructure *structure;
GstFFMpegAudioResample *resample = GST_FFMPEGAUDIORESAMPLE (trans);
gint nbsamples;
gint ret;
+ guint8 *indata, *outdata;
+ gsize insize, outsize;
- gst_buffer_copy_metadata (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS);
- nbsamples = GST_BUFFER_SIZE (inbuf) / (2 * resample->in_channels);
+ gst_buffer_copy_into (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+ indata = gst_buffer_map (inbuf, &insize, NULL, GST_MAP_READ);
+ nbsamples = insize / (2 * resample->in_channels);
GST_LOG_OBJECT (resample, "input buffer duration:%" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_DURATION (inbuf)));
+ outdata = gst_buffer_map (outbuf, &outsize, NULL, GST_MAP_WRITE);
GST_DEBUG_OBJECT (resample,
-    "audio_resample(ctx, output:%p [size:%d], input:%p [size:%d], nbsamples:%d",
-    GST_BUFFER_DATA (outbuf), GST_BUFFER_SIZE (outbuf),
-    GST_BUFFER_DATA (inbuf), GST_BUFFER_SIZE (inbuf), nbsamples);
+    "audio_resample(ctx, output:%p [size:%" G_GSIZE_FORMAT "], input:%p [size:%"
+    G_GSIZE_FORMAT "], nbsamples:%d", outdata, outsize, indata, insize, nbsamples);
- ret = audio_resample (resample->res, (short *) GST_BUFFER_DATA (outbuf),
- (short *) GST_BUFFER_DATA (inbuf), nbsamples);
+ ret =
+ audio_resample (resample->res, (short *) outdata, (short *) indata,
+ nbsamples);
GST_DEBUG_OBJECT (resample, "audio_resample returned %d", ret);
GST_BUFFER_DURATION (outbuf) = gst_util_uint64_scale (ret, GST_SECOND,
resample->out_rate);
- GST_BUFFER_SIZE (outbuf) = ret * 2 * resample->out_channels;
+
+ outsize = ret * 2 * resample->out_channels;
+ gst_buffer_unmap (outbuf, outdata, outsize);
+ gst_buffer_unmap (inbuf, indata, insize);
GST_LOG_OBJECT (resample, "Output buffer duration:%" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));
#include "gstffmpeg.h"
#include "gstffmpegcodecmap.h"
+#include <gst/video/video.h>
+#include <gst/audio/audio.h>
#include <gst/pbutils/codec-utils.h>
/*
{
GstStructure *str = gst_caps_get_structure (caps, 0);
const GValue *palette_v;
- const GstBuffer *palette;
+ GstBuffer *palette;
/* do we have a palette? */
if ((palette_v = gst_structure_get_value (str, "palette_data")) && context) {
palette = gst_value_get_buffer (palette_v);
- if (GST_BUFFER_SIZE (palette) >= AVPALETTE_SIZE) {
+ if (gst_buffer_get_size (palette) >= AVPALETTE_SIZE) {
if (context->palctrl)
av_free (context->palctrl);
context->palctrl = av_malloc (sizeof (AVPaletteControl));
context->palctrl->palette_changed = 1;
- memcpy (context->palctrl->palette, GST_BUFFER_DATA (palette),
+ gst_buffer_extract (palette, 0, context->palctrl->palette,
AVPALETTE_SIZE);
}
}
if (context->palctrl) {
GstBuffer *palette = gst_buffer_new_and_alloc (AVPALETTE_SIZE);
- memcpy (GST_BUFFER_DATA (palette), context->palctrl->palette,
- AVPALETTE_SIZE);
+ gst_buffer_fill (palette, 0, context->palctrl->palette, AVPALETTE_SIZE);
gst_caps_set_simple (caps, "palette_data", GST_TYPE_BUFFER, palette, NULL);
}
}
case CODEC_ID_DVVIDEO:
{
if (encode && context) {
- guint32 fourcc;
+ const gchar *format;
switch (context->pix_fmt) {
case PIX_FMT_YUYV422:
- fourcc = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
+ format = "YUY2";
break;
case PIX_FMT_YUV420P:
- fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
+ format = "I420";
break;
case PIX_FMT_YUVA420P:
- fourcc = GST_MAKE_FOURCC ('A', '4', '2', '0');
+ format = "A420";
break;
case PIX_FMT_YUV411P:
- fourcc = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
+ format = "Y41B";
break;
case PIX_FMT_YUV422P:
- fourcc = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
+ format = "Y42B";
break;
case PIX_FMT_YUV410P:
- fourcc = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
+ format = "YUV9";
break;
default:
GST_WARNING
-              ("Couldnt' find fourcc for pixfmt %d, defaulting to I420",
+              ("Couldn't find format for pixfmt %d, defaulting to I420",
context->pix_fmt);
-          fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
+          format = "I420";
break;
}
caps = gst_ff_vid_caps_new (context, codec_id, "video/x-dv",
"systemstream", G_TYPE_BOOLEAN, FALSE,
- "format", GST_TYPE_FOURCC, fourcc, NULL);
+ "format", G_TYPE_STRING, format, NULL);
} else {
caps = gst_ff_vid_caps_new (context, codec_id, "video/x-dv",
"systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
break;
case CODEC_ID_VC1:
caps = gst_ff_vid_caps_new (context, codec_id, "video/x-wmv",
- "wmvversion", G_TYPE_INT, 3, "format", GST_TYPE_FOURCC,
- GST_MAKE_FOURCC ('W', 'V', 'C', '1'), NULL);
+ "wmvversion", G_TYPE_INT, 3, "format", G_TYPE_STRING, "WVC1", NULL);
break;
case CODEC_ID_QDM2:
caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-qdm2", NULL);
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
{
- gint width = 0, depth = 0, endianness = 0;
- gboolean signedness = FALSE; /* blabla */
+ GstAudioFormat format;
switch (codec_id) {
case CODEC_ID_PCM_S16LE:
- width = 16;
- depth = 16;
- endianness = G_LITTLE_ENDIAN;
- signedness = TRUE;
+ format = GST_AUDIO_FORMAT_S16LE;
break;
case CODEC_ID_PCM_S16BE:
- width = 16;
- depth = 16;
- endianness = G_BIG_ENDIAN;
- signedness = TRUE;
+ format = GST_AUDIO_FORMAT_S16BE;
break;
case CODEC_ID_PCM_U16LE:
- width = 16;
- depth = 16;
- endianness = G_LITTLE_ENDIAN;
- signedness = FALSE;
+ format = GST_AUDIO_FORMAT_U16LE;
break;
case CODEC_ID_PCM_U16BE:
- width = 16;
- depth = 16;
- endianness = G_BIG_ENDIAN;
- signedness = FALSE;
+ format = GST_AUDIO_FORMAT_U16BE;
break;
case CODEC_ID_PCM_S8:
- width = 8;
- depth = 8;
- endianness = G_BYTE_ORDER;
- signedness = TRUE;
+ format = GST_AUDIO_FORMAT_S8;
break;
case CODEC_ID_PCM_U8:
- width = 8;
- depth = 8;
- endianness = G_BYTE_ORDER;
- signedness = FALSE;
+ format = GST_AUDIO_FORMAT_U8;
break;
default:
g_assert (0); /* don't worry, we never get here */
break;
}
- caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw-int",
- "width", G_TYPE_INT, width,
- "depth", G_TYPE_INT, depth,
- "endianness", G_TYPE_INT, endianness,
- "signed", G_TYPE_BOOLEAN, signedness, NULL);
+ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw",
+ "format", G_TYPE_STRING, gst_audio_format_to_string (format), NULL);
}
break;
break;
case CODEC_ID_SHORTEN:
- caps = gst_caps_new_simple ("audio/x-shorten", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-shorten");
break;
case CODEC_ID_ALAC:
/* Note that ffmpeg has no encoder yet, but just for safety. In the
* encoder case, we want to add things like samplerate, channels... */
if (!encode) {
- caps = gst_caps_new_simple ("audio/x-flac", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-flac");
}
break;
caps = NULL;
break;
case CODEC_ID_BMP:
- caps = gst_caps_new_simple ("image/bmp", NULL);
+ caps = gst_caps_new_empty_simple ("image/bmp");
break;
case CODEC_ID_TTA:
caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-tta", NULL);
if (context && context->extradata_size > 0) {
GstBuffer *data = gst_buffer_new_and_alloc (context->extradata_size);
- memcpy (GST_BUFFER_DATA (data), context->extradata,
- context->extradata_size);
+ gst_buffer_fill (data, 0, context->extradata, context->extradata_size);
gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, data, NULL);
gst_buffer_unref (data);
}
return caps;
}
-/* Convert a FFMPEG Pixel Format and optional AVCodecContext
- * to a GstCaps. If the context is ommitted, no fixed values
- * for video/audio size will be included in the GstCaps
- *
- * See below for usefullness
- */
-
-GstCaps *
-gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
- enum CodecID codec_id)
+/* Convert a FFMPEG Pixel Format to a GStreamer VideoFormat */
+GstVideoFormat
+gst_ffmpeg_pixfmt_to_video_format (enum PixelFormat pix_fmt)
{
- GstCaps *caps = NULL;
-
- int bpp = 0, depth = 0, endianness = 0;
- gulong g_mask = 0, r_mask = 0, b_mask = 0, a_mask = 0;
- guint32 fmt = 0;
+ GstVideoFormat fmt;
switch (pix_fmt) {
case PIX_FMT_YUVJ420P:
case PIX_FMT_YUV420P:
- fmt = GST_MAKE_FOURCC ('I', '4', '2', '0');
+ fmt = GST_VIDEO_FORMAT_I420;
break;
case PIX_FMT_YUVA420P:
- fmt = GST_MAKE_FOURCC ('A', '4', '2', '0');
+ fmt = GST_VIDEO_FORMAT_A420;
break;
case PIX_FMT_YUYV422:
- fmt = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
+ fmt = GST_VIDEO_FORMAT_YUY2;
break;
case PIX_FMT_RGB24:
- bpp = depth = 24;
- endianness = G_BIG_ENDIAN;
- r_mask = 0xff0000;
- g_mask = 0x00ff00;
- b_mask = 0x0000ff;
+ fmt = GST_VIDEO_FORMAT_RGB;
break;
case PIX_FMT_BGR24:
- bpp = depth = 24;
- endianness = G_BIG_ENDIAN;
- r_mask = 0x0000ff;
- g_mask = 0x00ff00;
- b_mask = 0xff0000;
+ fmt = GST_VIDEO_FORMAT_BGR;
break;
case PIX_FMT_YUVJ422P:
case PIX_FMT_YUV422P:
- fmt = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
+ fmt = GST_VIDEO_FORMAT_Y42B;
break;
case PIX_FMT_YUVJ444P:
case PIX_FMT_YUV444P:
- fmt = GST_MAKE_FOURCC ('Y', '4', '4', '4');
+ fmt = GST_VIDEO_FORMAT_Y444;
break;
case PIX_FMT_RGB32:
- bpp = 32;
- depth = 32;
- endianness = G_BIG_ENDIAN;
#if (G_BYTE_ORDER == G_BIG_ENDIAN)
- r_mask = 0x00ff0000;
- g_mask = 0x0000ff00;
- b_mask = 0x000000ff;
- a_mask = 0xff000000;
+ fmt = GST_VIDEO_FORMAT_xRGB;
#else
- r_mask = 0x0000ff00;
- g_mask = 0x00ff0000;
- b_mask = 0xff000000;
- a_mask = 0x000000ff;
+ fmt = GST_VIDEO_FORMAT_BGRx;
#endif
break;
case PIX_FMT_YUV410P:
- fmt = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
+ fmt = GST_VIDEO_FORMAT_YUV9;
break;
case PIX_FMT_YUV411P:
- fmt = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
+ fmt = GST_VIDEO_FORMAT_Y41B;
break;
case PIX_FMT_RGB565:
- bpp = depth = 16;
- endianness = G_BYTE_ORDER;
- r_mask = 0xf800;
- g_mask = 0x07e0;
- b_mask = 0x001f;
+ fmt = GST_VIDEO_FORMAT_RGB16;
break;
case PIX_FMT_RGB555:
- bpp = 16;
- depth = 15;
- endianness = G_BYTE_ORDER;
- r_mask = 0x7c00;
- g_mask = 0x03e0;
- b_mask = 0x001f;
+ fmt = GST_VIDEO_FORMAT_RGB15;
break;
case PIX_FMT_PAL8:
- bpp = depth = 8;
- endianness = G_BYTE_ORDER;
+ fmt = GST_VIDEO_FORMAT_RGB8_PALETTED;
break;
case PIX_FMT_GRAY8:
- bpp = depth = 8;
- caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-gray",
- "bpp", G_TYPE_INT, bpp, "depth", G_TYPE_INT, depth, NULL);
+ fmt = GST_VIDEO_FORMAT_GRAY8;
break;
default:
/* give up ... */
+ fmt = GST_VIDEO_FORMAT_UNKNOWN;
break;
}
+ return fmt;
+}
- if (caps == NULL) {
- if (bpp != 0) {
- if (r_mask != 0) {
- if (a_mask) {
- caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-rgb",
- "bpp", G_TYPE_INT, bpp,
- "depth", G_TYPE_INT, depth,
- "red_mask", G_TYPE_INT, r_mask,
- "green_mask", G_TYPE_INT, g_mask,
- "blue_mask", G_TYPE_INT, b_mask,
- "alpha_mask", G_TYPE_INT, a_mask,
- "endianness", G_TYPE_INT, endianness, NULL);
- } else {
- caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-rgb",
- "bpp", G_TYPE_INT, bpp,
- "depth", G_TYPE_INT, depth,
- "red_mask", G_TYPE_INT, r_mask,
- "green_mask", G_TYPE_INT, g_mask,
- "blue_mask", G_TYPE_INT, b_mask,
- "endianness", G_TYPE_INT, endianness, NULL);
- }
- } else {
- caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-rgb",
- "bpp", G_TYPE_INT, bpp,
- "depth", G_TYPE_INT, depth,
- "endianness", G_TYPE_INT, endianness, NULL);
- if (caps && context) {
- gst_ffmpeg_set_palette (caps, context);
- }
- }
- } else if (fmt) {
- caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw-yuv",
- "format", GST_TYPE_FOURCC, fmt, NULL);
- }
+/* Convert a FFMPEG Pixel Format and optional AVCodecContext
+ * to a GstCaps. If the context is omitted, no fixed values
+ * for video/audio size will be included in the GstCaps
+ *
+ * See below for usefulness
+ */
+
+GstCaps *
+gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
+ enum CodecID codec_id)
+{
+ GstCaps *caps = NULL;
+ GstVideoFormat format;
+
+ format = gst_ffmpeg_pixfmt_to_video_format (pix_fmt);
+
+ if (format != GST_VIDEO_FORMAT_UNKNOWN) {
+ caps = gst_ff_vid_caps_new (context, codec_id, "video/x-raw",
+ "format", G_TYPE_STRING, gst_video_format_to_string (format), NULL);
}
if (caps != NULL) {
AVCodecContext * context, enum CodecID codec_id)
{
GstCaps *caps = NULL;
-
- int bpp = 0;
- gboolean integer = TRUE;
- gboolean signedness = FALSE;
+ GstAudioFormat format;
switch (sample_fmt) {
case SAMPLE_FMT_S16:
- signedness = TRUE;
- bpp = 16;
+ format = GST_AUDIO_FORMAT_S16;
break;
-
case SAMPLE_FMT_S32:
- signedness = TRUE;
- bpp = 32;
+ format = GST_AUDIO_FORMAT_S32;
break;
-
case SAMPLE_FMT_FLT:
- integer = FALSE;
- bpp = 32;
+ format = GST_AUDIO_FORMAT_F32;
break;
-
case SAMPLE_FMT_DBL:
- integer = FALSE;
- bpp = 64;
+ format = GST_AUDIO_FORMAT_F64;
break;
default:
/* .. */
+ format = GST_AUDIO_FORMAT_UNKNOWN;
break;
}
- if (bpp) {
- if (integer) {
- caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw-int",
- "signed", G_TYPE_BOOLEAN, signedness,
- "endianness", G_TYPE_INT, G_BYTE_ORDER,
- "width", G_TYPE_INT, bpp, "depth", G_TYPE_INT, bpp, NULL);
- } else {
- caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw-float",
- "endianness", G_TYPE_INT, G_BYTE_ORDER,
- "width", G_TYPE_INT, bpp, NULL);
- }
- }
-
- if (caps != NULL) {
+ if (format != GST_AUDIO_FORMAT_UNKNOWN) {
+ caps = gst_ff_aud_caps_new (context, codec_id, "audio/x-raw",
+ "format", G_TYPE_STRING, gst_audio_format_to_string (format), NULL);
GST_LOG ("caps for sample_fmt=%d: %" GST_PTR_FORMAT, sample_fmt, caps);
} else {
GST_LOG ("No caps found for sample_fmt=%d", sample_fmt);
AVCodecContext * context, gboolean raw)
{
GstStructure *structure;
- gint depth = 0, width = 0, endianness = 0;
- gboolean signedness = FALSE;
- const gchar *name;
+ const gchar *fmt;
+ GstAudioFormat format = GST_AUDIO_FORMAT_UNKNOWN;
g_return_if_fail (gst_caps_get_size (caps) == 1);
+
structure = gst_caps_get_structure (caps, 0);
gst_structure_get_int (structure, "channels", &context->channels);
if (!raw)
return;
- name = gst_structure_get_name (structure);
-
- if (!strcmp (name, "audio/x-raw-float")) {
- /* FLOAT */
- if (gst_structure_get_int (structure, "width", &width) &&
- gst_structure_get_int (structure, "endianness", &endianness)) {
- if (endianness == G_BYTE_ORDER) {
- if (width == 32)
- context->sample_fmt = SAMPLE_FMT_FLT;
- else if (width == 64)
- context->sample_fmt = SAMPLE_FMT_DBL;
- }
- }
- } else {
- /* INT */
- if (gst_structure_get_int (structure, "width", &width) &&
- gst_structure_get_int (structure, "depth", &depth) &&
- gst_structure_get_boolean (structure, "signed", &signedness) &&
- gst_structure_get_int (structure, "endianness", &endianness)) {
- if ((endianness == G_BYTE_ORDER) && (signedness == TRUE)) {
- if ((width == 16) && (depth == 16))
- context->sample_fmt = SAMPLE_FMT_S16;
- else if ((width == 32) && (depth == 32))
- context->sample_fmt = SAMPLE_FMT_S32;
- }
+ if (gst_structure_has_name (structure, "audio/x-raw")) {
+ if ((fmt = gst_structure_get_string (structure, "format"))) {
+ format = gst_audio_format_from_string (fmt);
}
}
-}
+ switch (format) {
+ case GST_AUDIO_FORMAT_F32:
+ context->sample_fmt = SAMPLE_FMT_FLT;
+ break;
+ case GST_AUDIO_FORMAT_F64:
+ context->sample_fmt = SAMPLE_FMT_DBL;
+ break;
+ case GST_AUDIO_FORMAT_S32:
+ context->sample_fmt = SAMPLE_FMT_S32;
+ break;
+ case GST_AUDIO_FORMAT_S16:
+ context->sample_fmt = SAMPLE_FMT_S16;
+ break;
+ default:
+ break;
+ }
+}
/* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
* and other video properties in a AVCodecContext.
GstStructure *structure;
const GValue *fps;
const GValue *par = NULL;
+ const gchar *fmt;
+ GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
GST_DEBUG ("converting caps %" GST_PTR_FORMAT, caps);
g_return_if_fail (gst_caps_get_size (caps) == 1);
g_return_if_fail (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps));
- if (strcmp (gst_structure_get_name (structure), "video/x-raw-yuv") == 0) {
- guint32 fourcc;
-
- if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
- switch (fourcc) {
- case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
- context->pix_fmt = PIX_FMT_YUYV422;
- break;
- case GST_MAKE_FOURCC ('I', '4', '2', '0'):
- context->pix_fmt = PIX_FMT_YUV420P;
- break;
- case GST_MAKE_FOURCC ('A', '4', '2', '0'):
- context->pix_fmt = PIX_FMT_YUVA420P;
- break;
- case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
- context->pix_fmt = PIX_FMT_YUV411P;
- break;
- case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
- context->pix_fmt = PIX_FMT_YUV422P;
- break;
- case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
- context->pix_fmt = PIX_FMT_YUV410P;
- break;
-#if 0
- case FIXME:
- context->pix_fmt = PIX_FMT_YUV444P;
- break;
-#endif
- }
+ if (gst_structure_has_name (structure, "video/x-raw")) {
+ if ((fmt = gst_structure_get_string (structure, "format"))) {
+ format = gst_video_format_from_string (fmt);
}
- } else if (strcmp (gst_structure_get_name (structure),
- "video/x-raw-rgb") == 0) {
- gint bpp = 0, rmask = 0, endianness = 0;
+ }
- if (gst_structure_get_int (structure, "bpp", &bpp) &&
- gst_structure_get_int (structure, "endianness", &endianness)) {
- if (gst_structure_get_int (structure, "red_mask", &rmask)) {
- switch (bpp) {
- case 32:
+ switch (format) {
+ case GST_VIDEO_FORMAT_YUY2:
+ context->pix_fmt = PIX_FMT_YUYV422;
+ break;
+ case GST_VIDEO_FORMAT_I420:
+ context->pix_fmt = PIX_FMT_YUV420P;
+ break;
+ case GST_VIDEO_FORMAT_A420:
+ context->pix_fmt = PIX_FMT_YUVA420P;
+ break;
+ case GST_VIDEO_FORMAT_Y41B:
+ context->pix_fmt = PIX_FMT_YUV411P;
+ break;
+ case GST_VIDEO_FORMAT_Y42B:
+ context->pix_fmt = PIX_FMT_YUV422P;
+ break;
+ case GST_VIDEO_FORMAT_YUV9:
+ context->pix_fmt = PIX_FMT_YUV410P;
+ break;
+ case GST_VIDEO_FORMAT_Y444:
+ context->pix_fmt = PIX_FMT_YUV444P;
+ break;
+ case GST_VIDEO_FORMAT_GRAY8:
+ context->pix_fmt = PIX_FMT_GRAY8;
+ break;
+ case GST_VIDEO_FORMAT_xRGB:
#if (G_BYTE_ORDER == G_BIG_ENDIAN)
- if (rmask == 0x00ff0000)
-#else
- if (rmask == 0x0000ff00)
+ context->pix_fmt = PIX_FMT_RGB32;
#endif
- context->pix_fmt = PIX_FMT_RGB32;
- break;
- case 24:
- if (rmask == 0x0000FF)
- context->pix_fmt = PIX_FMT_BGR24;
- else
- context->pix_fmt = PIX_FMT_RGB24;
- break;
- case 16:
- if (endianness == G_BYTE_ORDER)
- context->pix_fmt = PIX_FMT_RGB565;
- break;
- case 15:
- if (endianness == G_BYTE_ORDER)
- context->pix_fmt = PIX_FMT_RGB555;
- break;
- default:
- /* nothing */
- break;
- }
- } else {
- if (bpp == 8) {
- context->pix_fmt = PIX_FMT_PAL8;
- gst_ffmpeg_get_palette (caps, context);
- }
- }
- }
- } else if (strcmp (gst_structure_get_name (structure),
- "video/x-raw-gray") == 0) {
- gint bpp = 0;
-
- if (gst_structure_get_int (structure, "bpp", &bpp)) {
- switch (bpp) {
- case 8:
- context->pix_fmt = PIX_FMT_GRAY8;
- break;
- }
- }
+ break;
+ case GST_VIDEO_FORMAT_BGRx:
+#if (G_BYTE_ORDER == G_LITTLE_ENDIAN)
+ context->pix_fmt = PIX_FMT_RGB32;
+#endif
+ break;
+ case GST_VIDEO_FORMAT_RGB:
+ context->pix_fmt = PIX_FMT_RGB24;
+ break;
+ case GST_VIDEO_FORMAT_BGR:
+ context->pix_fmt = PIX_FMT_BGR24;
+ break;
+ case GST_VIDEO_FORMAT_RGB16:
+ context->pix_fmt = PIX_FMT_RGB565;
+ break;
+ case GST_VIDEO_FORMAT_RGB15:
+ context->pix_fmt = PIX_FMT_RGB555;
+ break;
+ case GST_VIDEO_FORMAT_RGB8_PALETTED:
+ context->pix_fmt = PIX_FMT_PAL8;
+ gst_ffmpeg_get_palette (caps, context);
+ break;
+ default:
+ break;
}
}
{
GstStructure *str;
const GValue *value;
- const GstBuffer *buf;
+ GstBuffer *buf;
GST_LOG ("codec_id:%d, codec_type:%d, caps:%" GST_PTR_FORMAT " context:%p",
codec_id, codec_type, caps, context);
/* extradata parsing (esds [mpeg4], wma/wmv, msmpeg4v1/2/3, etc.) */
if ((value = gst_structure_get_value (str, "codec_data"))) {
- guint size;
+ gsize size;
guint8 *data;
- buf = GST_BUFFER_CAST (gst_value_get_mini_object (value));
- size = GST_BUFFER_SIZE (buf);
- data = GST_BUFFER_DATA (buf);
+ buf = gst_value_get_buffer (value);
+ data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
/* free the old one if it is there */
if (context->extradata)
}
-  GST_DEBUG ("have codec data of size %d", size);
+  GST_DEBUG ("have codec data of size %" G_GSIZE_FORMAT, size);
+
+  gst_buffer_unmap (buf, data, size);
} else if (context->extradata == NULL && codec_id != CODEC_ID_AAC_LATM &&
codec_id != CODEC_ID_FLAC) {
/* no extradata, alloc dummy with 0 sized, some codecs insist on reading
case CODEC_ID_DVVIDEO:
{
- guint32 fourcc;
+ const gchar *format;
- if (gst_structure_get_fourcc (str, "format", &fourcc))
- switch (fourcc) {
- case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
- context->pix_fmt = PIX_FMT_YUYV422;
- break;
- case GST_MAKE_FOURCC ('I', '4', '2', '0'):
- context->pix_fmt = PIX_FMT_YUV420P;
- break;
- case GST_MAKE_FOURCC ('A', '4', '2', '0'):
- context->pix_fmt = PIX_FMT_YUVA420P;
- break;
- case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
- context->pix_fmt = PIX_FMT_YUV411P;
- break;
- case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
- context->pix_fmt = PIX_FMT_YUV422P;
- break;
- case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
- context->pix_fmt = PIX_FMT_YUV410P;
- break;
- default:
- GST_WARNING ("couldn't convert fourcc %" GST_FOURCC_FORMAT
- " to a pixel format", GST_FOURCC_ARGS (fourcc));
- break;
+ if ((format = gst_structure_get_string (str, "format"))) {
+
+ if (g_str_equal (format, "YUY2"))
+ context->pix_fmt = PIX_FMT_YUYV422;
+ else if (g_str_equal (format, "I420"))
+ context->pix_fmt = PIX_FMT_YUV420P;
+ else if (g_str_equal (format, "A420"))
+ context->pix_fmt = PIX_FMT_YUVA420P;
+ else if (g_str_equal (format, "Y41B"))
+ context->pix_fmt = PIX_FMT_YUV411P;
+ else if (g_str_equal (format, "Y42B"))
+ context->pix_fmt = PIX_FMT_YUV422P;
+ else if (g_str_equal (format, "YUV9"))
+ context->pix_fmt = PIX_FMT_YUV410P;
+ else {
+ GST_WARNING ("couldn't convert format %s" " to a pixel format",
+ format);
}
+ } else
+ GST_WARNING ("No specified format");
break;
}
case CODEC_ID_H263P:
caps = gst_caps_new_simple ("application/x-pn-realmedia",
"systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
} else if (!strcmp (format_name, "asf")) {
- caps = gst_caps_new_simple ("video/x-ms-asf", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-ms-asf");
} else if (!strcmp (format_name, "avi")) {
- caps = gst_caps_new_simple ("video/x-msvideo", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-msvideo");
} else if (!strcmp (format_name, "wav")) {
- caps = gst_caps_new_simple ("audio/x-wav", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-wav");
} else if (!strcmp (format_name, "ape")) {
- caps = gst_caps_new_simple ("application/x-ape", NULL);
+ caps = gst_caps_new_empty_simple ("application/x-ape");
} else if (!strcmp (format_name, "swf")) {
- caps = gst_caps_new_simple ("application/x-shockwave-flash", NULL);
+ caps = gst_caps_new_empty_simple ("application/x-shockwave-flash");
} else if (!strcmp (format_name, "au")) {
- caps = gst_caps_new_simple ("audio/x-au", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-au");
} else if (!strcmp (format_name, "dv")) {
caps = gst_caps_new_simple ("video/x-dv",
"systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
} else if (!strcmp (format_name, "4xm")) {
- caps = gst_caps_new_simple ("video/x-4xm", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-4xm");
} else if (!strcmp (format_name, "matroska")) {
- caps = gst_caps_new_simple ("video/x-matroska", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-matroska");
} else if (!strcmp (format_name, "mp3")) {
- caps = gst_caps_new_simple ("application/x-id3", NULL);
+ caps = gst_caps_new_empty_simple ("application/x-id3");
} else if (!strcmp (format_name, "flic")) {
- caps = gst_caps_new_simple ("video/x-fli", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-fli");
} else if (!strcmp (format_name, "flv")) {
- caps = gst_caps_new_simple ("video/x-flv", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-flv");
} else if (!strcmp (format_name, "tta")) {
- caps = gst_caps_new_simple ("audio/x-ttafile", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-ttafile");
} else if (!strcmp (format_name, "aiff")) {
- caps = gst_caps_new_simple ("audio/x-aiff", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-aiff");
} else if (!strcmp (format_name, "mov_mp4_m4a_3gp_3g2")) {
caps =
gst_caps_from_string
GST_LOG ("Could not create stream format caps for %s", format_name);
name = g_strdup_printf ("application/x-gst_ff-%s", format_name);
- caps = gst_caps_new_simple (name, NULL);
+ caps = gst_caps_new_empty_simple (name);
g_free (name);
}
mimetype = gst_structure_get_name (structure);
- if (!strcmp (mimetype, "video/x-raw-rgb") ||
- !strcmp (mimetype, "video/x-raw-yuv")) {
+ if (!strcmp (mimetype, "video/x-raw")) {
id = CODEC_ID_RAWVIDEO;
video = TRUE;
- } else if (!strcmp (mimetype, "audio/x-raw-int")) {
- gint depth, width, endianness;
- gboolean signedness;
-
- if (gst_structure_get_int (structure, "endianness", &endianness) &&
- gst_structure_get_boolean (structure, "signed", &signedness) &&
- gst_structure_get_int (structure, "width", &width) &&
- gst_structure_get_int (structure, "depth", &depth) && depth == width) {
- switch (depth) {
- case 8:
- if (signedness) {
- id = CODEC_ID_PCM_S8;
- } else {
- id = CODEC_ID_PCM_U8;
- }
+ } else if (!strcmp (mimetype, "audio/x-raw")) {
+ GstAudioInfo info;
+
+ if (gst_audio_info_from_caps (&info, caps)) {
+ switch (GST_AUDIO_INFO_FORMAT (&info)) {
+ case GST_AUDIO_FORMAT_S8:
+ id = CODEC_ID_PCM_S8;
break;
- case 16:
- switch (endianness) {
- case G_BIG_ENDIAN:
- if (signedness) {
- id = CODEC_ID_PCM_S16BE;
- } else {
- id = CODEC_ID_PCM_U16BE;
- }
- break;
- case G_LITTLE_ENDIAN:
- if (signedness) {
- id = CODEC_ID_PCM_S16LE;
- } else {
- id = CODEC_ID_PCM_U16LE;
- }
- break;
- }
+ case GST_AUDIO_FORMAT_U8:
+ id = CODEC_ID_PCM_U8;
+ break;
+ case GST_AUDIO_FORMAT_S16LE:
+ id = CODEC_ID_PCM_S16LE;
+ break;
+ case GST_AUDIO_FORMAT_S16BE:
+ id = CODEC_ID_PCM_S16BE;
+ break;
+ case GST_AUDIO_FORMAT_U16LE:
+ id = CODEC_ID_PCM_U16LE;
+ break;
+ case GST_AUDIO_FORMAT_U16BE:
+ id = CODEC_ID_PCM_U16BE;
+ break;
+ default:
break;
}
if (id != CODEC_ID_NONE)
break;
case 3:
{
- guint32 fourcc;
+ const gchar *format;
/* WMV3 unless the fourcc exists and says otherwise */
id = CODEC_ID_WMV3;
- if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
- if ((fourcc == GST_MAKE_FOURCC ('W', 'V', 'C', '1')) ||
- (fourcc == GST_MAKE_FOURCC ('W', 'M', 'V', 'A'))) {
- id = CODEC_ID_VC1;
- }
- }
- }
+ if ((format = gst_structure_get_string (structure, "format")) &&
+ (g_str_equal (format, "WVC1") || g_str_equal (format, "WMVA")))
+ id = CODEC_ID_VC1;
+
break;
+ }
}
}
if (id != CODEC_ID_NONE)
#endif
#include <gst/gst.h>
+#include <gst/video/video.h>
#include <gst/audio/multichannel.h>
/*
GstCaps *
gst_ffmpeg_formatid_to_caps (const gchar *format_name);
+GstVideoFormat
+gst_ffmpeg_pixfmt_to_video_format (enum PixelFormat pix_fmt);
+
/* Convert a FFMPEG Pixel Format and optional AVCodecContext
* to a GstCaps. If the context is ommitted, no fixed values
* for video/audio size will be included in the GstCaps
#include <gst/gst.h>
#include <gst/video/video.h>
+#include <gst/video/gstvideopool.h>
#include "gstffmpeg.h"
#include "gstffmpegcodecmap.h"
#include "gstffmpegutils.h"
-/* define to enable alternative buffer refcounting algorithm */
-#undef EXTRA_REF
-
typedef struct _GstFFMpegDec GstFFMpegDec;
#define MAX_TS_MASK 0xff
AVCodecContext *context;
AVFrame *picture;
gboolean opened;
+ GstBufferPool *pool;
+
+ /* from incoming caps */
+ gint in_width;
+ gint in_height;
+ gint in_par_n;
+ gint in_par_d;
+ gint in_fps_n;
+ gint in_fps_d;
+
+ /* current context */
+ enum PixelFormat ctx_pix_fmt;
+ gint ctx_width;
+ gint ctx_height;
+ gint ctx_par_n;
+ gint ctx_par_d;
+ gint ctx_ticks;
+ gint ctx_time_d;
+ gint ctx_time_n;
+ gint ctx_interlaced;
+
+ /* current output format */
+ GstVideoInfo out_info;
+
union
{
struct
{
- gint width, height;
- gint clip_width, clip_height;
- gint par_n, par_d;
- gint fps_n, fps_d;
- gint old_fps_n, old_fps_d;
- gboolean interlaced;
-
- enum PixelFormat pix_fmt;
- } video;
- struct
- {
gint channels;
gint samplerate;
gint depth;
} audio;
} format;
+
+
gboolean waiting_for_key;
gboolean discont;
gboolean clear_ts;
guint8 *padded;
guint padded_size;
- GValue *par; /* pixel aspect ratio of incoming data */
gboolean current_dr; /* if direct rendering is enabled */
- gboolean extra_ref; /* keep extra ref around in get/release */
/* some properties */
enum AVDiscard skip_frame;
/* reverse playback queue */
GList *queued;
-
- /* Can downstream allocate 16bytes aligned data. */
- gboolean can_allocate_aligned;
};
typedef struct _GstFFMpegDecClass GstFFMpegDecClass;
static void gst_ffmpegdec_init (GstFFMpegDec * ffmpegdec);
static void gst_ffmpegdec_finalize (GObject * object);
-static gboolean gst_ffmpegdec_query (GstPad * pad, GstQuery * query);
+static gboolean gst_ffmpegdec_src_query (GstPad * pad, GstQuery * query);
static gboolean gst_ffmpegdec_src_event (GstPad * pad, GstEvent * event);
-static gboolean gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps);
static gboolean gst_ffmpegdec_sink_event (GstPad * pad, GstEvent * event);
static GstFlowReturn gst_ffmpegdec_chain (GstPad * pad, GstBuffer * buf);
static void gst_ffmpegdec_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec);
-static gboolean gst_ffmpegdec_negotiate (GstFFMpegDec * ffmpegdec,
+static gboolean gst_ffmpegdec_video_negotiate (GstFFMpegDec * ffmpegdec,
+ gboolean force);
+static gboolean gst_ffmpegdec_audio_negotiate (GstFFMpegDec * ffmpegdec,
gboolean force);
/* some sort of bufferpool handling, but different */
sinkcaps = gst_caps_from_string ("unknown/unknown");
}
if (in_plugin->type == AVMEDIA_TYPE_VIDEO) {
- srccaps = gst_caps_from_string ("video/x-raw-rgb; video/x-raw-yuv");
+ srccaps = gst_caps_from_string ("video/x-raw");
} else {
srccaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
in_plugin->id, FALSE, in_plugin);
g_param_spec_boolean ("debug-mv", "Debug motion vectors",
"Whether ffmpeg should print motion vectors on top of the image",
DEFAULT_DEBUG_MV, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-#if 0
- g_object_class_install_property (gobject_class, PROP_CROP,
- g_param_spec_boolean ("crop", "Crop",
- "Crop images to the display region",
- DEFAULT_CROP, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-#endif
caps = klass->in_plugin->capabilities;
if (caps & (CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS)) {
/* setup pads */
ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
- gst_pad_set_setcaps_function (ffmpegdec->sinkpad,
- GST_DEBUG_FUNCPTR (gst_ffmpegdec_setcaps));
gst_pad_set_event_function (ffmpegdec->sinkpad,
GST_DEBUG_FUNCPTR (gst_ffmpegdec_sink_event));
gst_pad_set_chain_function (ffmpegdec->sinkpad,
gst_pad_set_event_function (ffmpegdec->srcpad,
GST_DEBUG_FUNCPTR (gst_ffmpegdec_src_event));
gst_pad_set_query_function (ffmpegdec->srcpad,
- GST_DEBUG_FUNCPTR (gst_ffmpegdec_query));
+ GST_DEBUG_FUNCPTR (gst_ffmpegdec_src_query));
gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->srcpad);
/* some ffmpeg data */
ffmpegdec->picture = avcodec_alloc_frame ();
ffmpegdec->pctx = NULL;
ffmpegdec->pcache = NULL;
- ffmpegdec->par = NULL;
ffmpegdec->opened = FALSE;
ffmpegdec->waiting_for_key = TRUE;
ffmpegdec->skip_frame = ffmpegdec->lowres = 0;
ffmpegdec->crop = DEFAULT_CROP;
ffmpegdec->max_threads = DEFAULT_MAX_THREADS;
- ffmpegdec->format.video.par_n = -1;
- ffmpegdec->format.video.fps_n = -1;
- ffmpegdec->format.video.old_fps_n = -1;
gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
-
- /* We initially assume downstream can allocate 16 bytes aligned buffers */
- ffmpegdec->can_allocate_aligned = TRUE;
}
static void
{
GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
- if (ffmpegdec->context != NULL) {
+ if (ffmpegdec->context != NULL)
av_free (ffmpegdec->context);
- ffmpegdec->context = NULL;
- }
- if (ffmpegdec->picture != NULL) {
+ if (ffmpegdec->picture != NULL)
av_free (ffmpegdec->picture);
- ffmpegdec->picture = NULL;
- }
+
+ if (ffmpegdec->pool)
+ gst_object_unref (ffmpegdec->pool);
G_OBJECT_CLASS (parent_class)->finalize (object);
}
static gboolean
-gst_ffmpegdec_query (GstPad * pad, GstQuery * query)
+gst_ffmpegdec_src_query (GstPad * pad, GstQuery * query)
{
GstFFMpegDec *ffmpegdec;
- GstPad *peer;
gboolean res;
ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
- res = FALSE;
-
- if ((peer = gst_pad_get_peer (ffmpegdec->sinkpad))) {
- /* just forward to peer */
- res = gst_pad_query (peer, query);
- gst_object_unref (peer);
- }
+ /* just forward to peer */
+ res = gst_pad_peer_query (ffmpegdec->sinkpad, query);
#if 0
{
GstFormat bfmt;
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_QOS:
{
+ GstQOSType type;
gdouble proportion;
GstClockTimeDiff diff;
GstClockTime timestamp;
- gst_event_parse_qos (event, &proportion, &diff, &timestamp);
+ gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
/* update our QoS values */
gst_ffmpegdec_update_qos (ffmpegdec, proportion, timestamp + diff);
GST_LOG_OBJECT (ffmpegdec, "closing ffmpeg codec");
- if (ffmpegdec->par) {
- g_free (ffmpegdec->par);
- ffmpegdec->par = NULL;
- }
-
if (ffmpegdec->context->priv_data)
gst_ffmpeg_avcodec_close (ffmpegdec->context);
ffmpegdec->opened = FALSE;
av_parser_close (ffmpegdec->pctx);
ffmpegdec->pctx = NULL;
}
-
- ffmpegdec->format.video.par_n = -1;
- ffmpegdec->format.video.fps_n = -1;
- ffmpegdec->format.video.old_fps_n = -1;
- ffmpegdec->format.video.interlaced = FALSE;
}
/* with LOCK */
switch (oclass->in_plugin->type) {
case AVMEDIA_TYPE_VIDEO:
- ffmpegdec->format.video.width = 0;
- ffmpegdec->format.video.height = 0;
- ffmpegdec->format.video.clip_width = -1;
- ffmpegdec->format.video.clip_height = -1;
- ffmpegdec->format.video.pix_fmt = PIX_FMT_NB;
- ffmpegdec->format.video.interlaced = FALSE;
+ /* clear values */
+ ffmpegdec->ctx_pix_fmt = PIX_FMT_NB;
+ ffmpegdec->ctx_width = 0;
+ ffmpegdec->ctx_height = 0;
+ ffmpegdec->ctx_ticks = 1;
+ ffmpegdec->ctx_time_n = 0;
+ ffmpegdec->ctx_time_d = 0;
+ ffmpegdec->ctx_par_n = 0;
+ ffmpegdec->ctx_par_d = 0;
break;
case AVMEDIA_TYPE_AUDIO:
ffmpegdec->format.audio.samplerate = 0;
}
gst_ffmpegdec_reset_ts (ffmpegdec);
- /* FIXME, reset_qos holds the LOCK */
- ffmpegdec->proportion = 0.0;
+ /* FIXME, reset_qos will take the LOCK and this function is already called
+ * with the LOCK */
+ ffmpegdec->proportion = 0.5;
ffmpegdec->earliest_time = -1;
return TRUE;
}
static gboolean
-gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps)
+gst_ffmpegdec_setcaps (GstFFMpegDec * ffmpegdec, GstCaps * caps)
{
- GstFFMpegDec *ffmpegdec;
GstFFMpegDecClass *oclass;
GstStructure *structure;
const GValue *par;
const GValue *fps;
gboolean ret = TRUE;
- ffmpegdec = (GstFFMpegDec *) (gst_pad_get_parent (pad));
oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
- GST_DEBUG_OBJECT (pad, "setcaps called");
+ GST_DEBUG_OBJECT (ffmpegdec, "setcaps called");
GST_OBJECT_LOCK (ffmpegdec);
structure = gst_caps_get_structure (caps, 0);
par = gst_structure_get_value (structure, "pixel-aspect-ratio");
- if (par) {
+ if (par != NULL && GST_VALUE_HOLDS_FRACTION (par)) {
+ ffmpegdec->in_par_n = gst_value_get_fraction_numerator (par);
+ ffmpegdec->in_par_d = gst_value_get_fraction_denominator (par);
GST_DEBUG_OBJECT (ffmpegdec, "sink caps have pixel-aspect-ratio of %d:%d",
- gst_value_get_fraction_numerator (par),
- gst_value_get_fraction_denominator (par));
- /* should be NULL */
- if (ffmpegdec->par)
- g_free (ffmpegdec->par);
- ffmpegdec->par = g_new0 (GValue, 1);
- gst_value_init_and_copy (ffmpegdec->par, par);
+ ffmpegdec->in_par_n, ffmpegdec->in_par_d);
+ } else {
+ GST_DEBUG_OBJECT (ffmpegdec, "no input pixel-aspect-ratio");
+ ffmpegdec->in_par_n = 0;
+ ffmpegdec->in_par_d = 0;
}
- /* get the framerate from incoming caps. fps_n is set to -1 when
+ /* get the framerate from incoming caps. fps_n is set to 0 when
* there is no valid framerate */
fps = gst_structure_get_value (structure, "framerate");
if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {
- ffmpegdec->format.video.fps_n = gst_value_get_fraction_numerator (fps);
- ffmpegdec->format.video.fps_d = gst_value_get_fraction_denominator (fps);
- GST_DEBUG_OBJECT (ffmpegdec, "Using framerate %d/%d from incoming caps",
- ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
+ ffmpegdec->in_fps_n = gst_value_get_fraction_numerator (fps);
+ ffmpegdec->in_fps_d = gst_value_get_fraction_denominator (fps);
+ GST_DEBUG_OBJECT (ffmpegdec, "sink caps have framerate of %d/%d",
+ ffmpegdec->in_fps_n, ffmpegdec->in_fps_d);
} else {
- ffmpegdec->format.video.fps_n = -1;
- GST_DEBUG_OBJECT (ffmpegdec, "Using framerate from codec");
+ GST_DEBUG_OBJECT (ffmpegdec, "no input framerate ");
+ ffmpegdec->in_fps_n = 0;
+ ffmpegdec->in_fps_d = 0;
}
/* figure out if we can use direct rendering */
ffmpegdec->current_dr = FALSE;
- ffmpegdec->extra_ref = FALSE;
if (ffmpegdec->direct_rendering) {
GST_DEBUG_OBJECT (ffmpegdec, "trying to enable direct rendering");
if (oclass->in_plugin->capabilities & CODEC_CAP_DR1) {
- if (oclass->in_plugin->id == CODEC_ID_H264) {
- GST_DEBUG_OBJECT (ffmpegdec, "disable direct rendering setup for H264");
- /* does not work, many stuff reads outside of the planes */
- ffmpegdec->current_dr = FALSE;
- ffmpegdec->extra_ref = TRUE;
- } else if ((oclass->in_plugin->id == CODEC_ID_SVQ1) ||
- (oclass->in_plugin->id == CODEC_ID_VP5) ||
- (oclass->in_plugin->id == CODEC_ID_VP6) ||
- (oclass->in_plugin->id == CODEC_ID_VP6F) ||
- (oclass->in_plugin->id == CODEC_ID_VP6A)) {
- GST_DEBUG_OBJECT (ffmpegdec,
- "disable direct rendering setup for broken stride support");
- /* does not work, uses a incompatible stride. See #610613 */
- ffmpegdec->current_dr = FALSE;
- ffmpegdec->extra_ref = TRUE;
- } else {
- GST_DEBUG_OBJECT (ffmpegdec, "enabled direct rendering");
- ffmpegdec->current_dr = TRUE;
- }
+ GST_DEBUG_OBJECT (ffmpegdec, "enabled direct rendering");
+ ffmpegdec->current_dr = TRUE;
} else {
GST_DEBUG_OBJECT (ffmpegdec, "direct rendering not supported");
}
}
- if (ffmpegdec->current_dr) {
- /* do *not* draw edges when in direct rendering, for some reason it draws
- * outside of the memory. */
- ffmpegdec->context->flags |= CODEC_FLAG_EMU_EDGE;
- }
/* for AAC we only use av_parse if not on stream-format==raw or ==loas */
if (oclass->in_plugin->id == CODEC_ID_AAC
if (!gst_ffmpegdec_open (ffmpegdec))
goto open_failed;
- /* clipping region */
- gst_structure_get_int (structure, "width",
- &ffmpegdec->format.video.clip_width);
- gst_structure_get_int (structure, "height",
- &ffmpegdec->format.video.clip_height);
-
- GST_DEBUG_OBJECT (pad, "clipping to %dx%d",
- ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
+ /* clipping region. take into account the lowres property */
+ if (gst_structure_get_int (structure, "width", &ffmpegdec->in_width))
+ ffmpegdec->in_width >>= ffmpegdec->lowres;
+ else
+ ffmpegdec->in_width = -1;
- /* take into account the lowres property */
- if (ffmpegdec->format.video.clip_width != -1)
- ffmpegdec->format.video.clip_width >>= ffmpegdec->lowres;
- if (ffmpegdec->format.video.clip_height != -1)
- ffmpegdec->format.video.clip_height >>= ffmpegdec->lowres;
+ if (gst_structure_get_int (structure, "height", &ffmpegdec->in_height))
+ ffmpegdec->in_height >>= ffmpegdec->lowres;
+ else
+ ffmpegdec->in_height = -1;
- GST_DEBUG_OBJECT (pad, "final clipping to %dx%d",
- ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
+ GST_DEBUG_OBJECT (ffmpegdec, "clipping to %dx%d",
+ ffmpegdec->in_width, ffmpegdec->in_height);
done:
GST_OBJECT_UNLOCK (ffmpegdec);
- gst_object_unref (ffmpegdec);
-
return ret;
/* ERRORS */
open_failed:
{
GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
- if (ffmpegdec->par) {
- g_free (ffmpegdec->par);
- ffmpegdec->par = NULL;
- }
ret = FALSE;
goto done;
}
}
-static GstFlowReturn
-alloc_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf,
- gint width, gint height)
+static void
+gst_ffmpegdec_fill_picture (GstFFMpegDec * ffmpegdec, GstVideoFrame * frame,
+ AVFrame * picture)
{
- GstFlowReturn ret;
- gint fsize;
-
- ret = GST_FLOW_ERROR;
- *outbuf = NULL;
+ guint i;
- GST_LOG_OBJECT (ffmpegdec, "alloc output buffer");
+ /* setup data pointers and strides */
+ for (i = 0; i < GST_VIDEO_FRAME_N_PLANES (frame); i++) {
+ picture->data[i] = GST_VIDEO_FRAME_PLANE_DATA (frame, i);
+ picture->linesize[i] = GST_VIDEO_FRAME_PLANE_STRIDE (frame, i);
- /* see if we need renegotiation */
- if (G_UNLIKELY (!gst_ffmpegdec_negotiate (ffmpegdec, FALSE)))
- goto negotiate_failed;
-
- /* get the size of the gstreamer output buffer given a
- * width/height/format */
- fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
- width, height);
-
- if (!ffmpegdec->context->palctrl && ffmpegdec->can_allocate_aligned) {
- GST_LOG_OBJECT (ffmpegdec, "calling pad_alloc");
- /* no pallete, we can use the buffer size to alloc */
- ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
- GST_BUFFER_OFFSET_NONE, fsize,
- GST_PAD_CAPS (ffmpegdec->srcpad), outbuf);
- if (G_UNLIKELY (ret != GST_FLOW_OK))
- goto alloc_failed;
-
- /* If buffer isn't 128-bit aligned, create a memaligned one ourselves */
- if (((uintptr_t) GST_BUFFER_DATA (*outbuf)) % 16) {
- GST_DEBUG_OBJECT (ffmpegdec,
- "Downstream can't allocate aligned buffers.");
- ffmpegdec->can_allocate_aligned = FALSE;
- gst_buffer_unref (*outbuf);
- *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
- }
- } else {
- GST_LOG_OBJECT (ffmpegdec,
- "not calling pad_alloc, we have a pallete or downstream can't give 16 byte aligned buffers.");
- /* for paletted data we can't use pad_alloc_buffer(), because
- * fsize contains the size of the palette, so the overall size
- * is bigger than ffmpegcolorspace's unit size, which will
- * prompt GstBaseTransform to complain endlessly ... */
- *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
- ret = GST_FLOW_OK;
- }
- /* set caps, we do this here because the buffer is still writable here and we
- * are sure to be negotiated */
- gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
-
- return ret;
-
- /* special cases */
-negotiate_failed:
- {
- GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
- return GST_FLOW_NOT_NEGOTIATED;
- }
-alloc_failed:
- {
- GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed %d (%s)", ret,
- gst_flow_get_name (ret));
- return ret;
+ GST_LOG_OBJECT (ffmpegdec, "plane %d: data %p, linesize %d", i,
+ picture->data[i], picture->linesize[i]);
}
}
+/* called when ffmpeg wants us to allocate a buffer to write the decoded frame
+ * into. We try to give it memory from our pool */
static int
gst_ffmpegdec_get_buffer (AVCodecContext * context, AVFrame * picture)
{
GstBuffer *buf = NULL;
GstFFMpegDec *ffmpegdec;
- gint width, height;
- gint coded_width, coded_height;
- gint res;
+ GstFlowReturn ret;
+ GstVideoFrame frame;
ffmpegdec = (GstFFMpegDec *) context->opaque;
/* make sure we don't free the buffer when it's not ours */
picture->opaque = NULL;
- /* take width and height before clipping */
- width = context->width;
- height = context->height;
- coded_width = context->coded_width;
- coded_height = context->coded_height;
-
- GST_LOG_OBJECT (ffmpegdec, "dimension %dx%d, coded %dx%d", width, height,
- coded_width, coded_height);
- if (!ffmpegdec->current_dr) {
- GST_LOG_OBJECT (ffmpegdec, "direct rendering disabled, fallback alloc");
- res = avcodec_default_get_buffer (context, picture);
-
- GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", picture->linesize[0],
- picture->linesize[1], picture->linesize[2]);
- GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
- (guint) (picture->data[1] - picture->data[0]),
- (guint) (picture->data[2] - picture->data[0]));
- return res;
- }
-
- switch (context->codec_type) {
- case AVMEDIA_TYPE_VIDEO:
- /* some ffmpeg video plugins don't see the point in setting codec_type ... */
- case AVMEDIA_TYPE_UNKNOWN:
- {
- GstFlowReturn ret;
- gint clip_width, clip_height;
-
- /* take final clipped output size */
- if ((clip_width = ffmpegdec->format.video.clip_width) == -1)
- clip_width = width;
- if ((clip_height = ffmpegdec->format.video.clip_height) == -1)
- clip_height = height;
-
- GST_LOG_OBJECT (ffmpegdec, "raw outsize %d/%d", width, height);
-
- /* this is the size ffmpeg needs for the buffer */
- avcodec_align_dimensions (context, &width, &height);
+ /* see if we need renegotiation */
+ if (G_UNLIKELY (!gst_ffmpegdec_video_negotiate (ffmpegdec, FALSE)))
+ goto negotiate_failed;
- GST_LOG_OBJECT (ffmpegdec, "aligned outsize %d/%d, clip %d/%d",
- width, height, clip_width, clip_height);
+ if (!ffmpegdec->current_dr)
+ goto no_dr;
- if (width != clip_width || height != clip_height) {
- /* We can't alloc if we need to clip the output buffer later */
- GST_LOG_OBJECT (ffmpegdec, "we need clipping, fallback alloc");
- return avcodec_default_get_buffer (context, picture);
- }
+ /* alloc with aligned dimensions for ffmpeg */
+ GST_LOG_OBJECT (ffmpegdec, "doing alloc from pool");
+ ret = gst_buffer_pool_acquire_buffer (ffmpegdec->pool, &buf, NULL);
+ if (G_UNLIKELY (ret != GST_FLOW_OK))
+ goto alloc_failed;
- /* alloc with aligned dimensions for ffmpeg */
- ret = alloc_output_buffer (ffmpegdec, &buf, width, height);
- if (G_UNLIKELY (ret != GST_FLOW_OK)) {
- /* alloc default buffer when we can't get one from downstream */
- GST_LOG_OBJECT (ffmpegdec, "alloc failed, fallback alloc");
- return avcodec_default_get_buffer (context, picture);
- }
+ if (!gst_video_frame_map (&frame, &ffmpegdec->out_info, buf,
+ GST_MAP_READWRITE))
+ goto invalid_frame;
- /* copy the right pointers and strides in the picture object */
- gst_ffmpeg_avpicture_fill ((AVPicture *) picture,
- GST_BUFFER_DATA (buf), context->pix_fmt, width, height);
- break;
- }
- case AVMEDIA_TYPE_AUDIO:
- default:
- GST_ERROR_OBJECT (ffmpegdec,
- "_get_buffer() should never get called for non-video buffers !");
- g_assert_not_reached ();
- break;
- }
+ gst_ffmpegdec_fill_picture (ffmpegdec, &frame, picture);
/* tell ffmpeg we own this buffer, tranfer the ref we have on the buffer to
* the opaque data. */
picture->type = FF_BUFFER_TYPE_USER;
picture->age = 256 * 256 * 256 * 64;
- picture->opaque = buf;
+ picture->opaque = g_slice_dup (GstVideoFrame, &frame);
-#ifdef EXTRA_REF
- if (picture->reference != 0 || ffmpegdec->extra_ref) {
- GST_DEBUG_OBJECT (ffmpegdec, "adding extra ref");
- gst_buffer_ref (buf);
- }
-#endif
-
- GST_LOG_OBJECT (ffmpegdec, "returned buffer %p", buf);
+ GST_LOG_OBJECT (ffmpegdec, "returned buffer %p in frame %p", buf,
+ picture->opaque);
return 0;
+
+ /* fallbacks */
+negotiate_failed:
+ {
+ GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
+ goto fallback;
+ }
+no_dr:
+ {
+ GST_LOG_OBJECT (ffmpegdec, "direct rendering disabled, fallback alloc");
+ goto fallback;
+ }
+alloc_failed:
+ {
+ /* alloc default buffer when we can't get one from downstream */
+ GST_LOG_OBJECT (ffmpegdec, "alloc failed, fallback alloc");
+ goto fallback;
+ }
+invalid_frame:
+ {
+ /* alloc default buffer when we can't get one from downstream */
+ GST_LOG_OBJECT (ffmpegdec, "failed to map frame, fallback alloc");
+ gst_buffer_unref (buf);
+ goto fallback;
+ }
+fallback:
+ {
+ return avcodec_default_get_buffer (context, picture);
+ }
}
+/* called when ffmpeg is done with our buffer */
static void
gst_ffmpegdec_release_buffer (AVCodecContext * context, AVFrame * picture)
{
gint i;
GstBuffer *buf;
GstFFMpegDec *ffmpegdec;
+ GstVideoFrame *frame;
ffmpegdec = (GstFFMpegDec *) context->opaque;
}
/* we remove the opaque data now */
- buf = GST_BUFFER_CAST (picture->opaque);
- GST_DEBUG_OBJECT (ffmpegdec, "release buffer %p", buf);
+ frame = picture->opaque;
picture->opaque = NULL;
-#ifdef EXTRA_REF
- if (picture->reference != 0 || ffmpegdec->extra_ref) {
- GST_DEBUG_OBJECT (ffmpegdec, "remove extra ref");
- gst_buffer_unref (buf);
- }
-#else
+ /* unmap buffer data */
+ gst_video_frame_unmap (frame);
+ buf = frame->buffer;
+
+ GST_DEBUG_OBJECT (ffmpegdec, "release buffer %p in frame %p", buf, frame);
+
+ g_slice_free (GstVideoFrame, frame);
gst_buffer_unref (buf);
-#endif
/* zero out the reference in ffmpeg */
for (i = 0; i < 4; i++) {
}
static void
-gst_ffmpegdec_add_pixel_aspect_ratio (GstFFMpegDec * ffmpegdec,
- GstStructure * s)
+gst_ffmpegdec_update_par (GstFFMpegDec * ffmpegdec, gint * par_n, gint * par_d)
{
gboolean demuxer_par_set = FALSE;
gboolean decoder_par_set = FALSE;
GST_OBJECT_LOCK (ffmpegdec);
- if (ffmpegdec->par) {
- demuxer_num = gst_value_get_fraction_numerator (ffmpegdec->par);
- demuxer_denom = gst_value_get_fraction_denominator (ffmpegdec->par);
+ if (ffmpegdec->in_par_n && ffmpegdec->in_par_d) {
+ demuxer_num = ffmpegdec->in_par_n;
+ demuxer_denom = ffmpegdec->in_par_d;
demuxer_par_set = TRUE;
GST_DEBUG_OBJECT (ffmpegdec, "Demuxer PAR: %d:%d", demuxer_num,
demuxer_denom);
}
- if (ffmpegdec->context->sample_aspect_ratio.num &&
- ffmpegdec->context->sample_aspect_ratio.den) {
- decoder_num = ffmpegdec->context->sample_aspect_ratio.num;
- decoder_denom = ffmpegdec->context->sample_aspect_ratio.den;
+ if (ffmpegdec->ctx_par_n && ffmpegdec->ctx_par_d) {
+ decoder_num = ffmpegdec->ctx_par_n;
+ decoder_denom = ffmpegdec->ctx_par_d;
decoder_par_set = TRUE;
GST_DEBUG_OBJECT (ffmpegdec, "Decoder PAR: %d:%d", decoder_num,
decoder_denom);
GST_DEBUG_OBJECT (ffmpegdec,
"Setting decoder provided pixel-aspect-ratio of %u:%u", decoder_num,
decoder_denom);
- gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, decoder_num,
- decoder_denom, NULL);
+ *par_n = decoder_num;
+ *par_d = decoder_denom;
return;
}
GST_DEBUG_OBJECT (ffmpegdec,
"Setting demuxer provided pixel-aspect-ratio of %u:%u", demuxer_num,
demuxer_denom);
- gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, demuxer_num,
- demuxer_denom, NULL);
+ *par_n = demuxer_num;
+ *par_d = demuxer_denom;
return;
}
no_par:
{
GST_DEBUG_OBJECT (ffmpegdec,
"Neither demuxer nor codec provide a pixel-aspect-ratio");
+ *par_n = 1;
+ *par_d = 1;
return;
}
}
static gboolean
-gst_ffmpegdec_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
+gst_ffmpegdec_bufferpool (GstFFMpegDec * ffmpegdec, GstCaps * caps)
+{
+ GstQuery *query;
+ GstBufferPool *pool = NULL;
+ guint size, min, max, prefix, alignment;
+ GstStructure *config;
+ guint edge;
+ AVCodecContext *context = ffmpegdec->context;
+
+ GST_DEBUG_OBJECT (ffmpegdec, "setting up bufferpool");
+
+ /* find a pool for the negotiated caps now */
+ query = gst_query_new_allocation (caps, TRUE);
+
+ if (gst_pad_peer_query (ffmpegdec->srcpad, query)) {
+ /* we got configuration from our peer, parse them */
+ gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
+ &alignment, &pool);
+ size = MAX (size, ffmpegdec->out_info.size);
+ } else {
+ GST_DEBUG_OBJECT (ffmpegdec, "peer query failed, using defaults");
+ size = ffmpegdec->out_info.size;
+ min = max = 0;
+ prefix = 0;
+ alignment = 15;
+ }
+
+ gst_query_unref (query);
+
+ if (pool == NULL) {
+ /* we did not get a pool, make one ourselves then */
+ pool = gst_buffer_pool_new ();
+ }
+
+ config = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_set (config, caps, size, min, max, prefix,
+ alignment | 15);
+
+ if (gst_buffer_pool_has_option (pool, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT)) {
+ GstVideoAlignment align;
+ gint width, height;
+ gint linesize_align[4];
+
+ width = ffmpegdec->ctx_width;
+ height = ffmpegdec->ctx_height;
+ /* let ffmpeg find the alignment and padding */
+ avcodec_align_dimensions2 (context, &width, &height, linesize_align);
+ edge = context->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width ();
+ /* increase the size for the padding */
+ width += edge << 1;
+ height += edge << 1;
+
+ align.padding_top = edge;
+ align.padding_left = edge;
+ align.padding_right = width - ffmpegdec->ctx_width - edge;
+ align.padding_bottom = height - ffmpegdec->ctx_height - edge;
+
+ GST_DEBUG_OBJECT (ffmpegdec, "aligned dimension %dx%d -> %dx%d "
+ "padding t:%u l:%u r:%u b:%u",
+ ffmpegdec->ctx_width, ffmpegdec->ctx_height, width, height,
+ align.padding_top, align.padding_left, align.padding_right,
+ align.padding_bottom);
+
+ gst_buffer_pool_config_add_option (config,
+ GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
+ gst_buffer_pool_config_set_video_alignment (config, &align);
+ } else {
+ GST_DEBUG_OBJECT (ffmpegdec,
+ "alignment not supported, disable direct rendering");
+ /* disable direct rendering. This will make us use the fallback ffmpeg
+ * picture allocation code with padding etc. We will then do the final
+ * copy (with cropping) into a buffer from our pool */
+ ffmpegdec->current_dr = FALSE;
+ }
+ /* and store */
+ gst_buffer_pool_set_config (pool, config);
+
+ if (ffmpegdec->pool) {
+ gst_buffer_pool_set_active (ffmpegdec->pool, FALSE);
+ gst_object_unref (ffmpegdec->pool);
+ }
+ ffmpegdec->pool = pool;
+
+ /* and activate */
+ gst_buffer_pool_set_active (pool, TRUE);
+
+ return TRUE;
+}
+
+static gboolean
+update_video_context (GstFFMpegDec * ffmpegdec, gboolean force)
+{
+ AVCodecContext *context = ffmpegdec->context;
+
+ if (!force && ffmpegdec->ctx_width == context->width
+ && ffmpegdec->ctx_height == context->height
+ && ffmpegdec->ctx_ticks == context->ticks_per_frame
+ && ffmpegdec->ctx_time_n == context->time_base.num
+ && ffmpegdec->ctx_time_d == context->time_base.den
+ && ffmpegdec->ctx_pix_fmt == context->pix_fmt
+ && ffmpegdec->ctx_par_n == context->sample_aspect_ratio.num
+ && ffmpegdec->ctx_par_d == context->sample_aspect_ratio.den)
+ return FALSE;
+
+ GST_DEBUG_OBJECT (ffmpegdec,
+ "Renegotiating video from %dx%d@ %d:%d PAR %d/%d fps to %dx%d@ %d:%d PAR %d/%d fps",
+ ffmpegdec->ctx_width, ffmpegdec->ctx_height,
+ ffmpegdec->ctx_par_n, ffmpegdec->ctx_par_d,
+ ffmpegdec->ctx_time_n, ffmpegdec->ctx_time_d,
+ context->width, context->height,
+ context->sample_aspect_ratio.num,
+ context->sample_aspect_ratio.den,
+ context->time_base.num, context->time_base.den);
+
+ ffmpegdec->ctx_width = context->width;
+ ffmpegdec->ctx_height = context->height;
+ ffmpegdec->ctx_ticks = context->ticks_per_frame;
+ ffmpegdec->ctx_time_n = context->time_base.num;
+ ffmpegdec->ctx_time_d = context->time_base.den;
+ ffmpegdec->ctx_pix_fmt = context->pix_fmt;
+ ffmpegdec->ctx_par_n = context->sample_aspect_ratio.num;
+ ffmpegdec->ctx_par_d = context->sample_aspect_ratio.den;
+
+ return TRUE;
+}
+
+static gboolean
+gst_ffmpegdec_video_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
{
GstFFMpegDecClass *oclass;
GstCaps *caps;
+ gint width, height;
+ gint fps_n, fps_d;
+ GstVideoInfo info;
+ GstVideoFormat fmt;
oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
- switch (oclass->in_plugin->type) {
- case AVMEDIA_TYPE_VIDEO:
- if (!force && ffmpegdec->format.video.width == ffmpegdec->context->width
- && ffmpegdec->format.video.height == ffmpegdec->context->height
- && ffmpegdec->format.video.fps_n == ffmpegdec->format.video.old_fps_n
- && ffmpegdec->format.video.fps_d == ffmpegdec->format.video.old_fps_d
- && ffmpegdec->format.video.pix_fmt == ffmpegdec->context->pix_fmt
- && ffmpegdec->format.video.par_n ==
- ffmpegdec->context->sample_aspect_ratio.num
- && ffmpegdec->format.video.par_d ==
- ffmpegdec->context->sample_aspect_ratio.den)
- return TRUE;
- GST_DEBUG_OBJECT (ffmpegdec,
- "Renegotiating video from %dx%d@ %d:%d PAR %d/%d fps to %dx%d@ %d:%d PAR %d/%d fps",
- ffmpegdec->format.video.width, ffmpegdec->format.video.height,
- ffmpegdec->format.video.par_n, ffmpegdec->format.video.par_d,
- ffmpegdec->format.video.old_fps_n, ffmpegdec->format.video.old_fps_n,
- ffmpegdec->context->width, ffmpegdec->context->height,
- ffmpegdec->context->sample_aspect_ratio.num,
- ffmpegdec->context->sample_aspect_ratio.den,
- ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
- ffmpegdec->format.video.width = ffmpegdec->context->width;
- ffmpegdec->format.video.height = ffmpegdec->context->height;
- ffmpegdec->format.video.old_fps_n = ffmpegdec->format.video.fps_n;
- ffmpegdec->format.video.old_fps_d = ffmpegdec->format.video.fps_d;
- ffmpegdec->format.video.pix_fmt = ffmpegdec->context->pix_fmt;
- ffmpegdec->format.video.par_n =
- ffmpegdec->context->sample_aspect_ratio.num;
- ffmpegdec->format.video.par_d =
- ffmpegdec->context->sample_aspect_ratio.den;
- break;
- case AVMEDIA_TYPE_AUDIO:
- {
- gint depth = av_smp_format_depth (ffmpegdec->context->sample_fmt);
- if (!force && ffmpegdec->format.audio.samplerate ==
- ffmpegdec->context->sample_rate &&
- ffmpegdec->format.audio.channels == ffmpegdec->context->channels &&
- ffmpegdec->format.audio.depth == depth)
- return TRUE;
- GST_DEBUG_OBJECT (ffmpegdec,
- "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
- ffmpegdec->format.audio.samplerate, ffmpegdec->format.audio.channels,
- ffmpegdec->format.audio.depth,
- ffmpegdec->context->sample_rate, ffmpegdec->context->channels, depth);
- ffmpegdec->format.audio.samplerate = ffmpegdec->context->sample_rate;
- ffmpegdec->format.audio.channels = ffmpegdec->context->channels;
- ffmpegdec->format.audio.depth = depth;
+ force |= gst_pad_check_reconfigure (ffmpegdec->srcpad);
+
+ /* first check if anything changed */
+ if (!update_video_context (ffmpegdec, force))
+ return TRUE;
+
+ /* now we're going to construct the video info for the final output
+ * format */
+ gst_video_info_init (&info);
+
+ fmt = gst_ffmpeg_pixfmt_to_video_format (ffmpegdec->ctx_pix_fmt);
+ if (fmt == GST_VIDEO_FORMAT_UNKNOWN)
+ goto unknown_format;
+
+ /* determine the width and height, start with the dimension of the
+ * context */
+ width = ffmpegdec->ctx_width;
+ height = ffmpegdec->ctx_height;
+
+ /* if there is a width/height specified in the input, use that */
+ if (ffmpegdec->in_width != -1 && ffmpegdec->in_width < width)
+ width = ffmpegdec->in_width;
+ if (ffmpegdec->in_height != -1 && ffmpegdec->in_height < height)
+ height = ffmpegdec->in_height;
+
+ /* now store the values */
+ gst_video_info_set_format (&info, fmt, width, height);
+
+ /* set the interlaced flag */
+ if (ffmpegdec->ctx_interlaced)
+ info.flags |= GST_VIDEO_FLAG_INTERLACED;
+ else
+ info.flags &= ~GST_VIDEO_FLAG_INTERLACED;
+
+ /* try to find a good framerate */
+ if (ffmpegdec->in_fps_d) {
+ /* take framerate from input when it was specified (#313970) */
+ fps_n = ffmpegdec->in_fps_n;
+ fps_d = ffmpegdec->in_fps_d;
+ } else {
+ fps_n = ffmpegdec->ctx_time_d / ffmpegdec->ctx_ticks;
+ fps_d = ffmpegdec->ctx_time_n;
+
+ if (!fps_d) {
+ GST_LOG_OBJECT (ffmpegdec, "invalid framerate: %d/0, -> %d/1", fps_n,
+ fps_n);
+ fps_d = 1;
+ }
+ if (gst_util_fraction_compare (fps_n, fps_d, 1000, 1) > 0) {
+ GST_LOG_OBJECT (ffmpegdec, "excessive framerate: %d/%d, -> 0/1", fps_n,
+ fps_d);
+ fps_n = 0;
+ fps_d = 1;
}
- break;
- default:
- break;
}
+ GST_LOG_OBJECT (ffmpegdec, "setting framerate: %d/%d", fps_n, fps_d);
+ info.fps_n = fps_n;
+ info.fps_d = fps_d;
+
+ /* calculate and update par now */
+ gst_ffmpegdec_update_par (ffmpegdec, &info.par_n, &info.par_d);
+
+ caps = gst_video_info_to_caps (&info);
+
+ if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
+ goto caps_failed;
+
+ ffmpegdec->out_info = info;
+
+ /* now figure out a bufferpool */
+ if (!gst_ffmpegdec_bufferpool (ffmpegdec, caps))
+ goto no_bufferpool;
+
+ gst_caps_unref (caps);
+ return TRUE;
+
+ /* ERRORS */
+unknown_format:
+ {
+#ifdef HAVE_FFMPEG_UNINSTALLED
+ /* using internal ffmpeg snapshot */
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
+ ("Could not find GStreamer caps mapping for FFmpeg codec '%s'.",
+ oclass->in_plugin->name), (NULL));
+#else
+ /* using external ffmpeg */
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
+ ("Could not find GStreamer caps mapping for FFmpeg codec '%s', and "
+ "you are using an external libavcodec. This is most likely due to "
+ "a packaging problem and/or libavcodec having been upgraded to a "
+ "version that is not compatible with this version of "
+ "gstreamer-ffmpeg. Make sure your gstreamer-ffmpeg and libavcodec "
+ "packages come from the same source/repository.",
+ oclass->in_plugin->name), (NULL));
+#endif
+ return FALSE;
+ }
+caps_failed:
+ {
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
+ ("Could not set caps for ffmpeg decoder (%s), not fixed?",
+ oclass->in_plugin->name));
+ gst_caps_unref (caps);
+
+ return FALSE;
+ }
+no_bufferpool:
+ {
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
+ ("Could not create bufferpool for ffmpeg decoder (%s)",
+ oclass->in_plugin->name));
+ gst_caps_unref (caps);
+
+ return FALSE;
+ }
+}
+
+/* Compare the decoder's cached audio parameters (samplerate, channels,
+ * depth) against the current AVCodecContext and refresh the cache when
+ * they differ.  Returns TRUE when something changed (or @force is set),
+ * i.e. the caller must renegotiate the source pad caps; FALSE when the
+ * cached format is still valid and negotiation can be skipped. */
+static gboolean
+update_audio_context (GstFFMpegDec * ffmpegdec, gboolean force)
+{
+  AVCodecContext *context = ffmpegdec->context;
+  gint depth;
+
+  /* sample depth derived from the codec's output sample format
+   * (via av_smp_format_depth -- presumably bytes per sample, TODO confirm) */
+  depth = av_smp_format_depth (context->sample_fmt);
+
+  /* nothing changed and no forced renegotiation requested: bail out early */
+  if (!force && ffmpegdec->format.audio.samplerate ==
+      context->sample_rate &&
+      ffmpegdec->format.audio.channels == context->channels &&
+      ffmpegdec->format.audio.depth == depth)
+    return FALSE;
+
+  GST_DEBUG_OBJECT (ffmpegdec,
+      "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
+      ffmpegdec->format.audio.samplerate, ffmpegdec->format.audio.channels,
+      ffmpegdec->format.audio.depth,
+      context->sample_rate, context->channels, depth);
+
+  /* remember the new format so the next call can detect further changes */
+  ffmpegdec->format.audio.samplerate = context->sample_rate;
+  ffmpegdec->format.audio.channels = context->channels;
+  ffmpegdec->format.audio.depth = depth;
+
+  return TRUE;
+}
+
+static gboolean
+gst_ffmpegdec_audio_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
+{
+ GstFFMpegDecClass *oclass;
+ GstCaps *caps;
+
+ oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+
+ if (!update_audio_context (ffmpegdec, force))
+ return TRUE;
+
+ /* convert the raw output format to caps */
caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
ffmpegdec->context, oclass->in_plugin->id, FALSE);
-
if (caps == NULL)
goto no_caps;
- switch (oclass->in_plugin->type) {
- case AVMEDIA_TYPE_VIDEO:
- {
- gint width, height;
- gboolean interlaced;
-
- width = ffmpegdec->format.video.clip_width;
- height = ffmpegdec->format.video.clip_height;
- interlaced = ffmpegdec->format.video.interlaced;
-
- if (width != -1 && height != -1) {
- /* overwrite the output size with the dimension of the
- * clipping region but only if they are smaller. */
- if (width < ffmpegdec->context->width)
- gst_caps_set_simple (caps, "width", G_TYPE_INT, width, NULL);
- if (height < ffmpegdec->context->height)
- gst_caps_set_simple (caps, "height", G_TYPE_INT, height, NULL);
- }
- gst_caps_set_simple (caps, "interlaced", G_TYPE_BOOLEAN, interlaced,
- NULL);
-
- /* If a demuxer provided a framerate then use it (#313970) */
- if (ffmpegdec->format.video.fps_n != -1) {
- gst_caps_set_simple (caps, "framerate",
- GST_TYPE_FRACTION, ffmpegdec->format.video.fps_n,
- ffmpegdec->format.video.fps_d, NULL);
- }
- gst_ffmpegdec_add_pixel_aspect_ratio (ffmpegdec,
- gst_caps_get_structure (caps, 0));
- break;
- }
- case AVMEDIA_TYPE_AUDIO:
- {
- break;
- }
- default:
- break;
- }
+ GST_LOG_OBJECT (ffmpegdec, "output caps %" GST_PTR_FORMAT, caps);
if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
goto caps_failed;
GstClockTime in_dur)
{
gboolean res = TRUE;
- gint64 cstart, cstop;
+ guint64 cstart, cstop;
GstClockTime stop;
GST_LOG_OBJECT (dec,
{
GstFlowReturn ret;
- ret = GST_FLOW_OK;
- *outbuf = NULL;
-
if (ffmpegdec->picture->opaque != NULL) {
+ GstVideoFrame *frame;
+
/* we allocated a picture already for ffmpeg to decode into, let's pick it
* up and use it now. */
- *outbuf = (GstBuffer *) ffmpegdec->picture->opaque;
- GST_LOG_OBJECT (ffmpegdec, "using opaque buffer %p", *outbuf);
-#ifndef EXTRA_REF
+ frame = ffmpegdec->picture->opaque;
+ *outbuf = frame->buffer;
+ GST_LOG_OBJECT (ffmpegdec, "using opaque buffer %p on frame %p", *outbuf,
+ frame);
gst_buffer_ref (*outbuf);
-#endif
} else {
- AVPicture pic, *outpic;
+ GstVideoFrame frame;
+ AVPicture *src, *dest;
+ AVFrame pic;
gint width, height;
+ GstBuffer *buf;
- GST_LOG_OBJECT (ffmpegdec, "get output buffer");
-
- /* figure out size of output buffer, this is the clipped output size because
- * we will copy the picture into it but only when the clipping region is
- * smaller than the actual picture size. */
- if ((width = ffmpegdec->format.video.clip_width) == -1)
- width = ffmpegdec->context->width;
- else if (width > ffmpegdec->context->width)
- width = ffmpegdec->context->width;
-
- if ((height = ffmpegdec->format.video.clip_height) == -1)
- height = ffmpegdec->context->height;
- else if (height > ffmpegdec->context->height)
- height = ffmpegdec->context->height;
+ GST_LOG_OBJECT (ffmpegdec, "allocating an output buffer");
- GST_LOG_OBJECT (ffmpegdec, "clip width %d/height %d", width, height);
+ if (G_UNLIKELY (!gst_ffmpegdec_video_negotiate (ffmpegdec, FALSE)))
+ goto negotiate_failed;
- ret = alloc_output_buffer (ffmpegdec, outbuf, width, height);
+ ret = gst_buffer_pool_acquire_buffer (ffmpegdec->pool, &buf, NULL);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto alloc_failed;
- /* original ffmpeg code does not handle odd sizes correctly.
- * This patched up version does */
- gst_ffmpeg_avpicture_fill (&pic, GST_BUFFER_DATA (*outbuf),
- ffmpegdec->context->pix_fmt, width, height);
+ if (!gst_video_frame_map (&frame, &ffmpegdec->out_info, buf,
+ GST_MAP_READWRITE))
+ goto invalid_frame;
- outpic = (AVPicture *) ffmpegdec->picture;
+ gst_ffmpegdec_fill_picture (ffmpegdec, &frame, &pic);
- GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", outpic->linesize[0],
- outpic->linesize[1], outpic->linesize[2]);
- GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
- (guint) (outpic->data[1] - outpic->data[0]),
- (guint) (outpic->data[2] - outpic->data[0]));
+ width = ffmpegdec->out_info.width;
+ height = ffmpegdec->out_info.height;
- av_picture_copy (&pic, outpic, ffmpegdec->context->pix_fmt, width, height);
+ src = (AVPicture *) ffmpegdec->picture;
+ dest = (AVPicture *) & pic;
+
+ GST_LOG_OBJECT (ffmpegdec, "copy picture to output buffer %dx%d", width,
+ height);
+ av_picture_copy (dest, src, ffmpegdec->context->pix_fmt, width, height);
+
+ gst_video_frame_unmap (&frame);
+
+ *outbuf = buf;
}
ffmpegdec->picture->reordered_opaque = -1;
- return ret;
+ return GST_FLOW_OK;
/* special cases */
+negotiate_failed:
+ {
+ GST_DEBUG_OBJECT (ffmpegdec, "negotiation failed");
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
alloc_failed:
{
- GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed");
+ GST_DEBUG_OBJECT (ffmpegdec, "buffer alloc failed");
return ret;
}
+invalid_frame:
+ {
+ GST_DEBUG_OBJECT (ffmpegdec, "could not map frame");
+ return GST_FLOW_ERROR;
+ }
}
static void
ffmpegdec->picture->interlaced_frame);
if (G_UNLIKELY (ffmpegdec->picture->interlaced_frame !=
- ffmpegdec->format.video.interlaced)) {
+ ffmpegdec->ctx_interlaced)) {
GST_WARNING ("Change in interlacing ! picture:%d, recorded:%d",
- ffmpegdec->picture->interlaced_frame,
- ffmpegdec->format.video.interlaced);
- ffmpegdec->format.video.interlaced = ffmpegdec->picture->interlaced_frame;
- gst_ffmpegdec_negotiate (ffmpegdec, TRUE);
+ ffmpegdec->picture->interlaced_frame, ffmpegdec->ctx_interlaced);
+ ffmpegdec->ctx_interlaced = ffmpegdec->picture->interlaced_frame;
+ gst_ffmpegdec_video_negotiate (ffmpegdec, TRUE);
}
-
-
+#if 0
/* Whether a frame is interlaced or not is unknown at the time of
buffer allocation, so caps on the buffer in opaque will have
the previous frame's interlaced flag set. So if interlacedness
GstStructure *s = gst_caps_get_structure (GST_BUFFER_CAPS (buffer), 0);
gboolean interlaced;
gboolean found = gst_structure_get_boolean (s, "interlaced", &interlaced);
- if (!found || (!!interlaced != !!ffmpegdec->format.video.interlaced)) {
+ if (!found || (! !interlaced != ! !ffmpegdec->format.video.interlaced)) {
GST_DEBUG_OBJECT (ffmpegdec,
"Buffer interlacing does not match pad, updating");
buffer = gst_buffer_make_metadata_writable (buffer);
}
}
}
+#endif
/* check if we are dealing with a keyframe here, this will also check if we
* are dealing with B frames. */
/* out_offset already contains the offset from ts_info */
GST_LOG_OBJECT (ffmpegdec, "Using offset returned by ffmpeg");
} else if (out_timestamp != GST_CLOCK_TIME_NONE) {
- GstFormat out_fmt = GST_FORMAT_DEFAULT;
GST_LOG_OBJECT (ffmpegdec, "Using offset converted from timestamp");
/* FIXME, we should really remove this as it's not nice at all to do
* upstream queries for each frame to get the frame offset. We also can't
* on outgoing buffers. We should have metadata so that the upstream peer
* can set a frame number on the encoded data. */
gst_pad_query_peer_convert (ffmpegdec->sinkpad,
- GST_FORMAT_TIME, out_timestamp, &out_fmt, &out_offset);
+ GST_FORMAT_TIME, out_timestamp, GST_FORMAT_DEFAULT, &out_offset);
} else if (dec_info->offset != GST_BUFFER_OFFSET_NONE) {
/* FIXME, the input offset is input media specific and might not
* be the same for the output media. (byte offset as input, frame number
out_duration = ffmpegdec->last_diff;
} else {
/* if we have an input framerate, use that */
- if (ffmpegdec->format.video.fps_n != -1 &&
- (ffmpegdec->format.video.fps_n != 1000 &&
- ffmpegdec->format.video.fps_d != 1)) {
+ if (ffmpegdec->out_info.fps_n != -1 &&
+ (ffmpegdec->out_info.fps_n != 1000 && ffmpegdec->out_info.fps_d != 1)) {
GST_LOG_OBJECT (ffmpegdec, "using input framerate for duration");
out_duration = gst_util_uint64_scale_int (GST_SECOND,
- ffmpegdec->format.video.fps_d, ffmpegdec->format.video.fps_n);
+ ffmpegdec->out_info.fps_d, ffmpegdec->out_info.fps_n);
} else {
/* don't try to use the decoder's framerate when it seems a bit abnormal,
* which we assume when den >= 1000... */
/* palette is not part of raw video frame in gst and the size
* of the outgoing buffer needs to be adjusted accordingly */
- if (ffmpegdec->context->palctrl != NULL)
- GST_BUFFER_SIZE (*outbuf) -= AVPALETTE_SIZE;
+ if (ffmpegdec->context->palctrl != NULL) {
+
+ gst_buffer_resize (*outbuf, 0,
+ gst_buffer_get_size (*outbuf) - AVPALETTE_SIZE);
+ }
/* now see if we need to clip the buffer against the segment boundaries. */
if (G_UNLIKELY (!clip_video_buffer (ffmpegdec, *outbuf, out_timestamp,
GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
if (ffmpegdec->picture->top_field_first)
- GST_BUFFER_FLAG_SET (*outbuf, GST_VIDEO_BUFFER_TFF);
+ GST_BUFFER_FLAG_SET (*outbuf, GST_VIDEO_BUFFER_FLAG_TFF);
beach:
GstClockTime in_dur)
{
GstClockTime stop;
- gint64 diff, ctime, cstop;
+ gint64 diff;
+ guint64 ctime, cstop;
gboolean res = TRUE;
+ gsize size, offset;
+
+ size = gst_buffer_get_size (buf);
+ offset = 0;
GST_LOG_OBJECT (dec,
"timestamp:%" GST_TIME_FORMAT ", duration:%" GST_TIME_FORMAT
- ", size %u", GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
- GST_BUFFER_SIZE (buf));
+ ", size %" G_GSIZE_FORMAT, GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
+ size);
/* can't clip without TIME segment */
if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
GST_DEBUG_OBJECT (dec, "clipping start to %" GST_TIME_FORMAT " %"
G_GINT64_FORMAT " bytes", GST_TIME_ARGS (ctime), diff);
- GST_BUFFER_SIZE (buf) -= diff;
- GST_BUFFER_DATA (buf) += diff;
+ offset += diff;
+ size -= diff;
}
if (G_UNLIKELY ((diff = stop - cstop) > 0)) {
/* bring clipped time to bytes */
GST_DEBUG_OBJECT (dec, "clipping stop to %" GST_TIME_FORMAT " %"
G_GINT64_FORMAT " bytes", GST_TIME_ARGS (cstop), diff);
- GST_BUFFER_SIZE (buf) -= diff;
+ size -= diff;
}
+ gst_buffer_resize (buf, offset, size);
GST_BUFFER_TIMESTAMP (buf) = ctime;
GST_BUFFER_DURATION (buf) = cstop - ctime;
gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
GstClockTime out_timestamp, out_duration;
gint64 out_offset;
+ int16_t *odata;
AVPacket packet;
GST_DEBUG_OBJECT (ffmpegdec,
dec_info->offset, GST_TIME_ARGS (dec_info->timestamp),
GST_TIME_ARGS (dec_info->duration), GST_TIME_ARGS (ffmpegdec->next_out));
- *outbuf =
- new_aligned_buffer (AVCODEC_MAX_AUDIO_FRAME_SIZE,
- GST_PAD_CAPS (ffmpegdec->srcpad));
+ *outbuf = new_aligned_buffer (AVCODEC_MAX_AUDIO_FRAME_SIZE);
+
+ odata = gst_buffer_map (*outbuf, NULL, NULL, GST_MAP_WRITE);
gst_avpacket_init (&packet, data, size);
- len = avcodec_decode_audio3 (ffmpegdec->context,
- (int16_t *) GST_BUFFER_DATA (*outbuf), &have_data, &packet);
+ len = avcodec_decode_audio3 (ffmpegdec->context, odata, &have_data, &packet);
+
GST_DEBUG_OBJECT (ffmpegdec,
"Decode audio: len=%d, have_data=%d", len, have_data);
if (len >= 0 && have_data > 0) {
+ /* Buffer size */
+ gst_buffer_unmap (*outbuf, odata, have_data);
+
GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
- if (!gst_ffmpegdec_negotiate (ffmpegdec, FALSE)) {
+ if (!gst_ffmpegdec_audio_negotiate (ffmpegdec, FALSE)) {
gst_buffer_unref (*outbuf);
*outbuf = NULL;
len = -1;
goto beach;
}
- /* Buffer size */
- GST_BUFFER_SIZE (*outbuf) = have_data;
-
/*
* Timestamps:
*
GST_BUFFER_TIMESTAMP (*outbuf) = out_timestamp;
GST_BUFFER_DURATION (*outbuf) = out_duration;
GST_BUFFER_OFFSET (*outbuf) = out_offset;
- gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
/* the next timestamp we'll use when interpolating */
if (GST_CLOCK_TIME_IS_VALID (out_timestamp))
goto clipped;
} else {
+ gst_buffer_unmap (*outbuf, odata, 0);
gst_buffer_unref (*outbuf);
*outbuf = NULL;
}
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
ffmpegdec->discont = FALSE;
}
-
if (ffmpegdec->segment.rate > 0.0) {
/* and off we go */
*ret = gst_pad_push (ffmpegdec->srcpad, outbuf);
clear_queued (ffmpegdec);
break;
}
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_CAPS:
{
- gboolean update;
- GstFormat fmt;
- gint64 start, stop, time;
- gdouble rate, arate;
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+
+ ret = gst_ffmpegdec_setcaps (ffmpegdec, caps);
- gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
- &start, &stop, &time);
+ gst_event_unref (event);
+ goto done;
+ }
+ case GST_EVENT_SEGMENT:
+ {
+ GstSegment segment;
- switch (fmt) {
+ gst_event_copy_segment (event, &segment);
+
+ switch (segment.format) {
case GST_FORMAT_TIME:
/* fine, our native segment format */
break;
GST_DEBUG_OBJECT (ffmpegdec, "bitrate: %d", bit_rate);
/* convert values to TIME */
- if (start != -1)
- start = gst_util_uint64_scale_int (start, GST_SECOND, bit_rate);
- if (stop != -1)
- stop = gst_util_uint64_scale_int (stop, GST_SECOND, bit_rate);
- if (time != -1)
- time = gst_util_uint64_scale_int (time, GST_SECOND, bit_rate);
+ if (segment.start != -1)
+ segment.start =
+ gst_util_uint64_scale_int (segment.start, GST_SECOND, bit_rate);
+ if (segment.stop != -1)
+ segment.stop =
+ gst_util_uint64_scale_int (segment.stop, GST_SECOND, bit_rate);
+ if (segment.time != -1)
+ segment.time =
+ gst_util_uint64_scale_int (segment.time, GST_SECOND, bit_rate);
/* unref old event */
gst_event_unref (event);
/* create new converted time segment */
- fmt = GST_FORMAT_TIME;
+ segment.format = GST_FORMAT_TIME;
/* FIXME, bitrate is not good enough too find a good stop, let's
* hope start and time were 0... meh. */
- stop = -1;
- event = gst_event_new_new_segment (update, rate, fmt,
- start, stop, time);
+ segment.stop = -1;
+ event = gst_event_new_segment (&segment);
break;
}
default:
if (ffmpegdec->context->codec)
gst_ffmpegdec_drain (ffmpegdec);
- GST_DEBUG_OBJECT (ffmpegdec,
- "NEWSEGMENT in time start %" GST_TIME_FORMAT " -- stop %"
- GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop));
+ GST_DEBUG_OBJECT (ffmpegdec, "SEGMENT in time %" GST_SEGMENT_FORMAT,
+ &segment);
/* and store the values */
- gst_segment_set_newsegment_full (&ffmpegdec->segment, update,
- rate, arate, fmt, start, stop, time);
+ gst_segment_copy_into (&segment, &ffmpegdec->segment);
break;
}
default:
GstFFMpegDec *ffmpegdec;
GstFFMpegDecClass *oclass;
guint8 *data, *bdata;
+ guint8 *odata;
+ gsize osize;
gint size, bsize, len, have_data;
GstFlowReturn ret = GST_FLOW_OK;
GstClockTime in_timestamp;
ffmpegdec->last_frames = 0;
}
- GST_LOG_OBJECT (ffmpegdec,
- "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
- GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
- GST_BUFFER_SIZE (inbuf), GST_BUFFER_OFFSET (inbuf),
- GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration), in_info->idx);
-
/* workarounds, functions write to buffers:
* libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
* libavcodec/svq3.c:svq3_decode_slice_header too.
inbuf = gst_buffer_make_writable (inbuf);
}
- bdata = GST_BUFFER_DATA (inbuf);
- bsize = GST_BUFFER_SIZE (inbuf);
+ odata = gst_buffer_map (inbuf, &osize, NULL, GST_MAP_READ);
+
+ bdata = odata;
+ bsize = osize;
+
+ GST_LOG_OBJECT (ffmpegdec,
+ "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
+ GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
+ bsize, in_offset, GST_TIME_ARGS (in_timestamp),
+ GST_TIME_ARGS (in_duration), in_info->idx);
if (ffmpegdec->do_padding) {
/* add padding */
bsize, bdata);
} while (bsize > 0);
+ gst_buffer_unmap (inbuf, odata, osize);
+
/* keep left-over */
if (ffmpegdec->pctx && bsize > 0) {
in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
"Keeping %d bytes of data with offset %" G_GINT64_FORMAT ", timestamp %"
GST_TIME_FORMAT, bsize, in_offset, GST_TIME_ARGS (in_timestamp));
- ffmpegdec->pcache = gst_buffer_create_sub (inbuf,
- GST_BUFFER_SIZE (inbuf) - bsize, bsize);
+ ffmpegdec->pcache = gst_buffer_copy_region (inbuf, GST_BUFFER_COPY_ALL,
+ gst_buffer_get_size (inbuf) - bsize, bsize);
/* we keep timestamp, even though all we really know is that the correct
* timestamp is not below the one from inbuf */
GST_BUFFER_TIMESTAMP (ffmpegdec->pcache) = in_timestamp;
g_free (ffmpegdec->padded);
ffmpegdec->padded = NULL;
ffmpegdec->padded_size = 0;
- ffmpegdec->can_allocate_aligned = TRUE;
+ if (ffmpegdec->pool) {
+ gst_buffer_pool_set_active (ffmpegdec->pool, FALSE);
+ gst_object_unref (ffmpegdec->pool);
+ }
+ ffmpegdec->pool = NULL;
break;
default:
break;
static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("I420"))
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("I420"))
);
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("I420"))
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("I420"))
);
-GST_BOILERPLATE (GstFFMpegDeinterlace, gst_ffmpegdeinterlace, GstElement,
- GST_TYPE_ELEMENT);
+G_DEFINE_TYPE (GstFFMpegDeinterlace, gst_ffmpegdeinterlace, GST_TYPE_ELEMENT);
static GstFlowReturn gst_ffmpegdeinterlace_chain (GstPad * pad,
GstBuffer * inbuf);
static void
-gst_ffmpegdeinterlace_base_init (gpointer g_class)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
-
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&src_factory));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&sink_factory));
- gst_element_class_set_details_simple (element_class,
- "FFMPEG Deinterlace element", "Filter/Effect/Video/Deinterlace",
- "Deinterlace video", "Luca Ognibene <luogni@tin.it>");
-}
-
-static void
gst_ffmpegdeinterlace_class_init (GstFFMpegDeinterlaceClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
+ GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
gobject_class->set_property = gst_ffmpegdeinterlace_set_property;
gobject_class->get_property = gst_ffmpegdeinterlace_get_property;
GST_TYPE_FFMPEGDEINTERLACE_MODES,
DEFAULT_MODE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)
);
+
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&src_factory));
+ gst_element_class_add_pad_template (element_class,
+ gst_static_pad_template_get (&sink_factory));
+
+ gst_element_class_set_details_simple (element_class,
+ "FFMPEG Deinterlace element", "Filter/Effect/Video/Deinterlace",
+ "Deinterlace video", "Luca Ognibene <luogni@tin.it>");
}
static void
return ret;
}
+/* Sink pad event handler (0.11 style: setcaps arrives as a CAPS event).
+ * CAPS events are translated into a gst_ffmpegdeinterlace_sink_setcaps()
+ * call and consumed; all other events are forwarded to the source pad. */
+static gboolean
+gst_ffmpegdeinterlace_sink_event (GstPad * pad, GstEvent * event)
+{
+  GstFFMpegDeinterlace *deinterlace =
+      GST_FFMPEGDEINTERLACE (gst_pad_get_parent (pad));
+  gboolean ret = FALSE;
+
+  switch (GST_EVENT_TYPE (event)) {
+    case GST_EVENT_CAPS:
+    {
+      GstCaps *caps;
+
+      gst_event_parse_caps (event, &caps);
+      ret = gst_ffmpegdeinterlace_sink_setcaps (pad, caps);
+      /* the CAPS event is consumed here, not pushed downstream */
+      gst_event_unref (event);
+      break;
+    }
+    default:
+      /* forward everything else unchanged */
+      ret = gst_pad_push_event (deinterlace->srcpad, event);
+      break;
+  }
+
+  gst_object_unref (deinterlace);
+
+  return ret;
+}
+
static void
-gst_ffmpegdeinterlace_init (GstFFMpegDeinterlace * deinterlace,
- GstFFMpegDeinterlaceClass * klass)
+gst_ffmpegdeinterlace_init (GstFFMpegDeinterlace * deinterlace)
{
deinterlace->sinkpad =
gst_pad_new_from_static_template (&sink_factory, "sink");
- gst_pad_set_setcaps_function (deinterlace->sinkpad,
- gst_ffmpegdeinterlace_sink_setcaps);
+ gst_pad_set_event_function (deinterlace->sinkpad,
+ gst_ffmpegdeinterlace_sink_event);
gst_pad_set_chain_function (deinterlace->sinkpad,
gst_ffmpegdeinterlace_chain);
gst_element_add_pad (GST_ELEMENT (deinterlace), deinterlace->sinkpad);
GST_FFMPEGDEINTERLACE (gst_pad_get_parent (pad));
GstBuffer *outbuf = NULL;
GstFlowReturn result;
+ guint8 *from_data, *to_data;
+ gsize from_size, to_size;
GST_OBJECT_LOCK (deinterlace);
if (deinterlace->reconfigure) {
deinterlace->reconfigure = FALSE;
GST_OBJECT_UNLOCK (deinterlace);
- if (GST_PAD_CAPS (deinterlace->srcpad))
- gst_ffmpegdeinterlace_sink_setcaps (deinterlace->sinkpad,
- GST_PAD_CAPS (deinterlace->sinkpad));
+ if (gst_pad_has_current_caps (deinterlace->srcpad)) {
+ GstCaps *caps;
+
+ caps = gst_pad_get_current_caps (deinterlace->sinkpad);
+ gst_ffmpegdeinterlace_sink_setcaps (deinterlace->sinkpad, caps);
+ gst_caps_unref (caps);
+ }
} else {
GST_OBJECT_UNLOCK (deinterlace);
}
if (deinterlace->passthrough)
return gst_pad_push (deinterlace->srcpad, inbuf);
- result =
- gst_pad_alloc_buffer (deinterlace->srcpad, GST_BUFFER_OFFSET_NONE,
- deinterlace->to_size, GST_PAD_CAPS (deinterlace->srcpad), &outbuf);
- if (result == GST_FLOW_OK) {
- gst_ffmpeg_avpicture_fill (&deinterlace->from_frame,
- GST_BUFFER_DATA (inbuf), deinterlace->pixfmt, deinterlace->width,
- deinterlace->height);
+ outbuf = gst_buffer_new_and_alloc (deinterlace->to_size);
- gst_ffmpeg_avpicture_fill (&deinterlace->to_frame, GST_BUFFER_DATA (outbuf),
- deinterlace->pixfmt, deinterlace->width, deinterlace->height);
+ from_data = gst_buffer_map (inbuf, &from_size, NULL, GST_MAP_READ);
+ gst_ffmpeg_avpicture_fill (&deinterlace->from_frame, from_data,
+ deinterlace->pixfmt, deinterlace->width, deinterlace->height);
- avpicture_deinterlace (&deinterlace->to_frame, &deinterlace->from_frame,
- deinterlace->pixfmt, deinterlace->width, deinterlace->height);
+ to_data = gst_buffer_map (outbuf, &to_size, NULL, GST_MAP_WRITE);
+ gst_ffmpeg_avpicture_fill (&deinterlace->to_frame, to_data,
+ deinterlace->pixfmt, deinterlace->width, deinterlace->height);
- gst_buffer_copy_metadata (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS);
+ avpicture_deinterlace (&deinterlace->to_frame, &deinterlace->from_frame,
+ deinterlace->pixfmt, deinterlace->width, deinterlace->height);
+ gst_buffer_unmap (outbuf, to_data, to_size);
+ gst_buffer_unmap (inbuf, from_data, from_size);
- result = gst_pad_push (deinterlace->srcpad, outbuf);
- }
+ gst_buffer_copy_into (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS, 0, -1);
+
+ result = gst_pad_push (deinterlace->srcpad, outbuf);
gst_buffer_unref (inbuf);
GST_OBJECT_LOCK (self);
new_mode = g_value_get_enum (value);
- if (self->mode != new_mode && GST_PAD_CAPS (self->srcpad)) {
+ if (self->mode != new_mode && gst_pad_has_current_caps (self->srcpad)) {
self->reconfigure = TRUE;
self->new_mode = new_mode;
} else {
/* segment stuff */
GstSegment segment;
- gboolean running;
/* cached seek in READY */
GstEvent *seek_event;
/* get the stream for seeking */
stream = demux->context->streams[index];
/* initial seek position */
- target = segment->last_stop;
+ target = segment->position;
/* convert target to ffmpeg time */
fftarget = gst_ffmpeg_time_gst_to_ff (target, stream->time_base);
GST_DEBUG_OBJECT (demux, "seek success, returned %d", seekret);
- segment->last_stop = target;
+ segment->position = target;
segment->time = target;
segment->start = target;
res = TRUE;
/* FIXME, use source pad */
if (cur_type != GST_SEEK_TYPE_NONE && cur != -1)
- res = gst_pad_query_convert (demux->sinkpad, format, cur, &fmt, &cur);
+ res = gst_pad_query_convert (demux->sinkpad, format, cur, fmt, &cur);
if (res && stop_type != GST_SEEK_TYPE_NONE && stop != -1)
- res = gst_pad_query_convert (demux->sinkpad, format, stop, &fmt, &stop);
+ res = gst_pad_query_convert (demux->sinkpad, format, stop, fmt, &stop);
if (!res)
goto no_format;
/* now configure the seek segment */
if (event) {
- gst_segment_set_seek (&seeksegment, rate, format, flags,
+ gst_segment_do_seek (&seeksegment, rate, format, flags,
cur_type, cur, stop_type, stop, &update);
}
GST_DEBUG_OBJECT (demux, "segment configured from %" G_GINT64_FORMAT
" to %" G_GINT64_FORMAT ", position %" G_GINT64_FORMAT,
- seeksegment.start, seeksegment.stop, seeksegment.last_stop);
+ seeksegment.start, seeksegment.stop, seeksegment.position);
/* make the sinkpad available for data passing since we might need
* it when doing the seek */
GST_OBJECT_LOCK (demux);
demux->flushing = FALSE;
GST_OBJECT_UNLOCK (demux);
- gst_pad_push_event (demux->sinkpad, gst_event_new_flush_stop ());
+ gst_pad_push_event (demux->sinkpad, gst_event_new_flush_stop (TRUE));
}
- /* do the seek, segment.last_stop contains new position. */
+ /* do the seek, segment.position contains new position. */
res = gst_ffmpegdemux_do_seek (demux, &seeksegment);
/* and prepare to continue streaming */
/* send flush stop, peer will accept data and events again. We
* are not yet providing data as we still have the STREAM_LOCK. */
- gst_ffmpegdemux_push_event (demux, gst_event_new_flush_stop ());
+ gst_ffmpegdemux_push_event (demux, gst_event_new_flush_stop (TRUE));
for (n = 0; n < MAX_STREAMS; ++n) {
if (demux->streams[n])
demux->streams[n]->last_flow = GST_FLOW_OK;
}
- } else if (res && demux->running) {
- /* we are running the current segment and doing a non-flushing seek,
- * close the segment first based on the last_stop. */
- GST_DEBUG_OBJECT (demux, "closing running segment %" G_GINT64_FORMAT
- " to %" G_GINT64_FORMAT, demux->segment.start,
- demux->segment.last_stop);
-
- gst_ffmpegdemux_push_event (demux,
- gst_event_new_new_segment (TRUE,
- demux->segment.rate, demux->segment.format,
- demux->segment.start, demux->segment.last_stop,
- demux->segment.time));
}
/* if successfull seek, we update our real segment and push
* out the new segment. */
if (demux->segment.flags & GST_SEEK_FLAG_SEGMENT) {
gst_element_post_message (GST_ELEMENT (demux),
gst_message_new_segment_start (GST_OBJECT (demux),
- demux->segment.format, demux->segment.last_stop));
+ demux->segment.format, demux->segment.position));
}
- /* now send the newsegment */
+ /* now send the newsegment, FIXME, do this from the streaming thread */
GST_DEBUG_OBJECT (demux, "Sending newsegment from %" G_GINT64_FORMAT
- " to %" G_GINT64_FORMAT, demux->segment.last_stop, demux->segment.stop);
+ " to %" G_GINT64_FORMAT, demux->segment.position, demux->segment.stop);
- gst_ffmpegdemux_push_event (demux,
- gst_event_new_new_segment (FALSE,
- demux->segment.rate, demux->segment.format,
- demux->segment.last_stop, demux->segment.stop,
- demux->segment.time));
+ gst_ffmpegdemux_push_event (demux, gst_event_new_segment (&demux->segment));
}
/* Mark discont on all srcpads and remove eos */
/* and restart the task in case it got paused explicitely or by
* the FLUSH_START event we pushed out. */
- demux->running = TRUE;
gst_pad_start_task (demux->sinkpad, (GstTaskFunction) gst_ffmpegdemux_loop,
demux->sinkpad);
gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
seekable = demux->seekable;
- if (!gst_pad_query_duration (pad, &format, &dur)) {
+ if (!gst_pad_query_duration (pad, format, &dur)) {
/* unlikely that we don't know duration but can seek */
seekable = FALSE;
dur = -1;
/* metadata */
if ((codec = gst_ffmpeg_get_codecid_longname (ctx->codec_id))) {
- stream->tags = gst_tag_list_new ();
+ stream->tags = gst_tag_list_new_empty ();
gst_tag_list_add (stream->tags, GST_TAG_MERGE_REPLACE,
(ctx->codec_type == AVMEDIA_TYPE_VIDEO) ?
GST_TIME_ARGS (demux->duration));
/* store duration in the segment as well */
- gst_segment_set_duration (&demux->segment, GST_FORMAT_TIME, demux->duration);
+ demux->segment.duration = demux->duration;
GST_OBJECT_LOCK (demux);
demux->opened = TRUE;
gst_ffmpegdemux_perform_seek (demux, event);
gst_event_unref (event);
} else {
- gst_ffmpegdemux_push_event (demux,
- gst_event_new_new_segment (FALSE,
- demux->segment.rate, demux->segment.format,
- demux->segment.start, demux->segment.stop, demux->segment.time));
+ gst_ffmpegdemux_push_event (demux, gst_event_new_segment (&demux->segment));
}
while (cached_events) {
event = cached_events->data;
- GST_INFO_OBJECT (demux, "pushing cached %s event: %" GST_PTR_FORMAT,
- GST_EVENT_TYPE_NAME (event), event->structure);
+ GST_INFO_OBJECT (demux, "pushing cached event: %" GST_PTR_FORMAT, event);
gst_ffmpegdemux_push_event (demux, event);
cached_events = g_list_delete_link (cached_events, cached_events);
}
static void
gst_ffmpegdemux_type_find (GstTypeFind * tf, gpointer priv)
{
- guint8 *data;
+ const guint8 *data;
AVInputFormat *in_plugin = (AVInputFormat *) priv;
gint res = 0;
guint64 length;
AVProbeData probe_data;
probe_data.filename = "";
- probe_data.buf = data;
+ probe_data.buf = (guint8 *) data;
probe_data.buf_size = length;
res = in_plugin->read_probe (&probe_data);
else
outsize = pkt.size;
- stream->last_flow = gst_pad_alloc_buffer_and_set_caps (srcpad,
- GST_CLOCK_TIME_NONE, outsize, GST_PAD_CAPS (srcpad), &outbuf);
+ outbuf = gst_buffer_new_and_alloc (outsize);
if ((ret = gst_ffmpegdemux_aggregated_flow (demux)) != GST_FLOW_OK)
goto no_buffer;
- /* If the buffer allocation failed, don't try sending it ! */
- if (stream->last_flow != GST_FLOW_OK)
- goto done;
-
/* copy the data from packet into the target buffer
* and do conversions for raw video packets */
if (rawvideo) {
AVPicture src, dst;
const gchar *plugin_name =
((GstFFMpegDemuxClass *) (G_OBJECT_GET_CLASS (demux)))->in_plugin->name;
+ guint8 *data;
+ gsize size;
if (strcmp (plugin_name, "gif") == 0) {
src.data[0] = pkt.data;
avstream->codec->height);
}
- gst_ffmpeg_avpicture_fill (&dst, GST_BUFFER_DATA (outbuf),
+ data = gst_buffer_map (outbuf, &size, NULL, GST_MAP_WRITE);
+ gst_ffmpeg_avpicture_fill (&dst, data,
avstream->codec->pix_fmt, avstream->codec->width,
avstream->codec->height);
av_picture_copy (&dst, &src, avstream->codec->pix_fmt,
avstream->codec->width, avstream->codec->height);
+ gst_buffer_unmap (outbuf, data, size);
} else {
- memcpy (GST_BUFFER_DATA (outbuf), pkt.data, outsize);
+ gst_buffer_fill (outbuf, 0, pkt.data, outsize);
}
GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
GST_DEBUG_OBJECT (demux,
"Sending out buffer time:%" GST_TIME_FORMAT " size:%d",
- GST_TIME_ARGS (timestamp), GST_BUFFER_SIZE (outbuf));
+      GST_TIME_ARGS (timestamp), (gint) gst_buffer_get_size (outbuf));
ret = stream->last_flow = gst_pad_push (srcpad, outbuf);
{
GST_LOG_OBJECT (demux, "pausing task, reason %d (%s)", ret,
gst_flow_get_name (ret));
- demux->running = FALSE;
if (demux->seekable)
gst_pad_pause_task (demux->sinkpad);
else {
demux = (GstFFMpegDemux *) (GST_PAD_PARENT (sinkpad));
ffpipe = &(demux->ffpipe);
- GST_LOG_OBJECT (demux, "%s event: %" GST_PTR_FORMAT,
- GST_EVENT_TYPE_NAME (event), event->structure);
+ GST_LOG_OBJECT (demux, "event: %" GST_PTR_FORMAT, event);
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_FLUSH_START:
ffpipe->srcresult = GST_FLOW_OK;
/* loop may have decided to end itself as a result of flush WRONG_STATE */
gst_task_start (demux->task);
- demux->running = TRUE;
demux->flushing = FALSE;
GST_LOG_OBJECT (demux, "loop started");
GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
if (G_UNLIKELY (ffpipe->srcresult != GST_FLOW_OK))
goto ignore;
- GST_DEBUG ("Giving a buffer of %d bytes", GST_BUFFER_SIZE (buffer));
+  GST_DEBUG ("Giving a buffer of %" G_GSIZE_FORMAT " bytes",
+      gst_buffer_get_size (buffer));
gst_adapter_push (ffpipe->adapter, buffer);
buffer = NULL;
while (gst_adapter_available (ffpipe->adapter) >= ffpipe->needed) {
static gboolean
gst_ffmpegdemux_sink_activate (GstPad * sinkpad)
{
-  GstFFMpegDemux *demux;
-  gboolean res;
+  GstQuery *query;
+  gboolean pull_mode;
-  demux = (GstFFMpegDemux *) (gst_pad_get_parent (sinkpad));
+  /* ask the peer whether it supports pull-mode scheduling */
+  query = gst_query_new_scheduling ();
+
+  if (!gst_pad_peer_query (sinkpad, query)) {
+    gst_query_unref (query);
+    goto activate_push;
+  }
+
+  gst_query_parse_scheduling (query, &pull_mode, NULL, NULL, NULL, NULL, NULL);
+  /* result has been read out; release the query on all remaining paths
+   * (previously leaked on both the pull and the !pull_mode path) */
+  gst_query_unref (query);
+
+  if (!pull_mode)
+    goto activate_push;
-  res = FALSE;
+  GST_DEBUG_OBJECT (sinkpad, "activating pull");
+  return gst_pad_activate_pull (sinkpad, TRUE);
-  if (gst_pad_check_pull_range (sinkpad))
-    res = gst_pad_activate_pull (sinkpad, TRUE);
-  else {
-    res = gst_pad_activate_push (sinkpad, TRUE);
+activate_push:
+  {
+    GST_DEBUG_OBJECT (sinkpad, "activating push");
+    return gst_pad_activate_push (sinkpad, TRUE);
  }
-  gst_object_unref (demux);
-  return res;
}
/* push mode:
demux->ffpipe.eos = FALSE;
demux->ffpipe.srcresult = GST_FLOW_OK;
demux->ffpipe.needed = 0;
- demux->running = TRUE;
demux->seekable = FALSE;
res = gst_task_start (demux->task);
} else {
g_static_rec_mutex_lock (demux->task_lock);
g_static_rec_mutex_unlock (demux->task_lock);
res = gst_task_join (demux->task);
- demux->running = FALSE;
demux->seekable = FALSE;
}
demux = (GstFFMpegDemux *) (gst_pad_get_parent (sinkpad));
if (active) {
- demux->running = TRUE;
demux->seekable = TRUE;
res = gst_pad_start_task (sinkpad, (GstTaskFunction) gst_ffmpegdemux_loop,
demux);
} else {
- demux->running = FALSE;
res = gst_pad_stop_task (sinkpad);
demux->seekable = FALSE;
}
static void gst_ffmpegenc_init (GstFFMpegEnc * ffmpegenc);
static void gst_ffmpegenc_finalize (GObject * object);
-static gboolean gst_ffmpegenc_setcaps (GstPad * pad, GstCaps * caps);
-static GstCaps *gst_ffmpegenc_getcaps (GstPad * pad);
+static gboolean gst_ffmpegenc_setcaps (GstFFMpegEnc * ffmpegenc,
+ GstCaps * caps);
+static GstCaps *gst_ffmpegenc_getcaps (GstPad * pad, GstCaps * filter);
static GstFlowReturn gst_ffmpegenc_chain_video (GstPad * pad,
GstBuffer * buffer);
static GstFlowReturn gst_ffmpegenc_chain_audio (GstPad * pad,
GstBuffer * buffer);
-static gboolean gst_ffmpegenc_event_video (GstPad * pad, GstEvent * event);
+static gboolean gst_ffmpegenc_event_sink (GstPad * pad, GstEvent * event);
static gboolean gst_ffmpegenc_event_src (GstPad * pad, GstEvent * event);
static void gst_ffmpegenc_set_property (GObject * object,
if (!(srccaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, TRUE))) {
GST_DEBUG ("Couldn't get source caps for encoder '%s'", in_plugin->name);
- srccaps = gst_caps_new_simple ("unknown/unknown", NULL);
+ srccaps = gst_caps_new_empty_simple ("unknown/unknown");
}
if (in_plugin->type == AVMEDIA_TYPE_VIDEO) {
- sinkcaps = gst_caps_from_string
- ("video/x-raw-rgb; video/x-raw-yuv; video/x-raw-gray");
+ sinkcaps = gst_caps_from_string ("video/x-raw");
} else {
sinkcaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
in_plugin->id, TRUE, in_plugin);
}
if (!sinkcaps) {
GST_DEBUG ("Couldn't get sink caps for encoder '%s'", in_plugin->name);
- sinkcaps = gst_caps_new_simple ("unknown/unknown", NULL);
+ sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
}
/* pad templates */
/* setup pads */
ffmpegenc->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
- gst_pad_set_setcaps_function (ffmpegenc->sinkpad, gst_ffmpegenc_setcaps);
gst_pad_set_getcaps_function (ffmpegenc->sinkpad, gst_ffmpegenc_getcaps);
ffmpegenc->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
gst_pad_use_fixed_caps (ffmpegenc->srcpad);
ffmpegenc->file = NULL;
ffmpegenc->delay = g_queue_new ();
+ gst_pad_set_event_function (ffmpegenc->sinkpad, gst_ffmpegenc_event_sink);
+
if (oclass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain_video);
/* so we know when to flush the buffers on EOS */
- gst_pad_set_event_function (ffmpegenc->sinkpad, gst_ffmpegenc_event_video);
gst_pad_set_event_function (ffmpegenc->srcpad, gst_ffmpegenc_event_src);
ffmpegenc->bitrate = DEFAULT_VIDEO_BITRATE;
GstCaps *intersect = NULL;
guint i;
- othercaps = gst_pad_peer_get_caps (ffmpegenc->srcpad);
+ othercaps = gst_pad_peer_get_caps (ffmpegenc->srcpad, NULL);
if (!othercaps)
return gst_caps_copy (caps);
width = gst_structure_get_value (s, "width");
framerate = gst_structure_get_value (s, "framerate");
- tmps = gst_structure_new ("video/x-raw-rgb", NULL);
+ tmps = gst_structure_empty_new ("video/x-raw");
if (width)
gst_structure_set_value (tmps, "width", width);
if (height)
gst_structure_set_value (tmps, "height", height);
if (framerate)
gst_structure_set_value (tmps, "framerate", framerate);
- gst_caps_merge_structure (tmpcaps, gst_structure_copy (tmps));
-
- gst_structure_set_name (tmps, "video/x-raw-yuv");
- gst_caps_merge_structure (tmpcaps, gst_structure_copy (tmps));
-
- gst_structure_set_name (tmps, "video/x-raw-gray");
gst_caps_merge_structure (tmpcaps, tmps);
}
gst_caps_unref (intersect);
static GstCaps *
-gst_ffmpegenc_getcaps (GstPad * pad)
+gst_ffmpegenc_getcaps (GstPad * pad, GstCaps * filter)
{
GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) GST_PAD_PARENT (pad);
GstFFMpegEncClass *oclass =
GstCaps *finalcaps = NULL;
gint i;
- GST_DEBUG_OBJECT (ffmpegenc, "getting caps");
+ GST_DEBUG_OBJECT (ffmpegenc, "getting caps, filter %" GST_PTR_FORMAT, filter);
/* audio needs no special care */
if (oclass->in_plugin->type == AVMEDIA_TYPE_AUDIO) {
- caps = gst_caps_copy (gst_pad_get_pad_template_caps (pad));
+ caps = gst_pad_get_pad_template_caps (pad);
+ if (filter)
+ caps = gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
+ else
+ caps = gst_caps_copy (caps);
- GST_DEBUG_OBJECT (ffmpegenc, "audio caps, return template %" GST_PTR_FORMAT,
- caps);
+ GST_DEBUG_OBJECT (ffmpegenc, "audio caps, return intersected template %"
+ GST_PTR_FORMAT, caps);
return caps;
}
/* cached */
if (oclass->sinkcaps) {
caps = gst_ffmpegenc_get_possible_sizes (ffmpegenc, pad, oclass->sinkcaps);
- GST_DEBUG_OBJECT (ffmpegenc, "return cached caps %" GST_PTR_FORMAT, caps);
- return caps;
+ if (filter) {
+ finalcaps =
+ gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (caps);
+ } else {
+ finalcaps = caps;
+ }
+ GST_DEBUG_OBJECT (ffmpegenc,
+ "return intersected cached caps %" GST_PTR_FORMAT, finalcaps);
+ return finalcaps;
}
/* create cache etc. */
if (!caps) {
caps = gst_ffmpegenc_get_possible_sizes (ffmpegenc, pad,
gst_pad_get_pad_template_caps (pad));
+ if (filter) {
+ finalcaps =
+ gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (caps);
+ } else {
+ finalcaps = caps;
+ }
GST_DEBUG_OBJECT (ffmpegenc, "probing gave nothing, "
- "return template %" GST_PTR_FORMAT, caps);
- return caps;
+ "return intersected template %" GST_PTR_FORMAT, finalcaps);
+ return finalcaps;
}
GST_DEBUG_OBJECT (ffmpegenc, "probed caps gave %" GST_PTR_FORMAT, caps);
finalcaps = gst_ffmpegenc_get_possible_sizes (ffmpegenc, pad, caps);
gst_caps_unref (caps);
+ if (filter) {
+ caps = finalcaps;
+ finalcaps =
+ gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (caps);
+ }
+
return finalcaps;
}
static gboolean
-gst_ffmpegenc_setcaps (GstPad * pad, GstCaps * caps)
+gst_ffmpegenc_setcaps (GstFFMpegEnc * ffmpegenc, GstCaps * caps)
{
GstCaps *other_caps;
GstCaps *allowed_caps;
GstCaps *icaps;
enum PixelFormat pix_fmt;
- GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) GST_PAD_PARENT (pad);
GstFFMpegEncClass *oclass =
(GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);
if (ffmpegenc->opened) {
gst_ffmpeg_avcodec_close (ffmpegenc->context);
ffmpegenc->opened = FALSE;
- /* fixed src caps;
- * so clear src caps for proper (re-)negotiation */
- gst_pad_set_caps (ffmpegenc->srcpad, NULL);
}
/* set defaults */
{
GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) (GST_PAD_PARENT (pad));
GstBuffer *outbuf;
+ guint8 *data;
+ gsize size;
gint ret_size = 0, frame_size;
gboolean force_keyframe;
+ if (G_UNLIKELY (!ffmpegenc->opened))
+ goto not_negotiated;
+
GST_DEBUG_OBJECT (ffmpegenc,
"Received buffer of time %" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)));
if (force_keyframe)
ffmpegenc->picture->pict_type = FF_I_TYPE;
+ data = gst_buffer_map (inbuf, &size, NULL, GST_MAP_READ);
frame_size = gst_ffmpeg_avpicture_fill ((AVPicture *) ffmpegenc->picture,
- GST_BUFFER_DATA (inbuf),
+ data,
ffmpegenc->context->pix_fmt,
ffmpegenc->context->width, ffmpegenc->context->height);
- g_return_val_if_fail (frame_size == GST_BUFFER_SIZE (inbuf), GST_FLOW_ERROR);
+ if (G_UNLIKELY (frame_size != size)) {
+ /* bailing out via g_return_val_if_fail would leak both the mapping
+ * and the input buffer; release them before erroring out */
+ gst_buffer_unmap (inbuf, data, size);
+ gst_buffer_unref (inbuf);
+ g_return_val_if_reached (GST_FLOW_ERROR);
+ }
ffmpegenc->picture->pts =
gst_ffmpeg_time_gst_to_ff (GST_BUFFER_TIMESTAMP (inbuf) /
ret_size = avcodec_encode_video (ffmpegenc->context,
ffmpegenc->working_buf, ffmpegenc->working_buf_size, ffmpegenc->picture);
+ gst_buffer_unmap (inbuf, data, size);
+
if (ret_size < 0) {
#ifndef GST_DISABLE_GST_DEBUG
GstFFMpegEncClass *oclass =
GST_ERROR_SYSTEM);
outbuf = gst_buffer_new_and_alloc (ret_size);
- memcpy (GST_BUFFER_DATA (outbuf), ffmpegenc->working_buf, ret_size);
+ gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);
GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);
GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (inbuf);
/* buggy codec may not set coded_frame */
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
} else
GST_WARNING_OBJECT (ffmpegenc, "codec did not provide keyframe info");
- gst_buffer_set_caps (outbuf, GST_PAD_CAPS (ffmpegenc->srcpad));
gst_buffer_unref (inbuf);
}
return gst_pad_push (ffmpegenc->srcpad, outbuf);
+
+ /* ERRORS */
+not_negotiated:
+ {
+ GST_ELEMENT_ERROR (ffmpegenc, CORE, NEGOTIATION, (NULL),
+ ("not configured to input format before data start"));
+ gst_buffer_unref (inbuf);
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
}
static GstFlowReturn
/* We need to provide at least ffmpegs minimal buffer size */
outbuf = gst_buffer_new_and_alloc (max_size + FF_MIN_BUFFER_SIZE);
- audio_out = GST_BUFFER_DATA (outbuf);
+ audio_out = gst_buffer_map (outbuf, NULL, NULL, GST_MAP_WRITE);
GST_LOG_OBJECT (ffmpegenc, "encoding buffer of max size %d", max_size);
if (ffmpegenc->buffer_size != max_size)
res = avcodec_encode_audio (ctx, audio_out, max_size, (short *) audio_in);
if (res < 0) {
+ gst_buffer_unmap (outbuf, audio_out, 0);
GST_ERROR_OBJECT (ffmpegenc, "Failed to encode buffer: %d", res);
gst_buffer_unref (outbuf);
return GST_FLOW_OK;
}
GST_LOG_OBJECT (ffmpegenc, "got output size %d", res);
- GST_BUFFER_SIZE (outbuf) = res;
+ gst_buffer_unmap (outbuf, audio_out, res);
GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
GST_BUFFER_DURATION (outbuf) = duration;
if (discont)
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
- gst_buffer_set_caps (outbuf, GST_PAD_CAPS (ffmpegenc->srcpad));
GST_LOG_OBJECT (ffmpegenc, "pushing size %d, timestamp %" GST_TIME_FORMAT,
res, GST_TIME_ARGS (timestamp));
GstFFMpegEncClass *oclass;
AVCodecContext *ctx;
GstClockTime timestamp, duration;
- guint size, frame_size;
+ gsize size, frame_size;
gint osize;
GstFlowReturn ret;
gint out_size;
ffmpegenc = (GstFFMpegEnc *) (GST_OBJECT_PARENT (pad));
oclass = (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);
+ if (G_UNLIKELY (!ffmpegenc->opened))
+ goto not_negotiated;
+
ctx = ffmpegenc->context;
- size = GST_BUFFER_SIZE (inbuf);
+ size = gst_buffer_get_size (inbuf);
timestamp = GST_BUFFER_TIMESTAMP (inbuf);
duration = GST_BUFFER_DURATION (inbuf);
discont = GST_BUFFER_IS_DISCONT (inbuf);
* or samplesize to divide by the samplerate */
/* take an audio buffer out of the adapter */
- in_data = (guint8 *) gst_adapter_peek (ffmpegenc->adapter, frame_bytes);
+ in_data = (guint8 *) gst_adapter_map (ffmpegenc->adapter, frame_bytes);
ffmpegenc->adapter_consumed += frame_size;
/* calculate timestamp and duration relative to start of adapter and to
gst_ffmpegenc_encode_audio (ffmpegenc, in_data, frame_bytes, out_size,
timestamp, duration, ffmpegenc->discont);
- gst_adapter_flush (ffmpegenc->adapter, frame_bytes);
+ gst_adapter_unmap (ffmpegenc->adapter, frame_bytes);
if (ret != GST_FLOW_OK)
goto push_failed;
if (coded_bps)
out_size = (out_size * coded_bps) / 8;
- in_data = (guint8 *) GST_BUFFER_DATA (inbuf);
+ in_data = (guint8 *) gst_buffer_map (inbuf, &size, NULL, GST_MAP_READ);
ret = gst_ffmpegenc_encode_audio (ffmpegenc, in_data, size, out_size,
timestamp, duration, discont);
+ gst_buffer_unmap (inbuf, in_data, size);
gst_buffer_unref (inbuf);
if (ret != GST_FLOW_OK)
return GST_FLOW_OK;
/* ERRORS */
+not_negotiated:
+ {
+ GST_ELEMENT_ERROR (ffmpegenc, CORE, NEGOTIATION, (NULL),
+ ("not configured to input format before data start"));
+ gst_buffer_unref (inbuf);
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
push_failed:
{
GST_DEBUG_OBJECT (ffmpegenc, "Failed to push buffer %d (%s)", ret,
inbuf = g_queue_pop_head (ffmpegenc->delay);
outbuf = gst_buffer_new_and_alloc (ret_size);
- memcpy (GST_BUFFER_DATA (outbuf), ffmpegenc->working_buf, ret_size);
+ gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);
GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);
GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (inbuf);
if (!ffmpegenc->context->coded_frame->key_frame)
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
- gst_buffer_set_caps (outbuf, GST_PAD_CAPS (ffmpegenc->srcpad));
gst_buffer_unref (inbuf);
}
static gboolean
-gst_ffmpegenc_event_video (GstPad * pad, GstEvent * event)
+gst_ffmpegenc_event_sink (GstPad * pad, GstEvent * event)
{
GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) (GST_PAD_PARENT (pad));
}
break;
}
+ case GST_EVENT_CAPS:{
+ GstCaps *caps;
+ gboolean ret;
+
+ gst_event_parse_caps (event, &caps);
+ ret = gst_ffmpegenc_setcaps (ffmpegenc, caps);
+ gst_event_unref (event);
+ /* event fully consumed here; do not fall through to the default
+ * handler (the break after return was unreachable dead code) */
+ return ret;
+ }
default:
break;
}
static gboolean gst_ffmpegmux_setcaps (GstPad * pad, GstCaps * caps);
static GstPad *gst_ffmpegmux_request_new_pad (GstElement * element,
- GstPadTemplate * templ, const gchar * name);
+ GstPadTemplate * templ, const gchar * name, const GstCaps * caps);
static GstFlowReturn gst_ffmpegmux_collected (GstCollectPads * pads,
gpointer user_data);
if (videosinkcaps)
gst_caps_unref (videosinkcaps);
- videosinkcaps =
- gst_caps_from_string ("video/x-raw-rgb, bpp=(int)24, depth=(int)24");
+ videosinkcaps = gst_caps_from_string ("video/x-raw, format=(string)RGB");
}
/* pad templates */
static GstPad *
gst_ffmpegmux_request_new_pad (GstElement * element,
- GstPadTemplate * templ, const gchar * name)
+ GstPadTemplate * templ, const gchar * name, const GstCaps * caps)
{
GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) element;
GstElementClass *klass = GST_ELEMENT_GET_CLASS (element);
gst_pad_set_event_function (pad,
GST_DEBUG_FUNCPTR (gst_ffmpegmux_sink_event));
- gst_pad_set_setcaps_function (pad, GST_DEBUG_FUNCPTR (gst_ffmpegmux_setcaps));
gst_element_add_pad (element, pad);
/* AVStream needs to be created */
gst_tag_setter_merge_tags (setter, taglist, mode);
break;
}
+ case GST_EVENT_CAPS:{
+ GstCaps *caps;
+ gst_event_parse_caps (event, &caps);
+ if (!(res = gst_ffmpegmux_setcaps (pad, caps))) {
+ /* on failure we skip the chained event function that would
+ * normally consume the event, so drop it here */
+ gst_event_unref (event);
+ goto beach;
+ }
+ break;
+ }
default:
break;
}
/* chaining up to collectpads default event function */
res = ffmpegmux->event_function (pad, event);
+beach:
gst_object_unref (ffmpegmux);
return res;
}
GstBuffer *buf;
AVPacket pkt;
gboolean need_free = FALSE;
+ gsize size;
/* push out current buffer */
buf = gst_collect_pads_pop (ffmpegmux->collect,
if (strcmp (ffmpegmux->context->oformat->name, "gif") == 0) {
AVStream *st = ffmpegmux->context->streams[best_pad->padnum];
AVPicture src, dst;
+ guint8 *data;
need_free = TRUE;
pkt.size = st->codec->width * st->codec->height * 3;
dst.data[2] = NULL;
dst.linesize[0] = st->codec->width * 3;
- gst_ffmpeg_avpicture_fill (&src, GST_BUFFER_DATA (buf),
+ data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
+ gst_ffmpeg_avpicture_fill (&src, data,
PIX_FMT_RGB24, st->codec->width, st->codec->height);
av_picture_copy (&dst, &src, PIX_FMT_RGB24,
st->codec->width, st->codec->height);
+ gst_buffer_unmap (buf, data, size);
} else {
- pkt.data = GST_BUFFER_DATA (buf);
- pkt.size = GST_BUFFER_SIZE (buf);
+ pkt.data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
+ pkt.size = size;
}
pkt.stream_index = best_pad->padnum;
else
pkt.duration = 0;
av_write_frame (ffmpegmux->context, &pkt);
- gst_buffer_unref (buf);
- if (need_free)
+ if (need_free) {
g_free (pkt.data);
+ } else {
+ gst_buffer_unmap (buf, pkt.data, pkt.size);
+ }
+ gst_buffer_unref (buf);
} else {
/* close down */
av_write_trailer (ffmpegmux->context);
switch (ret) {
case GST_FLOW_OK:
- total = (gint) GST_BUFFER_SIZE (inbuf);
- memcpy (buf, GST_BUFFER_DATA (inbuf), total);
+ total = (gint) gst_buffer_get_size (inbuf);
+ gst_buffer_extract (inbuf, 0, buf, total);
gst_buffer_unref (inbuf);
break;
case GST_FLOW_UNEXPECTED:
g_return_val_if_fail (h->flags != URL_RDONLY, -EIO);
/* create buffer and push data further */
- if (gst_pad_alloc_buffer_and_set_caps (info->pad,
- info->offset, size, GST_PAD_CAPS (info->pad), &outbuf) != GST_FLOW_OK)
- return 0;
+ outbuf = gst_buffer_new_and_alloc (size);
- memcpy (GST_BUFFER_DATA (outbuf), buf, size);
+ gst_buffer_fill (outbuf, 0, buf, size);
if (gst_pad_push (info->pad, outbuf) != GST_FLOW_OK)
return 0;
gst_ffmpegdata_seek (URLContext * h, int64_t pos, int whence)
{
GstProtocolInfo *info;
- guint64 newpos = 0;
+ guint64 newpos = 0, oldpos;
GST_DEBUG ("Seeking to %" G_GINT64_FORMAT ", whence=%d",
(gint64) pos, whence);
case AVSEEK_SIZE:
/* ffmpeg wants to know the current end position in bytes ! */
{
- GstFormat format = GST_FORMAT_BYTES;
gint64 duration;
GST_DEBUG ("Seek end");
if (gst_pad_is_linked (info->pad))
- if (gst_pad_query_duration (GST_PAD_PEER (info->pad), &format,
- &duration))
+ if (gst_pad_query_duration (GST_PAD_PEER (info->pad),
+ GST_FORMAT_BYTES, &duration))
newpos = ((guint64) duration) + pos;
}
break;
break;
case URL_WRONLY:
{
+ GstSegment segment;
+
+ oldpos = info->offset;
+
/* srcpad */
switch (whence) {
case SEEK_SET:
+ {
info->offset = (guint64) pos;
- gst_pad_push_event (info->pad, gst_event_new_new_segment
- (TRUE, 1.0, GST_FORMAT_BYTES, info->offset,
- GST_CLOCK_TIME_NONE, info->offset));
break;
+ }
case SEEK_CUR:
info->offset += pos;
- gst_pad_push_event (info->pad, gst_event_new_new_segment
- (TRUE, 1.0, GST_FORMAT_BYTES, info->offset,
- GST_CLOCK_TIME_NONE, info->offset));
break;
default:
break;
}
newpos = info->offset;
- }
+
+ if (newpos != oldpos) {
+ gst_segment_init (&segment, GST_FORMAT_BYTES);
+ segment.start = newpos;
+ segment.time = newpos;
+ gst_pad_push_event (info->pad, gst_event_new_segment (&segment));
+ }
break;
+ }
default:
g_assert (0);
break;
gst_ffmpeg_pipe_read (URLContext * h, unsigned char *buf, int size)
{
GstFFMpegPipe *ffpipe;
- const guint8 *data;
guint available;
ffpipe = (GstFFMpegPipe *) h->priv_data;
size = MIN (available, size);
if (size) {
GST_LOG ("Getting %d bytes", size);
- data = gst_adapter_peek (ffpipe->adapter, size);
- memcpy (buf, data, size);
+ gst_adapter_copy (ffpipe->adapter, buf, 0, size);
gst_adapter_flush (ffpipe->adapter, size);
GST_LOG ("%d bytes left in adapter",
gst_adapter_available (ffpipe->adapter));
* for any processing. */
GstBuffer *
-new_aligned_buffer (gint size, GstCaps * caps)
+new_aligned_buffer (gint size)
{
GstBuffer *buf;
buf = gst_buffer_new ();
- GST_BUFFER_DATA (buf) = GST_BUFFER_MALLOCDATA (buf) = av_malloc (size);
- GST_BUFFER_SIZE (buf) = size;
- GST_BUFFER_FREE_FUNC (buf) = av_free;
- if (caps)
- gst_buffer_set_caps (buf, caps);
+ gst_buffer_take_memory (buf, -1,
+ gst_memory_new_wrapped (0, av_malloc (size), av_free, size, 0, size));
return buf;
}
av_smp_format_depth(enum SampleFormat smp_fmt);
GstBuffer *
-new_aligned_buffer (gint size, GstCaps * caps);
+new_aligned_buffer (gint size);
#endif /* __GST_FFMPEG_UTILS_H__ */
GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("{ IYUV, I420, YV12, Y42B, Y41B }"))
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ IYUV, I420, YV12, Y42B, Y41B }"))
);
static GstStaticPadTemplate gst_post_proc_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("{ IYUV, I420, YV12, Y42B, Y41B }"))
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ IYUV, I420, YV12, Y42B, Y41B }"))
);
GST_DEBUG_CATEGORY (postproc_debug);
gint stride[3];
guint8 *outplane[3];
guint8 *inplane[3];
+ guint8 *data;
+ gsize size;
/* postprocess the buffer ! */
postproc = (GstPostProc *) btrans;
+ data = gst_buffer_map (in, &size, NULL, GST_MAP_READWRITE);
+
stride[0] = postproc->ystride;
stride[1] = postproc->ustride;
stride[2] = postproc->vstride;
- outplane[0] = inplane[0] = GST_BUFFER_DATA (in);
+ outplane[0] = inplane[0] = data;
outplane[1] = inplane[1] = outplane[0] + postproc->ysize;
outplane[2] = inplane[2] = outplane[1] + postproc->usize;
postproc->width, postproc->height, (int8_t *) "", 0,
postproc->mode, postproc->context, 0);
+ gst_buffer_unmap (in, data, size);
+
return GST_FLOW_OK;
}
GstPad *sinkpad, *srcpad;
/* state */
- gint in_width, in_height;
- gint out_width, out_height;
+ GstVideoInfo in_info, out_info;
enum PixelFormat in_pixfmt, out_pixfmt;
struct SwsContext *ctx;
- /* cached auxiliary data */
- gint in_stride[3], in_offset[3];
- gint out_stride[3], out_offset[3];
-
/* property */
gint method;
} GstFFMpegScale;
/* libswscale supported formats depend on endianness */
#if G_BYTE_ORDER == G_BIG_ENDIAN
#define VIDEO_CAPS \
- GST_VIDEO_CAPS_RGB "; " GST_VIDEO_CAPS_BGR "; " \
- GST_VIDEO_CAPS_xRGB "; " GST_VIDEO_CAPS_xBGR "; " \
- GST_VIDEO_CAPS_ARGB "; " GST_VIDEO_CAPS_ABGR "; " \
- GST_VIDEO_CAPS_YUV ("{ I420, YUY2, UYVY, Y41B, Y42B }")
+ GST_VIDEO_CAPS_MAKE ("{ RGB, BGR, xRGB, xBGR, ARGB, ABGR, I420, YUY2, UYVY, Y41B, Y42B }")
#else
#define VIDEO_CAPS \
- GST_VIDEO_CAPS_RGB "; " GST_VIDEO_CAPS_BGR "; " \
- GST_VIDEO_CAPS_RGBx "; " GST_VIDEO_CAPS_BGRx "; " \
- GST_VIDEO_CAPS_RGBA "; " GST_VIDEO_CAPS_BGRA "; " \
- GST_VIDEO_CAPS_YUV ("{ I420, YUY2, UYVY, Y41B, Y42B }")
+ GST_VIDEO_CAPS_MAKE ("{ RGB, BGR, RGBx, BGRx, RGBA, BGRA, I420, YUY2, UYVY, Y41B, Y42B }")
#endif
static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
/* FILL ME */
};
-GST_BOILERPLATE (GstFFMpegScale, gst_ffmpegscale, GstBaseTransform,
- GST_TYPE_BASE_TRANSFORM);
+#define gst_ffmpegscale_parent_class parent_class
+G_DEFINE_TYPE (GstFFMpegScale, gst_ffmpegscale, GST_TYPE_BASE_TRANSFORM);
static void gst_ffmpegscale_finalize (GObject * object);
static void gst_ffmpegscale_set_property (GObject * object, guint prop_id,
static gboolean gst_ffmpegscale_stop (GstBaseTransform * trans);
static GstCaps *gst_ffmpegscale_transform_caps (GstBaseTransform * trans,
- GstPadDirection direction, GstCaps * caps);
+ GstPadDirection direction, GstCaps * caps, GstCaps * filter);
static void gst_ffmpegscale_fixate_caps (GstBaseTransform * trans,
GstPadDirection direction, GstCaps * caps, GstCaps * othercaps);
static gboolean gst_ffmpegscale_get_unit_size (GstBaseTransform * trans,
- GstCaps * caps, guint * size);
+ GstCaps * caps, gsize * size);
static gboolean gst_ffmpegscale_set_caps (GstBaseTransform * trans,
GstCaps * incaps, GstCaps * outcaps);
static GstFlowReturn gst_ffmpegscale_transform (GstBaseTransform * trans,
GstEvent * event);
static void
-gst_ffmpegscale_base_init (gpointer g_class)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
-
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&src_factory));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&sink_factory));
- gst_element_class_set_details_simple (element_class, "FFMPEG Scale element",
- "Filter/Converter/Video",
- "Converts video from one resolution to another",
- "Luca Ognibene <luogni@tin.it>, Mark Nauwelaerts <mnauw@users.sf.net>");
-}
-
-static void
gst_ffmpegscale_class_init (GstFFMpegScaleClass * klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
+ GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
GstBaseTransformClass *trans_class = GST_BASE_TRANSFORM_CLASS (klass);
gobject_class->finalize = gst_ffmpegscale_finalize;
GST_TYPE_FFMPEGSCALE_METHOD, DEFAULT_PROP_METHOD,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&src_factory));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&sink_factory));
+
+ gst_element_class_set_details_simple (gstelement_class,
+ "FFMPEG Scale element", "Filter/Converter/Video",
+ "Converts video from one resolution to another",
+ "Luca Ognibene <luogni@tin.it>, Mark Nauwelaerts <mnauw@users.sf.net>");
+
trans_class->stop = GST_DEBUG_FUNCPTR (gst_ffmpegscale_stop);
trans_class->transform_caps =
GST_DEBUG_FUNCPTR (gst_ffmpegscale_transform_caps);
}
static void
-gst_ffmpegscale_init (GstFFMpegScale * scale, GstFFMpegScaleClass * klass)
+gst_ffmpegscale_init (GstFFMpegScale * scale)
{
GstBaseTransform *trans = GST_BASE_TRANSFORM (scale);
static GstCaps *
gst_ffmpegscale_transform_caps (GstBaseTransform * trans,
- GstPadDirection direction, GstCaps * caps)
+ GstPadDirection direction, GstCaps * caps, GstCaps * filter)
{
GstCaps *ret;
GstStructure *structure;
static gboolean
gst_ffmpegscale_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
- guint * size)
+ gsize * size)
{
- gint width, height;
- GstVideoFormat format;
+ GstVideoInfo info;
- if (!gst_video_format_parse_caps (caps, &format, &width, &height))
+ if (!gst_video_info_from_caps (&info, caps))
return FALSE;
- *size = gst_video_format_get_size (format, width, height);
+ *size = info.size;
- GST_DEBUG_OBJECT (trans, "unit size = %d for format %d w %d height %d",
- *size, format, width, height);
+ /* *size is gsize now; use G_GSIZE_FORMAT instead of %d */
+ GST_DEBUG_OBJECT (trans,
+ "unit size = %" G_GSIZE_FORMAT " for format %d w %d height %d",
+ *size, GST_VIDEO_INFO_FORMAT (&info), GST_VIDEO_INFO_WIDTH (&info),
+ GST_VIDEO_INFO_HEIGHT (&info));
return TRUE;
}
static enum PixelFormat
gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps)
{
- GstStructure *structure;
- enum PixelFormat pix_fmt = PIX_FMT_NONE;
+ GstVideoInfo info;
+ enum PixelFormat pix_fmt;
GST_DEBUG ("converting caps %" GST_PTR_FORMAT, caps);
- g_return_val_if_fail (gst_caps_get_size (caps) == 1, PIX_FMT_NONE);
- structure = gst_caps_get_structure (caps, 0);
- if (strcmp (gst_structure_get_name (structure), "video/x-raw-yuv") == 0) {
- guint32 fourcc;
-
- if (gst_structure_get_fourcc (structure, "format", &fourcc)) {
- switch (fourcc) {
- case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
- pix_fmt = PIX_FMT_YUYV422;
- break;
- case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
- pix_fmt = PIX_FMT_UYVY422;
- break;
- case GST_MAKE_FOURCC ('I', '4', '2', '0'):
- pix_fmt = PIX_FMT_YUV420P;
- break;
- case GST_MAKE_FOURCC ('Y', '4', '1', 'B'):
- pix_fmt = PIX_FMT_YUV411P;
- break;
- case GST_MAKE_FOURCC ('Y', '4', '2', 'B'):
- pix_fmt = PIX_FMT_YUV422P;
- break;
- case GST_MAKE_FOURCC ('Y', 'U', 'V', '9'):
- pix_fmt = PIX_FMT_YUV410P;
- break;
- }
- }
- } else if (strcmp (gst_structure_get_name (structure),
- "video/x-raw-rgb") == 0) {
- gint bpp = 0, rmask = 0, endianness = 0;
-
- if (gst_structure_get_int (structure, "bpp", &bpp) &&
- gst_structure_get_int (structure, "endianness", &endianness) &&
- endianness == G_BIG_ENDIAN) {
- if (gst_structure_get_int (structure, "red_mask", &rmask)) {
- switch (bpp) {
- case 32:
- if (rmask == 0x00ff0000)
- pix_fmt = PIX_FMT_ARGB;
- else if (rmask == 0xff000000)
- pix_fmt = PIX_FMT_RGBA;
- else if (rmask == 0xff00)
- pix_fmt = PIX_FMT_BGRA;
- else if (rmask == 0xff)
- pix_fmt = PIX_FMT_ABGR;
- break;
- case 24:
- if (rmask == 0x0000FF)
- pix_fmt = PIX_FMT_BGR24;
- else
- pix_fmt = PIX_FMT_RGB24;
- break;
- case 16:
- if (endianness == G_BYTE_ORDER)
- pix_fmt = PIX_FMT_RGB565;
- break;
- case 15:
- if (endianness == G_BYTE_ORDER)
- pix_fmt = PIX_FMT_RGB555;
- break;
- default:
- /* nothing */
- break;
- }
- } else {
- if (bpp == 8) {
- pix_fmt = PIX_FMT_PAL8;
- }
- }
- }
- }
+ /* gst_video_info_from_caps() returns TRUE on success; only bail out
+ * when the caps could NOT be parsed (the unnegated check jumped to
+ * invalid_caps for every valid caps and used an unparsed info otherwise) */
+ if (!gst_video_info_from_caps (&info, caps))
+ goto invalid_caps;
+ switch (GST_VIDEO_INFO_FORMAT (&info)) {
+ case GST_VIDEO_FORMAT_YUY2:
+ pix_fmt = PIX_FMT_YUYV422;
+ break;
+ case GST_VIDEO_FORMAT_UYVY:
+ pix_fmt = PIX_FMT_UYVY422;
+ break;
+ case GST_VIDEO_FORMAT_I420:
+ pix_fmt = PIX_FMT_YUV420P;
+ break;
+ case GST_VIDEO_FORMAT_Y41B:
+ pix_fmt = PIX_FMT_YUV411P;
+ break;
+ case GST_VIDEO_FORMAT_Y42B:
+ pix_fmt = PIX_FMT_YUV422P;
+ break;
+ case GST_VIDEO_FORMAT_YUV9:
+ pix_fmt = PIX_FMT_YUV410P;
+ break;
+ case GST_VIDEO_FORMAT_ARGB:
+ pix_fmt = PIX_FMT_ARGB;
+ break;
+ case GST_VIDEO_FORMAT_RGBA:
+ pix_fmt = PIX_FMT_RGBA;
+ break;
+ case GST_VIDEO_FORMAT_BGRA:
+ pix_fmt = PIX_FMT_BGRA;
+ break;
+ case GST_VIDEO_FORMAT_ABGR:
+ pix_fmt = PIX_FMT_ABGR;
+ break;
+ case GST_VIDEO_FORMAT_BGR:
+ pix_fmt = PIX_FMT_BGR24;
+ break;
+ case GST_VIDEO_FORMAT_RGB:
+ pix_fmt = PIX_FMT_RGB24;
+ break;
+ case GST_VIDEO_FORMAT_RGB16:
+ pix_fmt = PIX_FMT_RGB565;
+ break;
+ case GST_VIDEO_FORMAT_RGB15:
+ pix_fmt = PIX_FMT_RGB555;
+ break;
+ case GST_VIDEO_FORMAT_RGB8_PALETTED:
+ pix_fmt = PIX_FMT_PAL8;
+ break;
+ default:
+ pix_fmt = PIX_FMT_NONE;
+ break;
+ }
return pix_fmt;
-}
-static void
-gst_ffmpegscale_fill_info (GstFFMpegScale * scale, GstVideoFormat format,
- guint width, guint height, gint stride[], gint offset[])
-{
- gint i;
-
- for (i = 0; i < 3; i++) {
- stride[i] = gst_video_format_get_row_stride (format, i, width);
- offset[i] = gst_video_format_get_component_offset (format, i, width,
- height);
- /* stay close to the ffmpeg offset way */
- if (offset[i] < 3)
- offset[i] = 0;
- GST_DEBUG_OBJECT (scale, "format %d, component %d; stride %d, offset %d",
- format, i, stride[i], offset[i]);
+ /* ERROR */
+invalid_caps:
+ {
+ return PIX_FMT_NONE;
}
}
GstFFMpegScale *scale = GST_FFMPEGSCALE (trans);
guint mmx_flags, altivec_flags;
gint swsflags;
- GstVideoFormat in_format, out_format;
gboolean ok;
g_return_val_if_fail (scale->method <
scale->ctx = NULL;
}
- ok = gst_video_format_parse_caps (incaps, &in_format, &scale->in_width,
- &scale->in_height);
- ok &= gst_video_format_parse_caps (outcaps, &out_format, &scale->out_width,
- &scale->out_height);
+ ok = gst_video_info_from_caps (&scale->in_info, incaps);
+ ok &= gst_video_info_from_caps (&scale->out_info, outcaps);
+
scale->in_pixfmt = gst_ffmpeg_caps_to_pixfmt (incaps);
scale->out_pixfmt = gst_ffmpeg_caps_to_pixfmt (outcaps);
if (!ok || scale->in_pixfmt == PIX_FMT_NONE ||
scale->out_pixfmt == PIX_FMT_NONE ||
- in_format == GST_VIDEO_FORMAT_UNKNOWN ||
- out_format == GST_VIDEO_FORMAT_UNKNOWN)
+ GST_VIDEO_INFO_FORMAT (&scale->in_info) == GST_VIDEO_FORMAT_UNKNOWN ||
+ GST_VIDEO_INFO_FORMAT (&scale->out_info) == GST_VIDEO_FORMAT_UNKNOWN)
goto refuse_caps;
- GST_DEBUG_OBJECT (scale, "format %d => %d, from=%dx%d -> to=%dx%d", in_format,
- out_format, scale->in_width, scale->in_height, scale->out_width,
- scale->out_height);
-
- gst_ffmpegscale_fill_info (scale, in_format, scale->in_width,
- scale->in_height, scale->in_stride, scale->in_offset);
- gst_ffmpegscale_fill_info (scale, out_format, scale->out_width,
- scale->out_height, scale->out_stride, scale->out_offset);
+ GST_DEBUG_OBJECT (scale, "format %d => %d, from=%dx%d -> to=%dx%d",
+ GST_VIDEO_INFO_FORMAT (&scale->in_info),
+ GST_VIDEO_INFO_FORMAT (&scale->out_info),
+ GST_VIDEO_INFO_WIDTH (&scale->in_info),
+ GST_VIDEO_INFO_HEIGHT (&scale->in_info),
+ GST_VIDEO_INFO_WIDTH (&scale->out_info),
+ GST_VIDEO_INFO_HEIGHT (&scale->out_info));
#ifdef HAVE_ORC
mmx_flags = orc_target_get_default_flags (orc_target_get_by_name ("mmx"));
swsflags = 0;
#endif
-
- scale->ctx = sws_getContext (scale->in_width, scale->in_height,
- scale->in_pixfmt, scale->out_width, scale->out_height, scale->out_pixfmt,
- swsflags | gst_ffmpegscale_method_flags[scale->method], NULL, NULL, NULL);
+ scale->ctx = sws_getContext (scale->in_info.width, scale->in_info.height,
+ scale->in_pixfmt, scale->out_info.width, scale->out_info.height,
+ scale->out_pixfmt, swsflags | gst_ffmpegscale_method_flags[scale->method],
+ NULL, NULL, NULL);
if (!scale->ctx)
goto setup_failed;
GstBuffer * outbuf)
{
GstFFMpegScale *scale = GST_FFMPEGSCALE (trans);
- guint8 *in_data[3] = { NULL, NULL, NULL };
- guint8 *out_data[3] = { NULL, NULL, NULL };
- gint i;
-
- for (i = 0; i < 3; i++) {
- /* again; stay close to the ffmpeg offset way */
- if (!i || scale->in_offset[i])
- in_data[i] = GST_BUFFER_DATA (inbuf) + scale->in_offset[i];
- if (!i || scale->out_offset[i])
- out_data[i] = GST_BUFFER_DATA (outbuf) + scale->out_offset[i];
- }
+ GstVideoFrame in_frame, out_frame;
- sws_scale (scale->ctx, (const guint8 **) in_data, scale->in_stride, 0,
- scale->in_height, out_data, scale->out_stride);
+ if (!gst_video_frame_map (&in_frame, &scale->in_info, inbuf, GST_MAP_READ))
+ goto invalid_buffer;
+
+ if (!gst_video_frame_map (&out_frame, &scale->out_info, outbuf,
+ GST_MAP_WRITE))
+ goto invalid_out_buffer;
+
+ sws_scale (scale->ctx, (const guint8 **) in_frame.data, in_frame.info.stride,
+ 0, scale->in_info.height, (guint8 **) out_frame.data,
+ out_frame.info.stride);
+
+ gst_video_frame_unmap (&in_frame);
+ gst_video_frame_unmap (&out_frame);
return GST_FLOW_OK;
+
+ /* ERRORS */
+invalid_out_buffer:
+ {
+ /* the input frame was mapped successfully before the output map
+ * failed; release it so the mapping is not leaked */
+ gst_video_frame_unmap (&in_frame);
+ }
+invalid_buffer:
+ {
+ return GST_FLOW_OK;
+ }
}
static gboolean
+ /* navigation events carry output-window coordinates; rescale them back
+ * to the input geometry before forwarding upstream */
if (gst_structure_get_double (structure, "pointer_x", &pointer)) {
gst_structure_set (structure,
"pointer_x", G_TYPE_DOUBLE,
- pointer * scale->in_width / scale->out_width, NULL);
+ pointer * scale->in_info.width / scale->out_info.width, NULL);
}
if (gst_structure_get_double (structure, "pointer_y", &pointer)) {
gst_structure_set (structure,
"pointer_y", G_TYPE_DOUBLE,
- pointer * scale->in_height / scale->out_height, NULL);
+ pointer * scale->in_info.height / scale->out_info.height, NULL);
}
break;
default:
noinst_PROGRAMS =
+# the 0.11 API is marked unstable; consumers must opt in explicitly
AM_CFLAGS = $(GST_OBJ_CFLAGS) $(GST_CHECK_CFLAGS) $(CHECK_CFLAGS) \
- $(GST_OPTION_CFLAGS) -DGST_TEST_FILES_PATH="\"$(TEST_FILES_DIRECTORY)\""
+ $(GST_OPTION_CFLAGS) -DGST_TEST_FILES_PATH="\"$(TEST_FILES_DIRECTORY)\"" \
+ -DGST_USE_UNSTABLE_API
LDADD = $(GST_OBJ_LIBS) $(GST_CHECK_LIBS) $(CHECK_LIBS)
}
static gboolean
-pad_check_get_range (GstPad * pad)
-{
- return FALSE;
-}
-
-static gboolean
decode_file (const gchar * file, gboolean push_mode)
{
GstStateChangeReturn state_ret;
- GstElement *sink, *src, *dec, *pipeline;
+ GstElement *sink, *src, *dec, *queue, *pipeline;
GstMessage *msg;
GstBus *bus;
gchar *path;
fail_unless (src != NULL, "Failed to create filesrc!");
if (push_mode) {
- GstPad *pad = gst_element_get_static_pad (src, "src");
-
- /* KIDS: don't do this at home! */
- gst_pad_set_checkgetrange_function (pad, pad_check_get_range);
- gst_object_unref (pad);
+ /* a queue runs its own streaming thread, so everything downstream is
+ * driven in push mode (replaces the old checkgetrange hack) */
+ queue = gst_element_factory_make ("queue", "queue");
+ } else {
+ /* identity is pass-through and keeps pull-based scheduling possible */
+ queue = gst_element_factory_make ("identity", "identity");
}
- dec = gst_element_factory_make ("decodebin2", "decodebin2");
- fail_unless (dec != NULL, "Failed to create decodebin2!");
+ dec = gst_element_factory_make ("decodebin", "decodebin");
+ fail_unless (dec != NULL, "Failed to create decodebin!");
sink = gst_element_factory_make ("fakesink", "fakesink");
fail_unless (sink != NULL, "Failed to create fakesink!");
* we just want to abort and nothing else */
gst_bus_set_sync_handler (bus, error_cb, (gpointer) file);
- gst_bin_add_many (GST_BIN (pipeline), src, dec, sink, NULL);
- gst_element_link_many (src, dec, NULL);
+ gst_bin_add_many (GST_BIN (pipeline), src, queue, dec, sink, NULL);
+ gst_element_link_many (src, queue, dec, NULL);
path = g_build_filename (GST_TEST_FILES_PATH, file, NULL);
GST_LOG ("reading file '%s'", path);
ret = decode_file (filename, FALSE);
fail_unless (ret == TRUE, "Failed to decode '%s' (pull mode)", filename);
- /* first, pull-based */
+ /* second, push-based */
ret = decode_file (filename, TRUE);
fail_unless (ret == TRUE, "Failed to decode '%s' (push mode)", filename);
}
GST_START_TEST (test_low_sample_rate_adpcm)
{
- if (!gst_default_registry_check_feature_version ("wavparse", 0, 10, 0) ||
- !gst_default_registry_check_feature_version ("decodebin2", 0, 10, 0)) {
+/* use the core's own version so the check survives future series bumps */
+#define MIN_VERSION GST_VERSION_MAJOR, GST_VERSION_MINOR, 0
+ if (!gst_default_registry_check_feature_version ("wavparse", MIN_VERSION) ||
+ !gst_default_registry_check_feature_version ("decodebin", MIN_VERSION)) {
g_printerr ("skipping test_low_sample_rate_adpcm: required element "
- "wavparse or element decodebin2 not found\n");
+ "wavparse or element decodebin not found\n");
return;
}
return GST_BUS_PASS;
}
-static gboolean
-event_probe (GstPad * pad, GstEvent * event, GstTagList ** p_tags)
+/* 0.11-style pad probe: saves a copy of the first TAG event's tag list
+ * into the GstTagList ** passed as user_data; all events pass through */
+static GstProbeReturn
+event_probe (GstPad * pad, GstProbeType type, gpointer type_data,
+ gpointer user_data)
{
+ GstTagList **p_tags = user_data;
+ GstEvent *event = type_data;
+
if (GST_EVENT_TYPE (event) == GST_EVENT_TAG) {
- GST_INFO ("tag event: %" GST_PTR_FORMAT, event->structure);
+ GST_INFO ("tag event: %" GST_PTR_FORMAT, event);
if (*p_tags == NULL) {
+ GstTagList *event_tags;
+
GST_INFO ("first tag, saving");
- *p_tags = gst_tag_list_copy ((GstTagList *) event->structure);
+ /* the event owns its tag list, so take a copy for the caller */
+ gst_event_parse_tag (event, &event_tags);
+ *p_tags = gst_tag_list_copy (event_tags);
}
}
- return TRUE; /* keep the data */
+ return GST_PROBE_OK; /* keep the data */
}
/* FIXME: push_mode not used currently */
src = gst_element_factory_make ("filesrc", "filesrc");
fail_unless (src != NULL, "Failed to create filesrc!");
- dec = gst_element_factory_make ("decodebin2", "decodebin2");
- fail_unless (dec != NULL, "Failed to create decodebin2!");
+ dec = gst_element_factory_make ("decodebin", "decodebin");
+ fail_unless (dec != NULL, "Failed to create decodebin!");
sink = gst_element_factory_make ("fakesink", "fakesink");
fail_unless (sink != NULL, "Failed to create fakesink!");
/* we want to make sure there's a tag event coming out of ffdemux_ape
* (ie. the one apedemux generated) */
pad = gst_element_get_static_pad (sink, "sink");
- gst_pad_add_event_probe (pad, G_CALLBACK (event_probe), &tags);
+ /* 0.11: event probes go through the generic gst_pad_add_probe API */
+ gst_pad_add_probe (pad, GST_PROBE_TYPE_EVENT, event_probe, &tags, NULL);
gst_object_unref (pad);
state_ret = gst_element_set_state (pipeline, GST_STATE_PAUSED);
GST_START_TEST (test_tag_caching)
{
- if (!gst_default_registry_check_feature_version ("apedemux", 0, 10, 0) ||
- !gst_default_registry_check_feature_version ("decodebin2", 0, 10, 0)) {
+/* use the core's own version so the check survives future series bumps */
+#define MIN_VERSION GST_VERSION_MAJOR, GST_VERSION_MINOR, 0
+
+ if (!gst_default_registry_check_feature_version ("apedemux", MIN_VERSION) ||
+ !gst_default_registry_check_feature_version ("decodebin", MIN_VERSION)) {
g_printerr ("Skipping test_tag_caching: required element apedemux or "
- "decodebin2 element not found\n");
+ "decodebin element not found\n");
return;
}
sinks = g_strjoinv (" ", sink);
+ /* 0.11 caps: raw video is video/x-raw with a string format, fourcc is gone */
s = g_strdup_printf
- ("videotestsrc ! video/x-raw-yuv,format=(fourcc)I420,width=320,height=240,framerate=(fraction)10/1 ! tee name=t %s",
+ ("videotestsrc ! video/x-raw,format=(string)I420,width=320,height=240,framerate=(fraction)10/1 ! tee name=t %s",
sinks);
run_pipeline (setup_pipeline (s), s,