--- /dev/null
- } else {
+ /* GStreamer
+ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
+ * This file:
+ * Copyright (c) 2002-2004 Ronald Bultje <rbultje@ronald.bitfreak.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+
+ #include <string.h>
+
+ #include <gst/gst.h>
+ #include <libavcodec/avcodec.h>
+ #include <libavutil/channel_layout.h>
+
+ #include "gstav.h"
+ #include "gstavcodecmap.h"
+
+ #include <gst/video/video.h>
+ #include <gst/audio/audio.h>
+ #include <gst/pbutils/codec-utils.h>
+
+ /* Map from each single ffmpeg AV_CH_* channel flag to the corresponding
+  * GStreamer channel position; scanned linearly by the converters below.
+  * IMPORTANT: Keep this sorted by the ffmpeg channel masks */
+ static const struct
+ {
+   guint64 ff;
+   GstAudioChannelPosition gst;
+ } _ff_to_gst_layout[] = {
+   {
+   AV_CH_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
+   AV_CH_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}, {
+   AV_CH_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER}, {
+   AV_CH_LOW_FREQUENCY, GST_AUDIO_CHANNEL_POSITION_LFE1}, {
+   AV_CH_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_REAR_LEFT}, {
+   AV_CH_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_REAR_RIGHT}, {
+   AV_CH_FRONT_LEFT_OF_CENTER, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER}, {
+   AV_CH_FRONT_RIGHT_OF_CENTER,
+       GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER}, {
+   AV_CH_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_REAR_CENTER}, {
+   AV_CH_SIDE_LEFT, GST_AUDIO_CHANNEL_POSITION_SIDE_LEFT}, {
+   AV_CH_SIDE_RIGHT, GST_AUDIO_CHANNEL_POSITION_SIDE_RIGHT}, {
+   AV_CH_TOP_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_CENTER}, {
+   AV_CH_TOP_FRONT_LEFT, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_LEFT}, {
+   AV_CH_TOP_FRONT_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_CENTER}, {
+   AV_CH_TOP_FRONT_RIGHT, GST_AUDIO_CHANNEL_POSITION_TOP_FRONT_RIGHT}, {
+   AV_CH_TOP_BACK_LEFT, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_LEFT}, {
+   AV_CH_TOP_BACK_CENTER, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_CENTER}, {
+   AV_CH_TOP_BACK_RIGHT, GST_AUDIO_CHANNEL_POSITION_TOP_REAR_RIGHT}, {
+   /* the STEREO_* flags deliberately reuse the plain front left/right
+    * positions; reverse lookups in this table will hit the front-left/right
+    * entries first because those come earlier */
+   AV_CH_STEREO_LEFT, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT}, {
+   AV_CH_STEREO_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}
+ };
+
+ /* Convert @channels GStreamer channel positions in @pos into an ffmpeg
+  * channel-layout bitmask.
+  *
+  * Returns 0 when @pos is NULL or when any position has no ffmpeg
+  * counterpart in _ff_to_gst_layout (partial mappings are refused). */
+ static guint64
+ gst_ffmpeg_channel_positions_to_layout (GstAudioChannelPosition * pos,
+     gint channels)
+ {
+   gint i, j;
+   guint64 ret = 0;
+   gint channels_found = 0;
+ 
+   if (!pos)
+     return 0;
+ 
+   /* GStreamer distinguishes MONO from FRONT_CENTER but ffmpeg's mono
+    * layout is the same bit as front-center, so special-case mono here */
+   if (channels == 1 && pos[0] == GST_AUDIO_CHANNEL_POSITION_MONO)
+     return AV_CH_LAYOUT_MONO;
+ 
+   for (i = 0; i < channels; i++) {
+     for (j = 0; j < G_N_ELEMENTS (_ff_to_gst_layout); j++) {
+       if (_ff_to_gst_layout[j].gst == pos[i]) {
+         ret |= _ff_to_gst_layout[j].ff;
+         channels_found++;
+         break;
+       }
+     }
+   }
+ 
+   /* all positions must have been translated, otherwise the mask would
+    * describe fewer channels than the caller asked for */
+   if (channels_found != channels)
+     return 0;
+   return ret;
+ }
+
+ /* Convert an ffmpeg channel-layout bitmask plus channel count into an
+  * array of GStreamer channel positions.
+  *
+  * @pos must have room for at least 64 entries.  If the layout is unknown,
+  * inconsistent with @channels, or larger than 64 channels, a fallback is
+  * written instead: MONO for 1 channel, FL/FR for 2, otherwise all-NONE.
+  * Always returns TRUE. */
+ gboolean
+ gst_ffmpeg_channel_layout_to_gst (guint64 channel_layout, gint channels,
+     GstAudioChannelPosition * pos)
+ {
+   guint nchannels = 0;
+   gboolean none_layout = FALSE;
+ 
+   if (channel_layout == 0 || channels > 64) {
+     nchannels = channels;
+     none_layout = TRUE;
+   } else {
+     guint i, j;
+ 
+     /* Special path for mono, as AV_CH_LAYOUT_MONO is the same
+      * as FRONT_CENTER but we distinguish between the two in
+      * GStreamer
+      */
+     if (channels == 1 && channel_layout == AV_CH_LAYOUT_MONO) {
+       pos[0] = GST_AUDIO_CHANNEL_POSITION_MONO;
+       return TRUE;
+     }
+ 
+     /* count the bits set in the mask; it must agree with @channels */
+     for (i = 0; i < 64; i++) {
+       if ((channel_layout & (G_GUINT64_CONSTANT (1) << i)) != 0) {
+         nchannels++;
+       }
+     }
+ 
+     /* cast avoids a signed/unsigned comparison and the format string now
+      * matches the argument types (%d for the gint, %u for the guint) */
+     if (nchannels != (guint) channels) {
+       GST_ERROR ("Number of channels is different (%d != %u)", channels,
+           nchannels);
+       nchannels = channels;
+       none_layout = TRUE;
+     } else {
+ 
+       for (i = 0, j = 0; i < G_N_ELEMENTS (_ff_to_gst_layout); i++) {
+         if ((channel_layout & _ff_to_gst_layout[i].ff) != 0) {
+           pos[j++] = _ff_to_gst_layout[i].gst;
+ 
+           if (_ff_to_gst_layout[i].gst == GST_AUDIO_CHANNEL_POSITION_NONE)
+             none_layout = TRUE;
+         }
+       }
+ 
+       /* bits set in the mask that have no table entry */
+       if (j != nchannels) {
+         GST_WARNING
+             ("Unknown channels in channel layout - assuming NONE layout");
+         none_layout = TRUE;
+       }
+     }
+   }
+ 
+   if (!none_layout
+       && !gst_audio_check_valid_channel_positions (pos, nchannels, FALSE)) {
+     GST_ERROR ("Invalid channel layout %" G_GUINT64_FORMAT
+         " - assuming NONE layout", channel_layout);
+     none_layout = TRUE;
+   }
+ 
+   if (none_layout) {
+     if (nchannels == 1) {
+       pos[0] = GST_AUDIO_CHANNEL_POSITION_MONO;
+     } else if (nchannels == 2) {
+       pos[0] = GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT;
+       pos[1] = GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT;
+     } else {
+       guint i;
+ 
+       for (i = 0; i < nchannels && i < 64; i++)
+         pos[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
+     }
+   }
+ 
+   return TRUE;
+ }
+
+ /* Return TRUE if @list (a GST_TYPE_LIST GValue) already contains a value
+  * equal to @value, per gst_value_compare().  Used below to de-duplicate
+  * format lists. */
+ static gboolean
+ _gst_value_list_contains (const GValue * list, const GValue * value)
+ {
+   guint i, n;
+   const GValue *tmp;
+ 
+   n = gst_value_list_get_size (list);
+   for (i = 0; i < n; i++) {
+     tmp = gst_value_list_get_value (list, i);
+     if (gst_value_compare (value, tmp) == GST_VALUE_EQUAL)
+       return TRUE;
+   }
+ 
+   return FALSE;
+ }
+
+ /* Set the "format" field on @caps from a -1-terminated array of ffmpeg
+  * pixel formats.  A NULL/empty @fmts advertises every pixel format that
+  * gst_ffmpeg_pixfmt_to_videoformat() can translate. */
+ static void
+ gst_ffmpeg_video_set_pix_fmts (GstCaps * caps, const enum AVPixelFormat *fmts)
+ {
+   GValue va = { 0, };
+   GValue v = { 0, };
+   GstVideoFormat format;
+ 
+   if (!fmts || fmts[0] == -1) {
+     gint i;
+ 
+     g_value_init (&va, GST_TYPE_LIST);
+     g_value_init (&v, G_TYPE_STRING);
+     /* NOTE(review): "<=" iterates one past AV_PIX_FMT_NB; this relies on
+      * gst_ffmpeg_pixfmt_to_videoformat() returning UNKNOWN for
+      * out-of-range values -- confirm */
+     for (i = 0; i <= AV_PIX_FMT_NB; i++) {
+       format = gst_ffmpeg_pixfmt_to_videoformat (i);
+       if (format == GST_VIDEO_FORMAT_UNKNOWN)
+         continue;
+       g_value_set_string (&v, gst_video_format_to_string (format));
+       gst_value_list_append_value (&va, &v);
+     }
+     gst_caps_set_value (caps, "format", &va);
+     g_value_unset (&v);
+     g_value_unset (&va);
+     return;
+   }
+ 
+   /* Translate the explicit -1-terminated list, skipping unknown formats
+    * and duplicates */
+   g_value_init (&va, GST_TYPE_LIST);
+   g_value_init (&v, G_TYPE_STRING);
+   while (*fmts != -1) {
+     format = gst_ffmpeg_pixfmt_to_videoformat (*fmts);
+     if (format != GST_VIDEO_FORMAT_UNKNOWN) {
+       g_value_set_string (&v, gst_video_format_to_string (format));
+       /* Only append values we don't have yet */
+       if (!_gst_value_list_contains (&va, &v))
+         gst_value_list_append_value (&va, &v);
+     }
+     fmts++;
+   }
+   if (gst_value_list_get_size (&va) == 1) {
+     /* The single value is still in v */
+     gst_caps_set_value (caps, "format", &v);
+   } else if (gst_value_list_get_size (&va) > 1) {
+     gst_caps_set_value (caps, "format", &va);
+   }
+   /* if nothing translated, the "format" field is left unset (unrestricted) */
+   g_value_unset (&v);
+   g_value_unset (&va);
+ }
+
+ /* this function builds caps with fixed or unfixed width/height
+  * properties, depending on whether we've got a context.
+  *
+  * See below for why we use this.
+  *
+  * We should actually do this stuff at the end, like in riff-media.c,
+  * but I'm too lazy today. Maybe later.
+  */
+ /* Create video caps for @codec_id with media type @mimetype.
+  *
+  * With a fixed (non-probing) @context the exact width/height/framerate
+  * are set.  When encoding without such a context, per-codec restrictions
+  * (legal picture sizes, supported framerates) are applied.  Otherwise
+  * unrestricted caps are returned.  Extra fields are appended from the
+  * varargs as with gst_caps_new_simple(). */
+ static GstCaps *
+ gst_ff_vid_caps_new (AVCodecContext * context, AVCodec * codec,
+     enum AVCodecID codec_id, gboolean encode, const char *mimetype,
+     const char *fieldname, ...)
+ {
+   GstCaps *caps = NULL;
+   va_list var_args;
+   gint i;
+ 
+   GST_LOG ("context:%p, codec_id:%d, mimetype:%s", context, codec_id, mimetype);
+ 
+   /* fixed, non probing context */
+   /* NOTE(review): a freshly-allocated context usually has width == 0, not
+    * -1 -- confirm this test really distinguishes probing contexts */
+   if (context != NULL && context->width != -1) {
+     gint num, denom;
+ 
+     caps = gst_caps_new_simple (mimetype,
+         "width", G_TYPE_INT, context->width,
+         "height", G_TYPE_INT, context->height, NULL);
+ 
+     num = context->framerate.num;
+     denom = context->framerate.den;
+ 
+     /* sanitize the framerate: no zero denominator, and clamp absurd
+      * rates (> 1000 fps) to 0/1 (unknown) */
+     if (!denom) {
+       GST_LOG ("invalid framerate: %d/0, -> %d/1", num, num);
+       denom = 1;
+     }
+     if (gst_util_fraction_compare (num, denom, 1000, 1) > 0) {
+       GST_LOG ("excessive framerate: %d/%d, -> 0/1", num, denom);
+       num = 0;
+       denom = 1;
+     }
+     GST_LOG ("setting framerate: %d/%d", num, denom);
+     gst_caps_set_simple (caps,
+         "framerate", GST_TYPE_FRACTION, num, denom, NULL);
+   } else if (encode) {
+     /* so we are after restricted caps in this case */
+     switch (codec_id) {
+       case AV_CODEC_ID_H261:
+       {
+         /* H.261 only supports CIF and QCIF */
+         caps = gst_caps_new_simple (mimetype,
+             "width", G_TYPE_INT, 352,
+             "height", G_TYPE_INT, 288,
+             "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+         gst_caps_append (caps, gst_caps_new_simple (mimetype,
+                 "width", G_TYPE_INT, 176,
+                 "height", G_TYPE_INT, 144,
+                 "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL));
+         break;
+       }
+       case AV_CODEC_ID_H263:
+       {
+         /* 128x96, 176x144, 352x288, 704x576, and 1408x1152. slightly reordered
+          * because we want automatic negotiation to go as close to 320x240 as
+          * possible. */
+         const static gint widths[] = { 352, 704, 176, 1408, 128 };
+         const static gint heights[] = { 288, 576, 144, 1152, 96 };
+         GstCaps *temp;
+         gint n_sizes = G_N_ELEMENTS (widths);
+ 
+         caps = gst_caps_new_empty ();
+         for (i = 0; i < n_sizes; i++) {
+           temp = gst_caps_new_simple (mimetype,
+               "width", G_TYPE_INT, widths[i],
+               "height", G_TYPE_INT, heights[i],
+               "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+ 
+           gst_caps_append (caps, temp);
+         }
+         break;
+       }
+       case AV_CODEC_ID_DVVIDEO:
+       {
+         /* the fixed set of DV profiles: colorspace, size, PAR and rate */
+         static struct
+         {
+           const gchar *csp;
+           gint width, height;
+           gint par_n, par_d;
+           gint framerate_n, framerate_d;
+         } profiles[] = {
+           {
+           "Y41B", 720, 480, 8, 9, 30000, 1001}, {
+           "Y41B", 720, 480, 32, 27, 30000, 1001}, {
+           "Y42B", 720, 480, 8, 9, 30000, 1001}, {
+           "Y42B", 720, 480, 32, 27, 30000, 1001}, {
+           "I420", 720, 576, 16, 15, 25, 1}, {
+           "I420", 720, 576, 64, 45, 25, 1}, {
+           "Y41B", 720, 576, 16, 15, 25, 1}, {
+           "Y41B", 720, 576, 64, 45, 25, 1}, {
+           "Y42B", 720, 576, 16, 15, 25, 1}, {
+           "Y42B", 720, 576, 64, 45, 25, 1}, {
+           "Y42B", 1280, 1080, 1, 1, 30000, 1001}, {
+           "Y42B", 1280, 1080, 3, 2, 30000, 1001}, {
+           "Y42B", 1440, 1080, 1, 1, 25, 1}, {
+           "Y42B", 1440, 1080, 4, 3, 25, 1}, {
+           "Y42B", 960, 720, 1, 1, 60000, 1001}, {
+           "Y42B", 960, 720, 4, 3, 60000, 1001}, {
+           "Y42B", 960, 720, 1, 1, 50, 1}, {
+           "Y42B", 960, 720, 4, 3, 50, 1},};
+         GstCaps *temp;
+         gint n_sizes = G_N_ELEMENTS (profiles);
+ 
+         /* raw caps carry the colorspace; encoded dv caps do not */
+         if (strcmp (mimetype, "video/x-raw") == 0) {
+           caps = gst_caps_new_empty ();
+           for (i = 0; i < n_sizes; i++) {
+             temp = gst_caps_new_simple (mimetype,
+                 "format", G_TYPE_STRING, profiles[i].csp,
+                 "width", G_TYPE_INT, profiles[i].width,
+                 "height", G_TYPE_INT, profiles[i].height,
+                 "framerate", GST_TYPE_FRACTION, profiles[i].framerate_n,
+                 profiles[i].framerate_d, "pixel-aspect-ratio",
+                 GST_TYPE_FRACTION, profiles[i].par_n, profiles[i].par_d, NULL);
+ 
+             gst_caps_append (caps, temp);
+           }
+         } else {
+           caps = gst_caps_new_empty ();
+           for (i = 0; i < n_sizes; i++) {
+             temp = gst_caps_new_simple (mimetype,
+                 "width", G_TYPE_INT, profiles[i].width,
+                 "height", G_TYPE_INT, profiles[i].height,
+                 "framerate", GST_TYPE_FRACTION, profiles[i].framerate_n,
+                 profiles[i].framerate_d, "pixel-aspect-ratio",
+                 GST_TYPE_FRACTION, profiles[i].par_n, profiles[i].par_d, NULL);
+ 
+             gst_caps_append (caps, temp);
+           }
+         }
+         break;
+       }
+       case AV_CODEC_ID_DNXHD:
+       {
+         caps = gst_caps_new_simple (mimetype,
+             "width", G_TYPE_INT, 1920,
+             "height", G_TYPE_INT, 1080,
+             "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+         gst_caps_append (caps, gst_caps_new_simple (mimetype,
+                 "width", G_TYPE_INT, 1280,
+                 "height", G_TYPE_INT, 720,
+                 "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL));
+         break;
+       }
+       default:
+       {
+         /* no size restriction, but honor the encoder's advertised list of
+          * supported framerates, if any (terminated by a 0/0 entry) */
+         if (codec && codec->supported_framerates
+             && codec->supported_framerates[0].num != 0
+             && codec->supported_framerates[0].den != 0) {
+           GValue va = { 0, };
+           GValue v = { 0, };
+           const AVRational *rates = codec->supported_framerates;
+ 
+           if (rates[1].num == 0 && rates[1].den == 0) {
+             caps =
+                 gst_caps_new_simple (mimetype, "framerate", GST_TYPE_FRACTION,
+                 rates[0].num, rates[0].den, NULL);
+           } else {
+             g_value_init (&va, GST_TYPE_LIST);
+             g_value_init (&v, GST_TYPE_FRACTION);
+ 
+             while (rates->num != 0 && rates->den != 0) {
+               gst_value_set_fraction (&v, rates->num, rates->den);
+               gst_value_list_append_value (&va, &v);
+               rates++;
+             }
+ 
+             caps = gst_caps_new_simple (mimetype, NULL, NULL, NULL);
+             gst_caps_set_value (caps, "framerate", &va);
+             g_value_unset (&va);
+             g_value_unset (&v);
+           }
+ 
+         } else {
+           caps = gst_caps_new_empty_simple (mimetype);
+         }
+ 
+         break;
+       }
+     }
+   }
+ 
+   /* no fixed caps or special restrictions applied;
+    * default unfixed setting */
+   if (!caps) {
+     GST_DEBUG ("Creating default caps");
+     caps = gst_caps_new_empty_simple (mimetype);
+   }
+ 
+   va_start (var_args, fieldname);
+   gst_caps_set_simple_valist (caps, fieldname, var_args);
+   va_end (var_args);
+ 
+   return caps;
+ }
+
+ /* Return the number of bits set in @n (64-bit population count). */
+ static gint
+ get_nbits_set (guint64 n)
+ {
+   gint i, x;
+ 
+   x = 0;
+   for (i = 0; i < 64; i++) {
+     if ((n & (G_GUINT64_CONSTANT (1) << i)))
+       x++;
+   }
+ 
+   return x;
+ }
+
+ /* Set the "format" and "layout" fields on @caps from a -1-terminated
+  * array of ffmpeg sample formats.
+  *
+  * A NULL/empty @fmts advertises every translatable sample format.  With
+  * @always_interleaved, all formats are advertised as layout=interleaved;
+  * otherwise ffmpeg planar formats become a separate non-interleaved
+  * structure appended to @caps. */
+ static void
+ gst_ffmpeg_audio_set_sample_fmts (GstCaps * caps,
+     const enum AVSampleFormat *fmts, gboolean always_interleaved)
+ {
+   GValue va = { 0, };
+   GValue vap = { 0, };
+   GValue v = { 0, };
+   GstAudioFormat format;
+   GstAudioLayout layout;
+   GstCaps *caps_copy = NULL;
+ 
+   if (!fmts || fmts[0] == -1) {
+     gint i;
+ 
+     g_value_init (&va, GST_TYPE_LIST);
+     g_value_init (&v, G_TYPE_STRING);
+     for (i = 0; i <= AV_SAMPLE_FMT_DBL; i++) {
+       format = gst_ffmpeg_smpfmt_to_audioformat (i, NULL);
+       if (format == GST_AUDIO_FORMAT_UNKNOWN)
+         continue;
+       g_value_set_string (&v, gst_audio_format_to_string (format));
+       gst_value_list_append_value (&va, &v);
+     }
+     gst_caps_set_value (caps, "format", &va);
+     if (!always_interleaved) {
+       g_value_init (&vap, GST_TYPE_LIST);
+       g_value_set_string (&v, "interleaved");
+       gst_value_list_append_value (&vap, &v);
+       g_value_set_string (&v, "non-interleaved");
+       gst_value_list_append_value (&vap, &v);
+       gst_caps_set_value (caps, "layout", &vap);
+       g_value_unset (&vap);
+     } else {
+       gst_caps_set_simple (caps, "layout", G_TYPE_STRING, "interleaved", NULL);
+     }
+     g_value_unset (&v);
+     g_value_unset (&va);
+     return;
+   }
+ 
+   /* va collects interleaved formats, vap collects non-interleaved ones */
+   g_value_init (&va, GST_TYPE_LIST);
+   g_value_init (&vap, GST_TYPE_LIST);
+   g_value_init (&v, G_TYPE_STRING);
+   while (*fmts != -1) {
+     format = gst_ffmpeg_smpfmt_to_audioformat (*fmts, &layout);
+     if (format != GST_AUDIO_FORMAT_UNKNOWN) {
+       g_value_set_string (&v, gst_audio_format_to_string (format));
+       /* Only append values we don't have yet */
+       if (layout == GST_AUDIO_LAYOUT_INTERLEAVED || always_interleaved) {
+         if (!_gst_value_list_contains (&va, &v))
+           gst_value_list_append_value (&va, &v);
+       } else {
+         if (!_gst_value_list_contains (&vap, &v))
+           gst_value_list_append_value (&vap, &v);
+       }
+     }
+     fmts++;
+   }
+   /* when both kinds exist, the non-interleaved variant is expressed on a
+    * copy of @caps which gets appended at the end */
+   if (gst_value_list_get_size (&va) >= 1 && gst_value_list_get_size (&vap) >= 1) {
+     caps_copy = gst_caps_copy (caps);
+   }
+   if (gst_value_list_get_size (&va) == 1) {
+     gst_caps_set_value (caps, "format", gst_value_list_get_value (&va, 0));
+     gst_caps_set_simple (caps, "layout", G_TYPE_STRING, "interleaved", NULL);
+   } else if (gst_value_list_get_size (&va) > 1) {
+     gst_caps_set_value (caps, "format", &va);
+     gst_caps_set_simple (caps, "layout", G_TYPE_STRING, "interleaved", NULL);
+   }
+   if (gst_value_list_get_size (&vap) == 1) {
+     gst_caps_set_value (caps_copy ? caps_copy : caps, "format",
+         gst_value_list_get_value (&vap, 0));
+     gst_caps_set_simple (caps_copy ? caps_copy : caps, "layout", G_TYPE_STRING,
+         "non-interleaved", NULL);
+   } else if (gst_value_list_get_size (&vap) > 1) {
+     gst_caps_set_value (caps_copy ? caps_copy : caps, "format", &vap);
+     gst_caps_set_simple (caps_copy ? caps_copy : caps, "layout", G_TYPE_STRING,
+         "non-interleaved", NULL);
+   }
+   if (caps_copy) {
+     gst_caps_append (caps, caps_copy);
+   }
+   g_value_unset (&v);
+   g_value_unset (&va);
+   g_value_unset (&vap);
+ }
+
+ /* Audio counterpart of gst_ff_vid_caps_new(): create audio caps for
+  * @codec_id / @mimetype with channels/rate restrictions.
+  *
+  * A fixed context yields exact rate/channels (plus a channel-mask when
+  * derivable).  When encoding without a context, per-codec whitelists of
+  * sample rates and channel counts are applied.  Extra fields come from
+  * the varargs. */
+ static GstCaps *
+ gst_ff_aud_caps_new (AVCodecContext * context, AVCodec * codec,
+     enum AVCodecID codec_id, gboolean encode, const char *mimetype,
+     const char *fieldname, ...)
+ {
+   GstCaps *caps = NULL;
+   gint i;
+   va_list var_args;
+ 
+   /* fixed, non-probing context */
+   /* NOTE(review): unset contexts typically have channels == 0, not -1 --
+    * confirm this really detects probing contexts */
+   if (context != NULL && context->channels != -1) {
+     GstAudioChannelPosition pos[64];
+     guint64 mask;
+ 
+     caps = gst_caps_new_simple (mimetype,
+         "rate", G_TYPE_INT, context->sample_rate,
+         "channels", G_TYPE_INT, context->channels, NULL);
+ 
+     /* add a channel-mask if the ffmpeg layout translates cleanly */
+     if (context->channels > 1 &&
+         gst_ffmpeg_channel_layout_to_gst (context->channel_layout,
+             context->channels, pos) &&
+         gst_audio_channel_positions_to_mask (pos, context->channels, FALSE,
+             &mask)) {
+       gst_caps_set_simple (caps, "channel-mask", GST_TYPE_BITMASK, mask, NULL);
+     }
+   } else if (encode) {
+     gint maxchannels = 2;
+     const gint *rates = NULL;
+     gint n_rates = 0;
+ 
+     /* so we must be after restricted caps in this case */
+     switch (codec_id) {
+       case AV_CODEC_ID_AAC:
+       case AV_CODEC_ID_AAC_LATM:
+       case AV_CODEC_ID_DTS:
+         maxchannels = 6;
+         break;
+       case AV_CODEC_ID_MP2:
+       {
+         const static gint l_rates[] =
+             { 48000, 44100, 32000, 24000, 22050, 16000 };
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         break;
+       }
+       case AV_CODEC_ID_EAC3:
+       case AV_CODEC_ID_AC3:
+       {
+         const static gint l_rates[] = { 48000, 44100, 32000 };
+         maxchannels = 6;
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         break;
+       }
+       case AV_CODEC_ID_ADPCM_G722:
+       {
+         const static gint l_rates[] = { 16000 };
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         maxchannels = 1;
+         break;
+       }
+       case AV_CODEC_ID_ADPCM_G726:
+       {
+         const static gint l_rates[] = { 8000 };
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         maxchannels = 1;
+         break;
+       }
+       case AV_CODEC_ID_ADPCM_SWF:
+       {
+         const static gint l_rates[] = { 11025, 22050, 44100 };
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         break;
+       }
+       case AV_CODEC_ID_ROQ_DPCM:
+       {
+         const static gint l_rates[] = { 22050 };
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         break;
+       }
+       case AV_CODEC_ID_AMR_NB:
+       {
+         const static gint l_rates[] = { 8000 };
+         maxchannels = 1;
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         break;
+       }
+       case AV_CODEC_ID_AMR_WB:
+       {
+         const static gint l_rates[] = { 16000 };
+         maxchannels = 1;
+         n_rates = G_N_ELEMENTS (l_rates);
+         rates = l_rates;
+         break;
+       }
+       default:
+         break;
+     }
+ 
+     /* regardless of encode/decode, open up channels if applicable */
+     /* Until decoders/encoders expose the maximum number of channels
+      * they support, we whitelist them here. */
+     switch (codec_id) {
+       case AV_CODEC_ID_WMAPRO:
+       case AV_CODEC_ID_TRUEHD:
+         maxchannels = 8;
+         break;
+       default:
+         break;
+     }
+ 
+     /* if the codec advertises explicit channel layouts (0-terminated
+      * list), build one caps structure per layout */
+     if (codec && codec->channel_layouts) {
+       const uint64_t *layouts = codec->channel_layouts;
+       GstAudioChannelPosition pos[64];
+ 
+       caps = gst_caps_new_empty ();
+       while (*layouts) {
+         gint nbits_set = get_nbits_set (*layouts);
+ 
+         if (gst_ffmpeg_channel_layout_to_gst (*layouts, nbits_set, pos)) {
+           guint64 mask;
+ 
+           if (gst_audio_channel_positions_to_mask (pos, nbits_set, FALSE,
+                   &mask)) {
+             GstStructure *s =
+                 gst_structure_new (mimetype, "channels", G_TYPE_INT, nbits_set,
+                 NULL);
+ 
+             /* No need to require a channel mask for mono or stereo */
+             if (!(nbits_set == 1 && pos[0] == GST_AUDIO_CHANNEL_POSITION_MONO)
+                 && !(nbits_set == 2
+                     && pos[0] == GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT
+                     && pos[1] == GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT))
+               gst_structure_set (s, "channel-mask", GST_TYPE_BITMASK, mask,
+                   NULL);
+ 
+             gst_caps_append_structure (caps, s);
+           }
+         }
+         layouts++;
+       }
+     } else {
+       if (maxchannels == 1)
+         caps = gst_caps_new_simple (mimetype,
+             "channels", G_TYPE_INT, maxchannels, NULL);
+       else
+         caps = gst_caps_new_simple (mimetype,
+             "channels", GST_TYPE_INT_RANGE, 1, maxchannels, NULL);
+     }
+ 
+     /* sample rates: explicit whitelist above wins, then the codec's own
+      * 0-terminated supported_samplerates list, then a broad range */
+     if (n_rates) {
+       GValue list = { 0, };
+ 
+       g_value_init (&list, GST_TYPE_LIST);
+       for (i = 0; i < n_rates; i++) {
+         GValue v = { 0, };
+ 
+         g_value_init (&v, G_TYPE_INT);
+         g_value_set_int (&v, rates[i]);
+         gst_value_list_append_value (&list, &v);
+         g_value_unset (&v);
+       }
+       gst_caps_set_value (caps, "rate", &list);
+       g_value_unset (&list);
+     } else if (codec && codec->supported_samplerates
+         && codec->supported_samplerates[0]) {
+       GValue va = { 0, };
+       GValue v = { 0, };
+ 
+       if (!codec->supported_samplerates[1]) {
+         gst_caps_set_simple (caps, "rate", G_TYPE_INT,
+             codec->supported_samplerates[0], NULL);
+       } else {
+         const int *rates = codec->supported_samplerates;
+ 
+         g_value_init (&va, GST_TYPE_LIST);
+         g_value_init (&v, G_TYPE_INT);
+ 
+         while (*rates) {
+           g_value_set_int (&v, *rates);
+           gst_value_list_append_value (&va, &v);
+           rates++;
+         }
+         gst_caps_set_value (caps, "rate", &va);
+         g_value_unset (&va);
+         g_value_unset (&v);
+       }
+     } else {
+       gst_caps_set_simple (caps, "rate", GST_TYPE_INT_RANGE, 4000, 96000, NULL);
+     }
+   } else {
+     caps = gst_caps_new_empty_simple (mimetype);
+   }
+ 
+   va_start (var_args, fieldname);
+   gst_caps_set_simple_valist (caps, fieldname, var_args);
+   va_end (var_args);
+ 
+   return caps;
+ }
+
+ /* Check if the given codec ID is an image format -- for now this is just
+  * anything whose caps is image/...
+  * (keep this list in sync with the image/* cases in
+  * gst_ffmpeg_codecid_to_caps() below) */
+ gboolean
+ gst_ffmpeg_codecid_is_image (enum AVCodecID codec_id)
+ {
+   switch (codec_id) {
+     case AV_CODEC_ID_MJPEG:
+     case AV_CODEC_ID_LJPEG:
+     case AV_CODEC_ID_GIF:
+     case AV_CODEC_ID_PPM:
+     case AV_CODEC_ID_PBM:
+     case AV_CODEC_ID_PCX:
+     case AV_CODEC_ID_SGI:
+     case AV_CODEC_ID_TARGA:
+     case AV_CODEC_ID_TIFF:
+     case AV_CODEC_ID_SUNRAST:
+     case AV_CODEC_ID_BMP:
+       return TRUE;
+ 
+     default:
+       return FALSE;
+   }
+ }
+
+ /* Convert a FFMPEG codec ID and optional AVCodecContext
+  * to a GstCaps. If the context is omitted, no fixed values
+  * for video/audio size will be included in the GstCaps
+  *
+  * CodecID is primarily meant for compressed data GstCaps!
+  *
+  * encode is a special parameter. gstffmpegdec will say
+  * FALSE, gstffmpegenc will say TRUE. The output caps
+  * depend on this, in such a way that they will be very
+  * specific, defined, fixed and correct caps for encoders,
+  * yet very wide, "forgiving" caps for decoders. Example
+  * for mp3: decode: audio/mpeg,mpegversion=1,layer=[1-3]
+  * but encode: audio/mpeg,mpegversion=1,layer=3,bitrate=x,
+  * rate=x,channels=x.
+  */
+
+ GstCaps *
+ gst_ffmpeg_codecid_to_caps (enum AVCodecID codec_id,
+ AVCodecContext * context, gboolean encode)
+ {
+ GstCaps *caps = NULL;
+ gboolean buildcaps = FALSE;
+
+ GST_LOG ("codec_id:%d, context:%p, encode:%d", codec_id, context, encode);
+
+ switch (codec_id) {
+ case AV_CODEC_ID_MPEG1VIDEO:
+ /* FIXME: bitrate */
+ caps = gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/mpeg",
+ "mpegversion", G_TYPE_INT, 1,
+ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
+ break;
+
+ case AV_CODEC_ID_MPEG2VIDEO:
+ if (encode) {
+ /* FIXME: bitrate */
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/mpeg",
+ "mpegversion", G_TYPE_INT, 2, "systemstream", G_TYPE_BOOLEAN, FALSE,
+ NULL);
+ } else {
+ /* decode both MPEG-1 and MPEG-2; width/height/fps are all in
+ * the MPEG video stream headers, so may be omitted from caps. */
+ caps = gst_caps_new_simple ("video/mpeg",
+ "mpegversion", GST_TYPE_INT_RANGE, 1, 2,
+ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_H263:
+ if (encode) {
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-h263", "variant", G_TYPE_STRING, "itu", "h263version",
+ G_TYPE_STRING, "h263", NULL);
+ } else {
+ /* don't pass codec_id, we can decode other variants with the H263
+ * decoder that don't have specific size requirements
+ */
+ caps =
+ gst_ff_vid_caps_new (context, NULL, AV_CODEC_ID_NONE, encode,
+ "video/x-h263", "variant", G_TYPE_STRING, "itu", NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_H263P:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-h263",
+ "variant", G_TYPE_STRING, "itu", "h263version", G_TYPE_STRING,
+ "h263p", NULL);
+ if (encode && context) {
+
+ gst_caps_set_simple (caps,
+ "annex-f", G_TYPE_BOOLEAN, context->flags & AV_CODEC_FLAG_4MV,
+ "annex-j", G_TYPE_BOOLEAN,
+ context->flags & AV_CODEC_FLAG_LOOP_FILTER,
+ "annex-i", G_TYPE_BOOLEAN, context->flags & AV_CODEC_FLAG_AC_PRED,
+ "annex-t", G_TYPE_BOOLEAN, context->flags & AV_CODEC_FLAG_AC_PRED,
+ NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_H263I:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-intel-h263", "variant", G_TYPE_STRING, "intel", NULL);
+ break;
+
+ case AV_CODEC_ID_H261:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-h261",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_RV10:
+ case AV_CODEC_ID_RV20:
+ case AV_CODEC_ID_RV30:
+ case AV_CODEC_ID_RV40:
+ {
+ gint version;
+
+ switch (codec_id) {
+ case AV_CODEC_ID_RV40:
+ version = 4;
+ break;
+ case AV_CODEC_ID_RV30:
+ version = 3;
+ break;
+ case AV_CODEC_ID_RV20:
+ version = 2;
+ break;
+ default:
+ version = 1;
+ break;
+ }
+
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-pn-realvideo", "rmversion", G_TYPE_INT, version, NULL);
+ if (context) {
+ if (context->extradata_size >= 8) {
+ gst_caps_set_simple (caps,
+ "subformat", G_TYPE_INT, GST_READ_UINT32_BE (context->extradata),
+ NULL);
+ }
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_MP1:
+ /* FIXME: bitrate */
+ caps = gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 1, NULL);
+ break;
+
+ case AV_CODEC_ID_MP2:
+ /* FIXME: bitrate */
+ caps = gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 2, NULL);
+ break;
+
+ case AV_CODEC_ID_MP3:
+ if (encode) {
+ /* FIXME: bitrate */
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 1, "layer", G_TYPE_INT, 3, NULL);
+ } else {
+ /* Decodes MPEG-1 layer 1/2/3. Samplerate, channels et al are
+ * in the MPEG audio header, so may be omitted from caps. */
+ caps = gst_caps_new_simple ("audio/mpeg",
+ "mpegversion", G_TYPE_INT, 1,
+ "layer", GST_TYPE_INT_RANGE, 1, 3, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_MUSEPACK7:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-ffmpeg-parsed-musepack", "streamversion", G_TYPE_INT, 7,
+ NULL);
+ break;
+
+ case AV_CODEC_ID_MUSEPACK8:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-ffmpeg-parsed-musepack", "streamversion", G_TYPE_INT, 8,
+ NULL);
+ break;
+
+ case AV_CODEC_ID_AC3:
+ /* FIXME: bitrate */
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-ac3",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_EAC3:
+ /* FIXME: bitrate */
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-eac3",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_TRUEHD:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-true-hd", NULL);
+ break;
+
+ case AV_CODEC_ID_ATRAC1:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-vnd.sony.atrac1", NULL);
+ break;
+
+ case AV_CODEC_ID_ATRAC3:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-vnd.sony.atrac3", NULL);
+ break;
+
+ case AV_CODEC_ID_DTS:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dts",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_APE:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-ffmpeg-parsed-ape", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "depth", G_TYPE_INT, context->bits_per_coded_sample, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_MLP:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-mlp",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_METASOUND:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-voxware", NULL);
+ break;
+
+ case AV_CODEC_ID_IMC:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-imc",
+ NULL);
+ break;
+
+ /* MJPEG is normal JPEG, Motion-JPEG and Quicktime MJPEG-A. MJPEGB
+ * is Quicktime's MJPEG-B. LJPEG is lossless JPEG. I don't know what
+ * sp5x is, but it's apparently something JPEG... We don't separate
+ * between those in GStreamer. Should we (at least between MJPEG,
+ * MJPEG-B and sp5x decoding...)? */
+ case AV_CODEC_ID_MJPEG:
+ case AV_CODEC_ID_LJPEG:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/jpeg",
+ "parsed", G_TYPE_BOOLEAN, TRUE, NULL);
+ break;
+
+ case AV_CODEC_ID_JPEG2000:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/x-j2c",
+ NULL);
+ if (!encode) {
+ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
+ encode, "image/x-jpc", NULL));
+ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
+ encode, "image/jp2", NULL));
+ }
+ break;
+
+ case AV_CODEC_ID_SP5X:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/sp5x",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_MJPEGB:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-mjpeg-b", NULL);
+ break;
+
+ case AV_CODEC_ID_MPEG4:
+ if (encode && context != NULL) {
+ /* I'm not exactly sure what ffmpeg outputs... ffmpeg itself uses
+ * the AVI fourcc 'DIVX', but 'mp4v' for Quicktime... */
+ switch (context->codec_tag) {
+ case GST_MAKE_FOURCC ('D', 'I', 'V', 'X'):
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-divx", "divxversion", G_TYPE_INT, 5, NULL);
+ break;
+ case GST_MAKE_FOURCC ('m', 'p', '4', 'v'):
+ default:
+ /* FIXME: bitrate. libav doesn't expose the used profile and level */
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/mpeg", "systemstream", G_TYPE_BOOLEAN, FALSE,
+ "mpegversion", G_TYPE_INT, 4, NULL);
+ break;
+ }
+ } else {
+ /* The trick here is to separate xvid, divx, mpeg4, 3ivx et al */
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/mpeg",
+ "mpegversion", G_TYPE_INT, 4, "systemstream", G_TYPE_BOOLEAN, FALSE,
+ NULL);
+
+ if (encode) {
+ GValue arr = { 0, };
+ GValue item = { 0, };
+
+ g_value_init (&arr, GST_TYPE_LIST);
+ g_value_init (&item, G_TYPE_STRING);
+ g_value_set_string (&item, "simple");
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_string (&item, "advanced-simple");
+ gst_value_list_append_value (&arr, &item);
+ g_value_unset (&item);
+
+ gst_caps_set_value (caps, "profile", &arr);
+ g_value_unset (&arr);
+
+ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
+ encode, "video/x-divx", "divxversion", G_TYPE_INT, 5, NULL));
++ }
++#ifndef TIZEN_FEATURE_LIBAV
++ else {
+ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
+ encode, "video/x-divx", "divxversion", GST_TYPE_INT_RANGE, 4,
+ 5, NULL));
+ }
++#endif
+ }
+ break;
+
+ case AV_CODEC_ID_RAWVIDEO:
+ caps =
+ gst_ffmpeg_codectype_to_video_caps (context, codec_id, encode, NULL);
+ break;
+
+ case AV_CODEC_ID_MSMPEG4V1:
+ case AV_CODEC_ID_MSMPEG4V2:
+ case AV_CODEC_ID_MSMPEG4V3:
+ {
+ gint version = 41 + codec_id - AV_CODEC_ID_MSMPEG4V1;
+
+ /* encode-FIXME: bitrate */
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-msmpeg", "msmpegversion", G_TYPE_INT, version, NULL);
+ if (!encode && codec_id == AV_CODEC_ID_MSMPEG4V3) {
+ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
+ encode, "video/x-divx", "divxversion", G_TYPE_INT, 3, NULL));
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_WMV1:
+ case AV_CODEC_ID_WMV2:
+ {
+ gint version = (codec_id == AV_CODEC_ID_WMV1) ? 1 : 2;
+
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
+ "wmvversion", G_TYPE_INT, version, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_FLV1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-flash-video", "flvversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case AV_CODEC_ID_SVQ1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-svq",
+ "svqversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case AV_CODEC_ID_SVQ3:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-svq",
+ "svqversion", G_TYPE_INT, 3, NULL);
+ break;
+
+ case AV_CODEC_ID_DVAUDIO:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dv",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_DVVIDEO:
+ {
+ if (encode && context) {
+ const gchar *format;
+
+ switch (context->pix_fmt) {
+ case AV_PIX_FMT_YUYV422:
+ format = "YUY2";
+ break;
+ case AV_PIX_FMT_YUV420P:
+ format = "I420";
+ break;
+ case AV_PIX_FMT_YUVA420P:
+ format = "A420";
+ break;
+ case AV_PIX_FMT_YUV411P:
+ format = "Y41B";
+ break;
+ case AV_PIX_FMT_YUV422P:
+ format = "Y42B";
+ break;
+ case AV_PIX_FMT_YUV410P:
+ format = "YUV9";
+ break;
+ default:
+ GST_WARNING
+ ("Couldn't find format for pixfmt %d, defaulting to I420",
+ context->pix_fmt);
+ format = "I420";
+ break;
+ }
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-dv",
+ "systemstream", G_TYPE_BOOLEAN, FALSE, "format", G_TYPE_STRING,
+ format, NULL);
+ } else {
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-dv",
+ "systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_WMAV1:
+ case AV_CODEC_ID_WMAV2:
+ {
+ gint version = (codec_id == AV_CODEC_ID_WMAV1) ? 1 : 2;
+
+ if (context) {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wma",
+ "wmaversion", G_TYPE_INT, version, "block_align", G_TYPE_INT,
+ context->block_align, "bitrate", G_TYPE_INT,
+ (guint) context->bit_rate, NULL);
+ } else {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wma",
+ "wmaversion", G_TYPE_INT, version, "block_align",
+ GST_TYPE_INT_RANGE, 0, G_MAXINT, "bitrate", GST_TYPE_INT_RANGE, 0,
+ G_MAXINT, NULL);
+ }
+ }
+ break;
+ case AV_CODEC_ID_WMAPRO:
+ {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wma",
+ "wmaversion", G_TYPE_INT, 3, NULL);
+ break;
+ }
+ case AV_CODEC_ID_WMALOSSLESS:
+ {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wma",
+ "wmaversion", G_TYPE_INT, 4, NULL);
+ break;
+ }
+ case AV_CODEC_ID_WMAVOICE:
+ {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-wms",
+ NULL);
+ break;
+ }
+
+ case AV_CODEC_ID_XMA1:
+ {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-xma",
+ "xmaversion", G_TYPE_INT, 1, NULL);
+ break;
+ }
+ case AV_CODEC_ID_XMA2:
+ {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-xma",
+ "xmaversion", G_TYPE_INT, 2, NULL);
+ break;
+ }
+
+ case AV_CODEC_ID_MACE3:
+ case AV_CODEC_ID_MACE6:
+ {
+ gint version = (codec_id == AV_CODEC_ID_MACE3) ? 3 : 6;
+
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-mace",
+ "maceversion", G_TYPE_INT, version, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_HUFFYUV:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-huffyuv", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "bpp", G_TYPE_INT, context->bits_per_coded_sample, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_CYUV:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-compressed-yuv", NULL);
+ break;
+
+ case AV_CODEC_ID_H264:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-h264",
+ "alignment", G_TYPE_STRING, "au", NULL);
+ if (!encode) {
+ GValue arr = { 0, };
+ GValue item = { 0, };
+ g_value_init (&arr, GST_TYPE_LIST);
+ g_value_init (&item, G_TYPE_STRING);
+ g_value_set_string (&item, "avc");
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_string (&item, "byte-stream");
+ gst_value_list_append_value (&arr, &item);
+ g_value_unset (&item);
+ gst_caps_set_value (caps, "stream-format", &arr);
+ g_value_unset (&arr);
+
+ gst_caps_append (caps, gst_ff_vid_caps_new (context, NULL, codec_id,
+ encode, "video/x-h264", "alignment", G_TYPE_STRING, "nal",
+ "stream-format", G_TYPE_STRING, "byte-stream", NULL));
+
+ } else if (context) {
+ /* FIXME: ffmpeg currently assumes AVC if there is extradata and
+ * byte-stream otherwise. See for example the MOV or MPEG-TS code.
+ * ffmpeg does not distinguish the different types of AVC. */
+ if (context->extradata_size > 0) {
+ gst_caps_set_simple (caps, "stream-format", G_TYPE_STRING, "avc",
+ NULL);
+ } else {
+ gst_caps_set_simple (caps, "stream-format", G_TYPE_STRING,
+ "byte-stream", NULL);
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_HEVC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-h265",
+ "alignment", G_TYPE_STRING, "au", NULL);
+ if (!encode) {
+ GValue arr = { 0, };
+ GValue item = { 0, };
+ g_value_init (&arr, GST_TYPE_LIST);
+ g_value_init (&item, G_TYPE_STRING);
+ g_value_set_string (&item, "hvc1");
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_string (&item, "hev1");
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_string (&item, "byte-stream");
+ gst_value_list_append_value (&arr, &item);
+ g_value_unset (&item);
+ gst_caps_set_value (caps, "stream-format", &arr);
+ g_value_unset (&arr);
+ } else if (context) {
+ /* FIXME: ffmpeg currently assumes HVC1 if there is extradata and
+ * byte-stream otherwise. See for example the MOV or MPEG-TS code.
+ * ffmpeg does not distinguish the different types: HVC1/HEV1/etc. */
+ if (context->extradata_size > 0) {
+ gst_caps_set_simple (caps, "stream-format", G_TYPE_STRING, "hvc1",
+ NULL);
+ } else {
+ gst_caps_set_simple (caps, "stream-format", G_TYPE_STRING,
+ "byte-stream", NULL);
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_INDEO5:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 5, NULL);
+ break;
+
+ case AV_CODEC_ID_INDEO4:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 4, NULL);
+ break;
+
+ case AV_CODEC_ID_INDEO3:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 3, NULL);
+ break;
+
+ case AV_CODEC_ID_INDEO2:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-indeo",
+ "indeoversion", G_TYPE_INT, 2, NULL);
+ break;
+
+ case AV_CODEC_ID_FLASHSV:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-flash-screen", NULL);
+ break;
+
+ case AV_CODEC_ID_FLASHSV2:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-flash-screen2", NULL);
+ break;
+
+ case AV_CODEC_ID_VP3:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp3",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_VP5:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp5",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_VP6:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp6",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_VP6F:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-vp6-flash", NULL);
+ break;
+
+ case AV_CODEC_ID_VP6A:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-vp6-alpha", NULL);
+ break;
+
+ case AV_CODEC_ID_VP8:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp8",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_VP9:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vp9",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_THEORA:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-theora", NULL);
+ break;
+
+ case AV_CODEC_ID_CFHD:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-cineform", NULL);
+ break;
+
+ case AV_CODEC_ID_SPEEDHQ:
+ if (context && context->codec_tag) {
+ gchar *variant = g_strdup_printf ("%" GST_FOURCC_FORMAT,
+ GST_FOURCC_ARGS (context->codec_tag));
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-speedhq", "variant", G_TYPE_STRING, variant, NULL);
+ g_free (variant);
+ } else {
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-speedhq", NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_AAC:
+ {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ NULL);
+
+ if (!encode) {
+ GValue arr = { 0, };
+ GValue item = { 0, };
+
+ g_value_init (&arr, GST_TYPE_LIST);
+ g_value_init (&item, G_TYPE_INT);
+ g_value_set_int (&item, 2);
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_int (&item, 4);
+ gst_value_list_append_value (&arr, &item);
+ g_value_unset (&item);
+
+ gst_caps_set_value (caps, "mpegversion", &arr);
+ g_value_unset (&arr);
+
+ g_value_init (&arr, GST_TYPE_LIST);
+ g_value_init (&item, G_TYPE_STRING);
+ g_value_set_string (&item, "raw");
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_string (&item, "adts");
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_string (&item, "adif");
+ gst_value_list_append_value (&arr, &item);
+ g_value_unset (&item);
+
+ gst_caps_set_value (caps, "stream-format", &arr);
+ g_value_unset (&arr);
+ } else {
+ gst_caps_set_simple (caps, "mpegversion", G_TYPE_INT, 4,
+ "base-profile", G_TYPE_STRING, "lc", NULL);
+
+ /* FIXME: ffmpeg currently assumes raw if there is extradata and
+ * ADTS otherwise. See for example the FDK AAC encoder. */
+ if (context && context->extradata_size > 0) {
+ gst_caps_set_simple (caps, "stream-format", G_TYPE_STRING, "raw",
+ NULL);
+ gst_codec_utils_aac_caps_set_level_and_profile (caps,
+ context->extradata, context->extradata_size);
+ } else if (context) {
+ gst_caps_set_simple (caps, "stream-format", G_TYPE_STRING, "adts",
+ NULL);
+ }
+ }
+
+ break;
+ }
+ case AV_CODEC_ID_AAC_LATM: /* LATM/LOAS AAC syntax */
+ caps = gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/mpeg",
+ "mpegversion", G_TYPE_INT, 4, "stream-format", G_TYPE_STRING, "loas",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_ASV1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-asus",
+ "asusversion", G_TYPE_INT, 1, NULL);
+ break;
+ case AV_CODEC_ID_ASV2:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-asus",
+ "asusversion", G_TYPE_INT, 2, NULL);
+ break;
+
+ case AV_CODEC_ID_FFV1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-ffv",
+ "ffvversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case AV_CODEC_ID_4XM:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-4xm",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_XAN_WC3:
+ case AV_CODEC_ID_XAN_WC4:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-xan",
+ "wcversion", G_TYPE_INT, 3 - AV_CODEC_ID_XAN_WC3 + codec_id, NULL);
+ break;
+
+ case AV_CODEC_ID_CLJR:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-cirrus-logic-accupak", NULL);
+ break;
+
+ case AV_CODEC_ID_FRAPS:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-fraps",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_MDEC:
+ case AV_CODEC_ID_ROQ:
+ case AV_CODEC_ID_INTERPLAY_VIDEO:
+ buildcaps = TRUE;
+ break;
+
+ case AV_CODEC_ID_VCR1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-ati-vcr", "vcrversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case AV_CODEC_ID_RPZA:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-apple-video", NULL);
+ break;
+
+ case AV_CODEC_ID_CINEPAK:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-cinepak", NULL);
+ break;
+
+ /* WS_VQA belongs here (order) */
+
+ case AV_CODEC_ID_MSRLE:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-rle",
+ "layout", G_TYPE_STRING, "microsoft", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
+ } else {
+ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 1, 64, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_QTRLE:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-rle",
+ "layout", G_TYPE_STRING, "quicktime", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
+ } else {
+ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 1, 64, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_MSVIDEO1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-msvideocodec", "msvideoversion", G_TYPE_INT, 1, NULL);
+ break;
+
+ case AV_CODEC_ID_MSS1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
+ "wmvversion", G_TYPE_INT, 1, "format", G_TYPE_STRING, "MSS1", NULL);
+ break;
+
+ case AV_CODEC_ID_MSS2:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
+ "wmvversion", G_TYPE_INT, 3, "format", G_TYPE_STRING, "MSS2", NULL);
+ break;
+
+ case AV_CODEC_ID_WMV3:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
+ "wmvversion", G_TYPE_INT, 3, "format", G_TYPE_STRING, "WMV3", NULL);
+ break;
+ case AV_CODEC_ID_VC1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-wmv",
+ "wmvversion", G_TYPE_INT, 3, NULL);
+ if (!context && !encode) {
+ GValue arr = { 0, };
+ GValue item = { 0, };
+
+ g_value_init (&arr, GST_TYPE_LIST);
+ g_value_init (&item, G_TYPE_STRING);
+ g_value_set_string (&item, "WVC1");
+ gst_value_list_append_value (&arr, &item);
+ g_value_set_string (&item, "WMVA");
+ gst_value_list_append_and_take_value (&arr, &item);
+ gst_caps_set_value (caps, "format", &arr);
+ g_value_unset (&arr);
+ } else {
+ gst_caps_set_simple (caps, "format", G_TYPE_STRING, "WVC1", NULL);
+ }
+ break;
+ case AV_CODEC_ID_QDM2:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-qdm2",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_MSZH:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-mszh",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_ZLIB:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-zlib",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_TRUEMOTION1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-truemotion", "trueversion", G_TYPE_INT, 1, NULL);
+ break;
+ case AV_CODEC_ID_TRUEMOTION2:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-truemotion", "trueversion", G_TYPE_INT, 2, NULL);
+ break;
+
+ case AV_CODEC_ID_ULTI:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-ultimotion", NULL);
+ break;
+
+ case AV_CODEC_ID_TSCC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-camtasia", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
+ } else {
+ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 8, 32, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_TSCC2:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-tscc", "tsccversion", G_TYPE_INT, 2, NULL);
+ break;
+
+ case AV_CODEC_ID_KMVC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-kmvc",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_NUV:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-nuv",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_GIF:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "image/gst-libav-gif", "parsed", G_TYPE_BOOLEAN, TRUE, NULL);
+ break;
+
+ case AV_CODEC_ID_PNG:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/png",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_PPM:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/ppm",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_PBM:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/pbm",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_PAM:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "image/x-portable-anymap", NULL);
+ break;
+
+ case AV_CODEC_ID_PGM:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "image/x-portable-graymap", NULL);
+ break;
+
+ case AV_CODEC_ID_PCX:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/x-pcx",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_SGI:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/x-sgi",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_TARGA:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/x-tga",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_TIFF:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "image/tiff",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_SUNRAST:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "image/x-sun-raster", NULL);
+ break;
+
+ case AV_CODEC_ID_SMC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-smc",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_QDRAW:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-qdrw",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_DNXHD:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-dnxhd",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_PRORES:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-prores", NULL);
+ if (context) {
+ switch (context->codec_tag) {
+ case GST_MAKE_FOURCC ('a', 'p', 'c', 'o'):
+ gst_caps_set_simple (caps, "variant", G_TYPE_STRING, "proxy", NULL);
+ break;
+ case GST_MAKE_FOURCC ('a', 'p', 'c', 's'):
+ gst_caps_set_simple (caps, "variant", G_TYPE_STRING, "lt", NULL);
+ break;
+ default:
+ case GST_MAKE_FOURCC ('a', 'p', 'c', 'n'):
+ gst_caps_set_simple (caps, "variant", G_TYPE_STRING, "standard",
+ NULL);
+ break;
+ case GST_MAKE_FOURCC ('a', 'p', 'c', 'h'):
+ gst_caps_set_simple (caps, "variant", G_TYPE_STRING, "hq", NULL);
+ break;
+ case GST_MAKE_FOURCC ('a', 'p', '4', 'h'):
+ gst_caps_set_simple (caps, "variant", G_TYPE_STRING, "4444", NULL);
+ break;
+ case GST_MAKE_FOURCC ('a', 'p', '4', 'x'):
+ gst_caps_set_simple (caps, "variant", G_TYPE_STRING, "4444xq",
+ NULL);
+ break;
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_MIMIC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-mimic",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_VMNC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-vmnc",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_TRUESPEECH:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-truespeech", NULL);
+ break;
+
+ case AV_CODEC_ID_QCELP:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/qcelp",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_AMV:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-amv",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_AASC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-aasc",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_LOCO:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-loco",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_ZMBV:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-zmbv",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_LAGARITH:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-lagarith", NULL);
+ break;
+
+ case AV_CODEC_ID_CSCD:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-camstudio", NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "depth", G_TYPE_INT, (gint) context->bits_per_coded_sample, NULL);
+ } else {
+ gst_caps_set_simple (caps, "depth", GST_TYPE_INT_RANGE, 8, 32, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_AIC:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-apple-intermediate-codec", NULL);
+ break;
+
+ case AV_CODEC_ID_CAVS:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode,
+ "video/x-cavs", NULL);
+ break;
+
+ case AV_CODEC_ID_WS_VQA:
+ case AV_CODEC_ID_IDCIN:
+ case AV_CODEC_ID_8BPS:
+ case AV_CODEC_ID_FLIC:
+ case AV_CODEC_ID_VMDVIDEO:
+ case AV_CODEC_ID_VMDAUDIO:
+ case AV_CODEC_ID_VIXL:
+ case AV_CODEC_ID_QPEG:
+ case AV_CODEC_ID_PGMYUV:
+ case AV_CODEC_ID_FFVHUFF:
+ case AV_CODEC_ID_WNV1:
+ case AV_CODEC_ID_MP3ADU:
+ case AV_CODEC_ID_MP3ON4:
+ case AV_CODEC_ID_WESTWOOD_SND1:
+ case AV_CODEC_ID_MMVIDEO:
+ case AV_CODEC_ID_AVS:
+ buildcaps = TRUE;
+ break;
+
+ /* weird quasi-codecs for the demuxers only */
+ case AV_CODEC_ID_PCM_S16LE:
+ case AV_CODEC_ID_PCM_S16BE:
+ case AV_CODEC_ID_PCM_U16LE:
+ case AV_CODEC_ID_PCM_U16BE:
+ case AV_CODEC_ID_PCM_S8:
+ case AV_CODEC_ID_PCM_U8:
+ {
+ GstAudioFormat format;
+
+ switch (codec_id) {
+ case AV_CODEC_ID_PCM_S16LE:
+ format = GST_AUDIO_FORMAT_S16LE;
+ break;
+ case AV_CODEC_ID_PCM_S16BE:
+ format = GST_AUDIO_FORMAT_S16BE;
+ break;
+ case AV_CODEC_ID_PCM_U16LE:
+ format = GST_AUDIO_FORMAT_U16LE;
+ break;
+ case AV_CODEC_ID_PCM_U16BE:
+ format = GST_AUDIO_FORMAT_U16BE;
+ break;
+ case AV_CODEC_ID_PCM_S8:
+ format = GST_AUDIO_FORMAT_S8;
+ break;
+ case AV_CODEC_ID_PCM_U8:
+ format = GST_AUDIO_FORMAT_U8;
+ break;
+ default:
+ format = 0;
+ g_assert (0); /* don't worry, we never get here */
+ break;
+ }
+
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-raw",
+ "format", G_TYPE_STRING, gst_audio_format_to_string (format),
+ "layout", G_TYPE_STRING, "interleaved", NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_PCM_MULAW:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-mulaw",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_PCM_ALAW:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-alaw",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_ADPCM_G722:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/G722",
+ NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+ "block_align", G_TYPE_INT, context->block_align,
+ "bitrate", G_TYPE_INT, (guint) context->bit_rate, NULL);
+ break;
+
+ case AV_CODEC_ID_ADPCM_G726:
+ {
+ /* the G726 decoder can also handle G721 */
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-adpcm",
+ "layout", G_TYPE_STRING, "g726", NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+ "block_align", G_TYPE_INT, context->block_align,
+ "bitrate", G_TYPE_INT, (guint) context->bit_rate, NULL);
+
+ if (!encode) {
+ gst_caps_append (caps, gst_caps_new_simple ("audio/x-adpcm",
+ "layout", G_TYPE_STRING, "g721",
+ "channels", G_TYPE_INT, 1, "rate", G_TYPE_INT, 8000, NULL));
+ }
+ break;
+ }
+ case AV_CODEC_ID_ADPCM_IMA_QT:
+ case AV_CODEC_ID_ADPCM_IMA_WAV:
+ case AV_CODEC_ID_ADPCM_IMA_DK3:
+ case AV_CODEC_ID_ADPCM_IMA_DK4:
+ case AV_CODEC_ID_ADPCM_IMA_OKI:
+ case AV_CODEC_ID_ADPCM_IMA_WS:
+ case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
+ case AV_CODEC_ID_ADPCM_IMA_AMV:
+ case AV_CODEC_ID_ADPCM_IMA_ISS:
+ case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
+ case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
+ case AV_CODEC_ID_ADPCM_MS:
+ case AV_CODEC_ID_ADPCM_4XM:
+ case AV_CODEC_ID_ADPCM_XA:
+ case AV_CODEC_ID_ADPCM_ADX:
+ case AV_CODEC_ID_ADPCM_EA:
+ case AV_CODEC_ID_ADPCM_CT:
+ case AV_CODEC_ID_ADPCM_SWF:
+ case AV_CODEC_ID_ADPCM_YAMAHA:
+ case AV_CODEC_ID_ADPCM_SBPRO_2:
+ case AV_CODEC_ID_ADPCM_SBPRO_3:
+ case AV_CODEC_ID_ADPCM_SBPRO_4:
+ case AV_CODEC_ID_ADPCM_EA_R1:
+ case AV_CODEC_ID_ADPCM_EA_R2:
+ case AV_CODEC_ID_ADPCM_EA_R3:
+ case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
+ case AV_CODEC_ID_ADPCM_EA_XAS:
+ case AV_CODEC_ID_ADPCM_THP:
+ {
+ const gchar *layout = NULL;
+
+ switch (codec_id) {
+ case AV_CODEC_ID_ADPCM_IMA_QT:
+ layout = "quicktime";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_WAV:
+ layout = "dvi";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_DK3:
+ layout = "dk3";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_DK4:
+ layout = "dk4";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_OKI:
+ layout = "oki";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_WS:
+ layout = "westwood";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
+ layout = "smjpeg";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_AMV:
+ layout = "amv";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_ISS:
+ layout = "iss";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
+ layout = "ea-eacs";
+ break;
+ case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
+ layout = "ea-sead";
+ break;
+ case AV_CODEC_ID_ADPCM_MS:
+ layout = "microsoft";
+ break;
+ case AV_CODEC_ID_ADPCM_4XM:
+ layout = "4xm";
+ break;
+ case AV_CODEC_ID_ADPCM_XA:
+ layout = "xa";
+ break;
+ case AV_CODEC_ID_ADPCM_ADX:
+ layout = "adx";
+ break;
+ case AV_CODEC_ID_ADPCM_EA:
+ layout = "ea";
+ break;
+ case AV_CODEC_ID_ADPCM_CT:
+ layout = "ct";
+ break;
+ case AV_CODEC_ID_ADPCM_SWF:
+ layout = "swf";
+ break;
+ case AV_CODEC_ID_ADPCM_YAMAHA:
+ layout = "yamaha";
+ break;
+ case AV_CODEC_ID_ADPCM_SBPRO_2:
+ layout = "sbpro2";
+ break;
+ case AV_CODEC_ID_ADPCM_SBPRO_3:
+ layout = "sbpro3";
+ break;
+ case AV_CODEC_ID_ADPCM_SBPRO_4:
+ layout = "sbpro4";
+ break;
+ case AV_CODEC_ID_ADPCM_EA_R1:
+ layout = "ea-r1";
+ break;
+ case AV_CODEC_ID_ADPCM_EA_R2:
+ layout = "ea-r2";
+ break;
+ case AV_CODEC_ID_ADPCM_EA_R3:
+ layout = "ea-r3";
+ break;
+ case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
+ layout = "ea-maxis-xa";
+ break;
+ case AV_CODEC_ID_ADPCM_EA_XAS:
+ layout = "ea-xas";
+ break;
+ case AV_CODEC_ID_ADPCM_THP:
+ layout = "thp";
+ break;
+ default:
+ g_assert (0); /* don't worry, we never get here */
+ break;
+ }
+
+ /* FIXME: someone please check whether we need additional properties
+ * in this caps definition. */
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-adpcm",
+ "layout", G_TYPE_STRING, layout, NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+ "block_align", G_TYPE_INT, context->block_align,
+ "bitrate", G_TYPE_INT, (guint) context->bit_rate, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_AMR_NB:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/AMR",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_AMR_WB:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/AMR-WB",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_GSM:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-gsm",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_GSM_MS:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/ms-gsm",
+ NULL);
+ break;
+
+ case AV_CODEC_ID_NELLYMOSER:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-nellymoser", NULL);
+ break;
+
+ case AV_CODEC_ID_SIPR:
+ {
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-sipro",
+ NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "leaf_size", G_TYPE_INT, context->block_align,
+ "bitrate", G_TYPE_INT, (guint) context->bit_rate, NULL);
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_RA_144:
+ case AV_CODEC_ID_RA_288:
+ case AV_CODEC_ID_COOK:
+ {
+ gint version = 0;
+
+ switch (codec_id) {
+ case AV_CODEC_ID_RA_144:
+ version = 1;
+ break;
+ case AV_CODEC_ID_RA_288:
+ version = 2;
+ break;
+ case AV_CODEC_ID_COOK:
+ version = 8;
+ break;
+ default:
+ break;
+ }
+
+ /* FIXME: properties? */
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-pn-realaudio", "raversion", G_TYPE_INT, version, NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "leaf_size", G_TYPE_INT, context->block_align,
+ "bitrate", G_TYPE_INT, (guint) context->bit_rate, NULL);
+ }
+ }
+ break;
+
+ case AV_CODEC_ID_ROQ_DPCM:
+ case AV_CODEC_ID_INTERPLAY_DPCM:
+ case AV_CODEC_ID_XAN_DPCM:
+ case AV_CODEC_ID_SOL_DPCM:
+ {
+ const gchar *layout = NULL;
+
+ switch (codec_id) {
+ case AV_CODEC_ID_ROQ_DPCM:
+ layout = "roq";
+ break;
+ case AV_CODEC_ID_INTERPLAY_DPCM:
+ layout = "interplay";
+ break;
+ case AV_CODEC_ID_XAN_DPCM:
+ layout = "xan";
+ break;
+ case AV_CODEC_ID_SOL_DPCM:
+ layout = "sol";
+ break;
+ default:
+ g_assert (0); /* don't worry, we never get here */
+ break;
+ }
+
+ /* FIXME: someone please check whether we need additional properties
+ * in this caps definition. */
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dpcm",
+ "layout", G_TYPE_STRING, layout, NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+ "block_align", G_TYPE_INT, context->block_align,
+ "bitrate", G_TYPE_INT, (guint) context->bit_rate, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_SHORTEN:
+ caps = gst_caps_new_empty_simple ("audio/x-shorten");
+ break;
+
+ case AV_CODEC_ID_ALAC:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-alac",
+ NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "samplesize", G_TYPE_INT, context->bits_per_coded_sample, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_FLAC:
+ /* Note that ffmpeg has no encoder yet, but just for safety. In the
+ * encoder case, we want to add things like samplerate, channels... */
+ if (!encode) {
+ caps = gst_caps_new_empty_simple ("audio/x-flac");
+ }
+ break;
+
+ case AV_CODEC_ID_OPUS:
+ /* Note that ffmpeg has no encoder yet, but just for safety. In the
+ * encoder case, we want to add things like samplerate, channels... */
+ if (!encode) {
+ /* FIXME: can ffmpeg handle multichannel Opus? */
+ caps = gst_caps_new_simple ("audio/x-opus",
+ "channel-mapping-family", G_TYPE_INT, 0, NULL);
+ }
+ break;
+
+ case AV_CODEC_ID_S302M:
+ caps = gst_caps_new_empty_simple ("audio/x-smpte-302m");
+ break;
+
+ case AV_CODEC_ID_DVD_SUBTITLE:
+ case AV_CODEC_ID_DVB_SUBTITLE:
+ caps = NULL;
+ break;
+ case AV_CODEC_ID_BMP:
+ caps = gst_caps_new_empty_simple ("image/bmp");
+ break;
+ case AV_CODEC_ID_TTA:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-tta",
+ NULL);
+ if (context) {
+ gst_caps_set_simple (caps,
+ "samplesize", G_TYPE_INT, context->bits_per_coded_sample, NULL);
+ }
+ break;
+ case AV_CODEC_ID_TWINVQ:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode,
+ "audio/x-twin-vq", NULL);
+ break;
+ case AV_CODEC_ID_G729:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/G729",
+ NULL);
+ break;
+ case AV_CODEC_ID_DSD_LSBF:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dsd",
+ NULL);
+ gst_caps_set_simple (caps, "lsbf", G_TYPE_BOOLEAN,
+ TRUE, "planar", G_TYPE_BOOLEAN, FALSE, NULL);
+ break;
+ case AV_CODEC_ID_DSD_MSBF:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dsd",
+ NULL);
+ gst_caps_set_simple (caps, "lsbf", G_TYPE_BOOLEAN,
+ FALSE, "planar", G_TYPE_BOOLEAN, FALSE, NULL);
+ break;
+ case AV_CODEC_ID_DSD_LSBF_PLANAR:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dsd",
+ NULL);
+ gst_caps_set_simple (caps, "lsbf", G_TYPE_BOOLEAN,
+ TRUE, "planar", G_TYPE_BOOLEAN, TRUE, NULL);
+ break;
+ case AV_CODEC_ID_DSD_MSBF_PLANAR:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/x-dsd",
+ NULL);
+ gst_caps_set_simple (caps, "lsbf", G_TYPE_BOOLEAN,
+ FALSE, "planar", G_TYPE_BOOLEAN, TRUE, NULL);
+ break;
+ case AV_CODEC_ID_APTX:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/aptx",
+ NULL);
+ break;
+ case AV_CODEC_ID_APTX_HD:
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, "audio/aptx-hd",
+ NULL);
+ break;
+ case AV_CODEC_ID_AV1:
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, "video/x-av1",
+ NULL);
+ break;
+ default:
+ GST_DEBUG ("Unknown codec ID %d, please add mapping here", codec_id);
+ break;
+ }
+
+ if (buildcaps) {
+ AVCodec *codec;
+
+ if ((codec = avcodec_find_decoder (codec_id)) ||
+ (codec = avcodec_find_encoder (codec_id))) {
+ gchar *mime = NULL;
+
+ GST_LOG ("Could not create stream format caps for %s", codec->name);
+
+ switch (codec->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ mime = g_strdup_printf ("video/x-gst-av-%s", codec->name);
+ caps =
+ gst_ff_vid_caps_new (context, NULL, codec_id, encode, mime, NULL);
+ g_free (mime);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ mime = g_strdup_printf ("audio/x-gst-av-%s", codec->name);
+ caps =
+ gst_ff_aud_caps_new (context, NULL, codec_id, encode, mime, NULL);
+ if (context)
+ gst_caps_set_simple (caps,
+ "block_align", G_TYPE_INT, context->block_align,
+ "bitrate", G_TYPE_INT, (guint) context->bit_rate, NULL);
+ g_free (mime);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (caps != NULL) {
+
+ /* set private data */
+ if (context && context->extradata_size > 0) {
+ GstBuffer *data = gst_buffer_new_and_alloc (context->extradata_size);
+
+ gst_buffer_fill (data, 0, context->extradata, context->extradata_size);
+ gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, data, NULL);
+ gst_buffer_unref (data);
+ }
+
+ GST_LOG ("caps for codec_id=%d: %" GST_PTR_FORMAT, codec_id, caps);
+
+ } else {
+ GST_LOG ("No caps found for codec_id=%d", codec_id);
+ }
+
+ return caps;
+ }
+
+ /* Convert a FFMPEG Pixel Format and optional AVCodecContext
+ * to a GstCaps. If the context is ommitted, no fixed values
+ * for video/audio size will be included in the GstCaps
+ *
+ * See below for usefullness
+ */
+
+ static GstCaps *
+ gst_ffmpeg_pixfmt_to_caps (enum AVPixelFormat pix_fmt, AVCodecContext * context,
+ enum AVCodecID codec_id)
+ {
+ GstCaps *caps = NULL;
+ GstVideoFormat format;
+
+ format = gst_ffmpeg_pixfmt_to_videoformat (pix_fmt);
+
+ if (format != GST_VIDEO_FORMAT_UNKNOWN) {
+ caps = gst_ff_vid_caps_new (context, NULL, codec_id, TRUE, "video/x-raw",
+ "format", G_TYPE_STRING, gst_video_format_to_string (format), NULL);
+ }
+
+ if (caps != NULL) {
+ GST_DEBUG ("caps for pix_fmt=%d: %" GST_PTR_FORMAT, pix_fmt, caps);
+ } else {
+ GST_LOG ("No caps found for pix_fmt=%d", pix_fmt);
+ }
+
+ return caps;
+ }
+
+ GstAudioFormat
+ gst_ffmpeg_smpfmt_to_audioformat (enum AVSampleFormat sample_fmt,
+ GstAudioLayout * layout)
+ {
+ if (layout)
+ *layout = GST_AUDIO_LAYOUT_NON_INTERLEAVED;
+
+ switch (sample_fmt) {
+ case AV_SAMPLE_FMT_U8:
+ if (layout)
+ *layout = GST_AUDIO_LAYOUT_INTERLEAVED;
+ case AV_SAMPLE_FMT_U8P:
+ return GST_AUDIO_FORMAT_U8;
+ break;
+
+ case AV_SAMPLE_FMT_S16:
+ if (layout)
+ *layout = GST_AUDIO_LAYOUT_INTERLEAVED;
+ case AV_SAMPLE_FMT_S16P:
+ return GST_AUDIO_FORMAT_S16;
+ break;
+
+ case AV_SAMPLE_FMT_S32:
+ if (layout)
+ *layout = GST_AUDIO_LAYOUT_INTERLEAVED;
+ case AV_SAMPLE_FMT_S32P:
+ return GST_AUDIO_FORMAT_S32;
+ break;
+ case AV_SAMPLE_FMT_FLT:
+ if (layout)
+ *layout = GST_AUDIO_LAYOUT_INTERLEAVED;
+ case AV_SAMPLE_FMT_FLTP:
+ return GST_AUDIO_FORMAT_F32;
+ break;
+
+ case AV_SAMPLE_FMT_DBL:
+ if (layout)
+ *layout = GST_AUDIO_LAYOUT_INTERLEAVED;
+ case AV_SAMPLE_FMT_DBLP:
+ return GST_AUDIO_FORMAT_F64;
+ break;
+
+ default:
+ /* .. */
+ return GST_AUDIO_FORMAT_UNKNOWN;
+ break;
+ }
+ }
+
+ /* Convert a FFMPEG Sample Format and optional AVCodecContext
+ * to a GstCaps. If the context is ommitted, no fixed values
+ * for video/audio size will be included in the GstCaps
+ *
+ * See below for usefullness
+ */
+
+ static GstCaps *
+ gst_ffmpeg_smpfmt_to_caps (enum AVSampleFormat sample_fmt,
+ AVCodecContext * context, AVCodec * codec, enum AVCodecID codec_id)
+ {
+ GstCaps *caps = NULL;
+ GstAudioFormat format;
+ GstAudioLayout layout;
+
+ format = gst_ffmpeg_smpfmt_to_audioformat (sample_fmt, &layout);
+
+ if (format != GST_AUDIO_FORMAT_UNKNOWN) {
+ caps = gst_ff_aud_caps_new (context, codec, codec_id, TRUE, "audio/x-raw",
+ "format", G_TYPE_STRING, gst_audio_format_to_string (format),
+ "layout", G_TYPE_STRING,
+ (layout == GST_AUDIO_LAYOUT_INTERLEAVED) ?
+ "interleaved" : "non-interleaved", NULL);
+ GST_LOG ("caps for sample_fmt=%d: %" GST_PTR_FORMAT, sample_fmt, caps);
+ } else {
+ GST_LOG ("No caps found for sample_fmt=%d", sample_fmt);
+ }
+
+ return caps;
+ }
+
+ static gboolean
+ caps_has_field (GstCaps * caps, const gchar * field)
+ {
+ guint i, n;
+
+ n = gst_caps_get_size (caps);
+ for (i = 0; i < n; i++) {
+ GstStructure *s = gst_caps_get_structure (caps, i);
+
+ if (gst_structure_has_field (s, field))
+ return TRUE;
+ }
+
+ return FALSE;
+ }
+
+ GstCaps *
+ gst_ffmpeg_codectype_to_audio_caps (AVCodecContext * context,
+ enum AVCodecID codec_id, gboolean encode, AVCodec * codec)
+ {
+ GstCaps *caps = NULL;
+
+ GST_DEBUG ("context:%p, codec_id:%d, encode:%d, codec:%p",
+ context, codec_id, encode, codec);
+ if (codec)
+ GST_DEBUG ("sample_fmts:%p, samplerates:%p",
+ codec->sample_fmts, codec->supported_samplerates);
+
+ if (context) {
+ /* Specific codec context */
+ caps =
+ gst_ffmpeg_smpfmt_to_caps (context->sample_fmt, context, codec,
+ codec_id);
+ } else {
+ caps = gst_ff_aud_caps_new (context, codec, codec_id, encode, "audio/x-raw",
+ NULL);
+ if (!caps_has_field (caps, "format"))
+ gst_ffmpeg_audio_set_sample_fmts (caps,
+ codec ? codec->sample_fmts : NULL, encode);
+ }
+
+ return caps;
+ }
+
+ GstCaps *
+ gst_ffmpeg_codectype_to_video_caps (AVCodecContext * context,
+ enum AVCodecID codec_id, gboolean encode, AVCodec * codec)
+ {
+ GstCaps *caps;
+
+ GST_LOG ("context:%p, codec_id:%d, encode:%d, codec:%p",
+ context, codec_id, encode, codec);
+
+ if (context) {
+ caps = gst_ffmpeg_pixfmt_to_caps (context->pix_fmt, context, codec_id);
+ } else {
+ caps =
+ gst_ff_vid_caps_new (context, codec, codec_id, encode, "video/x-raw",
+ NULL);
+ if (!caps_has_field (caps, "format"))
+ gst_ffmpeg_video_set_pix_fmts (caps, codec ? codec->pix_fmts : NULL);
+ }
+ return caps;
+ }
+
+ /* Convert a GstCaps (audio/raw) to a FFMPEG SampleFmt
+ * and other audio properties in a AVCodecContext.
+ *
+ * For usefullness, see below
+ */
+
+ static void
+ gst_ffmpeg_caps_to_smpfmt (const GstCaps * caps,
+ AVCodecContext * context, gboolean raw)
+ {
+ GstStructure *structure;
+ const gchar *fmt;
+ GstAudioFormat format = GST_AUDIO_FORMAT_UNKNOWN;
+ gint bitrate;
+ const gchar *layout;
+ gboolean interleaved;
+
+ g_return_if_fail (gst_caps_get_size (caps) == 1);
+
+ structure = gst_caps_get_structure (caps, 0);
+
+ gst_structure_get_int (structure, "channels", &context->channels);
+ gst_structure_get_int (structure, "rate", &context->sample_rate);
+ gst_structure_get_int (structure, "block_align", &context->block_align);
+ if (gst_structure_get_int (structure, "bitrate", &bitrate))
+ context->bit_rate = bitrate;
+
+ if (!raw)
+ return;
+
+ if (gst_structure_has_name (structure, "audio/x-raw")) {
+ if ((fmt = gst_structure_get_string (structure, "format"))) {
+ format = gst_audio_format_from_string (fmt);
+ }
+ }
+
+ layout = gst_structure_get_string (structure, "layout");
+ interleaved = ! !g_strcmp0 (layout, "non-interleaved");
+
+ switch (format) {
+ case GST_AUDIO_FORMAT_F32:
+ context->sample_fmt =
+ interleaved ? AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_FLTP;
+ break;
+ case GST_AUDIO_FORMAT_F64:
+ context->sample_fmt =
+ interleaved ? AV_SAMPLE_FMT_DBL : AV_SAMPLE_FMT_DBLP;
+ break;
+ case GST_AUDIO_FORMAT_S32:
+ context->sample_fmt =
+ interleaved ? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S32P;
+ break;
+ case GST_AUDIO_FORMAT_S16:
+ context->sample_fmt =
+ interleaved ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_S16P;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Convert a GstCaps (video/raw) to a FFMPEG PixFmt
+ * and other video properties in a AVCodecContext.
+ *
+ * For usefullness, see below
+ */
+
/* Transfer video properties (width, height, bpp, framerate, PAR and, for
 * raw caps, pixel format and interlacing) from fixed @caps into @context.
 * @caps must hold exactly one structure; for raw caps a fixed framerate
 * is also required. */
static void
gst_ffmpeg_caps_to_pixfmt (const GstCaps * caps,
    AVCodecContext * context, gboolean raw)
{
  GstStructure *structure;
  const GValue *fps;
  const GValue *par = NULL;
  const gchar *fmt;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
  const gchar *s;

  GST_DEBUG ("converting caps %" GST_PTR_FORMAT, caps);
  g_return_if_fail (gst_caps_get_size (caps) == 1);
  structure = gst_caps_get_structure (caps, 0);

  gst_structure_get_int (structure, "width", &context->width);
  gst_structure_get_int (structure, "height", &context->height);
  gst_structure_get_int (structure, "bpp", &context->bits_per_coded_sample);

  fps = gst_structure_get_value (structure, "framerate");
  if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {

    int num = gst_value_get_fraction_numerator (fps);
    int den = gst_value_get_fraction_denominator (fps);

    if (num > 0 && den > 0) {
      /* somehow these seem mixed up.. */
      /* they're fine, this is because it does period=1/frequency */
      context->time_base.den = gst_value_get_fraction_numerator (fps);
      context->time_base.num = gst_value_get_fraction_denominator (fps);
      context->ticks_per_frame = 1;

      GST_DEBUG ("setting framerate %d/%d = %lf",
          context->time_base.den, context->time_base.num,
          1. * context->time_base.den / context->time_base.num);
    } else {
      /* 0/1 is the conventional variable-framerate marker; don't set it */
      GST_INFO ("ignoring framerate %d/%d (probably variable framerate)",
          context->time_base.num, context->time_base.den);
    }
  }

  par = gst_structure_get_value (structure, "pixel-aspect-ratio");
  if (par && GST_VALUE_HOLDS_FRACTION (par)) {

    int num = gst_value_get_fraction_numerator (par);
    int den = gst_value_get_fraction_denominator (par);

    if (num > 0 && den > 0) {
      context->sample_aspect_ratio.num = num;
      context->sample_aspect_ratio.den = den;

      GST_DEBUG ("setting pixel-aspect-ratio %d/%d = %lf",
          context->sample_aspect_ratio.num, context->sample_aspect_ratio.den,
          1. * context->sample_aspect_ratio.num /
          context->sample_aspect_ratio.den);
    } else {
      GST_WARNING ("ignoring insane pixel-aspect-ratio %d/%d",
          context->sample_aspect_ratio.num, context->sample_aspect_ratio.den);
    }
  }

  /* everything below only applies to raw video caps */
  if (!raw)
    return;

  /* raw caps must carry a fixed framerate at this point */
  g_return_if_fail (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps));

  if (gst_structure_has_name (structure, "video/x-raw")) {
    if ((fmt = gst_structure_get_string (structure, "format"))) {
      format = gst_video_format_from_string (fmt);
    }
  }

  /* map the GStreamer raw format to the FFmpeg pixel format; unmapped
   * formats leave context->pix_fmt untouched */
  switch (format) {
    case GST_VIDEO_FORMAT_YUY2:
      context->pix_fmt = AV_PIX_FMT_YUYV422;
      break;
    case GST_VIDEO_FORMAT_I420:
      context->pix_fmt = AV_PIX_FMT_YUV420P;
      break;
    case GST_VIDEO_FORMAT_A420:
      context->pix_fmt = AV_PIX_FMT_YUVA420P;
      break;
    case GST_VIDEO_FORMAT_Y41B:
      context->pix_fmt = AV_PIX_FMT_YUV411P;
      break;
    case GST_VIDEO_FORMAT_Y42B:
      context->pix_fmt = AV_PIX_FMT_YUV422P;
      break;
    case GST_VIDEO_FORMAT_YUV9:
      context->pix_fmt = AV_PIX_FMT_YUV410P;
      break;
    case GST_VIDEO_FORMAT_Y444:
      context->pix_fmt = AV_PIX_FMT_YUV444P;
      break;
    case GST_VIDEO_FORMAT_GRAY8:
      context->pix_fmt = AV_PIX_FMT_GRAY8;
      break;
    case GST_VIDEO_FORMAT_xRGB:
      /* AV_PIX_FMT_RGB32 is endian-dependent, hence the guards here and
       * below */
#if (G_BYTE_ORDER == G_BIG_ENDIAN)
      context->pix_fmt = AV_PIX_FMT_RGB32;
#endif
      break;
    case GST_VIDEO_FORMAT_BGRx:
#if (G_BYTE_ORDER == G_LITTLE_ENDIAN)
      context->pix_fmt = AV_PIX_FMT_RGB32;
#endif
      break;
    case GST_VIDEO_FORMAT_RGB:
      context->pix_fmt = AV_PIX_FMT_RGB24;
      break;
    case GST_VIDEO_FORMAT_BGR:
      context->pix_fmt = AV_PIX_FMT_BGR24;
      break;
    case GST_VIDEO_FORMAT_RGB16:
      context->pix_fmt = AV_PIX_FMT_RGB565;
      break;
    case GST_VIDEO_FORMAT_RGB15:
      context->pix_fmt = AV_PIX_FMT_RGB555;
      break;
    case GST_VIDEO_FORMAT_RGB8P:
      context->pix_fmt = AV_PIX_FMT_PAL8;
      break;
    default:
      break;
  }

  s = gst_structure_get_string (structure, "interlace-mode");
  if (s) {
    if (strcmp (s, "progressive") == 0) {
      context->field_order = AV_FIELD_PROGRESSIVE;
    } else if (strcmp (s, "interleaved") == 0) {
      s = gst_structure_get_string (structure, "field-order");
      if (s) {
        if (strcmp (s, "top-field-first") == 0) {
          context->field_order = AV_FIELD_TT;
        } else if (strcmp (s, "bottom-field-first") == 0) {
          context->field_order = AV_FIELD_TB;
        }
      }
    }
  }
}
+
/* One row of the GstVideoFormat <-> AVPixelFormat mapping table. */
typedef struct
{
  GstVideoFormat format;
  enum AVPixelFormat pixfmt;
} PixToFmt;

/* FIXME : FILLME */
/* NOTE: entry order matters.  Lookups return the FIRST matching row, and
 * several GstVideoFormats appear twice (e.g. I420 -> YUV420P/YUVJ420P), so
 * the preferred AVPixelFormat must come first.  Do not reorder casually. */
static const PixToFmt pixtofmttable[] = {
  /* GST_VIDEO_FORMAT_I420, */
  {GST_VIDEO_FORMAT_I420, AV_PIX_FMT_YUV420P},
  /* Note : this should use a different chroma placement */
  {GST_VIDEO_FORMAT_I420, AV_PIX_FMT_YUVJ420P},

  /* GST_VIDEO_FORMAT_YV12, */
  /* GST_VIDEO_FORMAT_YUY2, */
  {GST_VIDEO_FORMAT_YUY2, AV_PIX_FMT_YUYV422},
  /* GST_VIDEO_FORMAT_UYVY, */
  {GST_VIDEO_FORMAT_UYVY, AV_PIX_FMT_UYVY422},
  /* GST_VIDEO_FORMAT_AYUV, */
  /* GST_VIDEO_FORMAT_RGBx, */
  {GST_VIDEO_FORMAT_RGBx, AV_PIX_FMT_RGB0},
  /* GST_VIDEO_FORMAT_BGRx, */
  {GST_VIDEO_FORMAT_BGRx, AV_PIX_FMT_BGR0},
  /* GST_VIDEO_FORMAT_xRGB, */
  {GST_VIDEO_FORMAT_xRGB, AV_PIX_FMT_0RGB},
  /* GST_VIDEO_FORMAT_xBGR, */
  {GST_VIDEO_FORMAT_xBGR, AV_PIX_FMT_0BGR},
  /* GST_VIDEO_FORMAT_RGBA, */
  {GST_VIDEO_FORMAT_RGBA, AV_PIX_FMT_RGBA},
  /* GST_VIDEO_FORMAT_BGRA, */
  {GST_VIDEO_FORMAT_BGRA, AV_PIX_FMT_BGRA},
  /* GST_VIDEO_FORMAT_ARGB, */
  {GST_VIDEO_FORMAT_ARGB, AV_PIX_FMT_ARGB},
  /* GST_VIDEO_FORMAT_ABGR, */
  {GST_VIDEO_FORMAT_ABGR, AV_PIX_FMT_ABGR},
  /* GST_VIDEO_FORMAT_RGB, */
  {GST_VIDEO_FORMAT_RGB, AV_PIX_FMT_RGB24},
  /* GST_VIDEO_FORMAT_BGR, */
  {GST_VIDEO_FORMAT_BGR, AV_PIX_FMT_BGR24},
  /* GST_VIDEO_FORMAT_Y41B, */
  {GST_VIDEO_FORMAT_Y41B, AV_PIX_FMT_YUV411P},
  /* GST_VIDEO_FORMAT_Y42B, */
  {GST_VIDEO_FORMAT_Y42B, AV_PIX_FMT_YUV422P},
  {GST_VIDEO_FORMAT_Y42B, AV_PIX_FMT_YUVJ422P},
  /* GST_VIDEO_FORMAT_YVYU, */
  /* GST_VIDEO_FORMAT_Y444, */
  {GST_VIDEO_FORMAT_Y444, AV_PIX_FMT_YUV444P},
  {GST_VIDEO_FORMAT_Y444, AV_PIX_FMT_YUVJ444P},
  /* GST_VIDEO_FORMAT_v210, */
  /* GST_VIDEO_FORMAT_v216, */
  /* GST_VIDEO_FORMAT_NV12, */
  {GST_VIDEO_FORMAT_NV12, AV_PIX_FMT_NV12},
  /* GST_VIDEO_FORMAT_NV21, */
  {GST_VIDEO_FORMAT_NV21, AV_PIX_FMT_NV21},
  /* GST_VIDEO_FORMAT_GRAY8, */
  {GST_VIDEO_FORMAT_GRAY8, AV_PIX_FMT_GRAY8},
  /* GST_VIDEO_FORMAT_GRAY16_BE, */
  {GST_VIDEO_FORMAT_GRAY16_BE, AV_PIX_FMT_GRAY16BE},
  /* GST_VIDEO_FORMAT_GRAY16_LE, */
  {GST_VIDEO_FORMAT_GRAY16_LE, AV_PIX_FMT_GRAY16LE},
  /* GST_VIDEO_FORMAT_v308, */
  /* GST_VIDEO_FORMAT_Y800, */
  /* GST_VIDEO_FORMAT_Y16, */
  /* GST_VIDEO_FORMAT_RGB16, */
  {GST_VIDEO_FORMAT_RGB16, AV_PIX_FMT_RGB565},
  /* GST_VIDEO_FORMAT_BGR16, */
  /* GST_VIDEO_FORMAT_RGB15, */
  {GST_VIDEO_FORMAT_RGB15, AV_PIX_FMT_RGB555},
  /* GST_VIDEO_FORMAT_BGR15, */
  /* GST_VIDEO_FORMAT_UYVP, */
  /* GST_VIDEO_FORMAT_A420, */
  {GST_VIDEO_FORMAT_A420, AV_PIX_FMT_YUVA420P},
  /* GST_VIDEO_FORMAT_RGB8_PALETTED, */
  {GST_VIDEO_FORMAT_RGB8P, AV_PIX_FMT_PAL8},
  /* GST_VIDEO_FORMAT_YUV9, */
  {GST_VIDEO_FORMAT_YUV9, AV_PIX_FMT_YUV410P},
  /* GST_VIDEO_FORMAT_YVU9, */
  /* GST_VIDEO_FORMAT_IYU1, */
  /* GST_VIDEO_FORMAT_ARGB64, */
  /* GST_VIDEO_FORMAT_AYUV64, */
  /* GST_VIDEO_FORMAT_r210, */
  {GST_VIDEO_FORMAT_I420_10LE, AV_PIX_FMT_YUV420P10LE},
  {GST_VIDEO_FORMAT_I420_10BE, AV_PIX_FMT_YUV420P10BE},
  {GST_VIDEO_FORMAT_I422_10LE, AV_PIX_FMT_YUV422P10LE},
  {GST_VIDEO_FORMAT_I422_10BE, AV_PIX_FMT_YUV422P10BE},
  {GST_VIDEO_FORMAT_Y444_10LE, AV_PIX_FMT_YUV444P10LE},
  {GST_VIDEO_FORMAT_Y444_10BE, AV_PIX_FMT_YUV444P10BE},
  {GST_VIDEO_FORMAT_GBR, AV_PIX_FMT_GBRP},
  {GST_VIDEO_FORMAT_GBRA, AV_PIX_FMT_GBRAP},
  {GST_VIDEO_FORMAT_GBR_10LE, AV_PIX_FMT_GBRP10LE},
  {GST_VIDEO_FORMAT_GBR_10BE, AV_PIX_FMT_GBRP10BE},
  {GST_VIDEO_FORMAT_GBRA_10LE, AV_PIX_FMT_GBRAP10LE},
  {GST_VIDEO_FORMAT_GBRA_10BE, AV_PIX_FMT_GBRAP10BE},
  {GST_VIDEO_FORMAT_GBR_12LE, AV_PIX_FMT_GBRP12LE},
  {GST_VIDEO_FORMAT_GBR_12BE, AV_PIX_FMT_GBRP12BE},
  {GST_VIDEO_FORMAT_GBRA_12LE, AV_PIX_FMT_GBRAP12LE},
  {GST_VIDEO_FORMAT_GBRA_12BE, AV_PIX_FMT_GBRAP12BE},
  {GST_VIDEO_FORMAT_A420_10LE, AV_PIX_FMT_YUVA420P10LE},
  {GST_VIDEO_FORMAT_A420_10BE, AV_PIX_FMT_YUVA420P10BE},
  {GST_VIDEO_FORMAT_A422_10LE, AV_PIX_FMT_YUVA422P10LE},
  {GST_VIDEO_FORMAT_A422_10BE, AV_PIX_FMT_YUVA422P10BE},
  {GST_VIDEO_FORMAT_A444_10LE, AV_PIX_FMT_YUVA444P10LE},
  {GST_VIDEO_FORMAT_A444_10BE, AV_PIX_FMT_YUVA444P10BE},
  {GST_VIDEO_FORMAT_I420_12LE, AV_PIX_FMT_YUV420P12LE},
  {GST_VIDEO_FORMAT_I420_12BE, AV_PIX_FMT_YUV420P12BE},
  {GST_VIDEO_FORMAT_I422_12LE, AV_PIX_FMT_YUV422P12LE},
  {GST_VIDEO_FORMAT_I422_12BE, AV_PIX_FMT_YUV422P12BE},
  {GST_VIDEO_FORMAT_Y444_12LE, AV_PIX_FMT_YUV444P12LE},
  {GST_VIDEO_FORMAT_Y444_12BE, AV_PIX_FMT_YUV444P12BE},
};
+
+ GstVideoFormat
+ gst_ffmpeg_pixfmt_to_videoformat (enum AVPixelFormat pixfmt)
+ {
+ guint i;
+
+ for (i = 0; i < G_N_ELEMENTS (pixtofmttable); i++)
+ if (pixtofmttable[i].pixfmt == pixfmt)
+ return pixtofmttable[i].format;
+
+ GST_DEBUG ("Unknown pixel format %d", pixfmt);
+ return GST_VIDEO_FORMAT_UNKNOWN;
+ }
+
+ static enum AVPixelFormat
+ gst_ffmpeg_videoformat_to_pixfmt_for_codec (GstVideoFormat format,
+ const AVCodec * codec)
+ {
+ guint i;
+
+ for (i = 0; i < G_N_ELEMENTS (pixtofmttable); i++) {
+ if (pixtofmttable[i].format == format) {
+ gint j;
+
+ if (codec && codec->pix_fmts) {
+ for (j = 0; codec->pix_fmts[j] != -1; j++) {
+ if (pixtofmttable[i].pixfmt == codec->pix_fmts[j])
+ return pixtofmttable[i].pixfmt;
+ }
+ } else {
+ return pixtofmttable[i].pixfmt;
+ }
+ }
+ }
+
+ return AV_PIX_FMT_NONE;
+ }
+
/* Codec-agnostic GstVideoFormat -> AVPixelFormat lookup (no per-codec
 * pixel-format filtering). */
enum AVPixelFormat
gst_ffmpeg_videoformat_to_pixfmt (GstVideoFormat format)
{
  return gst_ffmpeg_videoformat_to_pixfmt_for_codec (format, NULL);
}
+
+ void
+ gst_ffmpeg_videoinfo_to_context (GstVideoInfo * info, AVCodecContext * context)
+ {
+ gint i, bpp = 0;
+
+ context->width = GST_VIDEO_INFO_WIDTH (info);
+ context->height = GST_VIDEO_INFO_HEIGHT (info);
+ for (i = 0; i < GST_VIDEO_INFO_N_COMPONENTS (info); i++)
+ bpp += GST_VIDEO_INFO_COMP_DEPTH (info, i);
+ context->bits_per_coded_sample = bpp;
+
+ context->ticks_per_frame = 1;
+ if (GST_VIDEO_INFO_FPS_N (info) == 0) {
+ GST_DEBUG ("Using 25/1 framerate");
+ context->time_base.den = 25;
+ context->time_base.num = 1;
+ } else {
+ context->time_base.den = GST_VIDEO_INFO_FPS_N (info);
+ context->time_base.num = GST_VIDEO_INFO_FPS_D (info);
+ }
+
+ context->sample_aspect_ratio.num = GST_VIDEO_INFO_PAR_N (info);
+ context->sample_aspect_ratio.den = GST_VIDEO_INFO_PAR_D (info);
+
+ context->pix_fmt =
+ gst_ffmpeg_videoformat_to_pixfmt_for_codec (GST_VIDEO_INFO_FORMAT (info),
+ context->codec);
+
+ switch (info->chroma_site) {
+ case GST_VIDEO_CHROMA_SITE_MPEG2:
+ context->chroma_sample_location = AVCHROMA_LOC_LEFT;
+ break;
+ case GST_VIDEO_CHROMA_SITE_JPEG:
+ context->chroma_sample_location = AVCHROMA_LOC_CENTER;
+ break;
+ case GST_VIDEO_CHROMA_SITE_DV:
+ context->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
+ break;
+ case GST_VIDEO_CHROMA_SITE_V_COSITED:
+ context->chroma_sample_location = AVCHROMA_LOC_TOP;
+ break;
+ default:
+ break;
+ }
+
+ context->color_primaries =
+ gst_video_color_primaries_to_iso (info->colorimetry.primaries);
+ context->color_trc =
+ gst_video_transfer_function_to_iso (info->colorimetry.transfer);
+ context->colorspace =
+ gst_video_color_matrix_to_iso (info->colorimetry.matrix);
+
+ if (info->colorimetry.range == GST_VIDEO_COLOR_RANGE_0_255) {
+ context->color_range = AVCOL_RANGE_JPEG;
+ } else {
+ context->color_range = AVCOL_RANGE_MPEG;
+ }
+ }
+
+ void
+ gst_ffmpeg_audioinfo_to_context (GstAudioInfo * info, AVCodecContext * context)
+ {
+ const AVCodec *codec;
+ const enum AVSampleFormat *smpl_fmts;
+ enum AVSampleFormat smpl_fmt = -1;
+
+ context->channels = info->channels;
+ context->sample_rate = info->rate;
+ context->channel_layout =
+ gst_ffmpeg_channel_positions_to_layout (info->position, info->channels);
+
+ codec = context->codec;
+
+ smpl_fmts = codec->sample_fmts;
+
+ switch (info->finfo->format) {
+ case GST_AUDIO_FORMAT_F32:
+ if (smpl_fmts) {
+ while (*smpl_fmts != -1) {
+ if (*smpl_fmts == AV_SAMPLE_FMT_FLT) {
+ smpl_fmt = *smpl_fmts;
+ break;
+ } else if (*smpl_fmts == AV_SAMPLE_FMT_FLTP) {
+ smpl_fmt = *smpl_fmts;
+ }
+
+ smpl_fmts++;
+ }
+ } else {
+ smpl_fmt = AV_SAMPLE_FMT_FLT;
+ }
+ break;
+ case GST_AUDIO_FORMAT_F64:
+ if (smpl_fmts) {
+ while (*smpl_fmts != -1) {
+ if (*smpl_fmts == AV_SAMPLE_FMT_DBL) {
+ smpl_fmt = *smpl_fmts;
+ break;
+ } else if (*smpl_fmts == AV_SAMPLE_FMT_DBLP) {
+ smpl_fmt = *smpl_fmts;
+ }
+
+ smpl_fmts++;
+ }
+ } else {
+ smpl_fmt = AV_SAMPLE_FMT_DBL;
+ }
+ break;
+ case GST_AUDIO_FORMAT_S32:
+ if (smpl_fmts) {
+ while (*smpl_fmts != -1) {
+ if (*smpl_fmts == AV_SAMPLE_FMT_S32) {
+ smpl_fmt = *smpl_fmts;
+ break;
+ } else if (*smpl_fmts == AV_SAMPLE_FMT_S32P) {
+ smpl_fmt = *smpl_fmts;
+ }
+
+ smpl_fmts++;
+ }
+ } else {
+ smpl_fmt = AV_SAMPLE_FMT_S32;
+ }
+ break;
+ case GST_AUDIO_FORMAT_S16:
+ if (smpl_fmts) {
+ while (*smpl_fmts != -1) {
+ if (*smpl_fmts == AV_SAMPLE_FMT_S16) {
+ smpl_fmt = *smpl_fmts;
+ break;
+ } else if (*smpl_fmts == AV_SAMPLE_FMT_S16P) {
+ smpl_fmt = *smpl_fmts;
+ }
+
+ smpl_fmts++;
+ }
+ } else {
+ smpl_fmt = AV_SAMPLE_FMT_S16;
+ }
+ break;
+ case GST_AUDIO_FORMAT_U8:
+ if (smpl_fmts) {
+ while (*smpl_fmts != -1) {
+ if (*smpl_fmts == AV_SAMPLE_FMT_U8) {
+ smpl_fmt = *smpl_fmts;
+ break;
+ } else if (*smpl_fmts == AV_SAMPLE_FMT_U8P) {
+ smpl_fmt = *smpl_fmts;
+ }
+
+ smpl_fmts++;
+ }
+ } else {
+ smpl_fmt = AV_SAMPLE_FMT_U8;
+ }
+ break;
+ default:
+ break;
+ }
+
+ g_assert (smpl_fmt != -1);
+
+ context->sample_fmt = smpl_fmt;
+ }
+
+ /* Convert a GstCaps and a FFMPEG codec Type to a
+ * AVCodecContext. If the context is ommitted, no fixed values
+ * for video/audio size will be included in the context
+ *
+ * AVMediaType is primarily meant for uncompressed data GstCaps!
+ */
+
+ void
+ gst_ffmpeg_caps_with_codectype (enum AVMediaType type,
+ const GstCaps * caps, AVCodecContext * context)
+ {
+ if (context == NULL)
+ return;
+
+ switch (type) {
+ case AVMEDIA_TYPE_VIDEO:
+ gst_ffmpeg_caps_to_pixfmt (caps, context, TRUE);
+ break;
+
+ case AVMEDIA_TYPE_AUDIO:
+ gst_ffmpeg_caps_to_smpfmt (caps, context, TRUE);
+ break;
+
+ default:
+ /* unknown */
+ break;
+ }
+ }
+
#if 0
/* NOTE: this whole section is compiled out — it implemented H.264
 * codec_data NAL escaping, which was disabled because some files already
 * ship escaped codec_data (see the comment in
 * gst_ffmpeg_caps_with_codecid below).  Kept for reference. */
static void
nal_escape (guint8 * dst, guint8 * src, guint size, guint * destsize)
{
  guint8 *dstp = dst;
  guint8 *srcp = src;
  guint8 *end = src + size;
  gint count = 0;

  while (srcp < end) {
    if (count == 2 && *srcp <= 0x03) {
      GST_DEBUG ("added escape code");
      *dstp++ = 0x03;
      count = 0;
    }
    if (*srcp == 0)
      count++;
    else
      count = 0;

    GST_DEBUG ("copy %02x, count %d", *srcp, count);
    *dstp++ = *srcp++;
  }
  *destsize = dstp - dst;
}

/* copy the config, escaping NAL units as we iterate them, if something fails we
 * copy everything and hope for the best. */
static void
copy_config (guint8 * dst, guint8 * src, guint size, guint * destsize)
{
  guint8 *dstp = dst;
  guint8 *srcp = src;
  gint cnt, i;
  guint nalsize, esize;

  /* check size */
  if (size < 7)
    goto full_copy;

  /* check version */
  if (*srcp != 1)
    goto full_copy;

  cnt = *(srcp + 5) & 0x1f;     /* Number of sps */

  GST_DEBUG ("num SPS %d", cnt);

  memcpy (dstp, srcp, 6);
  srcp += 6;
  dstp += 6;

  for (i = 0; i < cnt; i++) {
    GST_DEBUG ("copy SPS %d", i);
    nalsize = (srcp[0] << 8) | srcp[1];
    nal_escape (dstp + 2, srcp + 2, nalsize, &esize);
    dstp[0] = esize >> 8;
    dstp[1] = esize & 0xff;
    dstp += esize + 2;
    srcp += nalsize + 2;
  }

  cnt = *(dstp++) = *(srcp++);  /* Number of pps */

  GST_DEBUG ("num PPS %d", cnt);

  for (i = 0; i < cnt; i++) {
    GST_DEBUG ("copy PPS %d", i);
    nalsize = (srcp[0] << 8) | srcp[1];
    nal_escape (dstp + 2, srcp + 2, nalsize, &esize);
    dstp[0] = esize >> 8;
    dstp[1] = esize & 0xff;
    dstp += esize + 2;
    srcp += nalsize + 2;
  }
  *destsize = dstp - dst;

  return;

full_copy:
  {
    GST_DEBUG ("something unexpected, doing full copy");
    memcpy (dst, src, size);
    *destsize = size;
    return;
  }
}
#endif
+
+ /*
+ * caps_with_codecid () transforms a GstCaps for a known codec
+ * ID into a filled-in context.
+ * codec_data from caps will override possible extradata already in the context
+ */
+
void
gst_ffmpeg_caps_with_codecid (enum AVCodecID codec_id,
    enum AVMediaType codec_type, const GstCaps * caps, AVCodecContext * context)
{
  GstStructure *str;
  const GValue *value;
  GstBuffer *buf;

  GST_LOG ("codec_id:%d, codec_type:%d, caps:%" GST_PTR_FORMAT " context:%p",
      codec_id, codec_type, caps, context);

  if (!context || !gst_caps_get_size (caps))
    return;

  str = gst_caps_get_structure (caps, 0);

  /* extradata parsing (esds [mpeg4], wma/wmv, msmpeg4v1/2/3, etc.) */
  if ((value = gst_structure_get_value (str, "codec_data"))) {
    GstMapInfo map;

    buf = gst_value_get_buffer (value);
    gst_buffer_map (buf, &map, GST_MAP_READ);

    /* free the old one if it is there */
    if (context->extradata)
      av_free (context->extradata);

#if 0
    if (codec_id == AV_CODEC_ID_H264) {
      guint extrasize;

      GST_DEBUG ("copy, escaping codec_data %d", size);
      /* ffmpeg h264 expects the codec_data to be escaped, there is no real
       * reason for this but let's just escape it for now. Start by allocating
       * enough space, x2 is more than enough.
       *
       * FIXME, we disabled escaping because some file already contain escaped
       * codec_data and then we escape twice and fail. It's better to leave it
       * as is, as that is what most players do. */
      context->extradata =
          av_mallocz (GST_ROUND_UP_16 (size * 2 +
              AV_INPUT_BUFFER_PADDING_SIZE));
      copy_config (context->extradata, data, size, &extrasize);
      GST_DEBUG ("escaped size: %d", extrasize);
      context->extradata_size = extrasize;
    } else
#endif
    {
      /* allocate with enough padding */
      GST_DEBUG ("copy codec_data");
      context->extradata =
          av_mallocz (GST_ROUND_UP_16 (map.size +
              AV_INPUT_BUFFER_PADDING_SIZE));
      memcpy (context->extradata, map.data, map.size);
      context->extradata_size = map.size;
    }

    /* Hack for VC1. Sometimes the first (length) byte is 0 for some files */
    if (codec_id == AV_CODEC_ID_VC1 && map.size > 0 && map.data[0] == 0) {
      context->extradata[0] = (guint8) map.size;
    }

    GST_DEBUG ("have codec data of size %" G_GSIZE_FORMAT, map.size);

    gst_buffer_unmap (buf, &map);
#ifdef TIZEN_FEATURE_LIBAV
    /* Tizen variant: only reset when no extradata was set previously,
     * so extradata installed by other means survives */
  } else if (!context->extradata) {
#else
  } else {
    context->extradata = NULL;
#endif
    context->extradata_size = 0;
    GST_DEBUG ("no codec data");
  }

  /* per-codec quirks: pull codec-specific fields out of the caps */
  switch (codec_id) {
    case AV_CODEC_ID_MPEG4:
    {
      const gchar *mime = gst_structure_get_name (str);

      context->flags |= AV_CODEC_FLAG_4MV;

      if (!strcmp (mime, "video/x-divx"))
        context->codec_tag = GST_MAKE_FOURCC ('D', 'I', 'V', 'X');
      else if (!strcmp (mime, "video/mpeg")) {
        const gchar *profile;

        context->codec_tag = GST_MAKE_FOURCC ('m', 'p', '4', 'v');

        profile = gst_structure_get_string (str, "profile");
        if (profile) {
          if (g_strcmp0 (profile, "advanced-simple") == 0)
            context->flags |= AV_CODEC_FLAG_QPEL;
        }
      }
      break;
    }

    case AV_CODEC_ID_SVQ3:
      /* FIXME: this is a workaround for older gst-plugins releases
       * (<= 0.8.9). This should be removed at some point, because
       * it causes wrong decoded frame order. */
      if (!context->extradata) {
        gint halfpel_flag, thirdpel_flag, low_delay, unknown_svq3_flag;
        guint16 flags;

        if (gst_structure_get_int (str, "halfpel_flag", &halfpel_flag) &&
            gst_structure_get_int (str, "thirdpel_flag", &thirdpel_flag) &&
            gst_structure_get_int (str, "low_delay", &low_delay) &&
            gst_structure_get_int (str, "unknown_svq3_flag",
                &unknown_svq3_flag)) {
          context->extradata = (guint8 *) av_mallocz (0x64);
          g_stpcpy ((gchar *) context->extradata, "SVQ3");
          /* pack the four flags into a little-endian 16-bit word at
           * offset 0x62 of the synthesized extradata */
          flags = 1 << 3;
          flags |= low_delay;
          flags = flags << 2;
          flags |= unknown_svq3_flag;
          flags = flags << 6;
          flags |= halfpel_flag;
          flags = flags << 1;
          flags |= thirdpel_flag;
          flags = flags << 3;

          flags = GUINT16_FROM_LE (flags);

          memcpy ((gchar *) context->extradata + 0x62, &flags, 2);
          context->extradata_size = 0x64;
        }
      }
      break;

    case AV_CODEC_ID_MSRLE:
    case AV_CODEC_ID_QTRLE:
    case AV_CODEC_ID_TSCC:
    case AV_CODEC_ID_CSCD:
    case AV_CODEC_ID_APE:
    {
      gint depth;

      if (gst_structure_get_int (str, "depth", &depth)) {
        context->bits_per_coded_sample = depth;
      } else {
        GST_WARNING ("No depth field in caps %" GST_PTR_FORMAT, caps);
      }

    }
      break;

    case AV_CODEC_ID_COOK:
    case AV_CODEC_ID_RA_288:
    case AV_CODEC_ID_RA_144:
    case AV_CODEC_ID_SIPR:
    {
      gint leaf_size;
      gint bitrate;

      if (gst_structure_get_int (str, "leaf_size", &leaf_size))
        context->block_align = leaf_size;
      if (gst_structure_get_int (str, "bitrate", &bitrate))
        context->bit_rate = bitrate;
    }
      break;
    case AV_CODEC_ID_ALAC:
      gst_structure_get_int (str, "samplesize",
          &context->bits_per_coded_sample);
      break;

    case AV_CODEC_ID_DVVIDEO:
    {
      const gchar *format;

      if ((format = gst_structure_get_string (str, "format"))) {

        if (g_str_equal (format, "YUY2"))
          context->pix_fmt = AV_PIX_FMT_YUYV422;
        else if (g_str_equal (format, "I420"))
          context->pix_fmt = AV_PIX_FMT_YUV420P;
        else if (g_str_equal (format, "A420"))
          context->pix_fmt = AV_PIX_FMT_YUVA420P;
        else if (g_str_equal (format, "Y41B"))
          context->pix_fmt = AV_PIX_FMT_YUV411P;
        else if (g_str_equal (format, "Y42B"))
          context->pix_fmt = AV_PIX_FMT_YUV422P;
        else if (g_str_equal (format, "YUV9"))
          context->pix_fmt = AV_PIX_FMT_YUV410P;
        else {
          GST_WARNING ("couldn't convert format %s" " to a pixel format",
              format);
        }
      } else
        GST_WARNING ("No specified format");
      break;
    }
    case AV_CODEC_ID_H263P:
    {
      gboolean val;

      /* H.263+ annexes: absent caps fields default to enabled */
      if (!gst_structure_get_boolean (str, "annex-f", &val) || val)
        context->flags |= AV_CODEC_FLAG_4MV;
      else
        context->flags &= ~AV_CODEC_FLAG_4MV;
      if ((!gst_structure_get_boolean (str, "annex-i", &val) || val) &&
          (!gst_structure_get_boolean (str, "annex-t", &val) || val))
        context->flags |= AV_CODEC_FLAG_AC_PRED;
      else
        context->flags &= ~AV_CODEC_FLAG_AC_PRED;
      if (!gst_structure_get_boolean (str, "annex-j", &val) || val)
        context->flags |= AV_CODEC_FLAG_LOOP_FILTER;
      else
        context->flags &= ~AV_CODEC_FLAG_LOOP_FILTER;
      break;
    }
    case AV_CODEC_ID_ADPCM_G726:
    {
      const gchar *layout;

      if ((layout = gst_structure_get_string (str, "layout"))) {
        if (!strcmp (layout, "g721")) {
          context->sample_rate = 8000;
          context->channels = 1;
          context->bit_rate = 32000;
        }
      }
      break;
    }
    case AV_CODEC_ID_SPEEDHQ:
    {
      const gchar *variant;

      if (context && (variant = gst_structure_get_string (str, "variant"))
          && strlen (variant) == 4) {

        context->codec_tag =
            GST_MAKE_FOURCC (variant[0], variant[1], variant[2], variant[3]);
      }
      break;
    }
    default:
      break;
  }

  if (!gst_caps_is_fixed (caps))
    return;

  /* common properties (width, height, fps) */
  switch (codec_type) {
    case AVMEDIA_TYPE_VIDEO:
      gst_ffmpeg_caps_to_pixfmt (caps, context,
          codec_id == AV_CODEC_ID_RAWVIDEO);
      break;
    case AVMEDIA_TYPE_AUDIO:
      gst_ffmpeg_caps_to_smpfmt (caps, context, FALSE);
      break;
    default:
      break;
  }

  /* fixup of default settings */
  switch (codec_id) {
    case AV_CODEC_ID_QCELP:
      /* QCELP is always mono, no matter what the caps say */
      context->channels = 1;
      break;
    case AV_CODEC_ID_ADPCM_G726:
      if (context->sample_rate && context->bit_rate)
        context->bits_per_coded_sample =
            context->bit_rate / context->sample_rate;
      break;
    default:
      break;
  }
}
+
+ /* _formatid_to_caps () is meant for muxers/demuxers, it
+ * transforms a name (ffmpeg way of ID'ing these, why don't
+ * they have unique numerical IDs?) to the corresponding
+ * caps belonging to that mux-format
+ *
+ * Note: we don't need any additional info because the caps
+ * isn't supposed to contain any useful info besides the
+ * media type anyway
+ */
+
+ GstCaps *
+ gst_ffmpeg_formatid_to_caps (const gchar * format_name)
+ {
+ GstCaps *caps = NULL;
+
+ if (!strcmp (format_name, "mpeg")) {
+ caps = gst_caps_new_simple ("video/mpeg",
+ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
+ } else if (!strcmp (format_name, "mpegts")) {
+ caps = gst_caps_new_simple ("video/mpegts",
+ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
+ } else if (!strcmp (format_name, "rm")) {
+ caps = gst_caps_new_simple ("application/x-pn-realmedia",
+ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
+ } else if (!strcmp (format_name, "asf")) {
+ caps = gst_caps_new_empty_simple ("video/x-ms-asf");
+ } else if (!strcmp (format_name, "avi")) {
+ caps = gst_caps_new_empty_simple ("video/x-msvideo");
+ } else if (!strcmp (format_name, "wav")) {
+ caps = gst_caps_new_empty_simple ("audio/x-wav");
+ } else if (!strcmp (format_name, "ape")) {
+ caps = gst_caps_new_empty_simple ("application/x-ape");
+ } else if (!strcmp (format_name, "swf")) {
+ caps = gst_caps_new_empty_simple ("application/x-shockwave-flash");
+ } else if (!strcmp (format_name, "au")) {
+ caps = gst_caps_new_empty_simple ("audio/x-au");
+ } else if (!strcmp (format_name, "dv")) {
+ caps = gst_caps_new_simple ("video/x-dv",
+ "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
+ } else if (!strcmp (format_name, "4xm")) {
+ caps = gst_caps_new_empty_simple ("video/x-4xm");
+ } else if (!strcmp (format_name, "matroska")) {
+ caps = gst_caps_new_empty_simple ("video/x-matroska");
+ } else if (!strcmp (format_name, "ivf")) {
+ caps = gst_caps_new_empty_simple ("video/x-ivf");
+ } else if (!strcmp (format_name, "mp3")) {
+ caps = gst_caps_new_empty_simple ("application/x-id3");
+ } else if (!strcmp (format_name, "flic")) {
+ caps = gst_caps_new_empty_simple ("video/x-fli");
+ } else if (!strcmp (format_name, "flv")) {
+ caps = gst_caps_new_empty_simple ("video/x-flv");
+ } else if (!strcmp (format_name, "tta")) {
+ caps = gst_caps_new_empty_simple ("audio/x-ttafile");
+ } else if (!strcmp (format_name, "aiff")) {
+ caps = gst_caps_new_empty_simple ("audio/x-aiff");
+ } else if (!strcmp (format_name, "mov_mp4_m4a_3gp_3g2")) {
+ caps =
+ gst_caps_from_string
+ ("application/x-3gp; video/quicktime; audio/x-m4a");
+ } else if (!strcmp (format_name, "mov")) {
+ caps = gst_caps_from_string ("video/quicktime,variant=(string)apple");
+ } else if (!strcmp (format_name, "mp4")) {
+ caps = gst_caps_from_string ("video/quicktime,variant=(string)iso");
+ } else if (!strcmp (format_name, "3gp")) {
+ caps = gst_caps_from_string ("video/quicktime,variant=(string)3gpp");
+ } else if (!strcmp (format_name, "3g2")) {
+ caps = gst_caps_from_string ("video/quicktime,variant=(string)3g2");
+ } else if (!strcmp (format_name, "psp")) {
+ caps = gst_caps_from_string ("video/quicktime,variant=(string)psp");
+ } else if (!strcmp (format_name, "ipod")) {
+ caps = gst_caps_from_string ("video/quicktime,variant=(string)ipod");
+ } else if (!strcmp (format_name, "aac")) {
+ caps = gst_caps_new_simple ("audio/mpeg",
+ "mpegversion", G_TYPE_INT, 4, NULL);
+ } else if (!strcmp (format_name, "gif")) {
+ caps = gst_caps_from_string ("image/gif");
+ } else if (!strcmp (format_name, "ogg")) {
+ caps = gst_caps_from_string ("application/ogg");
+ } else if (!strcmp (format_name, "mxf") || !strcmp (format_name, "mxf_d10")) {
+ caps = gst_caps_from_string ("application/mxf");
+ } else if (!strcmp (format_name, "gxf")) {
+ caps = gst_caps_from_string ("application/gxf");
+ } else if (!strcmp (format_name, "yuv4mpegpipe")) {
+ caps = gst_caps_new_simple ("application/x-yuv4mpeg",
+ "y4mversion", G_TYPE_INT, 2, NULL);
+ } else if (!strcmp (format_name, "mpc")) {
+ caps = gst_caps_from_string ("audio/x-musepack, streamversion = (int) 7");
+ } else if (!strcmp (format_name, "mpc8")) {
+ caps = gst_caps_from_string ("audio/x-musepack, streamversion = (int) 8");
+ } else if (!strcmp (format_name, "vqf")) {
+ caps = gst_caps_from_string ("audio/x-vqf");
+ } else if (!strcmp (format_name, "nsv")) {
+ caps = gst_caps_from_string ("video/x-nsv");
+ } else if (!strcmp (format_name, "amr")) {
+ caps = gst_caps_from_string ("audio/x-amr-nb-sh");
+ } else if (!strcmp (format_name, "webm")) {
+ caps = gst_caps_from_string ("video/webm");
+ } else if (!strcmp (format_name, "voc")) {
+ caps = gst_caps_from_string ("audio/x-voc");
+ } else if (!strcmp (format_name, "pva")) {
+ caps = gst_caps_from_string ("video/x-pva");
+ } else if (!strcmp (format_name, "brstm")) {
+ caps = gst_caps_from_string ("audio/x-brstm");
+ } else if (!strcmp (format_name, "bfstm")) {
+ caps = gst_caps_from_string ("audio/x-bfstm");
+ } else {
+ gchar *name;
+
+ GST_LOG ("Could not create stream format caps for %s", format_name);
+ name = g_strdup_printf ("application/x-gst-av-%s", format_name);
+ caps = gst_caps_new_empty_simple (name);
+ g_free (name);
+ }
+
+ return caps;
+ }
+
+ gboolean
+ gst_ffmpeg_formatid_get_codecids (const gchar * format_name,
+ enum AVCodecID ** video_codec_list, enum AVCodecID ** audio_codec_list,
+ AVOutputFormat * plugin)
+ {
+ static enum AVCodecID tmp_vlist[] = {
+ AV_CODEC_ID_NONE,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID tmp_alist[] = {
+ AV_CODEC_ID_NONE,
+ AV_CODEC_ID_NONE
+ };
+
+ GST_LOG ("format_name : %s", format_name);
+
+ if (!strcmp (format_name, "mp4")) {
+ static enum AVCodecID mp4_video_list[] = {
+ AV_CODEC_ID_MPEG4, AV_CODEC_ID_H264,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID mp4_audio_list[] = {
+ AV_CODEC_ID_AAC, AV_CODEC_ID_MP3,
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = mp4_video_list;
+ *audio_codec_list = mp4_audio_list;
+ } else if (!strcmp (format_name, "mpeg")) {
+ static enum AVCodecID mpeg_video_list[] = { AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID mpeg_audio_list[] = { AV_CODEC_ID_MP1,
+ AV_CODEC_ID_MP2,
+ AV_CODEC_ID_MP3,
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = mpeg_video_list;
+ *audio_codec_list = mpeg_audio_list;
+ } else if (!strcmp (format_name, "dvd")) {
+ static enum AVCodecID mpeg_video_list[] = { AV_CODEC_ID_MPEG2VIDEO,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID mpeg_audio_list[] = { AV_CODEC_ID_MP2,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = mpeg_video_list;
+ *audio_codec_list = mpeg_audio_list;
+ } else if (!strcmp (format_name, "mpegts")) {
+ static enum AVCodecID mpegts_video_list[] = { AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID mpegts_audio_list[] = { AV_CODEC_ID_MP2,
+ AV_CODEC_ID_MP3,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = mpegts_video_list;
+ *audio_codec_list = mpegts_audio_list;
+ } else if (!strcmp (format_name, "vob")) {
+ static enum AVCodecID vob_video_list[] =
+ { AV_CODEC_ID_MPEG2VIDEO, AV_CODEC_ID_NONE };
+ static enum AVCodecID vob_audio_list[] = { AV_CODEC_ID_MP2, AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS, AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = vob_video_list;
+ *audio_codec_list = vob_audio_list;
+ } else if (!strcmp (format_name, "flv")) {
+ static enum AVCodecID flv_video_list[] =
+ { AV_CODEC_ID_FLV1, AV_CODEC_ID_NONE };
+ static enum AVCodecID flv_audio_list[] =
+ { AV_CODEC_ID_MP3, AV_CODEC_ID_NONE };
+
+ *video_codec_list = flv_video_list;
+ *audio_codec_list = flv_audio_list;
+ } else if (!strcmp (format_name, "asf")) {
+ static enum AVCodecID asf_video_list[] =
+ { AV_CODEC_ID_WMV1, AV_CODEC_ID_WMV2, AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID asf_audio_list[] =
+ { AV_CODEC_ID_WMAV1, AV_CODEC_ID_WMAV2, AV_CODEC_ID_MP3,
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = asf_video_list;
+ *audio_codec_list = asf_audio_list;
+ } else if (!strcmp (format_name, "dv")) {
+ static enum AVCodecID dv_video_list[] =
+ { AV_CODEC_ID_DVVIDEO, AV_CODEC_ID_NONE };
+ static enum AVCodecID dv_audio_list[] =
+ { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_NONE };
+
+ *video_codec_list = dv_video_list;
+ *audio_codec_list = dv_audio_list;
+ } else if (!strcmp (format_name, "mov")) {
+ static enum AVCodecID mov_video_list[] = {
+ AV_CODEC_ID_SVQ1, AV_CODEC_ID_SVQ3, AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_H263, AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H264, AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID mov_audio_list[] = {
+ AV_CODEC_ID_PCM_MULAW, AV_CODEC_ID_PCM_ALAW, AV_CODEC_ID_ADPCM_IMA_QT,
+ AV_CODEC_ID_MACE3, AV_CODEC_ID_MACE6, AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AMR_NB, AV_CODEC_ID_AMR_WB,
+ AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE,
+ AV_CODEC_ID_MP3, AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = mov_video_list;
+ *audio_codec_list = mov_audio_list;
+ } else if ((!strcmp (format_name, "3gp") || !strcmp (format_name, "3g2"))) {
+ static enum AVCodecID tgp_video_list[] = {
+ AV_CODEC_ID_MPEG4, AV_CODEC_ID_H263, AV_CODEC_ID_H263P, AV_CODEC_ID_H264,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID tgp_audio_list[] = {
+ AV_CODEC_ID_AMR_NB, AV_CODEC_ID_AMR_WB,
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = tgp_video_list;
+ *audio_codec_list = tgp_audio_list;
+ } else if (!strcmp (format_name, "mmf")) {
+ static enum AVCodecID mmf_audio_list[] = {
+ AV_CODEC_ID_ADPCM_YAMAHA, AV_CODEC_ID_NONE
+ };
+ *video_codec_list = NULL;
+ *audio_codec_list = mmf_audio_list;
+ } else if (!strcmp (format_name, "amr")) {
+ static enum AVCodecID amr_audio_list[] = {
+ AV_CODEC_ID_AMR_NB, AV_CODEC_ID_AMR_WB,
+ AV_CODEC_ID_NONE
+ };
+ *video_codec_list = NULL;
+ *audio_codec_list = amr_audio_list;
+ } else if (!strcmp (format_name, "gif")) {
+ static enum AVCodecID gif_image_list[] = {
+ AV_CODEC_ID_RAWVIDEO, AV_CODEC_ID_NONE
+ };
+ *video_codec_list = gif_image_list;
+ *audio_codec_list = NULL;
+ } else if ((!strcmp (format_name, "pva"))) {
+ static enum AVCodecID pga_video_list[] = {
+ AV_CODEC_ID_MPEG2VIDEO,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID pga_audio_list[] = {
+ AV_CODEC_ID_MP2,
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = pga_video_list;
+ *audio_codec_list = pga_audio_list;
+ } else if ((!strcmp (format_name, "ivf"))) {
+ static enum AVCodecID ivf_video_list[] = {
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AV1,
+ AV_CODEC_ID_NONE
+ };
+ static enum AVCodecID ivf_audio_list[] = {
+ AV_CODEC_ID_NONE
+ };
+
+ *video_codec_list = ivf_video_list;
+ *audio_codec_list = ivf_audio_list;
+ } else if ((plugin->audio_codec != AV_CODEC_ID_NONE) ||
+ (plugin->video_codec != AV_CODEC_ID_NONE)) {
+ tmp_vlist[0] = plugin->video_codec;
+ tmp_alist[0] = plugin->audio_codec;
+
+ *video_codec_list = tmp_vlist;
+ *audio_codec_list = tmp_alist;
+ } else {
+ GST_LOG ("Format %s not found", format_name);
+ return FALSE;
+ }
+
+ return TRUE;
+ }
+
/* Convert a GstCaps to an FFmpeg codec ID. Size and the like are
 * omitted — those can be queried by the caller itself; we do not
 * consume the GstCaps or anything.
 * A pointer to an allocated context is also needed for
 * optional extra info.
 */
+
+ enum AVCodecID
+ gst_ffmpeg_caps_to_codecid (const GstCaps * caps, AVCodecContext * context)
+ {
+ enum AVCodecID id = AV_CODEC_ID_NONE;
+ const gchar *mimetype;
+ const GstStructure *structure;
+ gboolean video = FALSE, audio = FALSE; /* we want to be sure! */
+
+ g_return_val_if_fail (caps != NULL, AV_CODEC_ID_NONE);
+ g_return_val_if_fail (gst_caps_get_size (caps) == 1, AV_CODEC_ID_NONE);
+ structure = gst_caps_get_structure (caps, 0);
+
+ mimetype = gst_structure_get_name (structure);
+
+ if (!strcmp (mimetype, "video/x-raw")) {
+ id = AV_CODEC_ID_RAWVIDEO;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-raw")) {
+ GstAudioInfo info;
+
+ if (gst_audio_info_from_caps (&info, caps)) {
+ switch (GST_AUDIO_INFO_FORMAT (&info)) {
+ case GST_AUDIO_FORMAT_S8:
+ id = AV_CODEC_ID_PCM_S8;
+ break;
+ case GST_AUDIO_FORMAT_U8:
+ id = AV_CODEC_ID_PCM_U8;
+ break;
+ case GST_AUDIO_FORMAT_S16LE:
+ id = AV_CODEC_ID_PCM_S16LE;
+ break;
+ case GST_AUDIO_FORMAT_S16BE:
+ id = AV_CODEC_ID_PCM_S16BE;
+ break;
+ case GST_AUDIO_FORMAT_U16LE:
+ id = AV_CODEC_ID_PCM_U16LE;
+ break;
+ case GST_AUDIO_FORMAT_U16BE:
+ id = AV_CODEC_ID_PCM_U16BE;
+ break;
+ default:
+ break;
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ }
+ } else if (!strcmp (mimetype, "audio/x-mulaw")) {
+ id = AV_CODEC_ID_PCM_MULAW;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-alaw")) {
+ id = AV_CODEC_ID_PCM_ALAW;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-dv")) {
+ gboolean sys_strm;
+
+ if (gst_structure_get_boolean (structure, "systemstream", &sys_strm) &&
+ !sys_strm) {
+ id = AV_CODEC_ID_DVVIDEO;
+ video = TRUE;
+ }
+ } else if (!strcmp (mimetype, "audio/x-dv")) { /* ??? */
+ id = AV_CODEC_ID_DVAUDIO;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-h263")) {
+ const gchar *h263version =
+ gst_structure_get_string (structure, "h263version");
+ if (h263version && !strcmp (h263version, "h263p"))
+ id = AV_CODEC_ID_H263P;
+ else
+ id = AV_CODEC_ID_H263;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-intel-h263")) {
+ id = AV_CODEC_ID_H263I;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-h261")) {
+ id = AV_CODEC_ID_H261;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/mpeg")) {
+ gboolean sys_strm;
+ gint mpegversion;
+
+ if (gst_structure_get_boolean (structure, "systemstream", &sys_strm) &&
+ gst_structure_get_int (structure, "mpegversion", &mpegversion) &&
+ !sys_strm) {
+ switch (mpegversion) {
+ case 1:
+ id = AV_CODEC_ID_MPEG1VIDEO;
+ break;
+ case 2:
+ id = AV_CODEC_ID_MPEG2VIDEO;
+ break;
+ case 4:
+ id = AV_CODEC_ID_MPEG4;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ video = TRUE;
+ } else if (!strcmp (mimetype, "image/jpeg")) {
+ id = AV_CODEC_ID_MJPEG; /* A... B... */
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-jpeg-b")) {
+ id = AV_CODEC_ID_MJPEGB;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-wmv")) {
+ gint wmvversion = 0;
+
+ if (gst_structure_get_int (structure, "wmvversion", &wmvversion)) {
+ switch (wmvversion) {
+ case 1:
+ id = AV_CODEC_ID_WMV1;
+ break;
+ case 2:
+ id = AV_CODEC_ID_WMV2;
+ break;
+ case 3:
+ {
+ const gchar *format;
+
+ /* WMV3 unless the fourcc exists and says otherwise */
+ id = AV_CODEC_ID_WMV3;
+
+ if ((format = gst_structure_get_string (structure, "format")) &&
+ (g_str_equal (format, "WVC1") || g_str_equal (format, "WMVA")))
+ id = AV_CODEC_ID_VC1;
+
+ break;
+ }
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ video = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-vorbis")) {
+ id = AV_CODEC_ID_VORBIS;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-qdm2")) {
+ id = AV_CODEC_ID_QDM2;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/mpeg")) {
+ gint layer = 0;
+ gint mpegversion = 0;
+
+ if (gst_structure_get_int (structure, "mpegversion", &mpegversion)) {
+ switch (mpegversion) {
+ case 2: /* ffmpeg uses faad for both... */
+ case 4:
+ id = AV_CODEC_ID_AAC;
+ break;
+ case 1:
+ if (gst_structure_get_int (structure, "layer", &layer)) {
+ switch (layer) {
+ case 1:
+ id = AV_CODEC_ID_MP1;
+ break;
+ case 2:
+ id = AV_CODEC_ID_MP2;
+ break;
+ case 3:
+ id = AV_CODEC_ID_MP3;
+ break;
+ }
+ }
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-musepack")) {
+ gint streamversion = -1;
+
+ if (gst_structure_get_int (structure, "streamversion", &streamversion)) {
+ if (streamversion == 7)
+ id = AV_CODEC_ID_MUSEPACK7;
+ } else {
+ id = AV_CODEC_ID_MUSEPACK7;
+ }
+ } else if (!strcmp (mimetype, "audio/x-wma")) {
+ gint wmaversion = 0;
+
+ if (gst_structure_get_int (structure, "wmaversion", &wmaversion)) {
+ switch (wmaversion) {
+ case 1:
+ id = AV_CODEC_ID_WMAV1;
+ break;
+ case 2:
+ id = AV_CODEC_ID_WMAV2;
+ break;
+ case 3:
+ id = AV_CODEC_ID_WMAPRO;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-xma")) {
+ gint xmaversion = 0;
+
+ if (gst_structure_get_int (structure, "xmaversion", &xmaversion)) {
+ switch (xmaversion) {
+ case 1:
+ id = AV_CODEC_ID_XMA1;
+ break;
+ case 2:
+ id = AV_CODEC_ID_XMA2;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-wms")) {
+ id = AV_CODEC_ID_WMAVOICE;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-ac3")) {
+ id = AV_CODEC_ID_AC3;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-eac3")) {
+ id = AV_CODEC_ID_EAC3;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-vnd.sony.atrac3") ||
+ !strcmp (mimetype, "audio/atrac3")) {
+ id = AV_CODEC_ID_ATRAC3;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-dts")) {
+ id = AV_CODEC_ID_DTS;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "application/x-ape")) {
+ id = AV_CODEC_ID_APE;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-msmpeg")) {
+ gint msmpegversion = 0;
+
+ if (gst_structure_get_int (structure, "msmpegversion", &msmpegversion)) {
+ switch (msmpegversion) {
+ case 41:
+ id = AV_CODEC_ID_MSMPEG4V1;
+ break;
+ case 42:
+ id = AV_CODEC_ID_MSMPEG4V2;
+ break;
+ case 43:
+ id = AV_CODEC_ID_MSMPEG4V3;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-svq")) {
+ gint svqversion = 0;
+
+ if (gst_structure_get_int (structure, "svqversion", &svqversion)) {
+ switch (svqversion) {
+ case 1:
+ id = AV_CODEC_ID_SVQ1;
+ break;
+ case 3:
+ id = AV_CODEC_ID_SVQ3;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-huffyuv")) {
+ id = AV_CODEC_ID_HUFFYUV;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-mace")) {
+ gint maceversion = 0;
+
+ if (gst_structure_get_int (structure, "maceversion", &maceversion)) {
+ switch (maceversion) {
+ case 3:
+ id = AV_CODEC_ID_MACE3;
+ break;
+ case 6:
+ id = AV_CODEC_ID_MACE6;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-theora")) {
+ id = AV_CODEC_ID_THEORA;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-vp3")) {
+ id = AV_CODEC_ID_VP3;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-vp5")) {
+ id = AV_CODEC_ID_VP5;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-vp6")) {
+ id = AV_CODEC_ID_VP6;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-vp6-flash")) {
+ id = AV_CODEC_ID_VP6F;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-vp6-alpha")) {
+ id = AV_CODEC_ID_VP6A;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-vp8")) {
+ id = AV_CODEC_ID_VP8;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-vp9")) {
+ id = AV_CODEC_ID_VP9;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-flash-screen")) {
+ id = AV_CODEC_ID_FLASHSV;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-flash-screen2")) {
+ id = AV_CODEC_ID_FLASHSV2;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-cineform")) {
+ id = AV_CODEC_ID_CFHD;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-speedhq")) {
+ id = AV_CODEC_ID_SPEEDHQ;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-indeo")) {
+ gint indeoversion = 0;
+
+ if (gst_structure_get_int (structure, "indeoversion", &indeoversion)) {
+ switch (indeoversion) {
+ case 5:
+ id = AV_CODEC_ID_INDEO5;
+ break;
+ case 4:
+ id = AV_CODEC_ID_INDEO4;
+ break;
+ case 3:
+ id = AV_CODEC_ID_INDEO3;
+ break;
+ case 2:
+ id = AV_CODEC_ID_INDEO2;
+ break;
+ }
+ if (id != AV_CODEC_ID_NONE)
+ video = TRUE;
+ }
+ } else if (!strcmp (mimetype, "video/x-divx")) {
+ gint divxversion = 0;
+
+ if (gst_structure_get_int (structure, "divxversion", &divxversion)) {
+ switch (divxversion) {
+ case 3:
+ id = AV_CODEC_ID_MSMPEG4V3;
+ break;
+ case 4:
+ case 5:
+ id = AV_CODEC_ID_MPEG4;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-ffv")) {
+ gint ffvversion = 0;
+
+ if (gst_structure_get_int (structure, "ffvversion", &ffvversion) &&
+ ffvversion == 1) {
+ id = AV_CODEC_ID_FFV1;
+ video = TRUE;
+ }
+ } else if (!strcmp (mimetype, "video/x-apple-intermediate-codec")) {
+ id = AV_CODEC_ID_AIC;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-adpcm")) {
+ const gchar *layout;
+
+ layout = gst_structure_get_string (structure, "layout");
+ if (layout == NULL) {
+ /* break */
+ } else if (!strcmp (layout, "quicktime")) {
+ id = AV_CODEC_ID_ADPCM_IMA_QT;
+ } else if (!strcmp (layout, "microsoft")) {
+ id = AV_CODEC_ID_ADPCM_MS;
+ } else if (!strcmp (layout, "dvi")) {
+ id = AV_CODEC_ID_ADPCM_IMA_WAV;
+ } else if (!strcmp (layout, "4xm")) {
+ id = AV_CODEC_ID_ADPCM_4XM;
+ } else if (!strcmp (layout, "smjpeg")) {
+ id = AV_CODEC_ID_ADPCM_IMA_SMJPEG;
+ } else if (!strcmp (layout, "dk3")) {
+ id = AV_CODEC_ID_ADPCM_IMA_DK3;
+ } else if (!strcmp (layout, "dk4")) {
+ id = AV_CODEC_ID_ADPCM_IMA_DK4;
+ } else if (!strcmp (layout, "oki")) {
+ id = AV_CODEC_ID_ADPCM_IMA_OKI;
+ } else if (!strcmp (layout, "westwood")) {
+ id = AV_CODEC_ID_ADPCM_IMA_WS;
+ } else if (!strcmp (layout, "iss")) {
+ id = AV_CODEC_ID_ADPCM_IMA_ISS;
+ } else if (!strcmp (layout, "xa")) {
+ id = AV_CODEC_ID_ADPCM_XA;
+ } else if (!strcmp (layout, "adx")) {
+ id = AV_CODEC_ID_ADPCM_ADX;
+ } else if (!strcmp (layout, "ea")) {
+ id = AV_CODEC_ID_ADPCM_EA;
+ } else if (!strcmp (layout, "g726")) {
+ id = AV_CODEC_ID_ADPCM_G726;
+ } else if (!strcmp (layout, "g721")) {
+ id = AV_CODEC_ID_ADPCM_G726;
+ } else if (!strcmp (layout, "ct")) {
+ id = AV_CODEC_ID_ADPCM_CT;
+ } else if (!strcmp (layout, "swf")) {
+ id = AV_CODEC_ID_ADPCM_SWF;
+ } else if (!strcmp (layout, "yamaha")) {
+ id = AV_CODEC_ID_ADPCM_YAMAHA;
+ } else if (!strcmp (layout, "sbpro2")) {
+ id = AV_CODEC_ID_ADPCM_SBPRO_2;
+ } else if (!strcmp (layout, "sbpro3")) {
+ id = AV_CODEC_ID_ADPCM_SBPRO_3;
+ } else if (!strcmp (layout, "sbpro4")) {
+ id = AV_CODEC_ID_ADPCM_SBPRO_4;
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-4xm")) {
+ id = AV_CODEC_ID_4XM;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-dpcm")) {
+ const gchar *layout;
+
+ layout = gst_structure_get_string (structure, "layout");
+ if (!layout) {
+ /* .. */
+ } else if (!strcmp (layout, "roq")) {
+ id = AV_CODEC_ID_ROQ_DPCM;
+ } else if (!strcmp (layout, "interplay")) {
+ id = AV_CODEC_ID_INTERPLAY_DPCM;
+ } else if (!strcmp (layout, "xan")) {
+ id = AV_CODEC_ID_XAN_DPCM;
+ } else if (!strcmp (layout, "sol")) {
+ id = AV_CODEC_ID_SOL_DPCM;
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-flac")) {
+ id = AV_CODEC_ID_FLAC;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-shorten")) {
+ id = AV_CODEC_ID_SHORTEN;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-alac")) {
+ id = AV_CODEC_ID_ALAC;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-cinepak")) {
+ id = AV_CODEC_ID_CINEPAK;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-pn-realvideo")) {
+ gint rmversion;
+
+ if (gst_structure_get_int (structure, "rmversion", &rmversion)) {
+ switch (rmversion) {
+ case 1:
+ id = AV_CODEC_ID_RV10;
+ break;
+ case 2:
+ id = AV_CODEC_ID_RV20;
+ break;
+ case 3:
+ id = AV_CODEC_ID_RV30;
+ break;
+ case 4:
+ id = AV_CODEC_ID_RV40;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ video = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-sipro")) {
+ id = AV_CODEC_ID_SIPR;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/x-pn-realaudio")) {
+ gint raversion;
+
+ if (gst_structure_get_int (structure, "raversion", &raversion)) {
+ switch (raversion) {
+ case 1:
+ id = AV_CODEC_ID_RA_144;
+ break;
+ case 2:
+ id = AV_CODEC_ID_RA_288;
+ break;
+ case 8:
+ id = AV_CODEC_ID_COOK;
+ break;
+ }
+ }
+ if (id != AV_CODEC_ID_NONE)
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-rle")) {
+ const gchar *layout;
+
+ if ((layout = gst_structure_get_string (structure, "layout"))) {
+ if (!strcmp (layout, "microsoft")) {
+ id = AV_CODEC_ID_MSRLE;
+ video = TRUE;
+ }
+ }
+ } else if (!strcmp (mimetype, "video/x-xan")) {
+ gint wcversion = 0;
+
+ if ((gst_structure_get_int (structure, "wcversion", &wcversion))) {
+ switch (wcversion) {
+ case 3:
+ id = AV_CODEC_ID_XAN_WC3;
+ video = TRUE;
+ break;
+ case 4:
+ id = AV_CODEC_ID_XAN_WC4;
+ video = TRUE;
+ break;
+ default:
+ break;
+ }
+ }
+ } else if (!strcmp (mimetype, "audio/AMR")) {
+ audio = TRUE;
+ id = AV_CODEC_ID_AMR_NB;
+ } else if (!strcmp (mimetype, "audio/AMR-WB")) {
+ id = AV_CODEC_ID_AMR_WB;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "audio/qcelp")) {
+ id = AV_CODEC_ID_QCELP;
+ audio = TRUE;
+ } else if (!strcmp (mimetype, "video/x-h264")) {
+ id = AV_CODEC_ID_H264;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-h265")) {
+ id = AV_CODEC_ID_HEVC;
+ video = TRUE;
+ } else if (!strcmp (mimetype, "video/x-flash-video")) {
+ gint flvversion = 0;
+
+ if ((gst_structure_get_int (structure, "flvversion", &flvversion))) {
+ switch (flvversion) {
+ case 1:
+ id = AV_CODEC_ID_FLV1;
+ video = TRUE;
+ break;
+ default:
+ break;
+ }
+ }
+
+ } else if (!strcmp (mimetype, "audio/x-nellymoser")) {
+ id = AV_CODEC_ID_NELLYMOSER;
+ audio = TRUE;
+ } else if (!strncmp (mimetype, "audio/x-gst-av-", 15)) {
+ gchar ext[16];
+ AVCodec *codec;
+
+ if (strlen (mimetype) <= 30 &&
+ sscanf (mimetype, "audio/x-gst-av-%s", ext) == 1) {
+ if ((codec = avcodec_find_decoder_by_name (ext)) ||
+ (codec = avcodec_find_encoder_by_name (ext))) {
+ id = codec->id;
+ audio = TRUE;
+ }
+ }
+ } else if (!strncmp (mimetype, "video/x-gst-av-", 15)) {
+ gchar ext[16];
+ AVCodec *codec;
+
+ if (strlen (mimetype) <= 30 &&
+ sscanf (mimetype, "video/x-gst-av-%s", ext) == 1) {
+ if ((codec = avcodec_find_decoder_by_name (ext)) ||
+ (codec = avcodec_find_encoder_by_name (ext))) {
+ id = codec->id;
+ video = TRUE;
+ }
+ }
+ }
+
+ if (context != NULL) {
+ if (video == TRUE) {
+ context->codec_type = AVMEDIA_TYPE_VIDEO;
+ } else if (audio == TRUE) {
+ context->codec_type = AVMEDIA_TYPE_AUDIO;
+ } else {
+ context->codec_type = AVMEDIA_TYPE_UNKNOWN;
+ }
+ context->codec_id = id;
+ gst_ffmpeg_caps_with_codecid (id, context->codec_type, caps, context);
+ }
+
+ if (id != AV_CODEC_ID_NONE) {
+ GST_DEBUG ("The id=%d belongs to the caps %" GST_PTR_FORMAT, id, caps);
+ } else {
+ GST_WARNING ("Couldn't figure out the id for caps %" GST_PTR_FORMAT, caps);
+ }
+
+ return id;
+ }
--- /dev/null
- /* if we need to land on a keyframe, try to do so, we don't try to do a
+ /* GStreamer
+ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>,
+ * <2006> Edward Hervey <bilboed@bilboed.com>
+ * <2006> Wim Taymans <wim@fluendo.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+
+ #include <string.h>
+
+ #include <libavformat/avformat.h>
+ #include <libavutil/imgutils.h>
+ /* #include <ffmpeg/avi.h> */
+ #include <gst/gst.h>
+ #include <gst/base/gstflowcombiner.h>
+
+ #include "gstav.h"
+ #include "gstavcodecmap.h"
+ #include "gstavutils.h"
+ #include "gstavprotocol.h"
+
+ #define MAX_STREAMS 20
+
+ typedef struct _GstFFMpegDemux GstFFMpegDemux;
+ typedef struct _GstFFStream GstFFStream;
+
+ struct _GstFFStream
+ {
+ GstPad *pad;
+
+ AVStream *avstream;
+
+ gboolean unknown;
+ GstClockTime last_ts;
+ gboolean discont;
+ gboolean eos;
+
+ GstTagList *tags; /* stream tags */
+ };
+
+ struct _GstFFMpegDemux
+ {
+ GstElement element;
+
+ /* We need to keep track of our pads, so we do so here. */
+ GstPad *sinkpad;
+
+ gboolean have_group_id;
+ guint group_id;
+
+ AVFormatContext *context;
+ gboolean opened;
+
+ GstFFStream *streams[MAX_STREAMS];
+
+ GstFlowCombiner *flowcombiner;
+
+ gint videopads, audiopads;
+
+ GstClockTime start_time;
+ GstClockTime duration;
+
+ /* TRUE if working in pull-mode */
+ gboolean seekable;
+
+ /* TRUE if the avformat demuxer can reliably handle streaming mode */
+ gboolean can_push;
+
+ gboolean flushing;
+
+ /* segment stuff */
+ GstSegment segment;
+
+ /* cached seek in READY */
+ GstEvent *seek_event;
+
+ /* cached upstream events */
+ GList *cached_events;
+
+ /* push mode data */
+ GstFFMpegPipe ffpipe;
+ GstTask *task;
+ GRecMutex task_lock;
+ };
+
+ typedef struct _GstFFMpegDemuxClass GstFFMpegDemuxClass;
+
+ struct _GstFFMpegDemuxClass
+ {
+ GstElementClass parent_class;
+
+ AVInputFormat *in_plugin;
+ GstPadTemplate *sinktempl;
+ GstPadTemplate *videosrctempl;
+ GstPadTemplate *audiosrctempl;
+ };
+
+ /* A number of function prototypes are given so we can refer to them later. */
+ static void gst_ffmpegdemux_class_init (GstFFMpegDemuxClass * klass);
+ static void gst_ffmpegdemux_base_init (GstFFMpegDemuxClass * klass);
+ static void gst_ffmpegdemux_init (GstFFMpegDemux * demux);
+ static void gst_ffmpegdemux_finalize (GObject * object);
+
+ static gboolean gst_ffmpegdemux_sink_event (GstPad * sinkpad,
+ GstObject * parent, GstEvent * event);
+ static GstFlowReturn gst_ffmpegdemux_chain (GstPad * sinkpad,
+ GstObject * parent, GstBuffer * buf);
+
+ static void gst_ffmpegdemux_loop (GstFFMpegDemux * demux);
+ static gboolean gst_ffmpegdemux_sink_activate (GstPad * sinkpad,
+ GstObject * parent);
+ static gboolean gst_ffmpegdemux_sink_activate_mode (GstPad * sinkpad,
+ GstObject * parent, GstPadMode mode, gboolean active);
+ static GstTagList *gst_ffmpeg_metadata_to_tag_list (AVDictionary * metadata);
+
+ #if 0
+ static gboolean
+ gst_ffmpegdemux_src_convert (GstPad * pad,
+ GstFormat src_fmt,
+ gint64 src_value, GstFormat * dest_fmt, gint64 * dest_value);
+ #endif
+ static gboolean
+ gst_ffmpegdemux_send_event (GstElement * element, GstEvent * event);
+ static GstStateChangeReturn
+ gst_ffmpegdemux_change_state (GstElement * element, GstStateChange transition);
+
+ #define GST_FFDEMUX_PARAMS_QDATA g_quark_from_static_string("avdemux-params")
+
+ static GstElementClass *parent_class = NULL;
+
+ static const gchar *
+ gst_ffmpegdemux_averror (gint av_errno)
+ {
+ const gchar *message = NULL;
+
+ switch (av_errno) {
+ case AVERROR (EINVAL):
+ message = "Unknown error";
+ break;
+ case AVERROR (EIO):
+ message = "Input/output error";
+ break;
+ case AVERROR (EDOM):
+ message = "Number syntax expected in filename";
+ break;
+ case AVERROR (ENOMEM):
+ message = "Not enough memory";
+ break;
+ case AVERROR (EILSEQ):
+ message = "Unknown format";
+ break;
+ case AVERROR (ENOSYS):
+ message = "Operation not supported";
+ break;
+ default:
+ message = "Unhandled error code received";
+ break;
+ }
+
+ return message;
+ }
+
+ static void
+ gst_ffmpegdemux_base_init (GstFFMpegDemuxClass * klass)
+ {
+ GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
+ AVInputFormat *in_plugin;
+ GstCaps *sinkcaps;
+ GstPadTemplate *sinktempl, *audiosrctempl, *videosrctempl;
+ gchar *longname, *description, *name;
+
+ in_plugin = (AVInputFormat *)
+ g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass), GST_FFDEMUX_PARAMS_QDATA);
+ g_assert (in_plugin != NULL);
+
+ name = g_strdup (in_plugin->name);
+ g_strdelimit (name, ".,|-<> ", '_');
+
+ /* construct the element details struct */
+ longname = g_strdup_printf ("libav %s demuxer", in_plugin->long_name);
+ description = g_strdup_printf ("libav %s demuxer", in_plugin->long_name);
+ gst_element_class_set_metadata (element_class, longname,
+ "Codec/Demuxer", description,
+ "Wim Taymans <wim@fluendo.com>, "
+ "Ronald Bultje <rbultje@ronald.bitfreak.net>, "
+ "Edward Hervey <bilboed@bilboed.com>");
+ g_free (longname);
+ g_free (description);
+
+ /* pad templates */
+ sinkcaps = gst_ffmpeg_formatid_to_caps (name);
+ sinktempl = gst_pad_template_new ("sink",
+ GST_PAD_SINK, GST_PAD_ALWAYS, sinkcaps);
+ g_free (name);
+ videosrctempl = gst_pad_template_new ("video_%u",
+ GST_PAD_SRC, GST_PAD_SOMETIMES, GST_CAPS_ANY);
+ audiosrctempl = gst_pad_template_new ("audio_%u",
+ GST_PAD_SRC, GST_PAD_SOMETIMES, GST_CAPS_ANY);
+
+ gst_element_class_add_pad_template (element_class, videosrctempl);
+ gst_element_class_add_pad_template (element_class, audiosrctempl);
+ gst_element_class_add_pad_template (element_class, sinktempl);
+
+ gst_caps_unref (sinkcaps);
+
+ klass->in_plugin = in_plugin;
+ klass->videosrctempl = videosrctempl;
+ klass->audiosrctempl = audiosrctempl;
+ klass->sinktempl = sinktempl;
+ }
+
+ static void
+ gst_ffmpegdemux_class_init (GstFFMpegDemuxClass * klass)
+ {
+ GObjectClass *gobject_class;
+ GstElementClass *gstelement_class;
+
+ gobject_class = (GObjectClass *) klass;
+ gstelement_class = (GstElementClass *) klass;
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_ffmpegdemux_finalize);
+
+ gstelement_class->change_state = gst_ffmpegdemux_change_state;
+ gstelement_class->send_event = gst_ffmpegdemux_send_event;
+ }
+
+ static void
+ gst_ffmpegdemux_init (GstFFMpegDemux * demux)
+ {
+ GstFFMpegDemuxClass *oclass =
+ (GstFFMpegDemuxClass *) (G_OBJECT_GET_CLASS (demux));
+ gint n;
+
+ demux->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
+ gst_pad_set_activate_function (demux->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_ffmpegdemux_sink_activate));
+ gst_pad_set_activatemode_function (demux->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_ffmpegdemux_sink_activate_mode));
+ gst_element_add_pad (GST_ELEMENT (demux), demux->sinkpad);
+
+ /* push based setup */
+ /* the following are not used in pull-based mode, so safe to set anyway */
+ gst_pad_set_event_function (demux->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_ffmpegdemux_sink_event));
+ gst_pad_set_chain_function (demux->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_ffmpegdemux_chain));
+ /* task for driving ffmpeg in loop function */
+ demux->task =
+ gst_task_new ((GstTaskFunction) gst_ffmpegdemux_loop, demux, NULL);
+ g_rec_mutex_init (&demux->task_lock);
+ gst_task_set_lock (demux->task, &demux->task_lock);
+
+ demux->have_group_id = FALSE;
+ demux->group_id = G_MAXUINT;
+
+ demux->opened = FALSE;
+ demux->context = NULL;
+
+ for (n = 0; n < MAX_STREAMS; n++) {
+ demux->streams[n] = NULL;
+ }
+ demux->videopads = 0;
+ demux->audiopads = 0;
+
+ demux->seek_event = NULL;
+ gst_segment_init (&demux->segment, GST_FORMAT_TIME);
+
+ demux->flowcombiner = gst_flow_combiner_new ();
+
+ /* push based data */
+ g_mutex_init (&demux->ffpipe.tlock);
+ g_cond_init (&demux->ffpipe.cond);
+ demux->ffpipe.adapter = gst_adapter_new ();
+
+ /* blacklist unreliable push-based demuxers */
+ if (strcmp (oclass->in_plugin->name, "ape"))
+ demux->can_push = TRUE;
+ else
+ demux->can_push = FALSE;
+ }
+
+ static void
+ gst_ffmpegdemux_finalize (GObject * object)
+ {
+ GstFFMpegDemux *demux;
+
+ demux = (GstFFMpegDemux *) object;
+
+ gst_flow_combiner_free (demux->flowcombiner);
+
+ g_mutex_clear (&demux->ffpipe.tlock);
+ g_cond_clear (&demux->ffpipe.cond);
+ gst_object_unref (demux->ffpipe.adapter);
+
+ gst_object_unref (demux->task);
+ g_rec_mutex_clear (&demux->task_lock);
+
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+ }
+
+ static void
+ gst_ffmpegdemux_close (GstFFMpegDemux * demux)
+ {
+ gint n;
+ GstEvent **event_p;
+
+ if (!demux->opened)
+ return;
+
+ /* remove pads from ourselves */
+ for (n = 0; n < MAX_STREAMS; n++) {
+ GstFFStream *stream;
+
+ stream = demux->streams[n];
+ if (stream) {
+ if (stream->pad) {
+ gst_flow_combiner_remove_pad (demux->flowcombiner, stream->pad);
+ gst_element_remove_pad (GST_ELEMENT (demux), stream->pad);
+ }
+ if (stream->tags)
+ gst_tag_list_unref (stream->tags);
+ g_free (stream);
+ }
+ demux->streams[n] = NULL;
+ }
+ demux->videopads = 0;
+ demux->audiopads = 0;
+
+ /* close demuxer context from ffmpeg */
+ if (demux->seekable)
+ gst_ffmpegdata_close (demux->context->pb);
+ else
+ gst_ffmpeg_pipe_close (demux->context->pb);
+ demux->context->pb = NULL;
+ avformat_close_input (&demux->context);
+ if (demux->context)
+ avformat_free_context (demux->context);
+ demux->context = NULL;
+
+ GST_OBJECT_LOCK (demux);
+ demux->opened = FALSE;
+ event_p = &demux->seek_event;
+ gst_event_replace (event_p, NULL);
+ GST_OBJECT_UNLOCK (demux);
+
+ gst_segment_init (&demux->segment, GST_FORMAT_TIME);
+ }
+
+ /* send an event to all the source pads .
+ * Takes ownership of the event.
+ *
+ * Returns FALSE if none of the source pads handled the event.
+ */
+ static gboolean
+ gst_ffmpegdemux_push_event (GstFFMpegDemux * demux, GstEvent * event)
+ {
+ gboolean res;
+ gint n;
+
+ res = TRUE;
+
+ for (n = 0; n < MAX_STREAMS; n++) {
+ GstFFStream *s = demux->streams[n];
+
+ if (s && s->pad) {
+ gst_event_ref (event);
+ res &= gst_pad_push_event (s->pad, event);
+ }
+ }
+ gst_event_unref (event);
+
+ return res;
+ }
+
+ /* set flags on all streams */
+ static void
+ gst_ffmpegdemux_set_flags (GstFFMpegDemux * demux, gboolean discont,
+ gboolean eos)
+ {
+ GstFFStream *s;
+ gint n;
+
+ for (n = 0; n < MAX_STREAMS; n++) {
+ if ((s = demux->streams[n])) {
+ s->discont = discont;
+ s->eos = eos;
+ }
+ }
+ }
+
+ /* check if all streams are eos */
+ static gboolean
+ gst_ffmpegdemux_is_eos (GstFFMpegDemux * demux)
+ {
+ GstFFStream *s;
+ gint n;
+
+ for (n = 0; n < MAX_STREAMS; n++) {
+ if ((s = demux->streams[n])) {
+ GST_DEBUG ("stream %d %p eos:%d", n, s, s->eos);
+ if (!s->eos)
+ return FALSE;
+ }
+ }
+ return TRUE;
+ }
+
+ /* Returns True if we at least outputted one buffer */
+ static gboolean
+ gst_ffmpegdemux_has_outputted (GstFFMpegDemux * demux)
+ {
+ GstFFStream *s;
+ gint n;
+
+ for (n = 0; n < MAX_STREAMS; n++) {
+ if ((s = demux->streams[n])) {
+ if (GST_CLOCK_TIME_IS_VALID (s->last_ts))
+ return TRUE;
+ }
+ }
+ return FALSE;
+ }
+
/* Position the ffmpeg context at segment->position.
 *
 * Seeks on the default stream index; when GST_SEEK_FLAG_KEY_UNIT is set
 * the target is first snapped back to the previous keyframe found in
 * ffmpeg's index.  On success the segment's position/time/start are
 * rewritten to the (possibly keyframe-adjusted) target, expressed in
 * stream time (i.e. with demux->start_time subtracted again).
 *
 * Returns TRUE on success, FALSE when there is no default stream or
 * av_seek_frame() failed.
 */
static gboolean
gst_ffmpegdemux_do_seek (GstFFMpegDemux * demux, GstSegment * segment)
{
  gboolean ret;
  gint seekret;
  gint64 target;
  gint64 fftarget;
  AVStream *stream;
  gint index;

  /* find default index and fail if none is present */
  index = av_find_default_stream_index (demux->context);
  GST_LOG_OBJECT (demux, "default stream index %d", index);
  if (index < 0)
    return FALSE;

  ret = TRUE;

  /* get the stream for seeking */
  stream = demux->context->streams[index];
  /* initial seek position: the segment position is in stream time, so
   * add the container start offset back to get absolute time */
  target = segment->position + demux->start_time;
  /* convert target to ffmpeg time in this stream's time_base */
  fftarget = gst_ffmpeg_time_gst_to_ff (target, stream->time_base);

  GST_LOG_OBJECT (demux, "do seek to time %" GST_TIME_FORMAT,
      GST_TIME_ARGS (target));

  /* if we need to land on a keyframe, try to do so, we don't try to do a
   * keyframe seek if we are not absolutely sure we have an index.*/
  if (segment->flags & GST_SEEK_FLAG_KEY_UNIT) {
    gint keyframeidx;

    GST_LOG_OBJECT (demux, "looking for keyframe in ffmpeg for time %"
        GST_TIME_FORMAT, GST_TIME_ARGS (target));

    /* search in the index for the previous keyframe */
    keyframeidx =
        av_index_search_timestamp (stream, fftarget, AVSEEK_FLAG_BACKWARD);

    GST_LOG_OBJECT (demux, "keyframeidx: %d", keyframeidx);

    if (keyframeidx >= 0) {
      /* NOTE(review): avformat_index_get_entry() may in principle return
       * NULL; presumably a non-negative index from
       * av_index_search_timestamp() guarantees the entry exists --
       * confirm before relying on it */
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58,78,0)
      fftarget = avformat_index_get_entry (stream, keyframeidx)->timestamp;
#else
      fftarget = stream->index_entries[keyframeidx].timestamp;
#endif
      target = gst_ffmpeg_time_ff_to_gst (fftarget, stream->time_base);

      GST_LOG_OBJECT (demux,
          "Found a keyframe at ffmpeg idx: %d timestamp :%" GST_TIME_FORMAT,
          keyframeidx, GST_TIME_ARGS (target));
    }
  }

  /* NOTE(review): the log text says flag 0 but AVSEEK_FLAG_BACKWARD is
   * actually passed below */
  GST_DEBUG_OBJECT (demux,
      "About to call av_seek_frame (context, %d, %" G_GINT64_FORMAT
      ", 0) for time %" GST_TIME_FORMAT, index, fftarget,
      GST_TIME_ARGS (target));

  if ((seekret =
          av_seek_frame (demux->context, index, fftarget,
              AVSEEK_FLAG_BACKWARD)) < 0)
    goto seek_failed;

  GST_DEBUG_OBJECT (demux, "seek success, returned %d", seekret);

  /* convert back to stream time by removing the start offset again,
   * clamping at 0 since GStreamer timestamps are unsigned */
  if (target > demux->start_time)
    target -= demux->start_time;
  else
    target = 0;

  segment->position = target;
  segment->time = target;
  segment->start = target;

  return ret;

  /* ERRORS */
seek_failed:
  {
    GST_WARNING_OBJECT (demux, "Call to av_seek_frame failed : %d", seekret);
    return FALSE;
  }
}
+
/* Execute a seek event (or re-seek the current segment when @event is
 * NULL) on the ffmpeg context.
 *
 * Only works in pull mode (demux->seekable); push-mode seeks are
 * refused.  Follows the standard demuxer seek sequence: flush-start (or
 * pause the task), take the stream lock, seek on a scratch copy of the
 * segment, flush-stop, commit the segment, and restart the task.
 *
 * Does NOT consume @event; the caller keeps its reference.
 * Returns TRUE if the seek succeeded.
 */
static gboolean
gst_ffmpegdemux_perform_seek (GstFFMpegDemux * demux, GstEvent * event)
{
  gboolean res;
  gdouble rate;
  GstFormat format;
  GstSeekFlags flags;
  GstSeekType cur_type, stop_type;
  gint64 cur, stop;
  gboolean flush;
  gboolean update;
  GstSegment seeksegment;

  if (!demux->seekable) {
    GST_DEBUG_OBJECT (demux, "in push mode; ignoring seek");
    return FALSE;
  }

  GST_DEBUG_OBJECT (demux, "starting seek");

  if (event) {
    gst_event_parse_seek (event, &rate, &format, &flags,
        &cur_type, &cur, &stop_type, &stop);

    /* we have to have a format as the segment format. Try to convert
     * if not. */
    if (demux->segment.format != format) {
      GstFormat fmt;

      fmt = demux->segment.format;
      res = TRUE;
      /* FIXME, use source pad */
      if (cur_type != GST_SEEK_TYPE_NONE && cur != -1)
        res = gst_pad_query_convert (demux->sinkpad, format, cur, fmt, &cur);
      if (res && stop_type != GST_SEEK_TYPE_NONE && stop != -1)
        res = gst_pad_query_convert (demux->sinkpad, format, stop, fmt, &stop);
      if (!res)
        goto no_format;

      format = fmt;
    }
  } else {
    /* no event: only `flags' is read below; the other parse outputs stay
     * unused because gst_segment_do_seek() is skipped as well */
    flags = 0;
  }

  flush = flags & GST_SEEK_FLAG_FLUSH;

  /* send flush start */
  if (flush) {
    /* mark flushing so that the streaming thread can react on it */
    GST_OBJECT_LOCK (demux);
    demux->flushing = TRUE;
    GST_OBJECT_UNLOCK (demux);
    gst_pad_push_event (demux->sinkpad, gst_event_new_flush_start ());
    gst_ffmpegdemux_push_event (demux, gst_event_new_flush_start ());
  } else {
    gst_pad_pause_task (demux->sinkpad);
  }

  /* grab streaming lock, this should eventually be possible, either
   * because the task is paused or our streaming thread stopped
   * because our peer is flushing. */
  GST_PAD_STREAM_LOCK (demux->sinkpad);

  /* make copy into temp structure, we can only update the main one
   * when we actually could do the seek. */
  memcpy (&seeksegment, &demux->segment, sizeof (GstSegment));

  /* now configure the seek segment */
  if (event) {
    gst_segment_do_seek (&seeksegment, rate, format, flags,
        cur_type, cur, stop_type, stop, &update);
  }

  GST_DEBUG_OBJECT (demux, "segment configured from %" G_GINT64_FORMAT
      " to %" G_GINT64_FORMAT ", position %" G_GINT64_FORMAT,
      seeksegment.start, seeksegment.stop, seeksegment.position);

  /* make the sinkpad available for data passing since we might need
   * it when doing the seek */
  if (flush) {
    GST_OBJECT_LOCK (demux);
    demux->flushing = FALSE;
    GST_OBJECT_UNLOCK (demux);
    gst_pad_push_event (demux->sinkpad, gst_event_new_flush_stop (TRUE));
  }

  /* do the seek, segment.position contains new position. */
  res = gst_ffmpegdemux_do_seek (demux, &seeksegment);

  /* and prepare to continue streaming */
  if (flush) {
    /* send flush stop, peer will accept data and events again. We
     * are not yet providing data as we still have the STREAM_LOCK. */
    gst_ffmpegdemux_push_event (demux, gst_event_new_flush_stop (TRUE));
  }
  /* if successful seek, we update our real segment and push
   * out the new segment. */
  if (res) {
    memcpy (&demux->segment, &seeksegment, sizeof (GstSegment));

    if (demux->segment.flags & GST_SEEK_FLAG_SEGMENT) {
      gst_element_post_message (GST_ELEMENT (demux),
          gst_message_new_segment_start (GST_OBJECT (demux),
              demux->segment.format, demux->segment.position));
    }

    /* now send the newsegment, FIXME, do this from the streaming thread */
    GST_DEBUG_OBJECT (demux, "Sending newsegment %" GST_SEGMENT_FORMAT,
        &demux->segment);

    gst_ffmpegdemux_push_event (demux, gst_event_new_segment (&demux->segment));
  }

  /* Mark discont on all srcpads and remove eos */
  gst_ffmpegdemux_set_flags (demux, TRUE, FALSE);
  gst_flow_combiner_reset (demux->flowcombiner);

  /* and restart the task in case it got paused explicitly or by
   * the FLUSH_START event we pushed out.
   * NOTE(review): the user_data passed here is demux->sinkpad although
   * gst_ffmpegdemux_loop() takes a GstFFMpegDemux * (the task created in
   * _init() passes `demux'); gst_pad_start_task() only uses func/user_data
   * when the pad has no task yet, so this looks like a latent mismatch --
   * confirm and consider passing `demux' instead */
  gst_pad_start_task (demux->sinkpad, (GstTaskFunction) gst_ffmpegdemux_loop,
      demux->sinkpad, NULL);

  /* and release the lock again so we can continue streaming */
  GST_PAD_STREAM_UNLOCK (demux->sinkpad);

  return res;

  /* ERROR */
no_format:
  {
    GST_DEBUG_OBJECT (demux, "undefined format given, seek aborted.");
    return FALSE;
  }
}
+
+ static gboolean
+ gst_ffmpegdemux_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
+ {
+ GstFFMpegDemux *demux;
+ gboolean res = TRUE;
+
+ demux = (GstFFMpegDemux *) parent;
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_SEEK:
+ res = gst_ffmpegdemux_perform_seek (demux, event);
+ gst_event_unref (event);
+ break;
+ case GST_EVENT_LATENCY:
+ res = gst_pad_push_event (demux->sinkpad, event);
+ break;
+ case GST_EVENT_NAVIGATION:
+ case GST_EVENT_QOS:
+ default:
+ res = FALSE;
+ gst_event_unref (event);
+ break;
+ }
+
+ return res;
+ }
+
+ static gboolean
+ gst_ffmpegdemux_send_event (GstElement * element, GstEvent * event)
+ {
+ GstFFMpegDemux *demux = (GstFFMpegDemux *) (element);
+ gboolean res;
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_SEEK:
+ GST_OBJECT_LOCK (demux);
+ if (!demux->opened) {
+ GstEvent **event_p;
+
+ GST_DEBUG_OBJECT (demux, "caching seek event");
+ event_p = &demux->seek_event;
+ gst_event_replace (event_p, event);
+ GST_OBJECT_UNLOCK (demux);
+
+ res = TRUE;
+ } else {
+ GST_OBJECT_UNLOCK (demux);
+ res = gst_ffmpegdemux_perform_seek (demux, event);
+ gst_event_unref (event);
+ }
+ break;
+ default:
+ res = FALSE;
+ break;
+ }
+
+ return res;
+ }
+
/* Source-pad query handler.
 *
 * POSITION, DURATION, SEEKING and SEGMENT are answered from the
 * per-stream state (the pad's element-private points at its
 * GstFFStream) and the demuxer segment; everything else is delegated to
 * the default handler.  Returns FALSE for queries we cannot answer. */
static gboolean
gst_ffmpegdemux_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  GstFFMpegDemux *demux;
  GstFFStream *stream;
  AVStream *avstream;
  gboolean res = FALSE;

  if (!(stream = gst_pad_get_element_private (pad)))
    return FALSE;

  avstream = stream->avstream;

  demux = (GstFFMpegDemux *) parent;

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      GstFormat format;
      gint64 timeposition;

      gst_query_parse_position (query, &format, NULL);

      /* position is the timestamp of the last buffer we pushed */
      timeposition = stream->last_ts;
      if (!(GST_CLOCK_TIME_IS_VALID (timeposition)))
        break;

      switch (format) {
        case GST_FORMAT_TIME:
          gst_query_set_position (query, GST_FORMAT_TIME, timeposition);
          res = TRUE;
          break;
        case GST_FORMAT_DEFAULT:
          /* frames = time * avg_fps */
          gst_query_set_position (query, GST_FORMAT_DEFAULT,
              gst_util_uint64_scale (timeposition, avstream->avg_frame_rate.num,
                  GST_SECOND * avstream->avg_frame_rate.den));
          res = TRUE;
          break;
        case GST_FORMAT_BYTES:
          /* bytes only make sense with a single stream; let upstream
           * answer in that case */
          if (demux->videopads + demux->audiopads == 1 &&
              GST_PAD_PEER (demux->sinkpad) != NULL)
            res = gst_pad_query_default (pad, parent, query);
          break;
        default:
          break;
      }
    }
      break;
    case GST_QUERY_DURATION:
    {
      GstFormat format;
      gint64 timeduration;

      gst_query_parse_duration (query, &format, NULL);

      timeduration =
          gst_ffmpeg_time_ff_to_gst (avstream->duration, avstream->time_base);
      if (!(GST_CLOCK_TIME_IS_VALID (timeduration))) {
        /* use duration of complete file if the stream duration is not known */
        timeduration = demux->duration;
        if (!(GST_CLOCK_TIME_IS_VALID (timeduration)))
          break;
      }

      switch (format) {
        case GST_FORMAT_TIME:
          gst_query_set_duration (query, GST_FORMAT_TIME, timeduration);
          res = TRUE;
          break;
        case GST_FORMAT_DEFAULT:
          /* frames = duration * avg_fps */
          gst_query_set_duration (query, GST_FORMAT_DEFAULT,
              gst_util_uint64_scale (timeduration, avstream->avg_frame_rate.num,
                  GST_SECOND * avstream->avg_frame_rate.den));
          res = TRUE;
          break;
        case GST_FORMAT_BYTES:
          /* same single-stream restriction as for POSITION above */
          if (demux->videopads + demux->audiopads == 1 &&
              GST_PAD_PEER (demux->sinkpad) != NULL)
            res = gst_pad_query_default (pad, parent, query);
          break;
        default:
          break;
      }
    }
      break;
    case GST_QUERY_SEEKING:{
      GstFormat format;
      gboolean seekable;
      gint64 dur = -1;

      gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
      seekable = demux->seekable;
      if (!gst_pad_query_duration (pad, format, &dur)) {
        /* unlikely that we don't know duration but can seek */
        seekable = FALSE;
        dur = -1;
      }
      gst_query_set_seeking (query, format, seekable, 0, dur);
      res = TRUE;
      break;
    }
    case GST_QUERY_SEGMENT:{
      GstFormat format;
      gint64 start, stop;

      format = demux->segment.format;

      /* report the playback range in stream time; an open-ended stop
       * falls back to the total duration */
      start =
          gst_segment_to_stream_time (&demux->segment, format,
          demux->segment.start);
      if ((stop = demux->segment.stop) == -1)
        stop = demux->segment.duration;
      else
        stop = gst_segment_to_stream_time (&demux->segment, format, stop);

      gst_query_set_segment (query, demux->segment.rate, format, start, stop);
      res = TRUE;
      break;
    }
    default:
      /* FIXME : ADD GST_QUERY_CONVERT */
      res = gst_pad_query_default (pad, parent, query);
      break;
  }

  return res;
}
+
#if 0
/* FIXME, reenable me: disabled TIME <-> DEFAULT (frames) conversion
 * helper based on the stream's average framerate.  It dereferences
 * avstream->codec, which presumably no longer exists in current ffmpeg
 * (replaced by codecpar) -- porting needed before re-enabling.
 * NOTE(review): the DEFAULT->TIME branch scales by num/den, which is
 * the same factor as the TIME->DEFAULT branch rather than its inverse;
 * this looks wrong -- check before re-enabling. */
static gboolean
gst_ffmpegdemux_src_convert (GstPad * pad,
    GstFormat src_fmt,
    gint64 src_value, GstFormat * dest_fmt, gint64 * dest_value)
{
  GstFFStream *stream;
  gboolean res = TRUE;
  AVStream *avstream;

  if (!(stream = gst_pad_get_element_private (pad)))
    return FALSE;

  /* conversions are only defined for video streams */
  avstream = stream->avstream;
  if (avstream->codec->codec_type != AVMEDIA_TYPE_VIDEO)
    return FALSE;

  switch (src_fmt) {
    case GST_FORMAT_TIME:
      switch (*dest_fmt) {
        case GST_FORMAT_DEFAULT:
          /* frames = time * avg_fps */
          *dest_value = gst_util_uint64_scale (src_value,
              avstream->avg_frame_rate.num,
              GST_SECOND * avstream->avg_frame_rate.den);
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    case GST_FORMAT_DEFAULT:
      switch (*dest_fmt) {
        case GST_FORMAT_TIME:
          *dest_value = gst_util_uint64_scale (src_value,
              GST_SECOND * avstream->avg_frame_rate.num,
              avstream->avg_frame_rate.den);
          break;
        default:
          res = FALSE;
          break;
      }
      break;
    default:
      res = FALSE;
      break;
  }

  return res;
}
#endif
+
+ static gchar *
+ gst_ffmpegdemux_create_padname (const gchar * templ, gint n)
+ {
+ GString *string;
+
+ /* FIXME, we just want to printf the number according to the template but
+ * then the format string is not a literal and we can't check arguments and
+ * this generates a compiler error */
+ string = g_string_new (templ);
+ g_string_truncate (string, string->len - 2);
+ g_string_append_printf (string, "%u", n);
+
+ return g_string_free (string, FALSE);
+ }
+
/* Look up (or create) the GstFFStream wrapper and source pad for
 * @avstream.
 *
 * If the stream index is already known the existing entry is returned.
 * Streams with an unsupported media type or unmappable caps are kept
 * with stream->unknown = TRUE (and no pad) so the loop can skip their
 * packets.  For known streams this creates the pad, pushes
 * stream-start, sets caps, adds the pad to the element/flow combiner
 * and collects per-stream tags.  Never returns NULL. */
static GstFFStream *
gst_ffmpegdemux_get_stream (GstFFMpegDemux * demux, AVStream * avstream)
{
  GstFFMpegDemuxClass *oclass;
  GstPadTemplate *templ = NULL;
  GstPad *pad;
  GstCaps *caps;
  gint num;
  gchar *padname;
  const gchar *codec;
  AVCodecContext *ctx = NULL;
  GstFFStream *stream;
  GstEvent *event;
  gchar *stream_id;

  oclass = (GstFFMpegDemuxClass *) G_OBJECT_GET_CLASS (demux);

  if (demux->streams[avstream->index] != NULL)
    goto exists;

  /* scratch codec context built from the stream parameters; always
   * freed in the done: path */
  ctx = avcodec_alloc_context3 (NULL);
  avcodec_parameters_to_context (ctx, avstream->codecpar);

  /* create new stream */
  stream = g_new0 (GstFFStream, 1);
  demux->streams[avstream->index] = stream;

  /* mark stream as unknown */
  stream->unknown = TRUE;
  stream->discont = TRUE;
  stream->avstream = avstream;
  stream->last_ts = GST_CLOCK_TIME_NONE;
  stream->tags = NULL;

  switch (ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
      templ = oclass->videosrctempl;
      num = demux->videopads++;
      /* These are not part of the codec parameters we built the
       * context from */
      ctx->framerate.num = avstream->r_frame_rate.num;
      ctx->framerate.den = avstream->r_frame_rate.den;
      break;
    case AVMEDIA_TYPE_AUDIO:
      templ = oclass->audiosrctempl;
      num = demux->audiopads++;
      break;
    default:
      goto unknown_type;
  }

  /* get caps that belongs to this stream */
  caps = gst_ffmpeg_codecid_to_caps (ctx->codec_id, ctx, TRUE);
  if (caps == NULL)
    goto unknown_caps;

  /* stream is known now */
  stream->unknown = FALSE;

  /* create new pad for this stream */
  padname =
      gst_ffmpegdemux_create_padname (GST_PAD_TEMPLATE_NAME_TEMPLATE (templ),
      num);
  pad = gst_pad_new_from_template (templ, padname);
  g_free (padname);

  gst_pad_use_fixed_caps (pad);
  gst_pad_set_active (pad, TRUE);

  gst_pad_set_query_function (pad, gst_ffmpegdemux_src_query);
  gst_pad_set_event_function (pad, gst_ffmpegdemux_src_event);

  /* store pad internally; queries look the stream up via the pad */
  stream->pad = pad;
  gst_pad_set_element_private (pad, stream);

  /* transform some useful info to GstClockTime and remember */
  {
    GstClockTime tmp;

    /* FIXME, actually use the start_time in some way */
    tmp = gst_ffmpeg_time_ff_to_gst (avstream->start_time, avstream->time_base);
    GST_DEBUG_OBJECT (demux, "stream %d: start time: %" GST_TIME_FORMAT,
        avstream->index, GST_TIME_ARGS (tmp));

    tmp = gst_ffmpeg_time_ff_to_gst (avstream->duration, avstream->time_base);
    GST_DEBUG_OBJECT (demux, "stream %d: duration: %" GST_TIME_FORMAT,
        avstream->index, GST_TIME_ARGS (tmp));
  }

  /* already stored right after allocation above; redundant but harmless */
  demux->streams[avstream->index] = stream;


  stream_id =
      gst_pad_create_stream_id_printf (pad, GST_ELEMENT_CAST (demux), "%03u",
      avstream->index);

  /* inherit the group id from upstream's sticky stream-start if there is
   * one, otherwise allocate a fresh one once */
  event = gst_pad_get_sticky_event (demux->sinkpad, GST_EVENT_STREAM_START, 0);
  if (event) {
    if (gst_event_parse_group_id (event, &demux->group_id))
      demux->have_group_id = TRUE;
    else
      demux->have_group_id = FALSE;
    gst_event_unref (event);
  } else if (!demux->have_group_id) {
    demux->have_group_id = TRUE;
    demux->group_id = gst_util_group_id_next ();
  }
  event = gst_event_new_stream_start (stream_id);
  if (demux->have_group_id)
    gst_event_set_group_id (event, demux->group_id);

  gst_pad_push_event (pad, event);
  g_free (stream_id);

  GST_INFO_OBJECT (pad, "adding pad with caps %" GST_PTR_FORMAT, caps);
  gst_pad_set_caps (pad, caps);
  gst_caps_unref (caps);

  /* activate and add */
  gst_element_add_pad (GST_ELEMENT (demux), pad);
  gst_flow_combiner_add_pad (demux->flowcombiner, pad);

  /* metadata */
  if ((codec = gst_ffmpeg_get_codecid_longname (ctx->codec_id))) {
    stream->tags = gst_ffmpeg_metadata_to_tag_list (avstream->metadata);

    if (stream->tags == NULL)
      stream->tags = gst_tag_list_new_empty ();

    gst_tag_list_add (stream->tags, GST_TAG_MERGE_REPLACE,
        (ctx->codec_type == AVMEDIA_TYPE_VIDEO) ?
        GST_TAG_VIDEO_CODEC : GST_TAG_AUDIO_CODEC, codec, NULL);
  }

done:
  if (ctx)
    avcodec_free_context (&ctx);
  return stream;

  /* ERRORS */
exists:
  {
    GST_DEBUG_OBJECT (demux, "Pad existed (stream %d)", avstream->index);
    stream = demux->streams[avstream->index];
    goto done;
  }
unknown_type:
  {
    GST_WARNING_OBJECT (demux, "Unknown pad type %d", ctx->codec_type);
    goto done;
  }
unknown_caps:
  {
    GST_WARNING_OBJECT (demux, "Unknown caps for codec %d", ctx->codec_id);
    goto done;
  }
}
+
+ static gchar *
+ safe_utf8_copy (gchar * input)
+ {
+ gchar *output;
+
+ if (!(g_utf8_validate (input, -1, NULL))) {
+ output = g_convert (input, strlen (input),
+ "UTF-8", "ISO-8859-1", NULL, NULL, NULL);
+ } else {
+ output = g_strdup (input);
+ }
+
+ return output;
+ }
+
/* This is a list of standard tag keys taken from the avformat.h
 * header, without handling any variants.
 * Lookup is a linear, exact (case-sensitive) string match -- see
 * match_tag_name() below. */
static const struct
{
  const gchar *ffmpeg_tag_name;
  const gchar *gst_tag_name;
} tagmapping[] = {
  {
  "album", GST_TAG_ALBUM}, {
  "album_artist", GST_TAG_ALBUM_ARTIST}, {
  "artist", GST_TAG_ARTIST}, {
  "comment", GST_TAG_COMMENT}, {
  "composer", GST_TAG_COMPOSER}, {
  "copyright", GST_TAG_COPYRIGHT}, {
    /* Need to convert ISO 8601 to GstDateTime: */
  "creation_time", GST_TAG_DATE_TIME}, {
    /* Need to convert ISO 8601 to GDateTime: */
  "date", GST_TAG_DATE_TIME}, {
  "disc", GST_TAG_ALBUM_VOLUME_NUMBER}, {
  "encoder", GST_TAG_ENCODER}, {
  "encoded_by", GST_TAG_ENCODED_BY}, {
  "genre", GST_TAG_GENRE}, {
  "language", GST_TAG_LANGUAGE_CODE}, {
  "performer", GST_TAG_PERFORMER}, {
  "publisher", GST_TAG_PUBLISHER}, {
  "title", GST_TAG_TITLE}, {
  "track", GST_TAG_TRACK_NUMBER}
};
+
+ static const gchar *
+ match_tag_name (gchar * ffmpeg_tag_name)
+ {
+ gint i;
+ for (i = 0; i < G_N_ELEMENTS (tagmapping); i++) {
+ if (!g_strcmp0 (tagmapping[i].ffmpeg_tag_name, ffmpeg_tag_name))
+ return tagmapping[i].gst_tag_name;
+ }
+ return NULL;
+ }
+
/* Translate an ffmpeg metadata dictionary into a GstTagList.
 *
 * Unknown keys are skipped.  "track" and "disc" values of the form
 * "x/n" are split into number + count tags; other values are converted
 * according to the GType of the target tag.  Returns NULL rather than
 * an empty list; the caller owns the returned reference. */
static GstTagList *
gst_ffmpeg_metadata_to_tag_list (AVDictionary * metadata)
{
  AVDictionaryEntry *tag = NULL;
  GstTagList *list;
  list = gst_tag_list_new_empty ();

  /* iterate all entries; the empty key + IGNORE_SUFFIX matches everything */
  while ((tag = av_dict_get (metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
    const gchar *gsttag = match_tag_name (tag->key);
    GType t;
    GST_LOG ("mapping tag %s=%s\n", tag->key, tag->value);
    if (gsttag == NULL) {
      GST_LOG ("Ignoring unknown metadata tag %s", tag->key);
      continue;
    }
    /* Special case, track and disc numbers may be x/n in libav, split
     * them */
    if (g_str_equal (gsttag, GST_TAG_TRACK_NUMBER)) {
      guint track, trackcount;
      if (sscanf (tag->value, "%u/%u", &track, &trackcount) == 2) {
        gst_tag_list_add (list, GST_TAG_MERGE_REPLACE,
            gsttag, track, GST_TAG_TRACK_COUNT, trackcount, NULL);
        continue;
      }
      /* Fall through and handle as a single uint below */
    } else if (g_str_equal (gsttag, GST_TAG_ALBUM_VOLUME_NUMBER)) {
      guint disc, disc_count;
      if (sscanf (tag->value, "%u/%u", &disc, &disc_count) == 2) {
        gst_tag_list_add (list, GST_TAG_MERGE_REPLACE,
            gsttag, disc, GST_TAG_ALBUM_VOLUME_COUNT, disc_count, NULL);
        continue;
      }
      /* Fall through and handle as a single uint below */
    }

    /* generic conversion keyed on the GStreamer tag's value type */
    t = gst_tag_get_type (gsttag);
    if (t == G_TYPE_STRING) {
      gchar *s = safe_utf8_copy (tag->value);
      gst_tag_list_add (list, GST_TAG_MERGE_REPLACE, gsttag, s, NULL);
      g_free (s);
    } else if (t == G_TYPE_UINT || t == G_TYPE_INT) {
      gchar *end;
      gint v = strtol (tag->value, &end, 10);
      if (end == tag->value)
        continue;               /* Failed to parse */
      gst_tag_list_add (list, GST_TAG_MERGE_REPLACE, gsttag, v, NULL);
    } else if (t == G_TYPE_DATE) {
      guint year, month, day;
      GDate *date = NULL;
      if (sscanf (tag->value, "%04u-%02u-%02u", &year, &month, &day) == 3) {
        date = g_date_new_dmy (day, month, year);
      } else {
        /* Try interpreting just as a year */
        gchar *end;

        year = strtol (tag->value, &end, 10);
        if (end != tag->value)
          date = g_date_new_dmy (1, 1, year);
      }
      if (date) {
        gst_tag_list_add (list, GST_TAG_MERGE_REPLACE, gsttag, date, NULL);
        g_date_free (date);
      }
    } else if (t == GST_TYPE_DATE_TIME) {
      gchar *s = safe_utf8_copy (tag->value);
      GstDateTime *d = gst_date_time_new_from_iso8601_string (s);

      g_free (s);
      if (d) {
        gst_tag_list_add (list, GST_TAG_MERGE_REPLACE, gsttag, d, NULL);
        gst_date_time_unref (d);
      }
    } else {
      GST_FIXME ("Unhandled tag %s", gsttag);
    }
  }

  /* no point returning an empty list */
  if (gst_tag_list_is_empty (list)) {
    gst_tag_list_unref (list);
    return NULL;
  }

  return list;
}
+
/* Open the ffmpeg demuxer on our input, create source pads for all
 * discovered streams and push stream-start/segment/tag events, then
 * replay any cached seek or sink events.
 *
 * Called from the streaming thread on first data.  On any libav failure
 * an element error is posted and FALSE is returned. */
static gboolean
gst_ffmpegdemux_open (GstFFMpegDemux * demux)
{
  AVIOContext *iocontext = NULL;
  GstFFMpegDemuxClass *oclass =
      (GstFFMpegDemuxClass *) G_OBJECT_GET_CLASS (demux);
  gint res, n_streams, i;
  GstTagList *tags;
  GstEvent *event;
  GList *cached_events;
  GstQuery *query;
  gchar *uri = NULL;

  /* to be sure... */
  gst_ffmpegdemux_close (demux);

  /* open via our input protocol hack: wrap the sink pad (pull mode) or
   * the ffpipe adapter (push mode) in a custom AVIOContext */
  if (demux->seekable)
    res = gst_ffmpegdata_open (demux->sinkpad, AVIO_FLAG_READ, &iocontext);
  else
    res = gst_ffmpeg_pipe_open (&demux->ffpipe, AVIO_FLAG_READ, &iocontext);

  if (res < 0)
    goto beach;

  /* ask upstream for the URI, preferring a permanent redirect target,
   * so ffmpeg gets it as a hint */
  query = gst_query_new_uri ();
  if (gst_pad_peer_query (demux->sinkpad, query)) {
    gchar *query_uri, *redirect_uri;
    gboolean permanent;

    gst_query_parse_uri (query, &query_uri);
    gst_query_parse_uri_redirection (query, &redirect_uri);
    gst_query_parse_uri_redirection_permanent (query, &permanent);

    if (permanent && redirect_uri) {
      uri = redirect_uri;
      g_free (query_uri);
    } else {
      uri = query_uri;
      g_free (redirect_uri);
    }
  }
  gst_query_unref (query);

  GST_DEBUG_OBJECT (demux, "Opening context with URI %s", GST_STR_NULL (uri));

  demux->context = avformat_alloc_context ();
  demux->context->pb = iocontext;
  res = avformat_open_input (&demux->context, uri, oclass->in_plugin, NULL);

  g_free (uri);

  GST_DEBUG_OBJECT (demux, "av_open_input returned %d", res);
  if (res < 0)
    goto beach;

  res = gst_ffmpeg_av_find_stream_info (demux->context);
  GST_DEBUG_OBJECT (demux, "av_find_stream_info returned %d", res);
  if (res < 0)
    goto beach;

  n_streams = demux->context->nb_streams;
  GST_DEBUG_OBJECT (demux, "we have %d streams", n_streams);

  /* open_input_file() automatically reads the header. We can now map each
   * created AVStream to a GstPad to make GStreamer handle it. */
  for (i = 0; i < n_streams; i++) {
    gst_ffmpegdemux_get_stream (demux, demux->context->streams[i]);
  }

  gst_element_no_more_pads (GST_ELEMENT (demux));

  /* transform some useful info to GstClockTime and remember */
  /* NOTE(review): context->start_time is scaled unconditionally; if it
   * were AV_NOPTS_VALUE the result would be garbage -- confirm it is
   * always valid at this point */
  demux->start_time = gst_util_uint64_scale_int (demux->context->start_time,
      GST_SECOND, AV_TIME_BASE);
  GST_DEBUG_OBJECT (demux, "start time: %" GST_TIME_FORMAT,
      GST_TIME_ARGS (demux->start_time));
  if (demux->context->duration > 0)
    demux->duration = gst_util_uint64_scale_int (demux->context->duration,
        GST_SECOND, AV_TIME_BASE);
  else
    demux->duration = GST_CLOCK_TIME_NONE;

  GST_DEBUG_OBJECT (demux, "duration: %" GST_TIME_FORMAT,
      GST_TIME_ARGS (demux->duration));

  /* store duration in the segment as well */
  demux->segment.duration = demux->duration;

  /* take ownership of any cached seek/sink events under the lock */
  GST_OBJECT_LOCK (demux);
  demux->opened = TRUE;
  event = demux->seek_event;
  demux->seek_event = NULL;
  cached_events = demux->cached_events;
  demux->cached_events = NULL;
  GST_OBJECT_UNLOCK (demux);

  if (event) {
    /* replay the seek that arrived before we were opened; it pushes the
     * segment itself */
    gst_ffmpegdemux_perform_seek (demux, event);
    gst_event_unref (event);
  } else {
    GST_DEBUG_OBJECT (demux, "Sending segment %" GST_SEGMENT_FORMAT,
        &demux->segment);
    gst_ffmpegdemux_push_event (demux, gst_event_new_segment (&demux->segment));
  }

  while (cached_events) {
    event = cached_events->data;
    GST_INFO_OBJECT (demux, "pushing cached event: %" GST_PTR_FORMAT, event);
    gst_ffmpegdemux_push_event (demux, event);
    cached_events = g_list_delete_link (cached_events, cached_events);
  }

  /* grab the global tags */
  tags = gst_ffmpeg_metadata_to_tag_list (demux->context->metadata);
  if (tags) {
    GST_INFO_OBJECT (demux, "global tags: %" GST_PTR_FORMAT, tags);
  }

  /* now handle the stream tags */
  for (i = 0; i < n_streams; i++) {
    GstFFStream *stream;

    /* streams already exist, this just looks them up */
    stream = gst_ffmpegdemux_get_stream (demux, demux->context->streams[i]);
    if (stream->pad != NULL) {

      /* Global tags */
      if (tags)
        gst_pad_push_event (stream->pad,
            gst_event_new_tag (gst_tag_list_ref (tags)));

      /* Per-stream tags */
      if (stream->tags != NULL) {
        GST_INFO_OBJECT (stream->pad, "stream tags: %" GST_PTR_FORMAT,
            stream->tags);
        gst_pad_push_event (stream->pad,
            gst_event_new_tag (gst_tag_list_ref (stream->tags)));
      }
    }
  }
  if (tags)
    gst_tag_list_unref (tags);
  return TRUE;

  /* ERRORS */
beach:
  {
    /* res holds the negative libav error code from the failing call */
    GST_ELEMENT_ERROR (demux, LIBRARY, FAILED, (NULL),
        ("%s", gst_ffmpegdemux_averror (res)));
    return FALSE;
  }
}
+
/* Probe window handed to the ffmpeg format probes, and the minimum
 * amount of data we are willing to give them. */
#define GST_FFMPEG_TYPE_FIND_SIZE 4096
#define GST_FFMPEG_TYPE_FIND_MIN_SIZE 256

/* GstTypeFind function registered per AVInputFormat (passed as @priv):
 * run the format's read_probe() on up to GST_FFMPEG_TYPE_FIND_SIZE
 * bytes and suggest the mapped caps with a rescaled probability. */
static void
gst_ffmpegdemux_type_find (GstTypeFind * tf, gpointer priv)
{
  const guint8 *data;
  AVInputFormat *in_plugin = (AVInputFormat *) priv;
  gint res = 0;
  guint64 length;
  GstCaps *sinkcaps;

  /* We want GST_FFMPEG_TYPE_FIND_SIZE bytes, but if the file is shorter than
   * that we'll give it a try... (a length of 0 means "unknown" and is
   * treated the same) */
  length = gst_type_find_get_length (tf);
  if (length == 0 || length > GST_FFMPEG_TYPE_FIND_SIZE)
    length = GST_FFMPEG_TYPE_FIND_SIZE;

  /* The ffmpeg typefinders assume there's a certain minimum amount of data
   * and will happily do invalid memory access if there isn't, so let's just
   * skip the ffmpeg typefinders if the data available is too short
   * (in which case it's unlikely to be a media file anyway) */
  if (length < GST_FFMPEG_TYPE_FIND_MIN_SIZE) {
    GST_LOG ("not typefinding %" G_GUINT64_FORMAT " bytes, too short", length);
    return;
  }

  GST_LOG ("typefinding %" G_GUINT64_FORMAT " bytes", length);
  if (in_plugin->read_probe &&
      (data = gst_type_find_peek (tf, 0, length)) != NULL) {
    AVProbeData probe_data;

    probe_data.filename = "";
    probe_data.buf = (guint8 *) data;
    probe_data.buf_size = length;

    res = in_plugin->read_probe (&probe_data);
    if (res > 0) {
      /* rescale ffmpeg's 0..AVPROBE_SCORE_MAX score into GStreamer's
       * typefind probability range, never dropping below 1 */
      res = MAX (1, res * GST_TYPE_FIND_MAXIMUM / AVPROBE_SCORE_MAX);
      /* Restrict the probability for MPEG-TS streams, because there is
       * probably a better version in plugins-base, if the user has a recent
       * plugins-base (in fact we shouldn't even get here for ffmpeg mpegts or
       * mpegtsraw typefinders, since we blacklist them) */
      if (g_str_has_prefix (in_plugin->name, "mpegts"))
        res = MIN (res, GST_TYPE_FIND_POSSIBLE);

      sinkcaps = gst_ffmpeg_formatid_to_caps (in_plugin->name);

      GST_LOG ("libav typefinder '%s' suggests %" GST_PTR_FORMAT ", p=%u%%",
          in_plugin->name, sinkcaps, res);

      gst_type_find_suggest (tf, res, sinkcaps);
      gst_caps_unref (sinkcaps);
    }
  }
}
+
+ /* Task */
+ /* Streaming task body (pull mode, also driven indirectly in push mode):
+ * reads one AVPacket from the libav context per iteration, wraps it in a
+ * GstBuffer with GStreamer timestamps, and pushes it on the matching
+ * source pad. All error paths funnel through the labels at the bottom
+ * (pause / open_failed / read_failed / drop). */
+ static void
+ gst_ffmpegdemux_loop (GstFFMpegDemux * demux)
+ {
+ GstFlowReturn ret;
+ gint res = -1;
+ AVPacket pkt;
+ GstPad *srcpad;
+ GstFFStream *stream;
+ AVStream *avstream;
+ GstBuffer *outbuf = NULL;
+ GstClockTime timestamp, duration;
+ gint outsize;
+ gboolean rawvideo;
+ GstFlowReturn stream_last_flow;
+ gint64 pts;
+
+ /* open file if we didn't so already */
+ if (!demux->opened)
+ if (!gst_ffmpegdemux_open (demux))
+ goto open_failed;
+
+ GST_DEBUG_OBJECT (demux, "about to read a frame");
+
+ /* read a frame */
+ res = av_read_frame (demux->context, &pkt);
+ if (res < 0)
+ goto read_failed;
+
+ /* get the stream */
+ stream =
+ gst_ffmpegdemux_get_stream (demux,
+ demux->context->streams[pkt.stream_index]);
+
+ /* check if we know the stream */
+ if (stream->unknown)
+ goto done;
+
+ /* get more stuff belonging to this stream */
+ avstream = stream->avstream;
+
+ /* do timestamps, we do this first so that we can know when we
+ * stepped over the segment stop position. */
+ pts = pkt.pts;
+ if (G_UNLIKELY (pts < 0)) {
+ /* some streams have pts such this:
+ * 0
+ * -2
+ * -1
+ * 1
+ *
+ * we reset pts to 0 since for us timestamp are unsigned
+ */
+ GST_WARNING_OBJECT (demux,
+ "negative pts detected: %" G_GINT64_FORMAT " resetting to 0", pts);
+ pts = 0;
+ }
+ timestamp = gst_ffmpeg_time_ff_to_gst (pts, avstream->time_base);
+ if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
+ stream->last_ts = timestamp;
+ }
+ /* a zero duration is treated as "unknown" and mapped to NONE */
+ duration = gst_ffmpeg_time_ff_to_gst (pkt.duration, avstream->time_base);
+ if (G_UNLIKELY (!duration)) {
+ GST_WARNING_OBJECT (demux, "invalid buffer duration, setting to NONE");
+ duration = GST_CLOCK_TIME_NONE;
+ }
+
+
+ GST_DEBUG_OBJECT (demux,
+ "pkt pts:%" GST_TIME_FORMAT
+ " / size:%d / stream_index:%d / flags:%d / duration:%" GST_TIME_FORMAT
+ " / pos:%" G_GINT64_FORMAT, GST_TIME_ARGS (timestamp), pkt.size,
+ pkt.stream_index, pkt.flags, GST_TIME_ARGS (duration), (gint64) pkt.pos);
+
+ /* check start_time */
+ #if 0
+ if (demux->start_time != -1 && demux->start_time > timestamp)
+ goto drop;
+ #endif
+
+ if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
+ /* start_time should be the ts of the first frame but it may actually be
+ * higher because of rounding when converting to gst ts. */
+ if (demux->start_time >= timestamp)
+ timestamp = 0;
+ else
+ timestamp -= demux->start_time;
+ }
+
+ /* check if we ran outside of the segment */
+ if (demux->segment.stop != -1 && timestamp > demux->segment.stop)
+ goto drop;
+
+ /* prepare to push packet to peer */
+ srcpad = stream->pad;
+
+ rawvideo = (avstream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
+ avstream->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO);
+
+ /* raw video needs the full picture size; everything else is pushed
+ * through byte-for-byte */
+ if (rawvideo)
+ outsize = gst_ffmpeg_avpicture_get_size (avstream->codecpar->format,
+ avstream->codecpar->width, avstream->codecpar->height);
+ else
+ outsize = pkt.size;
+
+ outbuf = gst_buffer_new_and_alloc (outsize);
+
+ /* copy the data from packet into the target buffer
+ * and do conversions for raw video packets */
+ if (rawvideo) {
+ AVFrame src, dst;
+ const gchar *plugin_name =
+ ((GstFFMpegDemuxClass *) (G_OBJECT_GET_CLASS (demux)))->in_plugin->name;
+ GstMapInfo map;
+
+ /* NOTE(review): this warning fires for every raw-video packet; upstream
+ * gates it behind a per-demuxer check (e.g. gif) — confirm dropping that
+ * special case here was intentional. */
+ GST_WARNING ("Unknown demuxer %s, no idea what to do", plugin_name);
+ gst_ffmpeg_avpicture_fill (&src, pkt.data,
+ avstream->codecpar->format, avstream->codecpar->width,
+ avstream->codecpar->height);
+
+ gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
+ gst_ffmpeg_avpicture_fill (&dst, map.data,
+ avstream->codecpar->format, avstream->codecpar->width,
+ avstream->codecpar->height);
+
+ av_image_copy (dst.data, dst.linesize, (const uint8_t **) src.data,
+ src.linesize, avstream->codecpar->format, avstream->codecpar->width,
+ avstream->codecpar->height);
+ gst_buffer_unmap (outbuf, &map);
+ } else {
+ gst_buffer_fill (outbuf, 0, pkt.data, outsize);
+ }
+
+ GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
+ GST_BUFFER_DURATION (outbuf) = duration;
+
+ /* mark keyframes */
+ if (!(pkt.flags & AV_PKT_FLAG_KEY)) {
+ GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
+ }
+
+ /* Mark discont */
+ if (stream->discont) {
+ GST_DEBUG_OBJECT (demux, "marking DISCONT");
+ GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
+ stream->discont = FALSE;
+ }
+
+ GST_DEBUG_OBJECT (demux,
+ "Sending out buffer time:%" GST_TIME_FORMAT " size:%" G_GSIZE_FORMAT,
+ GST_TIME_ARGS (timestamp), gst_buffer_get_size (outbuf));
+
+ ret = stream_last_flow = gst_pad_push (srcpad, outbuf);
+
+ /* if a pad is in e.g. WRONG_STATE, we want to pause to unlock the STREAM_LOCK */
+ if (((ret = gst_flow_combiner_update_flow (demux->flowcombiner,
+ ret)) != GST_FLOW_OK)) {
+ GST_WARNING_OBJECT (demux, "stream_movi flow: %s / %s",
+ gst_flow_get_name (stream_last_flow), gst_flow_get_name (ret));
+ goto pause;
+ }
+
+ done:
+ /* can destroy the packet now */
+ /* res == 0 is the only case where av_read_frame() actually gave us a
+ * packet that must be released */
+ if (res == 0) {
+ av_packet_unref (&pkt);
+ }
+
+ return;
+
+ /* ERRORS */
+ pause:
+ {
+ GST_LOG_OBJECT (demux, "pausing task, reason %d (%s)", ret,
+ gst_flow_get_name (ret));
+ if (demux->seekable)
+ gst_pad_pause_task (demux->sinkpad);
+ else {
+ GstFFMpegPipe *ffpipe = &demux->ffpipe;
+
+ GST_FFMPEG_PIPE_MUTEX_LOCK (ffpipe);
+ /* pause task and make sure loop stops */
+ gst_task_pause (demux->task);
+ /* taking and releasing the task's recursive lock acts as a barrier:
+ * it guarantees the loop function has actually left before we record
+ * the result */
+ g_rec_mutex_lock (&demux->task_lock);
+ g_rec_mutex_unlock (&demux->task_lock);
+ demux->ffpipe.srcresult = ret;
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+ }
+
+ if (ret == GST_FLOW_EOS) {
+ if (demux->segment.flags & GST_SEEK_FLAG_SEGMENT) {
+ gint64 stop;
+
+ if ((stop = demux->segment.stop) == -1)
+ stop = demux->segment.duration;
+
+ GST_LOG_OBJECT (demux, "posting segment done");
+ gst_element_post_message (GST_ELEMENT (demux),
+ gst_message_new_segment_done (GST_OBJECT (demux),
+ demux->segment.format, stop));
+ gst_ffmpegdemux_push_event (demux,
+ gst_event_new_segment_done (demux->segment.format, stop));
+ } else {
+ GST_LOG_OBJECT (demux, "pushing eos");
+ gst_ffmpegdemux_push_event (demux, gst_event_new_eos ());
+ }
+ } else if (ret == GST_FLOW_NOT_LINKED || ret < GST_FLOW_EOS) {
+ GST_ELEMENT_FLOW_ERROR (demux, ret);
+ gst_ffmpegdemux_push_event (demux, gst_event_new_eos ());
+ }
+ goto done;
+ }
+ open_failed:
+ {
+ ret = GST_FLOW_ERROR;
+ goto pause;
+ }
+ read_failed:
+ {
+ /* something went wrong... */
+ GST_WARNING_OBJECT (demux, "av_read_frame returned %d", res);
+
+ GST_OBJECT_LOCK (demux);
+ /* pause appropriatly based on if we are flushing or not */
+ if (demux->flushing)
+ ret = GST_FLOW_FLUSHING;
+ else if (gst_ffmpegdemux_has_outputted (demux)
+ || gst_ffmpegdemux_is_eos (demux)) {
+ GST_DEBUG_OBJECT (demux, "We are EOS");
+ ret = GST_FLOW_EOS;
+ } else
+ ret = GST_FLOW_ERROR;
+ GST_OBJECT_UNLOCK (demux);
+
+ goto pause;
+ }
+ drop:
+ {
+ GST_DEBUG_OBJECT (demux, "dropping buffer out of segment, stream eos");
+ stream->eos = TRUE;
+ if (gst_ffmpegdemux_is_eos (demux)) {
+ av_packet_unref (&pkt);
+ GST_DEBUG_OBJECT (demux, "we are eos");
+ ret = GST_FLOW_EOS;
+ goto pause;
+ } else {
+ GST_DEBUG_OBJECT (demux, "some streams are not yet eos");
+ goto done;
+ }
+ }
+ }
+
+
+ /* Sink-pad event handler for push mode. Flush events unblock the chain
+ * function via the ffpipe condition; serialized events wait until the
+ * reading task has drained earlier data, or are cached until the demuxer
+ * is opened. Returns the result of the default event handler (TRUE for
+ * events consumed here). */
+ static gboolean
+ gst_ffmpegdemux_sink_event (GstPad * sinkpad, GstObject * parent,
+ GstEvent * event)
+ {
+ GstFFMpegDemux *demux;
+ GstFFMpegPipe *ffpipe;
+ gboolean result = TRUE;
+
+ demux = (GstFFMpegDemux *) parent;
+ ffpipe = &(demux->ffpipe);
+
+ GST_LOG_OBJECT (demux, "event: %" GST_PTR_FORMAT, event);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_FLUSH_START:
+ /* forward event */
+ gst_pad_event_default (sinkpad, parent, event);
+
+ /* now unblock the chain function */
+ GST_FFMPEG_PIPE_MUTEX_LOCK (ffpipe);
+ ffpipe->srcresult = GST_FLOW_FLUSHING;
+ GST_FFMPEG_PIPE_SIGNAL (ffpipe);
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+
+ /* loop might run into WRONG_STATE and end itself,
+ * but may also be waiting in a ffmpeg read
+ * trying to break that would make ffmpeg believe eos,
+ * so no harm to have the loop 'pausing' there ... */
+ goto done;
+ case GST_EVENT_FLUSH_STOP:
+ /* forward event */
+ gst_pad_event_default (sinkpad, parent, event);
+
+ GST_OBJECT_LOCK (demux);
+ g_list_foreach (demux->cached_events, (GFunc) gst_mini_object_unref,
+ NULL);
+ g_list_free (demux->cached_events);
+ /* reset the list head: the default case below may append new cached
+ * events, which would otherwise operate on the freed list */
+ demux->cached_events = NULL;
+ GST_OBJECT_UNLOCK (demux);
+ GST_FFMPEG_PIPE_MUTEX_LOCK (ffpipe);
+ gst_adapter_clear (ffpipe->adapter);
+ ffpipe->srcresult = GST_FLOW_OK;
+ /* loop may have decided to end itself as a result of flush WRONG_STATE */
+ gst_task_start (demux->task);
+ demux->flushing = FALSE;
+ GST_LOG_OBJECT (demux, "loop started");
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+ goto done;
+ case GST_EVENT_EOS:
+ /* inform the src task that it can stop now */
+ GST_FFMPEG_PIPE_MUTEX_LOCK (ffpipe);
+ ffpipe->eos = TRUE;
+ GST_FFMPEG_PIPE_SIGNAL (ffpipe);
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+
+ /* eat this event for now, task will send eos when finished */
+ gst_event_unref (event);
+ goto done;
+ case GST_EVENT_STREAM_START:
+ case GST_EVENT_CAPS:
+ GST_LOG_OBJECT (demux, "dropping %s event", GST_EVENT_TYPE_NAME (event));
+ gst_event_unref (event);
+ goto done;
+ default:
+ /* for a serialized event, wait until an earlier data is gone,
+ * though this is no guarantee as to when task is done with it.
+ *
+ * If the demuxer isn't opened, push straight away, since we'll
+ * be waiting against a cond that will never be signalled. */
+ if (GST_EVENT_IS_SERIALIZED (event)) {
+ if (demux->opened) {
+ GST_FFMPEG_PIPE_MUTEX_LOCK (ffpipe);
+ while (!ffpipe->needed)
+ GST_FFMPEG_PIPE_WAIT (ffpipe);
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+ } else {
+ /* queue events and send them later (esp. tag events) */
+ GST_OBJECT_LOCK (demux);
+ demux->cached_events = g_list_append (demux->cached_events, event);
+ GST_OBJECT_UNLOCK (demux);
+ goto done;
+ }
+ }
+ break;
+ }
+
+ result = gst_pad_event_default (sinkpad, parent, event);
+
+ done:
+
+ return result;
+ }
+
+ /* Sink-pad chain function for push mode: feeds incoming buffers into the
+ * ffpipe adapter and hands data over to the reading task via a simple
+ * producer/consumer protocol (signal when enough data is available, then
+ * wait until the task asks for more). */
+ static GstFlowReturn
+ gst_ffmpegdemux_chain (GstPad * sinkpad, GstObject * parent, GstBuffer * buffer)
+ {
+ GstFFMpegDemux *demux;
+ GstFFMpegPipe *ffpipe;
+
+ demux = (GstFFMpegDemux *) parent;
+ ffpipe = &demux->ffpipe;
+
+ GST_FFMPEG_PIPE_MUTEX_LOCK (ffpipe);
+
+ if (G_UNLIKELY (ffpipe->eos))
+ goto eos;
+
+ if (G_UNLIKELY (ffpipe->srcresult != GST_FLOW_OK))
+ goto ignore;
+
+ GST_DEBUG ("Giving a buffer of %" G_GSIZE_FORMAT " bytes",
+ gst_buffer_get_size (buffer));
+ /* the adapter takes ownership of the buffer */
+ gst_adapter_push (ffpipe->adapter, buffer);
+ buffer = NULL;
+ /* as long as the task's request (ffpipe->needed) is satisfied, wake the
+ * task and wait for it to consume before accepting more upstream data */
+ while (gst_adapter_available (ffpipe->adapter) >= ffpipe->needed) {
+ GST_DEBUG ("Adapter has more that requested (ffpipe->needed:%d)",
+ ffpipe->needed);
+ GST_FFMPEG_PIPE_SIGNAL (ffpipe);
+ GST_FFMPEG_PIPE_WAIT (ffpipe);
+ /* may have become flushing */
+ if (G_UNLIKELY (ffpipe->srcresult != GST_FLOW_OK))
+ goto ignore;
+ }
+
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+
+ return GST_FLOW_OK;
+
+ /* special cases */
+ eos:
+ {
+ GST_DEBUG_OBJECT (demux, "ignoring buffer at end-of-stream");
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+
+ gst_buffer_unref (buffer);
+ return GST_FLOW_EOS;
+ }
+ ignore:
+ {
+ GST_DEBUG_OBJECT (demux, "ignoring buffer because src task encountered %s",
+ gst_flow_get_name (ffpipe->srcresult));
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+
+ /* buffer is NULL here when it was already handed to the adapter */
+ if (buffer)
+ gst_buffer_unref (buffer);
+ return GST_FLOW_FLUSHING;
+ }
+ }
+
+ /* Decide the sink-pad scheduling mode: prefer pull when the peer reports
+ * seekable pull scheduling (and is not sequential-only), otherwise fall
+ * back to push mode. */
+ static gboolean
+ gst_ffmpegdemux_sink_activate (GstPad * sinkpad, GstObject * parent)
+ {
+ GstQuery *query;
+ gboolean pull_mode;
+ GstSchedulingFlags flags;
+
+ query = gst_query_new_scheduling ();
+
+ /* peer can't answer the scheduling query -> assume push */
+ if (!gst_pad_peer_query (sinkpad, query)) {
+ gst_query_unref (query);
+ goto activate_push;
+ }
+
+ pull_mode = gst_query_has_scheduling_mode_with_flags (query,
+ GST_PAD_MODE_PULL, GST_SCHEDULING_FLAG_SEEKABLE);
+
+ /* a sequential-only source (e.g. network) defeats pull-mode seeking */
+ gst_query_parse_scheduling (query, &flags, NULL, NULL, NULL);
+ if (flags & GST_SCHEDULING_FLAG_SEQUENTIAL)
+ pull_mode = FALSE;
+
+ gst_query_unref (query);
+
+ if (!pull_mode)
+ goto activate_push;
+
+ GST_DEBUG_OBJECT (sinkpad, "activating pull");
+ return gst_pad_activate_mode (sinkpad, GST_PAD_MODE_PULL, TRUE);
+
+ activate_push:
+ {
+ GST_DEBUG_OBJECT (sinkpad, "activating push");
+ return gst_pad_activate_mode (sinkpad, GST_PAD_MODE_PUSH, TRUE);
+ }
+ }
+
+ /* push mode:
+ * - not seekable
+ * - use gstpipe protocol, like ffmpeg's pipe protocol
+ * - (independently managed) task driving ffmpeg
+ */
+ static gboolean
+ gst_ffmpegdemux_sink_activate_push (GstPad * sinkpad, GstObject * parent,
+ gboolean active)
+ {
+ GstFFMpegDemux *demux;
+ gboolean res = FALSE;
+
+ demux = (GstFFMpegDemux *) (parent);
+
+ if (active) {
+ /* some demuxers need random access and cannot work from a pipe */
+ if (demux->can_push == FALSE) {
+ GST_WARNING_OBJECT (demux, "Demuxer can't reliably operate in push-mode");
+ goto beach;
+ }
+ /* reset the pipe state before (re)starting the reading task */
+ demux->ffpipe.eos = FALSE;
+ demux->ffpipe.srcresult = GST_FLOW_OK;
+ demux->ffpipe.needed = 0;
+ demux->seekable = FALSE;
+ res = gst_task_start (demux->task);
+ } else {
+ GstFFMpegPipe *ffpipe = &demux->ffpipe;
+
+ /* release chain and loop */
+ GST_FFMPEG_PIPE_MUTEX_LOCK (ffpipe);
+ demux->ffpipe.srcresult = GST_FLOW_FLUSHING;
+ /* end streaming by making ffmpeg believe eos */
+ demux->ffpipe.eos = TRUE;
+ GST_FFMPEG_PIPE_SIGNAL (ffpipe);
+ GST_FFMPEG_PIPE_MUTEX_UNLOCK (ffpipe);
+
+ /* make sure streaming ends */
+ gst_task_stop (demux->task);
+ /* lock/unlock of the recursive task lock is a barrier ensuring the
+ * loop function has left before joining the task */
+ g_rec_mutex_lock (&demux->task_lock);
+ g_rec_mutex_unlock (&demux->task_lock);
+ res = gst_task_join (demux->task);
+ demux->seekable = FALSE;
+ }
+
+ beach:
+ return res;
+ }
+
+ /* pull mode:
+ * - seekable
+ * - use gstreamer protocol, like ffmpeg's file protocol
+ * - task driving ffmpeg based on sink pad
+ */
+ static gboolean
+ gst_ffmpegdemux_sink_activate_pull (GstPad * sinkpad, GstObject * parent,
+ gboolean active)
+ {
+ GstFFMpegDemux *demux;
+ gboolean res;
+
+ demux = (GstFFMpegDemux *) parent;
+
+ if (active) {
+ /* the sink pad's own task drives gst_ffmpegdemux_loop() */
+ demux->seekable = TRUE;
+ res = gst_pad_start_task (sinkpad, (GstTaskFunction) gst_ffmpegdemux_loop,
+ demux, NULL);
+ } else {
+ res = gst_pad_stop_task (sinkpad);
+ demux->seekable = FALSE;
+ }
+
+ return res;
+ }
+
+ /* Dispatch pad (de)activation to the push- or pull-mode handler. */
+ static gboolean
+ gst_ffmpegdemux_sink_activate_mode (GstPad * sinkpad, GstObject * parent,
+ GstPadMode mode, gboolean active)
+ {
+ gboolean res;
+
+ switch (mode) {
+ case GST_PAD_MODE_PUSH:
+ res = gst_ffmpegdemux_sink_activate_push (sinkpad, parent, active);
+ break;
+ case GST_PAD_MODE_PULL:
+ res = gst_ffmpegdemux_sink_activate_pull (sinkpad, parent, active);
+ break;
+ default:
+ res = FALSE;
+ break;
+ }
+ return res;
+ }
+
+ /* Element state-change handler: on PAUSED->READY, close the libav context
+ * and drop all buffered data and cached serialized events. */
+ static GstStateChangeReturn
+ gst_ffmpegdemux_change_state (GstElement * element, GstStateChange transition)
+ {
+ GstFFMpegDemux *demux = (GstFFMpegDemux *) (element);
+ GstStateChangeReturn ret;
+
+ switch (transition) {
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
+ #if 0
+ /* test seek in READY here */
+ gst_element_send_event (element, gst_event_new_seek (1.0,
+ GST_FORMAT_TIME, GST_SEEK_FLAG_NONE,
+ GST_SEEK_TYPE_SET, 10 * GST_SECOND,
+ GST_SEEK_TYPE_SET, 13 * GST_SECOND));
+ #endif
+ break;
+ default:
+ break;
+ }
+
+ ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_PAUSED_TO_READY:
+ gst_ffmpegdemux_close (demux);
+ gst_adapter_clear (demux->ffpipe.adapter);
+ /* free cached serialized events and reset the list head */
+ g_list_foreach (demux->cached_events, (GFunc) gst_mini_object_unref,
+ NULL);
+ g_list_free (demux->cached_events);
+ demux->cached_events = NULL;
+ demux->have_group_id = FALSE;
+ demux->group_id = G_MAXUINT;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+ }
+
+ /* Register a GStreamer demuxer element (avdemux_*) and, where appropriate,
+ * a typefinder (avtype_*) for every libav demuxer, skipping raw/pcm
+ * formats, network demuxers, and formats for which GStreamer already has
+ * better native implementations. Returns FALSE on registration failure. */
+ gboolean
+ gst_ffmpegdemux_register (GstPlugin * plugin)
+ {
+ GType type;
+ const AVInputFormat *in_plugin;
+ gchar *extensions;
+ GTypeInfo typeinfo = {
+ sizeof (GstFFMpegDemuxClass),
+ (GBaseInitFunc) gst_ffmpegdemux_base_init,
+ NULL,
+ (GClassInitFunc) gst_ffmpegdemux_class_init,
+ NULL,
+ NULL,
+ sizeof (GstFFMpegDemux),
+ 0,
+ (GInstanceInitFunc) gst_ffmpegdemux_init,
+ };
+
+ /* opaque iteration state for av_demuxer_iterate() */
+ void *i = 0;
+
+ GST_LOG ("Registering demuxers");
+
+ while ((in_plugin = av_demuxer_iterate (&i))) {
+ gchar *type_name, *typefind_name;
+ gint rank;
+ gboolean register_typefind_func = TRUE;
+
+ GST_LOG ("Attempting to handle libav demuxer plugin %s [%s]",
+ in_plugin->name, in_plugin->long_name);
+
+ /* no emulators */
+ if (in_plugin->long_name != NULL) {
+ if (!strncmp (in_plugin->long_name, "raw ", 4) ||
+ !strncmp (in_plugin->long_name, "pcm ", 4)
+ )
+ continue;
+ }
+
+ /* skip raw sample formats and other demuxers that make no sense as
+ * GStreamer elements */
+ if (!strcmp (in_plugin->name, "audio_device") ||
+ !strncmp (in_plugin->name, "image", 5) ||
+ !strcmp (in_plugin->name, "mpegvideo") ||
+ !strcmp (in_plugin->name, "mjpeg") ||
+ !strcmp (in_plugin->name, "redir") ||
+ !strncmp (in_plugin->name, "u8", 2) ||
+ !strncmp (in_plugin->name, "u16", 3) ||
+ !strncmp (in_plugin->name, "u24", 3) ||
+ !strncmp (in_plugin->name, "u32", 3) ||
+ !strncmp (in_plugin->name, "s8", 2) ||
+ !strncmp (in_plugin->name, "s16", 3) ||
+ !strncmp (in_plugin->name, "s24", 3) ||
+ !strncmp (in_plugin->name, "s32", 3) ||
+ !strncmp (in_plugin->name, "f32", 3) ||
+ !strncmp (in_plugin->name, "f64", 3) ||
+ !strcmp (in_plugin->name, "mulaw") || !strcmp (in_plugin->name, "alaw")
+ )
+ continue;
+
+ /* no network demuxers */
+ if (!strcmp (in_plugin->name, "sdp") ||
+ !strcmp (in_plugin->name, "rtsp") ||
+ !strcmp (in_plugin->name, "applehttp")
+ )
+ continue;
+
+ /* these don't do what one would expect or
+ * are only partially functional/useful */
+ if (!strcmp (in_plugin->name, "aac") ||
+ !strcmp (in_plugin->name, "wv") ||
+ !strcmp (in_plugin->name, "ass") ||
+ !strcmp (in_plugin->name, "ffmetadata"))
+ continue;
+
+ /* Don't use the typefind functions of formats for which we already have
+ * better typefind functions */
+ if (!strcmp (in_plugin->name, "mov,mp4,m4a,3gp,3g2,mj2") ||
+ !strcmp (in_plugin->name, "ass") ||
+ !strcmp (in_plugin->name, "avi") ||
+ !strcmp (in_plugin->name, "asf") ||
+ !strcmp (in_plugin->name, "mpegvideo") ||
+ !strcmp (in_plugin->name, "mp3") ||
+ !strcmp (in_plugin->name, "matroska") ||
+ !strcmp (in_plugin->name, "matroska_webm") ||
+ !strcmp (in_plugin->name, "matroska,webm") ||
+ !strcmp (in_plugin->name, "mpeg") ||
+ !strcmp (in_plugin->name, "wav") ||
+ !strcmp (in_plugin->name, "au") ||
+ !strcmp (in_plugin->name, "tta") ||
+ !strcmp (in_plugin->name, "rm") ||
+ !strcmp (in_plugin->name, "amr") ||
+ !strcmp (in_plugin->name, "ogg") ||
+ !strcmp (in_plugin->name, "aiff") ||
+ !strcmp (in_plugin->name, "ape") ||
+ !strcmp (in_plugin->name, "dv") ||
+ !strcmp (in_plugin->name, "flv") ||
+ !strcmp (in_plugin->name, "mpc") ||
+ !strcmp (in_plugin->name, "mpc8") ||
+ !strcmp (in_plugin->name, "mpegts") ||
+ !strcmp (in_plugin->name, "mpegtsraw") ||
+ !strcmp (in_plugin->name, "mxf") ||
+ !strcmp (in_plugin->name, "nuv") ||
+ !strcmp (in_plugin->name, "swf") ||
+ !strcmp (in_plugin->name, "voc") ||
+ !strcmp (in_plugin->name, "pva") ||
+ !strcmp (in_plugin->name, "gif") ||
+ !strcmp (in_plugin->name, "vc1test") ||
+ !strcmp (in_plugin->name, "ivf"))
+ register_typefind_func = FALSE;
+
+ /* Set the rank of demuxers known to work to MARGINAL.
+ * Set demuxers for which we already have another implementation to NONE
+ * Set All others to NONE*/
+ /**
+ * element-avdemux_xwma
+ *
+ * Since: 1.20
+ */
+ if (!strcmp (in_plugin->name, "wsvqa") ||
+ !strcmp (in_plugin->name, "wsaud") ||
+ !strcmp (in_plugin->name, "wc3movie") ||
+ !strcmp (in_plugin->name, "voc") ||
+ !strcmp (in_plugin->name, "tta") ||
+ !strcmp (in_plugin->name, "sol") ||
+ !strcmp (in_plugin->name, "smk") ||
+ !strcmp (in_plugin->name, "vmd") ||
+ !strcmp (in_plugin->name, "film_cpk") ||
+ !strcmp (in_plugin->name, "ingenient") ||
+ !strcmp (in_plugin->name, "psxstr") ||
+ !strcmp (in_plugin->name, "nuv") ||
+ !strcmp (in_plugin->name, "nut") ||
+ !strcmp (in_plugin->name, "nsv") ||
+ !strcmp (in_plugin->name, "mxf") ||
+ !strcmp (in_plugin->name, "mmf") ||
+ !strcmp (in_plugin->name, "mm") ||
+ !strcmp (in_plugin->name, "ipmovie") ||
+ !strcmp (in_plugin->name, "ape") ||
+ !strcmp (in_plugin->name, "RoQ") ||
+ !strcmp (in_plugin->name, "idcin") ||
+ !strcmp (in_plugin->name, "gxf") ||
+ !strcmp (in_plugin->name, "ffm") ||
+ !strcmp (in_plugin->name, "ea") ||
+ !strcmp (in_plugin->name, "daud") ||
+ !strcmp (in_plugin->name, "avs") ||
+ !strcmp (in_plugin->name, "aiff") ||
+ !strcmp (in_plugin->name, "xwma") ||
+ !strcmp (in_plugin->name, "4xm") ||
+ !strcmp (in_plugin->name, "yuv4mpegpipe") ||
+ !strcmp (in_plugin->name, "pva") ||
+ !strcmp (in_plugin->name, "mpc") ||
+ !strcmp (in_plugin->name, "mpc8") ||
+ !strcmp (in_plugin->name, "ivf") ||
+ !strcmp (in_plugin->name, "brstm") ||
+ !strcmp (in_plugin->name, "bfstm") ||
+ !strcmp (in_plugin->name, "gif") ||
+ !strcmp (in_plugin->name, "dsf") || !strcmp (in_plugin->name, "iff"))
+ rank = GST_RANK_MARGINAL;
+ else {
+ GST_DEBUG ("ignoring %s", in_plugin->name);
+ /* this assignment is immediately followed by continue, so the value
+ * is never used; kept to mirror the documented NONE policy above */
+ rank = GST_RANK_NONE;
+ continue;
+ }
+
+ /* construct the type */
+ type_name = g_strdup_printf ("avdemux_%s", in_plugin->name);
+ g_strdelimit (type_name, ".,|-<> ", '_');
+
+ /* if it's already registered, drop it */
+ if (g_type_from_name (type_name)) {
+ g_free (type_name);
+ continue;
+ }
+
+ typefind_name = g_strdup_printf ("avtype_%s", in_plugin->name);
+ g_strdelimit (typefind_name, ".,|-<> ", '_');
+
+ /* create the type now */
+ type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
+ /* stash the AVInputFormat so class_init/base_init can retrieve it */
+ g_type_set_qdata (type, GST_FFDEMUX_PARAMS_QDATA, (gpointer) in_plugin);
+
+ /* libav separates extensions with spaces, gst typefind wants commas */
+ if (in_plugin->extensions)
+ extensions = g_strdelimit (g_strdup (in_plugin->extensions), " ", ',');
+ else
+ extensions = NULL;
+
+ if (!gst_element_register (plugin, type_name, rank, type) ||
+ (register_typefind_func == TRUE &&
+ !gst_type_find_register (plugin, typefind_name, rank,
+ gst_ffmpegdemux_type_find, extensions, NULL,
+ (gpointer) in_plugin, NULL))) {
+ g_warning ("Registration of type %s failed", type_name);
+ g_free (type_name);
+ g_free (typefind_name);
+ g_free (extensions);
+ return FALSE;
+ }
+
+ g_free (type_name);
+ g_free (typefind_name);
+ g_free (extensions);
+ }
+
+ GST_LOG ("Finished registering demuxers");
+
+ return TRUE;
+ }
--- /dev/null
+ /* GStreamer
+ * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+
+ #include <string.h>
+
+ #include <libavformat/avformat.h>
+ #include <libavutil/opt.h>
+ #include <gst/gst.h>
+ #include <gst/base/gstcollectpads.h>
+
+ #include "gstav.h"
+ #include "gstavcodecmap.h"
+ #include "gstavutils.h"
+ #include "gstavprotocol.h"
+
++#ifdef TIZEN_FEATURE_LIBAV
++#include "libavformat/movenc.h"
++#endif /* TIZEN_FEATURE_LIBAV */
++
+ typedef struct _GstFFMpegMux GstFFMpegMux;
+ typedef struct _GstFFMpegMuxPad GstFFMpegMuxPad;
+
+ /* Per-sink-pad data for the muxer: extends GstCollectData with the
+ * index of the corresponding libav stream. */
+ struct _GstFFMpegMuxPad
+ {
+ GstCollectData collect; /* we extend the CollectData */
+
+ gint padnum; /* libav stream index this pad feeds */
+ };
+
+ /* Instance structure for the libav-based muxer element. */
+ struct _GstFFMpegMux
+ {
+ GstElement element;
+
+ GstCollectPads *collect; /* gathers buffers from all sink pads */
+ /* We need to keep track of our pads, so we do so here. */
+ GstPad *srcpad;
+
+ AVFormatContext *context; /* libav muxing context */
+ gboolean opened; /* TRUE once the header has been written */
+
+ guint videopads, audiopads; /* counters used for pad naming */
+ ++#ifdef TIZEN_FEATURE_LIBAV
+ ++ guint expected_trailer_size; /* running estimate of the moov/trailer size */
+ ++ guint nb_video_frames; /* muxed video frame count */
+ ++ guint nb_audio_frames; /* muxed audio frame count */
+ ++#endif /* TIZEN_FEATURE_LIBAV */
+
+ /*< private > */
+ /* event_function is the collectpads default eventfunction */
+ GstPadEventFunction event_function;
+ int max_delay; /* maximum allowed delay (property) */
+ int preload; /* preload time (property) */
+ };
+
+ typedef struct _GstFFMpegMuxClass GstFFMpegMuxClass;
+
+ /* Class structure: each registered avmux_* type carries the
+ * AVOutputFormat it wraps. */
+ struct _GstFFMpegMuxClass
+ {
+ GstElementClass parent_class;
+
+ AVOutputFormat *in_plugin; /* libav output format wrapped by this class */
+ };
+
+ /* NOTE(review): GST_TYPE_FFMPEGMUX expands to gst_ffmpegdec_get_type() —
+ * a decoder type getter in the mux file. This matches upstream gstavmux.c
+ * (long-standing copy-paste); confirm the macro is either unused or
+ * resolves to the intended symbol before relying on it. */
+ #define GST_TYPE_FFMPEGMUX \
+ (gst_ffmpegdec_get_type())
+ #define GST_FFMPEGMUX(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGMUX,GstFFMpegMux))
+ #define GST_FFMPEGMUX_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGMUX,GstFFMpegMuxClass))
+ #define GST_IS_FFMPEGMUX(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGMUX))
+ #define GST_IS_FFMPEGMUX_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGMUX))
+
+ /* signal ids (none defined yet) */
+ enum
+ {
+ /* FILL ME */
+ LAST_SIGNAL
+ };
+
+ /* property ids */
+ enum
+ {
+ PROP_0,
+ PROP_PRELOAD,
+ PROP_MAXDELAY
+ ++#ifdef TIZEN_FEATURE_LIBAV
+ ++ , PROP_EXPECTED_TRAILER_SIZE,
+ ++ PROP_NUMBER_VIDEO_FRAMES,
+ ++ PROP_NUMBER_AUDIO_FRAMES
+ ++#endif /* TIZEN_FEATURE_LIBAV */
+ };
+
+ /* A number of function prototypes are given so we can refer to them later. */
+ static void gst_ffmpegmux_class_init (GstFFMpegMuxClass * klass);
+ static void gst_ffmpegmux_base_init (gpointer g_class);
+ static void gst_ffmpegmux_init (GstFFMpegMux * ffmpegmux,
+ GstFFMpegMuxClass * g_class);
+ static void gst_ffmpegmux_finalize (GObject * object);
+
+ static gboolean gst_ffmpegmux_setcaps (GstPad * pad, GstCaps * caps);
+ static GstPad *gst_ffmpegmux_request_new_pad (GstElement * element,
+ GstPadTemplate * templ, const gchar * name, const GstCaps * caps);
+ static GstFlowReturn gst_ffmpegmux_collected (GstCollectPads * pads,
+ gpointer user_data);
+
+ static gboolean gst_ffmpegmux_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event);
+
+ static GstStateChangeReturn gst_ffmpegmux_change_state (GstElement * element,
+ GstStateChange transition);
+
+ static void gst_ffmpegmux_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec);
+ static void gst_ffmpegmux_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec);
+
+ static GstCaps *gst_ffmpegmux_get_id_caps (enum AVCodecID *id_list);
+ static void gst_ffmpeg_mux_simple_caps_set_int_list (GstCaps * caps,
+ const gchar * field, guint num, const gint * values);
+
+ #define GST_FFMUX_PARAMS_QDATA g_quark_from_static_string("avmux-params")
+
++#ifdef TIZEN_FEATURE_LIBAV
++static void gst_ffmpegmux_release_pad(GstElement *element, GstPad *pad);
++#endif /* TIZEN_FEATURE_LIBAV */
++
+ static GstElementClass *parent_class = NULL;
+
+ /*static guint gst_ffmpegmux_signals[LAST_SIGNAL] = { 0 }; */
+
+ /* Maps a libav muxer name to the name of the preferred native GStreamer
+ * element, used to advise against the libav wrapper. */
+ typedef struct
+ {
+ const char *name; /* libav muxer name */
+ const char *replacement; /* preferred native GStreamer element */
+ } GstFFMpegMuxReplacement;
+
+ /* Return the native GStreamer element name that should be preferred over
+ * the libav muxer 'name', or NULL when no replacement is known. */
+ static const char *
+ gst_ffmpegmux_get_replacement (const char *name)
+ {
+ static const GstFFMpegMuxReplacement blacklist[] = {
+ {"avi", "avimux"},
+ {"matroska", "matroskamux"},
+ {"mov", "qtmux"},
+ {"mpegts", "mpegtsmux"},
+ {"mp4", "mp4mux"},
+ {"mpjpeg", "multipartmux"},
+ {"ogg", "oggmux"},
+ {"wav", "wavenc"},
+ {"webm", "webmmux"},
+ {"mxf", "mxfmux"},
+ {"3gp", "gppmux"},
+ {"yuv4mpegpipe", "y4menc"},
+ {"aiff", "aiffmux"},
+ {"adts", "aacparse"},
+ {"asf", "asfmux"},
+ {"asf_stream", "asfmux"},
+ {"flv", "flvmux"},
+ {"mp3", "id3v2mux"},
+ {"mp2", "id3v2mux"}
+ };
+ guint i;
+
+ /* linear scan is fine: the table is small and this only runs at
+ * registration time */
+ for (i = 0; i < sizeof (blacklist) / sizeof (blacklist[0]); i++) {
+ if (strcmp (blacklist[i].name, name) == 0) {
+ return blacklist[i].replacement;
+ }
+ }
+
+ return NULL;
+ }
+
+ /* Return TRUE if the named libav muxer is really a formatter (writes a
+ * thin header/tag around an elementary stream) rather than a container
+ * muxer. */
+ static gboolean
+ gst_ffmpegmux_is_formatter (const char *name)
+ {
+ static const char *replace[] = {
+ "mp2", "mp3", NULL
+ };
+ int i;
+
+ for (i = 0; replace[i]; i++)
+ if (strcmp (replace[i], name) == 0)
+ return TRUE;
+ return FALSE;
+ }
+
++#ifdef TIZEN_FEATURE_LIBAV
++/* Tizen extension: per-entry and per-atom byte sizes used by
++ * update_expected_trailer_size() to estimate the MP4/3GP trailer (moov)
++ * size while muxing. Values are derived from the fixed atom layouts
++ * documented in the comment block inside that function. */
++/* trailer entry size */
++#define ENTRY_SIZE_VIDEO_STTS 8
++#define ENTRY_SIZE_VIDEO_STSS 4
++#define ENTRY_SIZE_VIDEO_STSZ 4
++#define ENTRY_SIZE_VIDEO_STCO 4
++#define ENTRY_SIZE_AUDIO_STTS 8
++#define ENTRY_SIZE_AUDIO_STSZ 4
++#define ENTRY_SIZE_AUDIO_STCO 4
++
++#define ENTRY_SIZE_VIDEO_MPEG4_STSD 146
++#define ENTRY_SIZE_VIDEO_H263P_STSD 102
++#define ENTRY_SIZE_AUDIO_AAC_STSD 106
++#define ENTRY_SIZE_AUDIO_AMR_STSD 69
++
++#define ENTRY_SIZE_STSC 12
++#define ENTRY_SIZE_VIDEO_ST 84 /*atom size (stss + stts + stsc + stsz + stco ) * (size + atom + version + flags + sample count)+stsz(sample size) */
++#define ENTRY_SIZE_AUDIO_ST 68 /*atom size (stss + stsc + stsz + stco ) * (size + atom + version + flags + sample count)+stsz(sample size) */
++
++/* ffmux_adts */
++#define MUX_ADTS_NAME "adts"
++#define MUX_AMR_NAME "amr"
++#define MUX_MP4_NAME "mp4"
++#define MUX_ADTS_SIZE_HEADER 8
++#define MUX_ADTS_SIZE_ENTRY 7
++#define MUX_AMR_SIZE_HEADER 6
++
++/* common */
++#define MUX_COMMON_SIZE_3GP_HEADER 290 /* ftyp + free + moov + mvhd + +iods + udta */
++#define MUX_COMMON_SIZE_MP4_HEADER 378 /* ftyp + free + moov + mvhd + +iods + udta (meta) */
++#define MUX_COMMON_SIZE_MP4_VIDEO_HEADER 305
++#define MUX_COMMON_SIZE_MP4_AUDIO_HEADER 253
++
++#define MUX_INFO_SIZE_LOCATION 106 /* loci + .xyz */
++
++static void
++update_expected_trailer_size (GstFFMpegMux * ffmpegmux)
++{
++ int i = 0;
++ guint nb_video_frames = 0;
++ guint nb_video_i_frames = 0;
++ guint nb_video_stts_entry = 0;
++ guint nb_audio_frames = 0;
++ guint nb_audio_stts_entry = 0;
++ gboolean video_stream = FALSE;
++ gboolean audio_stream = FALSE;
++ guint exp_size = 0;
++ AVCodecContext *codec_context = NULL;
++ enum AVCodecID video_codec_id;
++ enum AVCodecID audio_codec_id;
++
++ if (ffmpegmux == NULL) {
++ GST_WARNING ("ffmpegmux is NULL");
++ return;
++ }
++
++ for (i = 0; i < ffmpegmux->context->nb_streams; i++) {
++ codec_context = ffmpegmux->context->streams[i]->codec;
++ if (codec_context->codec_type == AVMEDIA_TYPE_VIDEO) {
++ nb_video_frames += codec_context->frame_number;
++ nb_video_i_frames += codec_context->i_frame_number;
++ nb_video_stts_entry += codec_context->stts_count;
++
++ video_stream = TRUE;
++ video_codec_id = codec_context->codec_id;
++ } else if (codec_context->codec_type == AVMEDIA_TYPE_AUDIO) {
++ nb_audio_frames += codec_context->frame_number;
++ nb_audio_stts_entry += codec_context->stts_count;
++
++ audio_stream = TRUE;
++ audio_codec_id = codec_context->codec_id;
++ }
++ }
++
++ /*
++ [[ Metadata Size ]]
++ - COMMON
++ ftyp = 28 (MPEG4 ftype: 28 , H263P fype: 28)
++ free = 8
++ moov = 8
++ mvhd = 108
++ iods = 24
++ **optional
++ udta = 114(meta in case of audio only) or
++ 114(loci in case of video only or video/audio) or
++ 202( with meta in MP4)
++ 96 ( audio only with meta )
++
++ total : 290 (3GP) or 378 (MP4)
++
++ - VIDEO:MPEG4
++ trak = 8
++ tkhd = 92
++ edts = 48 ( addition )
++ mdia = 8
++ mdhd = 32
++ hdir = 45
++ minf = 8
++ vmhd = 20
++ dinf = 36 ( 8 , dref : 16 , url : 12 )
++ stbl = 8 ( common video total : 305 )
++ stsd = 146 ( addtion : 16 + , mp4v: 86 ,esds : 44 )
++ stts = 16 + (8*stts_count)
++ stss = 16 + (4*I-frame)
++ stsc = 28
++ stsz = 20 + (4*frame)
++ stco = 16 + (4*frame)
++
++ - VIDEO:H.264 = 487(or 489) + (8*stts_count) + (8*frame) + (4*I-frame)
++ trak = 8
++ tkhd = 92
++ edts = 48 ( addition )
++ mdia = 8
++ mdhd = 32
++ hdir = 45
++ minf = 8
++ vmhd = 20
++ dinf = 36 ( 8 , dref : 16 , url : 12 )
++ stbl = 8
++ stsd = 134 (SPS 9, PPS 4) or 136 (SPS 111, PPS 4)
++ stts = 16 + (8*stts_count)
++ stss = 16 + (4*I-frame)
++ stsc = 28
++ stsz = 20 + (4*frame)
++ stco = 16 + (4*frame)
++
++ - VIDEO:H.263 = 470 + + (8*stts_count) + (8*frame) + (4*I-frame)
++ trak = 8
++ tkhd = 92
++ edts = 48 ( addition )
++ mdia = 8
++ mdhd = 32
++ hdir = 45
++ minf = 8
++ vmhd = 20
++ dinf = 36
++ stbl = 8
++ stsd = 102 -> different from H.264
++ stts = 16 + (8*stts_count)
++ stss = 16 + (4*I-frame)
++ stsc = 28
++ stsz = 20 + (4*frame)
++ stco = 16 + (4*frame)
++
++ - AUDIO:AAC = 424 + + (8*stts_count) + (8*audio_frame)
++ trak = 8
++ tkhd = 92
++ mdia = 8
++ mdhd = 32
++ hdir = 45
++ minf = 8
++ smhd = 16
++ dinf = 36 ( 8 , dref : 16 , url : 12 )
++ stbl = 8 ( common video total : 253 )
++ stsd = 106 + ( addtion : 16 , mp4v: 46 ,esds : 54 )
++ stts = 16 + (8*stts_count)
++ stsc = 28
++ stsz = 20 + (4*frame)
++ stco = 16 + (4*frame)
++
++ - AUDIO:AMR = 410 + (4*audio_frame)
++ trak = 8
++ tkhd = 92
++ mdia = 8
++ mdhd = 32
++   hdlr = 45
++ minf = 8
++ smhd = 16
++ dinf = 36
++ stbl = 8
++ stsd = 69 -> different from AAC
++ stts = 24 -> different from AAC
++ stsc = 28
++ stsz = 20 -> different from AAC
++ stco = 16 + (4*frame)
++ */
++
++ /* Calculate trailer size for video stream */
++ if (!strcmp (ffmpegmux->context->oformat->name, MUX_ADTS_NAME)) {
++
++ } else if (!strcmp (ffmpegmux->context->oformat->name, MUX_ADTS_NAME)) {
++
++ } else if (!strcmp (ffmpegmux->context->oformat->name, MUX_MP4_NAME)) {
++ exp_size = MUX_COMMON_SIZE_MP4_HEADER;
++ } else {
++ exp_size = MUX_COMMON_SIZE_3GP_HEADER;
++ }
++ //GST_INFO_OBJECT(ffmpegmux, "size: common size=[%d]", exp_size);
++
++ if (video_stream) {
++ /* ftyp + free + moov + mvhd + udta : H.264 -> 240, H.263 -> 236 */
++ /* trak size except frame related : H.264 -> 489, H.263 -> 470 */
++ if (video_codec_id == AV_CODEC_ID_H263
++ || video_codec_id == AV_CODEC_ID_H263P) {
++ exp_size +=
++ MUX_COMMON_SIZE_MP4_VIDEO_HEADER + ENTRY_SIZE_VIDEO_H263P_STSD;
++ } else if (video_codec_id == AV_CODEC_ID_MPEG4) {
++ exp_size +=
++ MUX_COMMON_SIZE_MP4_VIDEO_HEADER + ENTRY_SIZE_VIDEO_MPEG4_STSD;
++ } else {
++ exp_size += 240 + 489;
++ }
++
++ //GST_INFO_OBJECT(ffmpegmux, "size: [%d]",exp_size);
++
++ /* frame related */
++ exp_size +=
++ ENTRY_SIZE_VIDEO_ST + (ENTRY_SIZE_VIDEO_STTS * nb_video_stts_entry) +
++ (ENTRY_SIZE_VIDEO_STSS * nb_video_i_frames) + (ENTRY_SIZE_STSC) +
++ ((ENTRY_SIZE_VIDEO_STSZ + ENTRY_SIZE_VIDEO_STCO) * nb_video_frames);
++ }
++ //GST_INFO_OBJECT(ffmpegmux, "size: video=[%d] size=[%d], stts-entry=[%d], i-frame=[%d], video-sample=[%d]", video_stream, exp_size,nb_video_stts_entry,nb_video_i_frames,nb_video_frames);
++
++ if (audio_stream) {
++ /* Calculate trailer size for audio stream */
++ if (!strcmp (ffmpegmux->context->oformat->name, MUX_ADTS_NAME)) {
++ /* avmux_adts */
++ exp_size +=
++ MUX_ADTS_SIZE_HEADER + (MUX_ADTS_SIZE_ENTRY * nb_audio_frames);
++ } else if (!strcmp (ffmpegmux->context->oformat->name, MUX_AMR_NAME)) {
++ /* only audio avmux_amr */
++ exp_size = MUX_AMR_SIZE_HEADER;
++ } else {
++ /* avmux_3gp , avmux_mp4 */
++ if (!video_stream) {
++ /* audio only does not contain location info now */
++ exp_size -= MUX_INFO_SIZE_LOCATION;
++ }
++ /* others - avmux_3gp/mp4/amr */
++ if (audio_codec_id == AV_CODEC_ID_AMR_NB) {
++ /* AMR_NB codec */
++ exp_size +=
++ MUX_COMMON_SIZE_MP4_AUDIO_HEADER + ENTRY_SIZE_AUDIO_AMR_STSD;
++
++ //GST_INFO_OBJECT(ffmpegmux, "size: [%d]",exp_size);
++
++ exp_size +=
++ ENTRY_SIZE_AUDIO_ST +
++ (ENTRY_SIZE_AUDIO_STTS * nb_audio_stts_entry) + (ENTRY_SIZE_STSC) +
++ (ENTRY_SIZE_AUDIO_STCO * nb_audio_frames);
++ } else {
++ /* AAC codec */
++ exp_size +=
++ MUX_COMMON_SIZE_MP4_AUDIO_HEADER + ENTRY_SIZE_AUDIO_AAC_STSD;
++
++ //GST_INFO_OBJECT(ffmpegmux, "size: [%d]",exp_size);
++
++ exp_size +=
++ ENTRY_SIZE_AUDIO_ST +
++ (ENTRY_SIZE_AUDIO_STTS * nb_audio_stts_entry) + (ENTRY_SIZE_STSC) +
++ ((ENTRY_SIZE_AUDIO_STSZ + ENTRY_SIZE_AUDIO_STCO) * nb_audio_frames);
++ }
++
++ }
++ }
++ //GST_INFO_OBJECT(ffmpegmux, "size: audio=[%d], size=[%d], stts-entry=[%d], audio-sample=[%d]", audio_stream, exp_size, nb_audio_stts_entry, nb_audio_frames);
++
++ ffmpegmux->expected_trailer_size = exp_size;
++ ffmpegmux->nb_video_frames = nb_video_frames;
++ ffmpegmux->nb_audio_frames = nb_audio_frames;
++
++ return;
++}
++#endif /* TIZEN_FEATURE_LIBAV */
++
+ static void
+ gst_ffmpegmux_base_init (gpointer g_class)
+ {
+ GstFFMpegMuxClass *klass = (GstFFMpegMuxClass *) g_class;
+ GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+ GstPadTemplate *videosinktempl, *audiosinktempl, *srctempl;
+ AVOutputFormat *in_plugin;
+ GstCaps *srccaps, *audiosinkcaps, *videosinkcaps;
+ enum AVCodecID *video_ids = NULL, *audio_ids = NULL;
+ gchar *longname, *description, *name;
+ const char *replacement;
+ gboolean is_formatter;
+
+ in_plugin =
+ (AVOutputFormat *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
+ GST_FFMUX_PARAMS_QDATA);
+ g_assert (in_plugin != NULL);
+
+ name = g_strdup (in_plugin->name);
+ g_strdelimit (name, ".,|-<> ", '_');
+
+ /* construct the element details struct */
+ replacement = gst_ffmpegmux_get_replacement (in_plugin->name);
+ is_formatter = gst_ffmpegmux_is_formatter (in_plugin->name);
+ if (replacement != NULL) {
+ longname =
+ g_strdup_printf ("libav %s %s (not recommended, use %s instead)",
+ in_plugin->long_name, is_formatter ? "formatter" : "muxer",
+ replacement);
+ description =
+ g_strdup_printf ("libav %s %s (not recommended, use %s instead)",
+ in_plugin->long_name, is_formatter ? "formatter" : "muxer",
+ replacement);
+ } else {
+ longname = g_strdup_printf ("libav %s %s", in_plugin->long_name,
+ is_formatter ? "formatter" : "muxer");
+ description = g_strdup_printf ("libav %s %s", in_plugin->long_name,
+ is_formatter ? "formatter" : "muxer");
+ }
+ gst_element_class_set_metadata (element_class, longname,
+ is_formatter ? "Formatter/Metadata" : "Codec/Muxer", description,
+ "Wim Taymans <wim.taymans@chello.be>, "
+ "Ronald Bultje <rbultje@ronald.bitfreak.net>");
+ g_free (longname);
+ g_free (description);
+
+ /* Try to find the caps that belongs here */
+ srccaps = gst_ffmpeg_formatid_to_caps (name);
+ if (!srccaps) {
+ GST_DEBUG ("Couldn't get source caps for muxer '%s', skipping", name);
+ goto beach;
+ }
+
+ if (!gst_ffmpeg_formatid_get_codecids (in_plugin->name,
+ &video_ids, &audio_ids, in_plugin)) {
+ gst_caps_unref (srccaps);
+ GST_DEBUG ("Couldn't get sink caps for muxer '%s'. Most likely because "
+ "no input format mapping exists.", name);
+ goto beach;
+ }
+
+ videosinkcaps = video_ids ? gst_ffmpegmux_get_id_caps (video_ids) : NULL;
+ audiosinkcaps = audio_ids ? gst_ffmpegmux_get_id_caps (audio_ids) : NULL;
+
+ /* fix up allowed caps for some muxers */
+ /* FIXME : This should be in gstffmpegcodecmap.c ! */
+ if (strcmp (in_plugin->name, "flv") == 0) {
+ const gint rates[] = { 44100, 22050, 11025 };
+
+ gst_ffmpeg_mux_simple_caps_set_int_list (audiosinkcaps, "rate", 3, rates);
+ } else if (strcmp (in_plugin->name, "dv") == 0) {
+ gst_caps_set_simple (audiosinkcaps,
+ "rate", G_TYPE_INT, 48000, "channels", G_TYPE_INT, 2, NULL);
+
+ }
+
+ /* pad templates */
+ srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
+ gst_element_class_add_pad_template (element_class, srctempl);
+ gst_caps_unref (srccaps);
+
+ if (audiosinkcaps) {
+ audiosinktempl = gst_pad_template_new ("audio_%u",
+ GST_PAD_SINK, GST_PAD_REQUEST, audiosinkcaps);
+ gst_element_class_add_pad_template (element_class, audiosinktempl);
+ gst_caps_unref (audiosinkcaps);
+ }
+
+ if (videosinkcaps) {
+ videosinktempl = gst_pad_template_new ("video_%u",
+ GST_PAD_SINK, GST_PAD_REQUEST, videosinkcaps);
+ gst_element_class_add_pad_template (element_class, videosinktempl);
+ gst_caps_unref (videosinkcaps);
+ }
+
+ beach:
+ klass->in_plugin = in_plugin;
+
+ g_free (name);
+ }
+
+ static void
+ gst_ffmpegmux_class_init (GstFFMpegMuxClass * klass)
+ {
+ GObjectClass *gobject_class;
+ GstElementClass *gstelement_class;
++#ifdef TIZEN_FEATURE_LIBAV
++ GParamSpec * tspec = NULL;
++#endif /* TIZEN_FEATURE_LIBAV */
+
+ gobject_class = (GObjectClass *) klass;
+ gstelement_class = (GstElementClass *) klass;
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_ffmpegmux_set_property);
+ gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_ffmpegmux_get_property);
+
+ g_object_class_install_property (gobject_class, PROP_PRELOAD,
+ g_param_spec_int ("preload", "preload",
+ "Set the initial demux-decode delay (in microseconds)",
+ 0, G_MAXINT, 0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_MAXDELAY,
+ g_param_spec_int ("maxdelay", "maxdelay",
+ "Set the maximum demux-decode delay (in microseconds)", 0, G_MAXINT,
+ 0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ gstelement_class->request_new_pad = gst_ffmpegmux_request_new_pad;
+ gstelement_class->change_state = gst_ffmpegmux_change_state;
+ gobject_class->finalize = gst_ffmpegmux_finalize;
++
++#ifdef TIZEN_FEATURE_LIBAV
++ gstelement_class->release_pad = gst_ffmpegmux_release_pad;
++
++ /* properties */
++ tspec = g_param_spec_uint("expected-trailer-size", "Expected Trailer Size",
++ "Expected trailer size (bytes)",
++ 0, G_MAXUINT, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
++ if (tspec)
++ g_object_class_install_property(gobject_class, PROP_EXPECTED_TRAILER_SIZE, tspec);
++ else
++ GST_ERROR("g_param_spec failed for \"expected-trailer-size\"");
++
++ tspec = g_param_spec_uint("number-video-frames", "Number of video frames",
++ "Current number of video frames",
++ 0, G_MAXUINT, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
++ if (tspec)
++ g_object_class_install_property (gobject_class, PROP_NUMBER_VIDEO_FRAMES, tspec);
++ else
++ GST_ERROR("g_param_spec failed for \"number-video-frames\"");
++
++ tspec = g_param_spec_uint("number-audio-frames", "Number of audio frames",
++ "Current number of audio frames",
++ 0, G_MAXUINT, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
++ if (tspec)
++ g_object_class_install_property (gobject_class, PROP_NUMBER_AUDIO_FRAMES, tspec);
++ else
++ GST_ERROR("g_param_spec failed for \"number-audio-frames\"");
++#endif /* TIZEN_FEATURE_LIBAV */
+ }
+
+ static void
+ gst_ffmpegmux_init (GstFFMpegMux * ffmpegmux, GstFFMpegMuxClass * g_class)
+ {
+ GstElementClass *klass = GST_ELEMENT_CLASS (g_class);
+ GstFFMpegMuxClass *oclass = (GstFFMpegMuxClass *) klass;
+ GstPadTemplate *templ = gst_element_class_get_pad_template (klass, "src");
+
+ ffmpegmux->srcpad = gst_pad_new_from_template (templ, "src");
+ gst_pad_set_caps (ffmpegmux->srcpad, gst_pad_template_get_caps (templ));
+ gst_element_add_pad (GST_ELEMENT (ffmpegmux), ffmpegmux->srcpad);
+
+ ffmpegmux->collect = gst_collect_pads_new ();
+ gst_collect_pads_set_function (ffmpegmux->collect,
+ (GstCollectPadsFunction) gst_ffmpegmux_collected, ffmpegmux);
+
+ ffmpegmux->context = avformat_alloc_context ();
+ ffmpegmux->context->oformat = oclass->in_plugin;
+ ffmpegmux->context->nb_streams = 0;
+ ffmpegmux->opened = FALSE;
+
+ ffmpegmux->videopads = 0;
+ ffmpegmux->audiopads = 0;
+ ffmpegmux->max_delay = 0;
++
++#ifdef TIZEN_FEATURE_LIBAV
++ ffmpegmux->expected_trailer_size = 0;
++ ffmpegmux->nb_video_frames = 0;
++ ffmpegmux->nb_audio_frames = 0;
++#endif /* TIZEN_FEATURE_LIBAV */
+ }
+
#ifdef TIZEN_FEATURE_LIBAV
/* Tizen request-pad release: tears down the AVStream backing @pad and drops
 * the pad from the collector and the element.
 * NOTE(review): this function frees data normally owned by libav
 * (st->codec, the mov muxer's MOVMuxContext/MOVTrack internals) and relies
 * on Tizen's patched ffmpeg exposing those structs — confirm before reuse. */
static void
gst_ffmpegmux_release_pad (GstElement * element, GstPad * pad)
{
  GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) element;
  GstFFMpegMuxPad *collect_pad;
  AVStream *st;
  int i;
  /* private data was attached by gst_collect_pads_add_pad() in request_new_pad;
   * assumed non-NULL for pads we created — TODO confirm for foreign pads */
  collect_pad = (GstFFMpegMuxPad *)gst_pad_get_element_private(pad);

  GST_DEBUG ("Release requested pad[%s:%s]", GST_DEBUG_PAD_NAME(pad));
  st = ffmpegmux->context->streams[collect_pad->padnum];
  if (st) {
    if (st->codec) {
      /* keep the per-kind pad counters in sync with the released pad */
      if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        ffmpegmux->videopads--;
      } else {
        ffmpegmux->audiopads--;
      }
      if (st->codec->extradata) {
        av_free(st->codec->extradata);
        st->codec->extradata = NULL;
      }
      /* NOTE(review): st->codec was allocated by libav; pairing it with
       * g_free() mixes allocators — verify against the patched libav. */
      g_free(st->codec);
      st->codec = NULL;
    }
    if (ffmpegmux->context->priv_data) {
      /* mov/mp4/3gp muxer private data: free per-track vos buffers and the
       * track array.  NOTE(review): this walks and frees the tracks of ALL
       * streams, not just the one being released — verify intent. */
      MOVMuxContext *mov = ffmpegmux->context->priv_data;
      if (mov && mov->tracks) {
        for (i = 0 ; i < ffmpegmux->context->nb_streams ; i++) {
          MOVTrack *trk = &mov->tracks[i];
          if (trk && trk->vos_data) {
            av_free(trk->vos_data);
            trk->vos_data = NULL;
          }
        }
        av_free(mov->tracks);
        mov->tracks = NULL;
      }
      av_free(ffmpegmux->context->priv_data);
      ffmpegmux->context->priv_data = NULL;
    }
    ffmpegmux->context->nb_streams--;
    /* NOTE(review): frees the AVStream with g_free() and leaves a stale
     * pointer in context->streams[]; presumably safe only because the
     * context is not used again before being rebuilt — confirm. */
    g_free(st);
    st = NULL;
  }
  /* finally detach the pad from the collector and the element */
  gst_collect_pads_remove_pad(ffmpegmux->collect, pad);
  gst_element_remove_pad(element, pad);
}
#endif /* TIZEN_FEATURE_LIBAV */
++
+ static void
+ gst_ffmpegmux_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec)
+ {
+ GstFFMpegMux *src;
+
+ src = (GstFFMpegMux *) object;
+
+ switch (prop_id) {
+ case PROP_PRELOAD:
+ src->preload = g_value_get_int (value);
+ break;
+ case PROP_MAXDELAY:
+ src->max_delay = g_value_get_int (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+ }
+
+ static void
+ gst_ffmpegmux_get_property (GObject * object, guint prop_id, GValue * value,
+ GParamSpec * pspec)
+ {
+ GstFFMpegMux *src;
+
+ src = (GstFFMpegMux *) object;
+
+ switch (prop_id) {
+ case PROP_PRELOAD:
+ g_value_set_int (value, src->preload);
+ break;
+ case PROP_MAXDELAY:
+ g_value_set_int (value, src->max_delay);
+ break;
++#ifdef TIZEN_FEATURE_LIBAV
++ case PROP_EXPECTED_TRAILER_SIZE:
++ g_value_set_uint(value, src->expected_trailer_size);
++ break;
++ case PROP_NUMBER_VIDEO_FRAMES:
++ g_value_set_uint(value, src->nb_video_frames);
++ break;
++ case PROP_NUMBER_AUDIO_FRAMES:
++ g_value_set_uint(value, src->nb_audio_frames);
++ break;
++#endif /* TIZEN_FEATURE_LIBAV */
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+ }
+
+
+ static void
+ gst_ffmpegmux_finalize (GObject * object)
+ {
+ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) object;
+
+ avformat_free_context (ffmpegmux->context);
+ ffmpegmux->context = NULL;
+
+ gst_object_unref (ffmpegmux->collect);
+
+ if (G_OBJECT_CLASS (parent_class)->finalize)
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+ }
+
+ static GstPad *
+ gst_ffmpegmux_request_new_pad (GstElement * element,
+ GstPadTemplate * templ, const gchar * name, const GstCaps * caps)
+ {
+ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) element;
+ GstElementClass *klass = GST_ELEMENT_GET_CLASS (element);
+ GstFFMpegMuxPad *collect_pad;
+ gchar *padname;
+ GstPad *pad;
+ AVStream *st;
+ enum AVMediaType type;
+ gint bitrate = 0, framesize = 0;
+
+ g_return_val_if_fail (templ != NULL, NULL);
+ g_return_val_if_fail (templ->direction == GST_PAD_SINK, NULL);
+ g_return_val_if_fail (ffmpegmux->opened == FALSE, NULL);
+
+ /* figure out a name that *we* like */
+ if (templ == gst_element_class_get_pad_template (klass, "video_%u")) {
+ padname = g_strdup_printf ("video_%u", ffmpegmux->videopads++);
+ type = AVMEDIA_TYPE_VIDEO;
+ bitrate = 64 * 1024;
+ framesize = 1152;
+ } else if (templ == gst_element_class_get_pad_template (klass, "audio_%u")) {
+ padname = g_strdup_printf ("audio_%u", ffmpegmux->audiopads++);
+ type = AVMEDIA_TYPE_AUDIO;
+ bitrate = 285 * 1024;
+ } else {
+ g_warning ("avmux: unknown pad template!");
+ return NULL;
+ }
+
+ /* create pad */
+ pad = gst_pad_new_from_template (templ, padname);
+ collect_pad = (GstFFMpegMuxPad *)
+ gst_collect_pads_add_pad (ffmpegmux->collect, pad,
+ sizeof (GstFFMpegMuxPad), NULL, TRUE);
+ collect_pad->padnum = ffmpegmux->context->nb_streams;
+
+ /* small hack to put our own event pad function and chain up to collect pad */
+ ffmpegmux->event_function = GST_PAD_EVENTFUNC (pad);
+ gst_pad_set_event_function (pad,
+ GST_DEBUG_FUNCPTR (gst_ffmpegmux_sink_event));
+
+ gst_element_add_pad (element, pad);
+
+ /* AVStream needs to be created */
+ st = avformat_new_stream (ffmpegmux->context, NULL);
+ st->id = collect_pad->padnum;
+ st->codecpar->codec_type = type;
+ st->codecpar->codec_id = AV_CODEC_ID_NONE; /* this is a check afterwards */
+ st->codecpar->bit_rate = bitrate;
+ st->codecpar->frame_size = framesize;
+ /* we fill in codec during capsnego */
+
+ /* we love debug output (c) (tm) (r) */
+ GST_DEBUG ("Created %s pad for avmux_%s element",
+ padname, ((GstFFMpegMuxClass *) klass)->in_plugin->name);
+ g_free (padname);
+
+ return pad;
+ }
+
+ /**
+ * gst_ffmpegmux_setcaps
+ * @pad: #GstPad
+ * @caps: New caps.
+ *
+ * Set caps to pad.
+ *
+ * Returns: #TRUE on success.
+ */
+ static gboolean
+ gst_ffmpegmux_setcaps (GstPad * pad, GstCaps * caps)
+ {
+ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) (gst_pad_get_parent (pad));
+ GstFFMpegMuxPad *collect_pad;
+ AVStream *st;
+ AVCodecContext tmp;
+
+ collect_pad = (GstFFMpegMuxPad *) gst_pad_get_element_private (pad);
+
+ st = ffmpegmux->context->streams[collect_pad->padnum];
+ av_opt_set_int (ffmpegmux->context, "preload", ffmpegmux->preload, 0);
+ ffmpegmux->context->max_delay = ffmpegmux->max_delay;
+ memset (&tmp, 0, sizeof (tmp));
+
+ /* for the format-specific guesses, we'll go to
+ * our famous codec mapper */
+ if (gst_ffmpeg_caps_to_codecid (caps, &tmp) == AV_CODEC_ID_NONE)
+ goto not_accepted;
+
+ avcodec_parameters_from_context (st->codecpar, &tmp);
+
+ /* copy over the aspect ratios, ffmpeg expects the stream aspect to match the
+ * codec aspect. */
+ st->sample_aspect_ratio = st->codecpar->sample_aspect_ratio;
+
++#ifdef TIZEN_FEATURE_LIBAV
++ /* ref counting bug fix */
++ gst_object_unref(ffmpegmux);
++#endif /* TIZEN_FEATURE_LIBAV */
++
+ GST_LOG_OBJECT (pad, "accepted caps %" GST_PTR_FORMAT, caps);
+ return TRUE;
+
+ /* ERRORS */
+ not_accepted:
+ {
++#ifdef TIZEN_FEATURE_LIBAV
++ /* ref counting bug fix */
++ gst_object_unref (ffmpegmux);
++#endif /* TIZEN_FEATURE_LIBAV */
+ GST_LOG_OBJECT (pad, "rejecting caps %" GST_PTR_FORMAT, caps);
+ return FALSE;
+ }
+ }
+
+
+ static gboolean
+ gst_ffmpegmux_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
+ {
+ GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) parent;
+ gboolean res = TRUE;
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_TAG:{
+ GstTagList *taglist;
+ GstTagSetter *setter = GST_TAG_SETTER (ffmpegmux);
+ const GstTagMergeMode mode = gst_tag_setter_get_tag_merge_mode (setter);
+
+ gst_event_parse_tag (event, &taglist);
+ gst_tag_setter_merge_tags (setter, taglist, mode);
+ break;
+ }
+ case GST_EVENT_CAPS:{
+ GstCaps *caps;
+ gst_event_parse_caps (event, &caps);
+ if (!(res = gst_ffmpegmux_setcaps (pad, caps)))
+ goto beach;
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* chaining up to collectpads default event function */
+ res = ffmpegmux->event_function (pad, parent, event);
+
+ beach:
+ return res;
+ }
+
/* GstCollectPads callback — the muxing core.
 * On the first call it checks that every stream finished caps negotiation,
 * opens the output "file" (the gstreamer protocol bridge downstream) and
 * writes the container header.  Each subsequent call pops the queued buffer
 * with the earliest timestamp, wraps it in an AVPacket and hands it to
 * av_write_frame().  When no pad has data left it writes the trailer and
 * signals EOS. */
static GstFlowReturn
gst_ffmpegmux_collected (GstCollectPads * pads, gpointer user_data)
{
  GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) user_data;
  GSList *collected;
  GstFFMpegMuxPad *best_pad;
  GstClockTime best_time;
#if 0
  /* Re-enable once converted to new AVMetaData API
   * See #566605
   */
  const GstTagList *tags;
#endif

  /* open "file" (gstreamer protocol to next element) */
  if (!ffmpegmux->opened) {
    int open_flags = AVIO_FLAG_WRITE;

    /* we do need all streams to have started capsnego,
     * or things will go horribly wrong */
    for (collected = ffmpegmux->collect->data; collected;
        collected = g_slist_next (collected)) {
      GstFFMpegMuxPad *collect_pad = (GstFFMpegMuxPad *) collected->data;
      AVStream *st = ffmpegmux->context->streams[collect_pad->padnum];

      /* check whether the pad has successfully completed capsnego */
      if (st->codecpar->codec_id == AV_CODEC_ID_NONE) {
        GST_ELEMENT_ERROR (ffmpegmux, CORE, NEGOTIATION, (NULL),
            ("no caps set on stream %d (%s)", collect_pad->padnum,
                (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ?
                "video" : "audio"));
        return GST_FLOW_ERROR;
      }
      /* set framerate for audio */
      if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        switch (st->codecpar->codec_id) {
          case AV_CODEC_ID_PCM_S16LE:
          case AV_CODEC_ID_PCM_S16BE:
          case AV_CODEC_ID_PCM_U16LE:
          case AV_CODEC_ID_PCM_U16BE:
          case AV_CODEC_ID_PCM_S8:
          case AV_CODEC_ID_PCM_U8:
            /* raw PCM: one sample per "frame" */
            st->codecpar->frame_size = 1;
            break;
          default:
          {
            GstBuffer *buffer;

            /* FIXME : This doesn't work for RAW AUDIO...
             * in fact I'm wondering if it even works for any kind of audio... */
            /* derive samples-per-frame from the first queued buffer's
             * duration; leaves frame_size untouched if nothing is queued */
            buffer = gst_collect_pads_peek (ffmpegmux->collect,
                (GstCollectData *) collect_pad);
            if (buffer) {
              st->codecpar->frame_size =
                  st->codecpar->sample_rate *
                  GST_BUFFER_DURATION (buffer) / GST_SECOND;
              gst_buffer_unref (buffer);
            }
          }
        }
      }
    }

#if 0
    /* Re-enable once converted to new AVMetaData API
     * See #566605
     */

    /* tags */
    tags = gst_tag_setter_get_tag_list (GST_TAG_SETTER (ffmpegmux));
    if (tags) {
      gint i;
      gchar *s;

      /* get the interesting ones */
      if (gst_tag_list_get_string (tags, GST_TAG_TITLE, &s)) {
        strncpy (ffmpegmux->context->title, s,
            sizeof (ffmpegmux->context->title));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_ARTIST, &s)) {
        strncpy (ffmpegmux->context->author, s,
            sizeof (ffmpegmux->context->author));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_COPYRIGHT, &s)) {
        strncpy (ffmpegmux->context->copyright, s,
            sizeof (ffmpegmux->context->copyright));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_COMMENT, &s)) {
        strncpy (ffmpegmux->context->comment, s,
            sizeof (ffmpegmux->context->comment));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_ALBUM, &s)) {
        strncpy (ffmpegmux->context->album, s,
            sizeof (ffmpegmux->context->album));
      }
      if (gst_tag_list_get_string (tags, GST_TAG_GENRE, &s)) {
        strncpy (ffmpegmux->context->genre, s,
            sizeof (ffmpegmux->context->genre));
      }
      if (gst_tag_list_get_int (tags, GST_TAG_TRACK_NUMBER, &i)) {
        ffmpegmux->context->track = i;
      }
    }
#endif

    /* set the streamheader flag for gstffmpegprotocol if codec supports it */
    if (!strcmp (ffmpegmux->context->oformat->name, "flv")) {
      open_flags |= GST_FFMPEG_URL_STREAMHEADER;
    }

    /* some house-keeping for downstream before starting data flow */
    /* stream-start (FIXME: create id based on input ids) */
    {
      gchar s_id[32];

      g_snprintf (s_id, sizeof (s_id), "avmux-%08x", g_random_int ());
      gst_pad_push_event (ffmpegmux->srcpad, gst_event_new_stream_start (s_id));
    }
    /* segment */
    {
      GstSegment segment;

      /* let downstream know we think in BYTES and expect to do seeking later on */
      gst_segment_init (&segment, GST_FORMAT_BYTES);
      gst_pad_push_event (ffmpegmux->srcpad, gst_event_new_segment (&segment));
    }

    if (gst_ffmpegdata_open (ffmpegmux->srcpad, open_flags,
            &ffmpegmux->context->pb) < 0) {
      GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, TOO_LAZY, (NULL),
          ("Failed to open stream context in avmux"));
      return GST_FLOW_ERROR;
    }

    /* now open the mux format */
    if (avformat_write_header (ffmpegmux->context, NULL) < 0) {
      GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, SETTINGS, (NULL),
          ("Failed to write file header - check codec settings"));
      return GST_FLOW_ERROR;
    }

    /* we're now opened */
    ffmpegmux->opened = TRUE;

    /* flush the header so it will be used as streamheader */
    avio_flush (ffmpegmux->context->pb);
  }

  /* take the one with earliest timestamp,
   * and push it forward */
  best_pad = NULL;
  best_time = GST_CLOCK_TIME_NONE;
  for (collected = ffmpegmux->collect->data; collected;
      collected = g_slist_next (collected)) {
    GstFFMpegMuxPad *collect_pad = (GstFFMpegMuxPad *) collected->data;
    GstBuffer *buffer = gst_collect_pads_peek (ffmpegmux->collect,
        (GstCollectData *) collect_pad);

    /* if there's no buffer, just continue */
    if (buffer == NULL) {
      continue;
    }

    /* if we have no buffer yet, just use the first one */
    if (best_pad == NULL) {
      best_pad = collect_pad;
      best_time = GST_BUFFER_TIMESTAMP (buffer);
      goto next_pad;
    }

    /* if we do have one, only use this one if it's older */
    if (GST_BUFFER_TIMESTAMP (buffer) < best_time) {
      best_time = GST_BUFFER_TIMESTAMP (buffer);
      best_pad = collect_pad;
    }

  next_pad:
    gst_buffer_unref (buffer);

    /* Mux buffers with invalid timestamp first */
    if (!GST_CLOCK_TIME_IS_VALID (best_time))
      break;
  }

  /* now handle the buffer, or signal EOS if we have
   * no buffers left */
  if (best_pad != NULL) {
    GstBuffer *buf;
    AVPacket pkt = { 0, };
    GstMapInfo map;

#ifdef TIZEN_FEATURE_LIBAV
    /* pkt.is_mux is a Tizen-patched AVPacket field, not in stock ffmpeg */
    av_init_packet (&pkt);
    pkt.is_mux = 1;
#endif /* TIZEN_FEATURE_LIBAV */

    /* push out current buffer */
    buf =
        gst_collect_pads_pop (ffmpegmux->collect, (GstCollectData *) best_pad);

    /* set time */
#ifdef TIZEN_FEATURE_LIBAV
    /* NOTE(review): video pts is written in milliseconds here, which assumes
     * the patched mov muxer uses a 1/1000 stream time base — confirm */
    if (ffmpegmux->context->streams[best_pad->padnum]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
      pkt.pts = GST_TIME_AS_MSECONDS(GST_BUFFER_TIMESTAMP(buf));
    else
#else /* TIZEN_FEATURE_LIBAV */
    pkt.pts = gst_ffmpeg_time_gst_to_ff (GST_BUFFER_TIMESTAMP (buf),
        ffmpegmux->context->streams[best_pad->padnum]->time_base);
#endif /* TIZEN_FEATURE_LIBAV */
    /* NOTE(review): with TIZEN_FEATURE_LIBAV defined, this assignment becomes
     * the body of the dangling `else` above — it then runs only for audio and
     * video packets keep dts == AV_NOPTS_VALUE.  Looks unintended; verify
     * against the Tizen mov muxer's expectations. */
    pkt.dts = pkt.pts;

    gst_buffer_map (buf, &map, GST_MAP_READ);
    pkt.data = map.data;
    pkt.size = map.size;

    pkt.stream_index = best_pad->padnum;

    /* non-delta buffers are keyframes for the container */
    if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT))
      pkt.flags |= AV_PKT_FLAG_KEY;

#ifdef TIZEN_FEATURE_LIBAV
    /* Tizen trailer-size bookkeeping: count stts runs (one new entry each
     * time the inter-packet duration changes) and I-frames, stored in the
     * patched codec context fields (stts_count / i_frame_number).
     * NOTE(review): the `static` locals below are shared by ALL muxer
     * instances and never reset between runs — state bleeds across
     * instances; confirm this is tolerable before reuse. */
    if (ffmpegmux->context->streams[best_pad->padnum]->codec->codec_type ==
        AVMEDIA_TYPE_VIDEO) {
      static int last_duration = -1;
      static int64_t last_dts = -1;
      if (GST_BUFFER_DURATION_IS_VALID (buf)) {
        pkt.duration = GST_TIME_AS_MSECONDS (GST_BUFFER_DURATION (buf));
      } else {
        pkt.duration = 0;
      }

      if (last_dts == -1) {
        /* first time */
        ffmpegmux->context->streams[best_pad->padnum]->codec->stts_count++;
      } else {
        /* check real duration : current dts - last dts */
        if (last_duration != (pkt.dts - last_dts)) {
          last_duration = pkt.dts - last_dts;
          ffmpegmux->context->streams[best_pad->padnum]->codec->stts_count++;
        }
      }
      last_dts = pkt.dts;
      if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
        ffmpegmux->context->streams[best_pad->padnum]->codec->i_frame_number++;
      }
    } else {
      /* same shared-static caveat as the video branch above */
      static int last_duration_audio = -1;
      static int64_t last_dts_audio = -1;

      if (GST_BUFFER_DURATION_IS_VALID (buf)) {
        if (last_dts_audio == -1) {
          /* first time */
          ffmpegmux->context->streams[best_pad->padnum]->codec->stts_count++;
        } else {
          /* check real duration : current dts - last dts */
          if (last_duration_audio != (pkt.dts - last_dts_audio)) {
            last_duration_audio = pkt.dts - last_dts_audio;
            ffmpegmux->context->streams[best_pad->padnum]->codec->stts_count++;
          }
        }
        last_dts_audio = pkt.dts;

        pkt.duration =
            gst_ffmpeg_time_gst_to_ff (GST_BUFFER_DURATION (buf),
            ffmpegmux->context->streams[best_pad->padnum]->time_base);
      } else {
        pkt.duration = 0;
      }
    }

    /* refresh the expected-trailer-size / frame-count properties */
    update_expected_trailer_size (ffmpegmux);
#else /* TIZEN_FEATURE_LIBAV */
    if (GST_BUFFER_DURATION_IS_VALID (buf))
      pkt.duration =
          gst_ffmpeg_time_gst_to_ff (GST_BUFFER_DURATION (buf),
          ffmpegmux->context->streams[best_pad->padnum]->time_base);
    else
      pkt.duration = 0;
#endif /* TIZEN_FEATURE_LIBAV */
    av_write_frame (ffmpegmux->context, &pkt);
    gst_buffer_unmap (buf, &map);
    gst_buffer_unref (buf);
  } else {
    /* close down */
    av_write_trailer (ffmpegmux->context);
    ffmpegmux->opened = FALSE;
    avio_flush (ffmpegmux->context->pb);
    gst_ffmpegdata_close (ffmpegmux->context->pb);
    gst_pad_push_event (ffmpegmux->srcpad, gst_event_new_eos ());
    return GST_FLOW_EOS;
  }

  return GST_FLOW_OK;
}
+
/* Element state handler: starts/stops the collectpads on the way up/down and,
 * on Tizen builds, resets per-stream timing fields when going back to READY
 * so the context can be reused for another recording. */
static GstStateChangeReturn
gst_ffmpegmux_change_state (GstElement * element, GstStateChange transition)
{
  GstStateChangeReturn ret;
  GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) (element);

  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
      break;
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      gst_collect_pads_start (ffmpegmux->collect);
      break;
    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
      break;
    case GST_STATE_CHANGE_PAUSED_TO_READY:
      /* stop collecting before chaining up so the pads go idle */
      gst_collect_pads_stop (ffmpegmux->collect);
      break;
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  switch (transition) {
    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
      break;
    case GST_STATE_CHANGE_PAUSED_TO_READY:
#ifdef TIZEN_FEATURE_LIBAV
      /* NOTE(review): this brace is opened inside one #ifdef block and closed
       * inside the one below; the pairing only holds when TIZEN_FEATURE_LIBAV
       * is consistently defined (or not) for both — fragile but correct. */
      {
        int i = 0;
#endif /* TIZEN_FEATURE_LIBAV */
      gst_tag_setter_reset_tags (GST_TAG_SETTER (ffmpegmux));
      if (ffmpegmux->opened) {
        ffmpegmux->opened = FALSE;
        gst_ffmpegdata_close (ffmpegmux->context->pb);
      }
#ifdef TIZEN_FEATURE_LIBAV
        /* clear per-stream timing so a reused context starts fresh;
         * NOTE(review): cur_dts is a libav-internal field — assumed exposed
         * by the Tizen-patched headers, confirm on upgrade */
        for (i = 0 ; i < ffmpegmux->context->nb_streams ; i++) {
          ffmpegmux->context->streams[i]->start_time = AV_NOPTS_VALUE;
          ffmpegmux->context->streams[i]->duration = AV_NOPTS_VALUE;
          ffmpegmux->context->streams[i]->cur_dts = AV_NOPTS_VALUE;
        }
      }
#endif
      break;
    case GST_STATE_CHANGE_READY_TO_NULL:
      break;
    default:
      break;
  }

  return ret;
}
+
+ static GstCaps *
+ gst_ffmpegmux_get_id_caps (enum AVCodecID *id_list)
+ {
+ GstCaps *caps, *t;
+ gint i;
+
+ caps = gst_caps_new_empty ();
+ for (i = 0; id_list[i] != AV_CODEC_ID_NONE; i++) {
+ if ((t = gst_ffmpeg_codecid_to_caps (id_list[i], NULL, TRUE)))
+ gst_caps_append (caps, t);
+ }
+ if (gst_caps_is_empty (caps)) {
+ gst_caps_unref (caps);
+ return NULL;
+ }
+
+ return caps;
+ }
+
+ /* set a list of integer values on the caps, e.g. for sample rates */
+ static void
+ gst_ffmpeg_mux_simple_caps_set_int_list (GstCaps * caps, const gchar * field,
+ guint num, const gint * values)
+ {
+ GValue list = { 0, };
+ GValue val = { 0, };
+ guint i;
+
+ g_return_if_fail (GST_CAPS_IS_SIMPLE (caps));
+
+ g_value_init (&list, GST_TYPE_LIST);
+ g_value_init (&val, G_TYPE_INT);
+
+ for (i = 0; i < num; ++i) {
+ g_value_set_int (&val, values[i]);
+ gst_value_list_append_value (&list, &val);
+ }
+
+ gst_structure_set_value (gst_caps_get_structure (caps, 0), field, &list);
+
+ g_value_unset (&val);
+ g_value_unset (&list);
+ }
+
+ gboolean
+ gst_ffmpegmux_register (GstPlugin * plugin)
+ {
+ GTypeInfo typeinfo = {
+ sizeof (GstFFMpegMuxClass),
+ (GBaseInitFunc) gst_ffmpegmux_base_init,
+ NULL,
+ (GClassInitFunc) gst_ffmpegmux_class_init,
+ NULL,
+ NULL,
+ sizeof (GstFFMpegMux),
+ 0,
+ (GInstanceInitFunc) gst_ffmpegmux_init,
+ };
+ static const GInterfaceInfo tag_setter_info = {
+ NULL, NULL, NULL
+ };
+ GType type;
+ const AVOutputFormat *in_plugin;
+ void *i = 0;
+
+ GST_LOG ("Registering muxers");
+
+ while ((in_plugin = av_muxer_iterate (&i))) {
+ gchar *type_name;
+ GstRank rank = GST_RANK_MARGINAL;
+
+ if ((!strncmp (in_plugin->name, "u16", 3)) ||
+ (!strncmp (in_plugin->name, "s16", 3)) ||
+ (!strncmp (in_plugin->name, "u24", 3)) ||
+ (!strncmp (in_plugin->name, "s24", 3)) ||
+ (!strncmp (in_plugin->name, "u8", 2)) ||
+ (!strncmp (in_plugin->name, "s8", 2)) ||
+ (!strncmp (in_plugin->name, "u32", 3)) ||
+ (!strncmp (in_plugin->name, "s32", 3)) ||
+ (!strncmp (in_plugin->name, "f32", 3)) ||
+ (!strncmp (in_plugin->name, "f64", 3)) ||
+ (!strncmp (in_plugin->name, "raw", 3)) ||
+ (!strncmp (in_plugin->name, "crc", 3)) ||
+ (!strncmp (in_plugin->name, "null", 4)) ||
+ (!strncmp (in_plugin->name, "gif", 3)) ||
+ (!strncmp (in_plugin->name, "fifo", 4)) ||
+ (!strncmp (in_plugin->name, "frame", 5)) ||
+ (!strncmp (in_plugin->name, "image", 5)) ||
+ (!strncmp (in_plugin->name, "mulaw", 5)) ||
+ (!strncmp (in_plugin->name, "alaw", 4)) ||
+ (!strncmp (in_plugin->name, "h26", 3)) ||
+ (!strncmp (in_plugin->name, "rtp", 3)) ||
+ (!strncmp (in_plugin->name, "ass", 3)) ||
+ (!strncmp (in_plugin->name, "ffmetadata", 10)) ||
+ (!strncmp (in_plugin->name, "srt", 3)) ||
+ (!strncmp (in_plugin->name, "scc", 3)) ||
+ !strcmp (in_plugin->name, "ttml") ||
+ !strcmp (in_plugin->name, "segment") ||
+ !strcmp (in_plugin->name, "stream_segment,ssegment") ||
+ !strcmp (in_plugin->name, "jacosub") ||
+ !strcmp (in_plugin->name, "webvtt") ||
+ !strcmp (in_plugin->name, "lrc") ||
+ !strcmp (in_plugin->name, "microdvd") ||
+ !strcmp (in_plugin->name, "tee") ||
+ !strncmp (in_plugin->name, "webm", 4)
+ ) {
+ GST_LOG ("Ignoring muxer %s", in_plugin->name);
+ continue;
+ }
+
+ if (in_plugin->long_name != NULL) {
+ if ((!strncmp (in_plugin->long_name, "raw ", 4))) {
+ GST_LOG ("Ignoring raw muxer %s", in_plugin->name);
+ continue;
+ }
+ }
+
+ if (gst_ffmpegmux_get_replacement (in_plugin->name))
+ rank = GST_RANK_NONE;
+
+ /* FIXME : We need a fast way to know whether we have mappings for this
+ * muxer type. */
+
+ /* construct the type */
+ type_name = g_strdup_printf ("avmux_%s", in_plugin->name);
+ g_strdelimit (type_name, ".,|-<> ", '_');
+
+ type = g_type_from_name (type_name);
+
+ if (!type) {
+ /* create the type now */
+ type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
+ g_type_set_qdata (type, GST_FFMUX_PARAMS_QDATA, (gpointer) in_plugin);
+ g_type_add_interface_static (type, GST_TYPE_TAG_SETTER, &tag_setter_info);
+ }
+
+ if (!gst_element_register (plugin, type_name, rank, type)) {
+ g_free (type_name);
+ return FALSE;
+ }
+
+ g_free (type_name);
+ }
+
+ GST_LOG ("Finished registering muxers");
+
+ return TRUE;
+ }