#include "gstffmpegcodecmap.h"
#include "gstffmpegutils.h"
-/* define to enable alternative buffer refcounting algorithm */
-#undef EXTRA_REF
+GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
- typedef struct _GstFFMpegDec GstFFMpegDec;
+ typedef struct _GstFFMpegAudDec GstFFMpegAudDec;
#define MAX_TS_MASK 0xff
/* decoding */
AVCodecContext *context;
- AVFrame *picture;
gboolean opened;
- GstBufferPool *pool;
-
- /* from incoming caps */
- gint in_width;
- gint in_height;
- gint in_par_n;
- gint in_par_d;
- gint in_fps_n;
- gint in_fps_d;
-
- /* current context */
- enum PixelFormat ctx_pix_fmt;
- gint ctx_width;
- gint ctx_height;
- gint ctx_par_n;
- gint ctx_par_d;
- gint ctx_ticks;
- gint ctx_time_d;
- gint ctx_time_n;
- gint ctx_interlaced;
- union
- {
- struct
- {
- gint channels;
- gint samplerate;
- gint depth;
- } audio;
- } format;
+
+ /* current output format */
- GstVideoInfo out_info;
++ gint channels, samplerate, depth;
++ GstAudioChannelPosition ffmpeg_layout[64], gst_layout[64];
+
- union
- {
- struct
- {
- gint channels;
- gint samplerate;
- gint depth;
-
- GstAudioChannelPosition ffmpeg_layout[64], gst_layout[64];
- } audio;
- } format;
-
-
- gboolean waiting_for_key;
gboolean discont;
gboolean clear_ts;
/* reverse playback queue */
GList *queued;
+
+ /* prevent reopening the decoder on GST_EVENT_CAPS when the caps are the same as last time. */
+ GstCaps *last_caps;
};
- typedef struct _GstFFMpegDecClass GstFFMpegDecClass;
+ typedef struct _GstFFMpegAudDecClass GstFFMpegAudDecClass;
- struct _GstFFMpegDecClass
+ struct _GstFFMpegAudDecClass
{
GstElementClass parent_class;
static const GstTSInfo ts_info_none = { -1, -1, -1, -1 };
static const GstTSInfo *
- gst_ts_info_store (GstFFMpegDec * dec, GstClockTime dts, GstClockTime pts,
-gst_ts_info_store (GstFFMpegAudDec * dec, GstClockTime timestamp,
++gst_ts_info_store (GstFFMpegAudDec * dec, GstClockTime dts, GstClockTime pts,
GstClockTime duration, gint64 offset)
{
gint idx = dec->ts_idx;
}
#define GST_TYPE_FFMPEGDEC \
- (gst_ffmpegdec_get_type())
+ (gst_ffmpegauddec_get_type())
#define GST_FFMPEGDEC(obj) \
- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegDec))
- #define GST_FFMPEGDEC_CLASS(klass) \
- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegDecClass))
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegAudDec))
+ #define GST_FFMPEGAUDDEC_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegAudDecClass))
#define GST_IS_FFMPEGDEC(obj) \
(G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
- #define GST_IS_FFMPEGDEC_CLASS(klass) \
+ #define GST_IS_FFMPEGAUDDEC_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
- #define DEFAULT_LOWRES 0
- #define DEFAULT_SKIPFRAME 0
- #define DEFAULT_DIRECT_RENDERING TRUE
- #define DEFAULT_DEBUG_MV FALSE
- #define DEFAULT_MAX_THREADS 1
-
- enum
- {
- PROP_0,
- PROP_LOWRES,
- PROP_SKIPFRAME,
- PROP_DIRECT_RENDERING,
- PROP_DEBUG_MV,
- PROP_MAX_THREADS,
- PROP_LAST
- };
-
/* A number of function prototypes are given so we can refer to them later. */
- static void gst_ffmpegdec_base_init (GstFFMpegDecClass * klass);
- static void gst_ffmpegdec_class_init (GstFFMpegDecClass * klass);
- static void gst_ffmpegdec_init (GstFFMpegDec * ffmpegdec);
- static void gst_ffmpegdec_finalize (GObject * object);
-
- static gboolean gst_ffmpegdec_src_query (GstPad * pad, GstObject * parent,
- GstQuery * query);
- static gboolean gst_ffmpegdec_src_event (GstPad * pad, GstObject * parent,
+ static void gst_ffmpegauddec_base_init (GstFFMpegAudDecClass * klass);
+ static void gst_ffmpegauddec_class_init (GstFFMpegAudDecClass * klass);
+ static void gst_ffmpegauddec_init (GstFFMpegAudDec * ffmpegdec);
+ static void gst_ffmpegauddec_finalize (GObject * object);
+
-static gboolean gst_ffmpegauddec_setcaps (GstPad * pad, GstCaps * caps);
-static gboolean gst_ffmpegauddec_sink_event (GstPad * pad, GstEvent * event);
-static GstFlowReturn gst_ffmpegauddec_chain (GstPad * pad, GstBuffer * buf);
++static gboolean gst_ffmpegauddec_setcaps (GstFFMpegAudDec * ffmpegdec,
++ GstCaps * caps);
++static gboolean gst_ffmpegauddec_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event);
-
- static gboolean gst_ffmpegdec_sink_event (GstPad * pad, GstObject * parent,
- GstEvent * event);
- static gboolean gst_ffmpegdec_sink_query (GstPad * pad, GstObject * parent,
++static gboolean gst_ffmpegauddec_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query);
- static GstFlowReturn gst_ffmpegdec_chain (GstPad * pad, GstObject * parent,
++static GstFlowReturn gst_ffmpegauddec_chain (GstPad * pad, GstObject * parent,
+ GstBuffer * buf);
- static GstStateChangeReturn gst_ffmpegdec_change_state (GstElement * element,
+ static GstStateChangeReturn gst_ffmpegauddec_change_state (GstElement * element,
GstStateChange transition);
- static void gst_ffmpegdec_set_property (GObject * object,
- guint prop_id, const GValue * value, GParamSpec * pspec);
- static void gst_ffmpegdec_get_property (GObject * object,
- guint prop_id, GValue * value, GParamSpec * pspec);
-
- static gboolean gst_ffmpegdec_video_negotiate (GstFFMpegDec * ffmpegdec,
- gboolean force);
- static gboolean gst_ffmpegdec_audio_negotiate (GstFFMpegDec * ffmpegdec,
+ static gboolean gst_ffmpegauddec_negotiate (GstFFMpegAudDec * ffmpegdec,
gboolean force);
- /* some sort of bufferpool handling, but different */
- static int gst_ffmpegdec_get_buffer (AVCodecContext * context,
- AVFrame * picture);
- static void gst_ffmpegdec_release_buffer (AVCodecContext * context,
- AVFrame * picture);
-
- static void gst_ffmpegdec_drain (GstFFMpegDec * ffmpegdec);
+ static void gst_ffmpegauddec_drain (GstFFMpegAudDec * ffmpegdec);
-#define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("ffdec-params")
+#define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("avdec-params")
static GstElementClass *parent_class = NULL;
- #define GST_FFMPEGDEC_TYPE_LOWRES (gst_ffmpegdec_lowres_get_type())
- static GType
- gst_ffmpegdec_lowres_get_type (void)
- {
- static GType ffmpegdec_lowres_type = 0;
-
- if (!ffmpegdec_lowres_type) {
- static const GEnumValue ffmpegdec_lowres[] = {
- {0, "0", "full"},
- {1, "1", "1/2-size"},
- {2, "2", "1/4-size"},
- {0, NULL, NULL},
- };
-
- ffmpegdec_lowres_type =
- g_enum_register_static ("GstLibAVDecLowres", ffmpegdec_lowres);
- }
-
- return ffmpegdec_lowres_type;
- }
-
- #define GST_FFMPEGDEC_TYPE_SKIPFRAME (gst_ffmpegdec_skipframe_get_type())
- static GType
- gst_ffmpegdec_skipframe_get_type (void)
- {
- static GType ffmpegdec_skipframe_type = 0;
-
- if (!ffmpegdec_skipframe_type) {
- static const GEnumValue ffmpegdec_skipframe[] = {
- {0, "0", "Skip nothing"},
- {1, "1", "Skip B-frames"},
- {2, "2", "Skip IDCT/Dequantization"},
- {5, "5", "Skip everything"},
- {0, NULL, NULL},
- };
-
- ffmpegdec_skipframe_type =
- g_enum_register_static ("GstLibAVDecSkipFrame", ffmpegdec_skipframe);
- }
-
- return ffmpegdec_skipframe_type;
- }
--
static void
- gst_ffmpegdec_base_init (GstFFMpegDecClass * klass)
+ gst_ffmpegauddec_base_init (GstFFMpegAudDecClass * klass)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
GstPadTemplate *sinktempl, *srctempl;
g_assert (in_plugin != NULL);
/* construct the element details struct */
- longname = g_strdup_printf ("FFmpeg %s decoder", in_plugin->long_name);
- description = g_strdup_printf ("FFmpeg %s decoder", in_plugin->name);
- gst_element_class_set_details_simple (element_class, longname,
+ longname = g_strdup_printf ("libav %s decoder", in_plugin->long_name);
- classification = g_strdup_printf ("Codec/Decoder/%s",
- (in_plugin->type == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio");
+ description = g_strdup_printf ("libav %s decoder", in_plugin->name);
+ gst_element_class_set_metadata (element_class, longname,
- classification, description,
+ "Codec/Decoder/Audio", description,
"Wim Taymans <wim.taymans@gmail.com>, "
"Ronald Bultje <rbultje@ronald.bitfreak.net>, "
"Edward Hervey <bilboed@bilboed.com>");
/* setup pads */
ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
- gst_pad_set_event_function (ffmpegdec->sinkpad,
- GST_DEBUG_FUNCPTR (gst_ffmpegdec_sink_event));
- gst_pad_set_setcaps_function (ffmpegdec->sinkpad,
- GST_DEBUG_FUNCPTR (gst_ffmpegauddec_setcaps));
+ gst_pad_set_query_function (ffmpegdec->sinkpad,
- GST_DEBUG_FUNCPTR (gst_ffmpegdec_sink_query));
++ GST_DEBUG_FUNCPTR (gst_ffmpegauddec_sink_query));
+ gst_pad_set_event_function (ffmpegdec->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_ffmpegauddec_sink_event));
gst_pad_set_chain_function (ffmpegdec->sinkpad,
- GST_DEBUG_FUNCPTR (gst_ffmpegdec_chain));
+ GST_DEBUG_FUNCPTR (gst_ffmpegauddec_chain));
gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->sinkpad);
ffmpegdec->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
}
static void
- gst_ffmpegdec_finalize (GObject * object)
+ gst_ffmpegauddec_finalize (GObject * object)
{
- GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
+ GstFFMpegAudDec *ffmpegdec = (GstFFMpegAudDec *) object;
- if (ffmpegdec->context != NULL) {
+ if (ffmpegdec->context != NULL)
av_free (ffmpegdec->context);
- ffmpegdec->context = NULL;
- }
- if (ffmpegdec->picture != NULL)
- av_free (ffmpegdec->picture);
-
- if (ffmpegdec->pool)
- gst_object_unref (ffmpegdec->pool);
-
G_OBJECT_CLASS (parent_class)->finalize (object);
}
if (!ffmpegdec->opened)
return;
-- GST_LOG_OBJECT (ffmpegdec, "closing ffmpeg codec");
++ GST_LOG_OBJECT (ffmpegdec, "closing libav codec");
+
+ gst_caps_replace (&ffmpegdec->last_caps, NULL);
if (ffmpegdec->context->priv_data)
gst_ffmpeg_avcodec_close (ffmpegdec->context);
goto could_not_open;
ffmpegdec->opened = TRUE;
- ffmpegdec->is_realvideo = FALSE;
-- GST_LOG_OBJECT (ffmpegdec, "Opened ffmpeg codec %s, id %d",
++ GST_LOG_OBJECT (ffmpegdec, "Opened libav codec %s, id %d",
oclass->in_plugin->name, oclass->in_plugin->id);
- /* open a parser if we can */
- switch (oclass->in_plugin->id) {
- case CODEC_ID_MPEG4:
- case CODEC_ID_MJPEG:
- case CODEC_ID_VC1:
- GST_LOG_OBJECT (ffmpegdec, "not using parser, blacklisted codec");
- ffmpegdec->pctx = NULL;
- break;
- case CODEC_ID_H264:
- /* For H264, only use a parser if there is no context data, if there is,
- * we're talking AVC */
- if (ffmpegdec->context->extradata_size == 0) {
- GST_LOG_OBJECT (ffmpegdec, "H264 with no extradata, creating parser");
- ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
- } else {
- GST_LOG_OBJECT (ffmpegdec,
- "H264 with extradata implies framed data - not using parser");
- ffmpegdec->pctx = NULL;
- }
- break;
- case CODEC_ID_RV10:
- case CODEC_ID_RV30:
- case CODEC_ID_RV20:
- case CODEC_ID_RV40:
- ffmpegdec->is_realvideo = TRUE;
- break;
- default:
- if (!ffmpegdec->turnoff_parser) {
- ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
- if (ffmpegdec->pctx)
- GST_LOG_OBJECT (ffmpegdec, "Using parser %p", ffmpegdec->pctx);
- else
- GST_LOG_OBJECT (ffmpegdec, "No parser for codec");
- } else {
- GST_LOG_OBJECT (ffmpegdec, "Parser deactivated for format");
- }
- break;
+ if (!ffmpegdec->turnoff_parser) {
+ ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
+ if (ffmpegdec->pctx)
+ GST_LOG_OBJECT (ffmpegdec, "Using parser %p", ffmpegdec->pctx);
+ else
+ GST_LOG_OBJECT (ffmpegdec, "No parser for codec");
+ } else {
+ GST_LOG_OBJECT (ffmpegdec, "Parser deactivated for format");
}
- switch (oclass->in_plugin->type) {
- case AVMEDIA_TYPE_VIDEO:
- /* clear values */
- ffmpegdec->ctx_pix_fmt = PIX_FMT_NB;
- ffmpegdec->ctx_width = 0;
- ffmpegdec->ctx_height = 0;
- ffmpegdec->ctx_ticks = 1;
- ffmpegdec->ctx_time_n = 0;
- ffmpegdec->ctx_time_d = 0;
- ffmpegdec->ctx_par_n = 0;
- ffmpegdec->ctx_par_d = 0;
- break;
- case AVMEDIA_TYPE_AUDIO:
- ffmpegdec->format.audio.samplerate = 0;
- ffmpegdec->format.audio.channels = 0;
- ffmpegdec->format.audio.depth = 0;
- break;
- default:
- break;
- }
- ffmpegdec->format.audio.samplerate = 0;
- ffmpegdec->format.audio.channels = 0;
- ffmpegdec->format.audio.depth = 0;
++ ffmpegdec->samplerate = 0;
++ ffmpegdec->channels = 0;
++ ffmpegdec->depth = 0;
- gst_ffmpegdec_reset_ts (ffmpegdec);
- /* FIXME, reset_qos will take the LOCK and this function is already called
- * with the LOCK */
- ffmpegdec->proportion = 0.5;
- ffmpegdec->earliest_time = -1;
+ gst_ffmpegauddec_reset_ts (ffmpegdec);
return TRUE;
/* ERRORS */
could_not_open:
{
- gst_ffmpegdec_close (ffmpegdec);
- GST_DEBUG_OBJECT (ffmpegdec, "avdec_%s: Failed to open FFMPEG codec",
+ gst_ffmpegauddec_close (ffmpegdec);
- GST_DEBUG_OBJECT (ffmpegdec, "ffdec_%s: Failed to open FFMPEG codec",
++ GST_DEBUG_OBJECT (ffmpegdec, "avdec_%s: Failed to open libav codec",
oclass->in_plugin->name);
return FALSE;
}
}
static gboolean
- gst_ffmpegdec_setcaps (GstFFMpegDec * ffmpegdec, GstCaps * caps)
-gst_ffmpegauddec_setcaps (GstPad * pad, GstCaps * caps)
++gst_ffmpegauddec_setcaps (GstFFMpegAudDec * ffmpegdec, GstCaps * caps)
{
- GstFFMpegDecClass *oclass;
- GstFFMpegAudDec *ffmpegdec;
+ GstFFMpegAudDecClass *oclass;
GstStructure *structure;
- const GValue *par;
- const GValue *fps;
gboolean ret = TRUE;
- oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
- ffmpegdec = (GstFFMpegAudDec *) (gst_pad_get_parent (pad));
+ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
- GST_DEBUG_OBJECT (pad, "setcaps called");
+ GST_DEBUG_OBJECT (ffmpegdec, "setcaps called");
GST_OBJECT_LOCK (ffmpegdec);
gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
oclass->in_plugin->type, caps, ffmpegdec->context);
- GST_LOG_OBJECT (ffmpegdec, "size after %dx%d", ffmpegdec->context->width,
- ffmpegdec->context->height);
-
- if (!ffmpegdec->context->time_base.den || !ffmpegdec->context->time_base.num) {
- GST_DEBUG_OBJECT (ffmpegdec, "forcing 25/1 framerate");
- ffmpegdec->context->time_base.num = 1;
- ffmpegdec->context->time_base.den = 25;
- }
-
- /* get pixel aspect ratio if it's set */
- structure = gst_caps_get_structure (caps, 0);
-
- par = gst_structure_get_value (structure, "pixel-aspect-ratio");
- if (par != NULL && GST_VALUE_HOLDS_FRACTION (par)) {
- ffmpegdec->in_par_n = gst_value_get_fraction_numerator (par);
- ffmpegdec->in_par_d = gst_value_get_fraction_denominator (par);
- GST_DEBUG_OBJECT (ffmpegdec, "sink caps have pixel-aspect-ratio of %d:%d",
- ffmpegdec->in_par_n, ffmpegdec->in_par_d);
- } else {
- GST_DEBUG_OBJECT (ffmpegdec, "no input pixel-aspect-ratio");
- ffmpegdec->in_par_n = 0;
- ffmpegdec->in_par_d = 0;
- }
-
- /* get the framerate from incoming caps. fps_n is set to 0 when
- * there is no valid framerate */
- fps = gst_structure_get_value (structure, "framerate");
- if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {
- ffmpegdec->in_fps_n = gst_value_get_fraction_numerator (fps);
- ffmpegdec->in_fps_d = gst_value_get_fraction_denominator (fps);
- GST_DEBUG_OBJECT (ffmpegdec, "sink caps have framerate of %d/%d",
- ffmpegdec->in_fps_n, ffmpegdec->in_fps_d);
- } else {
- GST_DEBUG_OBJECT (ffmpegdec, "no input framerate ");
- ffmpegdec->in_fps_n = 0;
- ffmpegdec->in_fps_d = 0;
- }
-
- /* for AAC we only use av_parse if not on stream-format==raw or ==loas */
- if (oclass->in_plugin->id == CODEC_ID_AAC
- || oclass->in_plugin->id == CODEC_ID_AAC_LATM) {
- const gchar *format = gst_structure_get_string (structure, "stream-format");
-
- if (format == NULL || strcmp (format, "raw") == 0) {
- ffmpegdec->turnoff_parser = TRUE;
- }
- }
-
- /* for FLAC, don't parse if it's already parsed */
- if (oclass->in_plugin->id == CODEC_ID_FLAC) {
- if (gst_structure_has_field (structure, "streamheader"))
- ffmpegdec->turnoff_parser = TRUE;
- }
-
- /* workaround encoder bugs */
- ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
- ffmpegdec->context->error_recognition = 1;
-
- /* for slow cpus */
- ffmpegdec->context->lowres = ffmpegdec->lowres;
- ffmpegdec->context->skip_frame = ffmpegdec->skip_frame;
-
- /* ffmpeg can draw motion vectors on top of the image (not every decoder
- * supports it) */
- ffmpegdec->context->debug_mv = ffmpegdec->debug_mv;
-
- if (ffmpegdec->max_threads == 0) {
- if (!(oclass->in_plugin->capabilities & CODEC_CAP_AUTO_THREADS))
- ffmpegdec->context->thread_count = gst_ffmpeg_auto_max_threads ();
- else
- ffmpegdec->context->thread_count = 0;
- } else
- ffmpegdec->context->thread_count = ffmpegdec->max_threads;
-
- ffmpegdec->context->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
-
- /* open codec - we don't select an output pix_fmt yet,
- * simply because we don't know! We only get it
- * during playback... */
- if (!gst_ffmpegdec_open (ffmpegdec))
- goto open_failed;
-
- /* clipping region. take into account the lowres property */
- if (gst_structure_get_int (structure, "width", &ffmpegdec->in_width))
- ffmpegdec->in_width >>= ffmpegdec->lowres;
- else
- ffmpegdec->in_width = -1;
-
- if (gst_structure_get_int (structure, "height", &ffmpegdec->in_height))
- ffmpegdec->in_height >>= ffmpegdec->lowres;
- else
- ffmpegdec->in_height = -1;
-
- GST_DEBUG_OBJECT (ffmpegdec, "clipping to %dx%d",
- ffmpegdec->in_width, ffmpegdec->in_height);
-
- done:
- GST_OBJECT_UNLOCK (ffmpegdec);
-
- return ret;
-
- /* ERRORS */
- open_failed:
- {
- GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
- ret = FALSE;
- goto done;
- }
- }
-
- static void
- gst_ffmpegdec_fill_picture (GstFFMpegDec * ffmpegdec, GstVideoFrame * frame,
- AVFrame * picture)
- {
- guint i;
-
- /* setup data pointers and strides */
- for (i = 0; i < GST_VIDEO_FRAME_N_PLANES (frame); i++) {
- picture->data[i] = GST_VIDEO_FRAME_PLANE_DATA (frame, i);
- picture->linesize[i] = GST_VIDEO_FRAME_PLANE_STRIDE (frame, i);
-
- GST_LOG_OBJECT (ffmpegdec, "plane %d: data %p, linesize %d", i,
- picture->data[i], picture->linesize[i]);
- }
- }
-
- /* called when ffmpeg wants us to allocate a buffer to write the decoded frame
- * into. We try to give it memory from our pool */
- static int
- gst_ffmpegdec_get_buffer (AVCodecContext * context, AVFrame * picture)
- {
- GstBuffer *buf = NULL;
- GstFFMpegDec *ffmpegdec;
- GstFlowReturn ret;
- GstVideoFrame frame;
-
- ffmpegdec = (GstFFMpegDec *) context->opaque;
-
- ffmpegdec->context->pix_fmt = context->pix_fmt;
-
- GST_DEBUG_OBJECT (ffmpegdec, "getting buffer");
-
- /* apply the last info we have seen to this picture, when we get the
- * picture back from ffmpeg we can use this to correctly timestamp the output
- * buffer */
- picture->reordered_opaque = context->reordered_opaque;
- /* make sure we don't free the buffer when it's not ours */
- picture->opaque = NULL;
-
- /* see if we need renegotiation */
- if (G_UNLIKELY (!gst_ffmpegdec_video_negotiate (ffmpegdec, FALSE)))
- goto negotiate_failed;
-
- if (!ffmpegdec->current_dr)
- goto no_dr;
-
- /* alloc with aligned dimensions for ffmpeg */
- GST_LOG_OBJECT (ffmpegdec, "doing alloc from pool");
- ret = gst_buffer_pool_acquire_buffer (ffmpegdec->pool, &buf, NULL);
- if (G_UNLIKELY (ret != GST_FLOW_OK))
- goto alloc_failed;
-
- if (!gst_video_frame_map (&frame, &ffmpegdec->out_info, buf,
- GST_MAP_READWRITE))
- goto invalid_frame;
-
- gst_ffmpegdec_fill_picture (ffmpegdec, &frame, picture);
-
- /* tell ffmpeg we own this buffer, tranfer the ref we have on the buffer to
- * the opaque data. */
- picture->type = FF_BUFFER_TYPE_USER;
- picture->age = 256 * 256 * 256 * 64;
- picture->opaque = g_slice_dup (GstVideoFrame, &frame);
-
- GST_LOG_OBJECT (ffmpegdec, "returned buffer %p in frame %p", buf,
- picture->opaque);
-
- return 0;
-
- /* fallbacks */
- negotiate_failed:
- {
- GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
- goto fallback;
- }
- no_dr:
- {
- GST_LOG_OBJECT (ffmpegdec, "direct rendering disabled, fallback alloc");
- goto fallback;
- }
- alloc_failed:
- {
- /* alloc default buffer when we can't get one from downstream */
- GST_LOG_OBJECT (ffmpegdec, "alloc failed, fallback alloc");
- goto fallback;
- }
- invalid_frame:
- {
- /* alloc default buffer when we can't get one from downstream */
- GST_LOG_OBJECT (ffmpegdec, "failed to map frame, fallback alloc");
- gst_buffer_unref (buf);
- goto fallback;
- }
- fallback:
- {
- return avcodec_default_get_buffer (context, picture);
- }
- }
-
- /* called when ffmpeg is done with our buffer */
- static void
- gst_ffmpegdec_release_buffer (AVCodecContext * context, AVFrame * picture)
- {
- gint i;
- GstBuffer *buf;
- GstFFMpegDec *ffmpegdec;
- GstVideoFrame *frame;
-
- ffmpegdec = (GstFFMpegDec *) context->opaque;
-
- /* check if it was our buffer */
- if (picture->opaque == NULL) {
- GST_DEBUG_OBJECT (ffmpegdec, "default release buffer");
- avcodec_default_release_buffer (context, picture);
- return;
- }
-
- /* we remove the opaque data now */
- frame = picture->opaque;
- picture->opaque = NULL;
-
- /* unmap buffer data */
- gst_video_frame_unmap (frame);
- buf = frame->buffer;
-
- GST_DEBUG_OBJECT (ffmpegdec, "release buffer %p in frame %p", buf, frame);
-
- g_slice_free (GstVideoFrame, frame);
- gst_buffer_unref (buf);
-
- /* zero out the reference in ffmpeg */
- for (i = 0; i < 4; i++) {
- picture->data[i] = NULL;
- picture->linesize[i] = 0;
- }
- }
-
- static void
- gst_ffmpegdec_update_par (GstFFMpegDec * ffmpegdec, gint * par_n, gint * par_d)
- {
- gboolean demuxer_par_set = FALSE;
- gboolean decoder_par_set = FALSE;
- gint demuxer_num = 1, demuxer_denom = 1;
- gint decoder_num = 1, decoder_denom = 1;
-
- GST_OBJECT_LOCK (ffmpegdec);
-
- if (ffmpegdec->in_par_n && ffmpegdec->in_par_d) {
- demuxer_num = ffmpegdec->in_par_n;
- demuxer_denom = ffmpegdec->in_par_d;
- demuxer_par_set = TRUE;
- GST_DEBUG_OBJECT (ffmpegdec, "Demuxer PAR: %d:%d", demuxer_num,
- demuxer_denom);
- }
-
- if (ffmpegdec->ctx_par_n && ffmpegdec->ctx_par_d) {
- decoder_num = ffmpegdec->ctx_par_n;
- decoder_denom = ffmpegdec->ctx_par_d;
- decoder_par_set = TRUE;
- GST_DEBUG_OBJECT (ffmpegdec, "Decoder PAR: %d:%d", decoder_num,
- decoder_denom);
- }
-
- GST_OBJECT_UNLOCK (ffmpegdec);
-
- if (!demuxer_par_set && !decoder_par_set)
- goto no_par;
-
- if (demuxer_par_set && !decoder_par_set)
- goto use_demuxer_par;
-
- if (decoder_par_set && !demuxer_par_set)
- goto use_decoder_par;
-
- /* Both the demuxer and the decoder provide a PAR. If one of
- * the two PARs is 1:1 and the other one is not, use the one
- * that is not 1:1. */
- if (demuxer_num == demuxer_denom && decoder_num != decoder_denom)
- goto use_decoder_par;
-
- if (decoder_num == decoder_denom && demuxer_num != demuxer_denom)
- goto use_demuxer_par;
-
- /* Both PARs are non-1:1, so use the PAR provided by the demuxer */
- goto use_demuxer_par;
-
- use_decoder_par:
- {
- GST_DEBUG_OBJECT (ffmpegdec,
- "Setting decoder provided pixel-aspect-ratio of %u:%u", decoder_num,
- decoder_denom);
- *par_n = decoder_num;
- *par_d = decoder_denom;
- return;
- }
-
- use_demuxer_par:
- {
- GST_DEBUG_OBJECT (ffmpegdec,
- "Setting demuxer provided pixel-aspect-ratio of %u:%u", demuxer_num,
- demuxer_denom);
- *par_n = demuxer_num;
- *par_d = demuxer_denom;
- return;
- }
- no_par:
- {
- GST_DEBUG_OBJECT (ffmpegdec,
- "Neither demuxer nor codec provide a pixel-aspect-ratio");
- *par_n = 1;
- *par_d = 1;
- return;
- }
- }
-
- static gboolean
- gst_ffmpegdec_bufferpool (GstFFMpegDec * ffmpegdec, GstCaps * caps)
- {
- GstQuery *query;
- GstBufferPool *pool;
- guint size, min, max;
- GstStructure *config;
- guint edge;
- AVCodecContext *context = ffmpegdec->context;
- gboolean have_videometa, have_alignment;
- GstAllocationParams params = { 0, 0, 0, 15, };
-
- GST_DEBUG_OBJECT (ffmpegdec, "setting up bufferpool");
-
- /* find a pool for the negotiated caps now */
- query = gst_query_new_allocation (caps, TRUE);
-
- if (gst_pad_peer_query (ffmpegdec->srcpad, query)) {
- have_videometa =
- gst_query_has_allocation_meta (query, GST_VIDEO_META_API_TYPE);
- } else {
- /* use query defaults */
- GST_DEBUG_OBJECT (ffmpegdec, "peer query failed, using defaults");
- have_videometa = FALSE;
- }
-
- if (gst_query_get_n_allocation_pools (query) > 0) {
- /* we got configuration from our peer, parse them */
- gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
-
- size = MAX (size, ffmpegdec->out_info.size);
- } else {
- pool = NULL;
- size = ffmpegdec->out_info.size;
- min = max = 0;
- }
-
- gst_query_unref (query);
-
- if (pool == NULL) {
- /* we did not get a pool, make one ourselves then */
- pool = gst_video_buffer_pool_new ();
- }
-
- config = gst_buffer_pool_get_config (pool);
- gst_buffer_pool_config_set_params (config, caps, size, min, max);
- /* we are happy with the default allocator but we would like to have 16 bytes
- * aligned memory */
- gst_buffer_pool_config_set_allocator (config, NULL, &params);
-
- have_alignment =
- gst_buffer_pool_has_option (pool, GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
-
- /* we can only enable the alignment if downstream supports the
- * videometa api */
- if (have_alignment && have_videometa) {
- GstVideoAlignment align;
- gint width, height;
- gint linesize_align[4];
- gint i;
-
- width = ffmpegdec->ctx_width;
- height = ffmpegdec->ctx_height;
- /* let ffmpeg find the alignment and padding */
- avcodec_align_dimensions2 (context, &width, &height, linesize_align);
- edge = context->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width ();
- /* increase the size for the padding */
- width += edge << 1;
- height += edge << 1;
-
- align.padding_top = edge;
- align.padding_left = edge;
- align.padding_right = width - ffmpegdec->ctx_width - edge;
- align.padding_bottom = height - ffmpegdec->ctx_height - edge;
- for (i = 0; i < GST_VIDEO_MAX_PLANES; i++)
- align.stride_align[i] =
- (linesize_align[i] > 0 ? linesize_align[i] - 1 : 0);
-
- GST_DEBUG_OBJECT (ffmpegdec, "aligned dimension %dx%d -> %dx%d "
- "padding t:%u l:%u r:%u b:%u, stride_align %d:%d:%d:%d",
- ffmpegdec->ctx_width, ffmpegdec->ctx_height, width, height,
- align.padding_top, align.padding_left, align.padding_right,
- align.padding_bottom, align.stride_align[0], align.stride_align[1],
- align.stride_align[2], align.stride_align[3]);
-
- gst_buffer_pool_config_add_option (config,
- GST_BUFFER_POOL_OPTION_VIDEO_META);
- gst_buffer_pool_config_add_option (config,
- GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
- gst_buffer_pool_config_set_video_alignment (config, &align);
-
- if (ffmpegdec->direct_rendering) {
- GstFFMpegDecClass *oclass;
-
- GST_DEBUG_OBJECT (ffmpegdec, "trying to enable direct rendering");
-
- oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
-
- if (oclass->in_plugin->capabilities & CODEC_CAP_DR1) {
- GST_DEBUG_OBJECT (ffmpegdec, "enabled direct rendering");
- ffmpegdec->current_dr = TRUE;
- } else {
- GST_DEBUG_OBJECT (ffmpegdec, "direct rendering not supported");
- }
- }
- } else {
- GST_DEBUG_OBJECT (ffmpegdec,
- "alignment or videometa not supported, disable direct rendering");
- /* disable direct rendering. This will make us use the fallback ffmpeg
- * picture allocation code with padding etc. We will then do the final
- * copy (with cropping) into a buffer from our pool */
- ffmpegdec->current_dr = FALSE;
- }
-
- /* and store */
- gst_buffer_pool_set_config (pool, config);
-
- if (ffmpegdec->pool) {
- gst_buffer_pool_set_active (ffmpegdec->pool, FALSE);
- gst_object_unref (ffmpegdec->pool);
- }
- ffmpegdec->pool = pool;
-
- /* and activate */
- gst_buffer_pool_set_active (pool, TRUE);
-
- return TRUE;
- }
-
- static gboolean
- update_video_context (GstFFMpegDec * ffmpegdec, gboolean force)
- {
- AVCodecContext *context = ffmpegdec->context;
-
- if (!force && ffmpegdec->ctx_width == context->width
- && ffmpegdec->ctx_height == context->height
- && ffmpegdec->ctx_ticks == context->ticks_per_frame
- && ffmpegdec->ctx_time_n == context->time_base.num
- && ffmpegdec->ctx_time_d == context->time_base.den
- && ffmpegdec->ctx_pix_fmt == context->pix_fmt
- && ffmpegdec->ctx_par_n == context->sample_aspect_ratio.num
- && ffmpegdec->ctx_par_d == context->sample_aspect_ratio.den)
- return FALSE;
-
- GST_DEBUG_OBJECT (ffmpegdec,
- "Renegotiating video from %dx%d@ %d:%d PAR %d/%d fps to %dx%d@ %d:%d PAR %d/%d fps pixfmt %d",
- ffmpegdec->ctx_width, ffmpegdec->ctx_height,
- ffmpegdec->ctx_par_n, ffmpegdec->ctx_par_d,
- ffmpegdec->ctx_time_n, ffmpegdec->ctx_time_d,
- context->width, context->height,
- context->sample_aspect_ratio.num,
- context->sample_aspect_ratio.den,
- context->time_base.num, context->time_base.den, context->pix_fmt);
-
- ffmpegdec->ctx_width = context->width;
- ffmpegdec->ctx_height = context->height;
- ffmpegdec->ctx_ticks = context->ticks_per_frame;
- ffmpegdec->ctx_time_n = context->time_base.num;
- ffmpegdec->ctx_time_d = context->time_base.den;
- ffmpegdec->ctx_pix_fmt = context->pix_fmt;
- ffmpegdec->ctx_par_n = context->sample_aspect_ratio.num;
- ffmpegdec->ctx_par_d = context->sample_aspect_ratio.den;
-
- return TRUE;
- }
-
- static gboolean
- gst_ffmpegdec_video_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
- {
- GstFFMpegDecClass *oclass;
- GstCaps *caps;
- gint width, height;
- gint fps_n, fps_d;
- GstVideoInfo info;
- GstVideoFormat fmt;
-
- oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
-
- force |= gst_pad_check_reconfigure (ffmpegdec->srcpad);
-
- /* first check if anything changed */
- if (!update_video_context (ffmpegdec, force))
- return TRUE;
-
- /* now we're going to construct the video info for the final output
- * format */
- gst_video_info_init (&info);
-
- fmt = gst_ffmpeg_pixfmt_to_video_format (ffmpegdec->ctx_pix_fmt);
- if (fmt == GST_VIDEO_FORMAT_UNKNOWN)
- goto unknown_format;
-
- /* determine the width and height, start with the dimension of the
- * context */
- width = ffmpegdec->ctx_width;
- height = ffmpegdec->ctx_height;
-
- /* if there is a width/height specified in the input, use that */
- if (ffmpegdec->in_width != -1 && ffmpegdec->in_width < width)
- width = ffmpegdec->in_width;
- if (ffmpegdec->in_height != -1 && ffmpegdec->in_height < height)
- height = ffmpegdec->in_height;
-
- /* now store the values */
- gst_video_info_set_format (&info, fmt, width, height);
-
- /* set the interlaced flag */
- if (ffmpegdec->ctx_interlaced)
- info.interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
- else
- info.interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;
-
- /* try to find a good framerate */
- if (ffmpegdec->in_fps_d) {
- /* take framerate from input when it was specified (#313970) */
- fps_n = ffmpegdec->in_fps_n;
- fps_d = ffmpegdec->in_fps_d;
- } else {
- fps_n = ffmpegdec->ctx_time_d / ffmpegdec->ctx_ticks;
- fps_d = ffmpegdec->ctx_time_n;
-
- if (!fps_d) {
- GST_LOG_OBJECT (ffmpegdec, "invalid framerate: %d/0, -> %d/1", fps_n,
- fps_n);
- fps_d = 1;
- }
- if (gst_util_fraction_compare (fps_n, fps_d, 1000, 1) > 0) {
- GST_LOG_OBJECT (ffmpegdec, "excessive framerate: %d/%d, -> 0/1", fps_n,
- fps_d);
- fps_n = 0;
- fps_d = 1;
- }
- }
- GST_LOG_OBJECT (ffmpegdec, "setting framerate: %d/%d", fps_n, fps_d);
- info.fps_n = fps_n;
- info.fps_d = fps_d;
-
- /* calculate and update par now */
- gst_ffmpegdec_update_par (ffmpegdec, &info.par_n, &info.par_d);
-
- caps = gst_video_info_to_caps (&info);
-
- if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
- goto caps_failed;
-
- ffmpegdec->out_info = info;
-
- /* now figure out a bufferpool */
- if (!gst_ffmpegdec_bufferpool (ffmpegdec, caps))
- goto no_bufferpool;
-
- gst_caps_unref (caps);
-
- return TRUE;
-
- /* ERRORS */
- unknown_format:
- {
- #ifdef HAVE_LIBAV_UNINSTALLED
- /* using internal ffmpeg snapshot */
- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
- ("Could not find GStreamer caps mapping for libav pixfmt %d.",
- ffmpegdec->ctx_pix_fmt), (NULL));
- #else
- /* using external ffmpeg */
- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
- ("Could not find GStreamer caps mapping for libav codec '%s', and "
- "you are using an external libavcodec. This is most likely due to "
- "a packaging problem and/or libavcodec having been upgraded to a "
- "version that is not compatible with this version of "
- "gstreamer-libav. Make sure your gstreamer-libav and libavcodec "
- "packages come from the same source/repository.",
- oclass->in_plugin->name), (NULL));
- #endif
- return FALSE;
- }
- caps_failed:
- {
- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
- ("Could not set caps for libav decoder (%s), not fixed?",
- oclass->in_plugin->name));
- gst_caps_unref (caps);
-
- return FALSE;
- }
- no_bufferpool:
- {
- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
- ("Could not create bufferpool for fmpeg decoder (%s)",
- oclass->in_plugin->name));
- gst_caps_unref (caps);
-
- return FALSE;
- }
- }
-
- static gboolean
- update_audio_context (GstFFMpegDec * ffmpegdec, gboolean force)
- {
- AVCodecContext *context = ffmpegdec->context;
- gint depth;
- GstAudioChannelPosition pos[64] = { 0, };
-
- depth = av_smp_format_depth (context->sample_fmt);
-
- gst_ffmpeg_channel_layout_to_gst (context, pos);
-
- if (!force && ffmpegdec->format.audio.samplerate ==
- context->sample_rate &&
- ffmpegdec->format.audio.channels == context->channels &&
- ffmpegdec->format.audio.depth == depth &&
- memcmp (ffmpegdec->format.audio.ffmpeg_layout, pos,
- sizeof (GstAudioChannelPosition) * context->channels) == 0)
- return FALSE;
-
- GST_DEBUG_OBJECT (ffmpegdec,
- "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
- ffmpegdec->format.audio.samplerate, ffmpegdec->format.audio.channels,
- ffmpegdec->format.audio.depth,
- context->sample_rate, context->channels, depth);
-
- ffmpegdec->format.audio.samplerate = context->sample_rate;
- ffmpegdec->format.audio.channels = context->channels;
- ffmpegdec->format.audio.depth = depth;
- memcpy (ffmpegdec->format.audio.ffmpeg_layout, pos,
- sizeof (GstAudioChannelPosition) * context->channels);
-
- return TRUE;
- }
-
- static gboolean
- gst_ffmpegdec_audio_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
- {
- GstFFMpegDecClass *oclass;
- GstCaps *caps;
-
- oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
-
- if (!update_audio_context (ffmpegdec, force))
- return TRUE;
-
- /* convert the raw output format to caps */
- caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
- ffmpegdec->context, oclass->in_plugin->id, FALSE);
- if (caps == NULL)
- goto no_caps;
-
- /* Get GStreamer channel layout */
- memcpy (ffmpegdec->format.audio.gst_layout,
- ffmpegdec->format.audio.ffmpeg_layout,
- sizeof (GstAudioChannelPosition) * ffmpegdec->format.audio.channels);
- gst_audio_channel_positions_to_valid_order (ffmpegdec->format.
- audio.gst_layout, ffmpegdec->format.audio.channels);
-
- GST_LOG_OBJECT (ffmpegdec, "output caps %" GST_PTR_FORMAT, caps);
-
- if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
- goto caps_failed;
-
- gst_caps_unref (caps);
-
- return TRUE;
-
- /* ERRORS */
- no_caps:
- {
- #ifdef HAVE_LIBAV_UNINSTALLED
- /* using internal ffmpeg snapshot */
- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
- ("Could not find GStreamer caps mapping for libav codec '%s'.",
- oclass->in_plugin->name), (NULL));
- #else
- /* using external ffmpeg */
- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
- ("Could not find GStreamer caps mapping for libav codec '%s', and "
- "you are using an external libavcodec. This is most likely due to "
- "a packaging problem and/or libavcodec having been upgraded to a "
- "version that is not compatible with this version of "
- "gstreamer-libav. Make sure your gstreamer-libav and libavcodec "
- "packages come from the same source/repository.",
- oclass->in_plugin->name), (NULL));
- #endif
- return FALSE;
- }
- caps_failed:
- {
- GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
- ("Could not set caps for libav decoder (%s), not fixed?",
- oclass->in_plugin->name));
- gst_caps_unref (caps);
-
- return FALSE;
- }
- }
-
- /* perform qos calculations before decoding the next frame.
- *
- * Sets the skip_frame flag and if things are really bad, skips to the next
- * keyframe.
- *
- * Returns TRUE if the frame should be decoded, FALSE if the frame can be dropped
- * entirely.
- */
- static gboolean
- gst_ffmpegdec_do_qos (GstFFMpegDec * ffmpegdec, GstClockTime timestamp,
- gboolean * mode_switch)
- {
- GstClockTimeDiff diff;
- gdouble proportion;
- GstClockTime qostime, earliest_time;
- gboolean res = TRUE;
-
- *mode_switch = FALSE;
-
- /* no timestamp, can't do QoS */
- if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (timestamp)))
- goto no_qos;
-
- /* get latest QoS observation values */
- gst_ffmpegdec_read_qos (ffmpegdec, &proportion, &earliest_time);
-
- /* skip qos if we have no observation (yet) */
- if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (earliest_time))) {
- /* no skip_frame initialy */
- ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
- goto no_qos;
- }
-
- /* qos is done on running time of the timestamp */
- qostime = gst_segment_to_running_time (&ffmpegdec->segment, GST_FORMAT_TIME,
- timestamp);
-
- /* timestamp can be out of segment, then we don't do QoS */
- if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (qostime)))
- goto no_qos;
-
- /* see how our next timestamp relates to the latest qos timestamp. negative
- * values mean we are early, positive values mean we are too late. */
- diff = GST_CLOCK_DIFF (qostime, earliest_time);
-
- GST_DEBUG_OBJECT (ffmpegdec, "QOS: qostime %" GST_TIME_FORMAT
- ", earliest %" GST_TIME_FORMAT, GST_TIME_ARGS (qostime),
- GST_TIME_ARGS (earliest_time));
-
- /* if we using less than 40% of the available time, we can try to
- * speed up again when we were slow. */
- if (proportion < 0.4 && diff < 0) {
- goto normal_mode;
- } else {
- if (diff >= 0) {
- /* we're too slow, try to speed up */
- if (ffmpegdec->waiting_for_key) {
- /* we were waiting for a keyframe, that's ok */
- goto skipping;
- }
- /* switch to skip_frame mode */
- goto skip_frame;
- }
- }
-
- no_qos:
- ffmpegdec->processed++;
- return TRUE;
-
- skipping:
- {
- res = FALSE;
- goto drop_qos;
- }
- normal_mode:
- {
- if (ffmpegdec->context->skip_frame != AVDISCARD_DEFAULT) {
- ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
- *mode_switch = TRUE;
- GST_DEBUG_OBJECT (ffmpegdec, "QOS: normal mode %g < 0.4", proportion);
- }
- ffmpegdec->processed++;
- return TRUE;
- }
- skip_frame:
- {
- if (ffmpegdec->context->skip_frame != AVDISCARD_NONREF) {
- ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
- *mode_switch = TRUE;
- GST_DEBUG_OBJECT (ffmpegdec,
- "QOS: hurry up, diff %" G_GINT64_FORMAT " >= 0", diff);
- }
- goto drop_qos;
- }
- drop_qos:
- {
- GstClockTime stream_time, jitter;
- GstMessage *qos_msg;
-
- ffmpegdec->dropped++;
- stream_time =
- gst_segment_to_stream_time (&ffmpegdec->segment, GST_FORMAT_TIME,
- timestamp);
- jitter = GST_CLOCK_DIFF (qostime, earliest_time);
- qos_msg =
- gst_message_new_qos (GST_OBJECT_CAST (ffmpegdec), FALSE, qostime,
- stream_time, timestamp, GST_CLOCK_TIME_NONE);
- gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
- gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
- ffmpegdec->processed, ffmpegdec->dropped);
- gst_element_post_message (GST_ELEMENT_CAST (ffmpegdec), qos_msg);
-
- return res;
- }
- }
-
- /* returns TRUE if buffer is within segment, else FALSE.
- * if Buffer is on segment border, it's timestamp and duration will be clipped */
- static gboolean
- clip_video_buffer (GstFFMpegDec * dec, GstBuffer * buf, GstClockTime in_ts,
- GstClockTime in_dur)
- {
- gboolean res = TRUE;
- guint64 cstart, cstop;
- GstClockTime stop;
-
- GST_LOG_OBJECT (dec,
- "timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
- GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur));
-
- /* can't clip without TIME segment */
- if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
- goto beach;
-
- /* we need a start time */
- if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))
- goto beach;
-
- /* generate valid stop, if duration unknown, we have unknown stop */
- stop =
- GST_CLOCK_TIME_IS_VALID (in_dur) ? (in_ts + in_dur) : GST_CLOCK_TIME_NONE;
-
- /* now clip */
- res =
- gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &cstart,
- &cstop);
- if (G_UNLIKELY (!res))
- goto beach;
-
- /* we're pretty sure the duration of this buffer is not till the end of this
- * segment (which _clip will assume when the stop is -1) */
- if (stop == GST_CLOCK_TIME_NONE)
- cstop = GST_CLOCK_TIME_NONE;
-
- /* update timestamp and possibly duration if the clipped stop time is
- * valid */
- GST_BUFFER_TIMESTAMP (buf) = cstart;
- if (GST_CLOCK_TIME_IS_VALID (cstop))
- GST_BUFFER_DURATION (buf) = cstop - cstart;
-
- GST_LOG_OBJECT (dec,
- "clipped timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
- GST_TIME_ARGS (cstart), GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
+ /* get the caps structure for the codec-specific checks below */
+ structure = gst_caps_get_structure (caps, 0);
- beach:
- GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
- return res;
- }
+ /* for AAC we only use av_parse if not on stream-format==raw or ==loas */
+ if (oclass->in_plugin->id == CODEC_ID_AAC
+ || oclass->in_plugin->id == CODEC_ID_AAC_LATM) {
+ const gchar *format = gst_structure_get_string (structure, "stream-format");
+ if (format == NULL || strcmp (format, "raw") == 0) {
+ ffmpegdec->turnoff_parser = TRUE;
+ }
+ }
- /* figure out if the current picture is a keyframe, return TRUE if that is
- * the case. */
- static gboolean
- check_keyframe (GstFFMpegDec * ffmpegdec)
- {
- GstFFMpegDecClass *oclass;
- gboolean is_itype = FALSE;
- gboolean is_reference = FALSE;
- gboolean iskeyframe;
-
- /* figure out if we are dealing with a keyframe */
- oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
-
- /* remember that we have B frames, we need this for the DTS -> PTS conversion
- * code */
- if (!ffmpegdec->has_b_frames && ffmpegdec->picture->pict_type == FF_B_TYPE) {
- GST_DEBUG_OBJECT (ffmpegdec, "we have B frames");
- ffmpegdec->has_b_frames = TRUE;
- /* Emit latency message to recalculate it */
- gst_element_post_message (GST_ELEMENT_CAST (ffmpegdec),
- gst_message_new_latency (GST_OBJECT_CAST (ffmpegdec)));
+ /* for FLAC, don't parse if it's already parsed */
+ if (oclass->in_plugin->id == CODEC_ID_FLAC) {
+ if (gst_structure_has_field (structure, "streamheader"))
+ ffmpegdec->turnoff_parser = TRUE;
}
- is_itype = (ffmpegdec->picture->pict_type == FF_I_TYPE);
- is_reference = (ffmpegdec->picture->reference == 1);
+ /* workaround encoder bugs */
+ ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
+ ffmpegdec->context->error_recognition = 1;
+
+ /* open codec - we don't select an output sample format yet,
+ * simply because we don't know! We only get it
+ * during playback... */
+ if (!gst_ffmpegauddec_open (ffmpegdec))
+ goto open_failed;
- iskeyframe = (is_itype || is_reference || ffmpegdec->picture->key_frame)
- || (oclass->in_plugin->id == CODEC_ID_INDEO3)
- || (oclass->in_plugin->id == CODEC_ID_MSZH)
- || (oclass->in_plugin->id == CODEC_ID_ZLIB)
- || (oclass->in_plugin->id == CODEC_ID_VP3)
- || (oclass->in_plugin->id == CODEC_ID_HUFFYUV);
+ done:
+ GST_OBJECT_UNLOCK (ffmpegdec);
- GST_LOG_OBJECT (ffmpegdec,
- "current picture: type: %d, is_keyframe:%d, is_itype:%d, is_reference:%d",
- ffmpegdec->picture->pict_type, iskeyframe, is_itype, is_reference);
- gst_object_unref (ffmpegdec);
-
+ return ret;
- return iskeyframe;
+ /* ERRORS */
+ open_failed:
+ {
+ GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
+ ret = FALSE;
+ goto done;
+ }
}
- /* get an outbuf buffer with the current picture */
- static GstFlowReturn
- get_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf)
+ static gboolean
+ gst_ffmpegauddec_negotiate (GstFFMpegAudDec * ffmpegdec, gboolean force)
{
- GstFlowReturn ret;
-
- if (ffmpegdec->picture->opaque != NULL) {
- GstVideoFrame *frame;
-
- /* we allocated a picture already for ffmpeg to decode into, let's pick it
- * up and use it now. */
- frame = ffmpegdec->picture->opaque;
- *outbuf = frame->buffer;
- GST_LOG_OBJECT (ffmpegdec, "using opaque buffer %p on frame %p", *outbuf,
- frame);
- gst_buffer_ref (*outbuf);
- } else {
- GstVideoFrame frame;
- AVPicture *src, *dest;
- AVFrame pic;
- gint width, height;
- GstBuffer *buf;
+ GstFFMpegAudDecClass *oclass;
+ GstCaps *caps;
+ gint depth;
++ GstAudioChannelPosition pos[64] = { 0, };
- GST_LOG_OBJECT (ffmpegdec, "allocating an output buffer");
+ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
- if (G_UNLIKELY (!gst_ffmpegdec_video_negotiate (ffmpegdec, FALSE)))
- goto negotiate_failed;
+ depth = av_smp_format_depth (ffmpegdec->context->sample_fmt);
- if (!force && ffmpegdec->format.audio.samplerate ==
++ gst_ffmpeg_channel_layout_to_gst (ffmpegdec->context, pos);
++
++ if (!force && ffmpegdec->samplerate ==
+ ffmpegdec->context->sample_rate &&
- ffmpegdec->format.audio.channels == ffmpegdec->context->channels &&
- ffmpegdec->format.audio.depth == depth)
++ ffmpegdec->channels == ffmpegdec->context->channels &&
++ ffmpegdec->depth == depth)
+ return TRUE;
+
- ret = gst_buffer_pool_acquire_buffer (ffmpegdec->pool, &buf, NULL);
- if (G_UNLIKELY (ret != GST_FLOW_OK))
- goto alloc_failed;
+ GST_DEBUG_OBJECT (ffmpegdec,
+ "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
- ffmpegdec->format.audio.samplerate, ffmpegdec->format.audio.channels,
- ffmpegdec->format.audio.depth,
++ ffmpegdec->samplerate, ffmpegdec->channels,
++ ffmpegdec->depth,
+ ffmpegdec->context->sample_rate, ffmpegdec->context->channels, depth);
- ffmpegdec->format.audio.samplerate = ffmpegdec->context->sample_rate;
- ffmpegdec->format.audio.channels = ffmpegdec->context->channels;
- ffmpegdec->format.audio.depth = depth;
+
- if (!gst_video_frame_map (&frame, &ffmpegdec->out_info, buf,
- GST_MAP_READWRITE))
- goto invalid_frame;
++ ffmpegdec->samplerate = ffmpegdec->context->sample_rate;
++ ffmpegdec->channels = ffmpegdec->context->channels;
++ ffmpegdec->depth = depth;
++ memcpy (ffmpegdec->ffmpeg_layout, pos,
++ sizeof (GstAudioChannelPosition) * ffmpegdec->context->channels);
+
- gst_ffmpegdec_fill_picture (ffmpegdec, &frame, &pic);
++ /* Get GStreamer channel layout */
++ memcpy (ffmpegdec->gst_layout,
++ ffmpegdec->ffmpeg_layout,
++ sizeof (GstAudioChannelPosition) * ffmpegdec->channels);
++ gst_audio_channel_positions_to_valid_order (ffmpegdec->gst_layout,
++ ffmpegdec->channels);
- width = ffmpegdec->out_info.width;
- height = ffmpegdec->out_info.height;
+ caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
+ ffmpegdec->context, oclass->in_plugin->id, FALSE);
- src = (AVPicture *) ffmpegdec->picture;
- dest = (AVPicture *) & pic;
+ if (caps == NULL)
+ goto no_caps;
- GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, ffmpegdec,
- "copy picture to output buffer %dx%d", width, height);
- av_picture_copy (dest, src, ffmpegdec->context->pix_fmt, width, height);
++ GST_LOG_OBJECT (ffmpegdec, "output caps %" GST_PTR_FORMAT, caps);
+
- gst_video_frame_unmap (&frame);
+ if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
+ goto caps_failed;
- *outbuf = buf;
- }
- ffmpegdec->picture->reordered_opaque = -1;
+ gst_caps_unref (caps);
- return GST_FLOW_OK;
+ return TRUE;
- /* special cases */
- negotiate_failed:
- {
- GST_DEBUG_OBJECT (ffmpegdec, "negotiation failed");
- return GST_FLOW_NOT_NEGOTIATED;
- }
- alloc_failed:
+ /* ERRORS */
+ no_caps:
{
- GST_DEBUG_OBJECT (ffmpegdec, "buffer alloc failed");
- return ret;
-#ifdef HAVE_FFMPEG_UNINSTALLED
++#ifdef HAVE_LIBAV_UNINSTALLED
+ /* using internal ffmpeg snapshot */
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
- ("Could not find GStreamer caps mapping for FFmpeg codec '%s'.",
++ ("Could not find GStreamer caps mapping for libav codec '%s'.",
+ oclass->in_plugin->name), (NULL));
+ #else
+ /* using external ffmpeg */
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
- ("Could not find GStreamer caps mapping for FFmpeg codec '%s', and "
++ ("Could not find GStreamer caps mapping for libav codec '%s', and "
+ "you are using an external libavcodec. This is most likely due to "
+ "a packaging problem and/or libavcodec having been upgraded to a "
+ "version that is not compatible with this version of "
- "gstreamer-ffmpeg. Make sure your gstreamer-ffmpeg and libavcodec "
++ "gstreamer-libav. Make sure your gstreamer-libav and libavcodec "
+ "packages come from the same source/repository.",
+ oclass->in_plugin->name), (NULL));
+ #endif
+ return FALSE;
}
- invalid_frame:
+ caps_failed:
{
- GST_DEBUG_OBJECT (ffmpegdec, "could not map frame");
- return GST_FLOW_ERROR;
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
- ("Could not set caps for ffmpeg decoder (%s), not fixed?",
++ ("Could not set caps for libav decoder (%s), not fixed?",
+ oclass->in_plugin->name));
+ gst_caps_unref (caps);
+
+ return FALSE;
}
}
-
static void
- clear_queued (GstFFMpegDec * ffmpegdec)
+ clear_queued (GstFFMpegAudDec * ffmpegdec)
{
g_list_foreach (ffmpegdec->queued, (GFunc) gst_mini_object_unref, NULL);
g_list_free (ffmpegdec->queued);
if (G_UNLIKELY ((diff = ctime - in_ts) > 0)) {
/* bring clipped time to bytes */
diff =
-- gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
-- GST_SECOND) * (dec->format.audio.depth * dec->format.audio.channels);
++ gst_util_uint64_scale_int (diff, dec->samplerate,
++ GST_SECOND) * (dec->depth * dec->channels);
GST_DEBUG_OBJECT (dec, "clipping start to %" GST_TIME_FORMAT " %"
G_GINT64_FORMAT " bytes", GST_TIME_ARGS (ctime), diff);
if (G_UNLIKELY ((diff = stop - cstop) > 0)) {
/* bring clipped time to bytes */
diff =
-- gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
-- GST_SECOND) * (dec->format.audio.depth * dec->format.audio.channels);
++ gst_util_uint64_scale_int (diff, dec->samplerate,
++ GST_SECOND) * (dec->depth * dec->channels);
GST_DEBUG_OBJECT (dec, "clipping stop to %" GST_TIME_FORMAT " %"
G_GINT64_FORMAT " bytes", GST_TIME_ARGS (cstop), diff);
"Decode audio: len=%d, have_data=%d", len, have_data);
if (len >= 0 && have_data > 0) {
+ GstAudioFormat fmt;
+
+ /* Buffer size */
+ gst_buffer_unmap (*outbuf, &map);
+ gst_buffer_resize (*outbuf, 0, have_data);
+
GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
- if (!gst_ffmpegdec_audio_negotiate (ffmpegdec, FALSE)) {
+ if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) {
gst_buffer_unref (*outbuf);
*outbuf = NULL;
len = -1;
* 1) calculate based on number of samples
*/
out_duration = gst_util_uint64_scale (have_data, GST_SECOND,
-- ffmpegdec->format.audio.depth * ffmpegdec->format.audio.channels *
-- ffmpegdec->format.audio.samplerate);
++ ffmpegdec->depth * ffmpegdec->channels * ffmpegdec->samplerate);
/* offset:
*
out_duration)))
goto clipped;
- ffmpegdec->format.audio.depth * 8, ffmpegdec->format.audio.depth * 8);
+
+ /* Reorder channels to the GStreamer channel order */
+ /* Only the width really matters here... and it's stored as depth */
+ fmt =
+ gst_audio_format_build_integer (TRUE, G_BYTE_ORDER,
- ffmpegdec->format.audio.channels,
- ffmpegdec->format.audio.ffmpeg_layout,
- ffmpegdec->format.audio.gst_layout);
++ ffmpegdec->depth * 8, ffmpegdec->depth * 8);
+
+ gst_audio_buffer_reorder_channels (*outbuf, fmt,
++ ffmpegdec->channels, ffmpegdec->ffmpeg_layout, ffmpegdec->gst_layout);
} else {
+ gst_buffer_unmap (*outbuf, &map);
gst_buffer_unref (*outbuf);
*outbuf = NULL;
}
}
static gboolean
- gst_ffmpegdec_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
-gst_ffmpegauddec_sink_event (GstPad * pad, GstEvent * event)
++gst_ffmpegauddec_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
- GstFFMpegDec *ffmpegdec;
+ GstFFMpegAudDec *ffmpegdec;
gboolean ret = FALSE;
- ffmpegdec = (GstFFMpegDec *) parent;
- ffmpegdec = (GstFFMpegAudDec *) gst_pad_get_parent (pad);
++ ffmpegdec = (GstFFMpegAudDec *) parent;
GST_DEBUG_OBJECT (ffmpegdec, "Handling %s event",
GST_EVENT_TYPE_NAME (event));
clear_queued (ffmpegdec);
break;
}
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_CAPS:
{
- gboolean update;
- GstFormat fmt;
- gint64 start, stop, time;
- gdouble rate, arate;
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
- gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
- &start, &stop, &time);
+ if (!ffmpegdec->last_caps
+ || !gst_caps_is_equal (ffmpegdec->last_caps, caps)) {
- ret = gst_ffmpegdec_setcaps (ffmpegdec, caps);
++ ret = gst_ffmpegauddec_setcaps (ffmpegdec, caps);
+ if (ret) {
+ gst_caps_replace (&ffmpegdec->last_caps, caps);
+ }
+ } else {
+ ret = TRUE;
+ }
- switch (fmt) {
+ gst_event_unref (event);
+ goto done;
+ }
+ case GST_EVENT_SEGMENT:
+ {
+ GstSegment segment;
+
+ gst_event_copy_segment (event, &segment);
+
+ switch (segment.format) {
case GST_FORMAT_TIME:
/* fine, our native segment format */
break;
}
}
- gst_ffmpegdec_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
+static gboolean
- GstFFMpegDec *ffmpegdec;
++gst_ffmpegauddec_sink_query (GstPad * pad, GstObject * parent, GstQuery * query)
+{
- ffmpegdec = (GstFFMpegDec *) parent;
++ GstFFMpegAudDec *ffmpegdec;
+ gboolean ret = FALSE;
+
- case GST_QUERY_ALLOCATION:
- {
- GstAllocationParams params;
-
- gst_allocation_params_init (&params);
- params.flags = GST_MEMORY_FLAG_ZERO_PADDED;
- params.padding = FF_INPUT_BUFFER_PADDING_SIZE;
- /* we would like to have some padding so that we don't have to
- * memcpy. We don't suggest an allocator. */
- gst_query_add_allocation_param (query, NULL, &params);
- ret = TRUE;
- break;
- }
++ ffmpegdec = (GstFFMpegAudDec *) parent;
+
+ GST_DEBUG_OBJECT (ffmpegdec, "Handling %s query",
+ GST_QUERY_TYPE_NAME (query));
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_ACCEPT_CAPS:
+ {
+ GstPadTemplate *templ;
+
+ ret = FALSE;
+ if ((templ = GST_PAD_PAD_TEMPLATE (pad))) {
+ GstCaps *tcaps;
+
+ if ((tcaps = GST_PAD_TEMPLATE_CAPS (templ))) {
+ GstCaps *caps;
+
+ gst_query_parse_accept_caps (query, &caps);
+ gst_query_set_accept_caps_result (query,
+ gst_caps_is_subset (caps, tcaps));
+ ret = TRUE;
+ }
+ }
+ break;
+ }
+ default:
+ ret = gst_pad_query_default (pad, parent, query);
+ break;
+ }
+ return ret;
+}
+
static GstFlowReturn
- gst_ffmpegdec_chain (GstPad * pad, GstObject * parent, GstBuffer * inbuf)
-gst_ffmpegauddec_chain (GstPad * pad, GstBuffer * inbuf)
++gst_ffmpegauddec_chain (GstPad * pad, GstObject * parent, GstBuffer * inbuf)
{
- GstFFMpegDec *ffmpegdec;
- GstFFMpegDecClass *oclass;
+ GstFFMpegAudDec *ffmpegdec;
+ GstFFMpegAudDecClass *oclass;
guint8 *data, *bdata;
+ GstMapInfo map;
gint size, bsize, len, have_data;
GstFlowReturn ret = GST_FLOW_OK;
- GstClockTime in_timestamp;
- GstClockTime in_duration;
+ GstClockTime in_pts, in_dts, in_duration;
- gboolean discont, do_padding;
+ gboolean discont;
gint64 in_offset;
const GstTSInfo *in_info;
const GstTSInfo *dec_info;
- ffmpegdec = (GstFFMpegDec *) parent;
- ffmpegdec = (GstFFMpegAudDec *) (GST_PAD_PARENT (pad));
++ ffmpegdec = (GstFFMpegAudDec *) parent;
if (G_UNLIKELY (!ffmpegdec->opened))
goto not_negotiated;
in_offset = GST_BUFFER_OFFSET (inbuf);
/* get handle to timestamp info, we can pass this around to ffmpeg */
- in_info = gst_ts_info_store (ffmpegdec, in_timestamp, in_duration, in_offset);
+ in_info =
+ gst_ts_info_store (ffmpegdec, in_dts, in_pts, in_duration, in_offset);
- if (in_dts != -1) {
- GstClockTime diff;
- /* keep track of timestamp diff to estimate duration */
- diff = in_dts - ffmpegdec->last_dts;
- /* need to scale with amount of frames in the interval */
- if (ffmpegdec->last_frames)
- diff /= ffmpegdec->last_frames;
-
- GST_LOG_OBJECT (ffmpegdec, "estimated duration %" GST_TIME_FORMAT " %u",
- GST_TIME_ARGS (diff), ffmpegdec->last_frames);
-
- ffmpegdec->last_diff = diff;
- ffmpegdec->last_dts = in_dts;
- ffmpegdec->last_frames = 0;
- }
+ GST_LOG_OBJECT (ffmpegdec,
+ "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
+ GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
- GST_BUFFER_SIZE (inbuf), GST_BUFFER_OFFSET (inbuf),
- GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration), in_info->idx);
++ gst_buffer_get_size (inbuf), GST_BUFFER_OFFSET (inbuf),
++ GST_TIME_ARGS (in_pts), GST_TIME_ARGS (in_duration), in_info->idx);
/* workarounds, functions write to buffers:
* libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
inbuf = gst_buffer_make_writable (inbuf);
}
- bdata = GST_BUFFER_DATA (inbuf);
- bsize = GST_BUFFER_SIZE (inbuf);
+ gst_buffer_map (inbuf, &map, GST_MAP_READ);
+
+ bdata = map.data;
+ bsize = map.size;
+
+ GST_LOG_OBJECT (ffmpegdec,
+ "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", dts:%"
+ GST_TIME_FORMAT ", pts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT
+ ", info %d", bsize, in_offset, GST_TIME_ARGS (in_dts),
+ GST_TIME_ARGS (in_pts), GST_TIME_ARGS (in_duration), in_info->idx);
- if (!GST_MEMORY_IS_ZERO_PADDED (map.memory)
- || (map.maxsize - map.size) < FF_INPUT_BUFFER_PADDING_SIZE) {
- /* add padding */
- if (ffmpegdec->padded_size < bsize + FF_INPUT_BUFFER_PADDING_SIZE) {
- ffmpegdec->padded_size = bsize + FF_INPUT_BUFFER_PADDING_SIZE;
- ffmpegdec->padded = g_realloc (ffmpegdec->padded, ffmpegdec->padded_size);
- GST_LOG_OBJECT (ffmpegdec, "resized padding buffer to %d",
- ffmpegdec->padded_size);
- }
- GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, ffmpegdec,
- "Copy input to add padding");
- memcpy (ffmpegdec->padded, bdata, bsize);
- memset (ffmpegdec->padded + bsize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
- bdata = ffmpegdec->padded;
- }
-
do {
- guint8 tmp_padding[FF_INPUT_BUFFER_PADDING_SIZE];
-
/* parse, if at all possible */
if (ffmpegdec->pctx) {
gint res;
- do_padding = TRUE;
GST_LOG_OBJECT (ffmpegdec,
"Calling av_parser_parse2 with offset %" G_GINT64_FORMAT ", ts:%"
- GST_TIME_FORMAT " size %d", in_offset, GST_TIME_ARGS (in_timestamp),
- bsize);
+ GST_TIME_FORMAT " size %d", in_offset, GST_TIME_ARGS (in_pts), bsize);
/* feed the parser. We pass the timestamp info so that we can recover all
* info again later */
dec_info = in_info;
}
- if (do_padding) {
- /* add temporary padding */
- GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, ffmpegdec,
- "Add temporary input padding");
- memcpy (tmp_padding, data + size, FF_INPUT_BUFFER_PADDING_SIZE);
- memset (data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
- }
-
-- /* decode a frame of audio/video now */
++ /* decode a frame of audio now */
len =
- gst_ffmpegdec_frame (ffmpegdec, data, size, &have_data, dec_info, &ret);
-
- if (do_padding) {
- memcpy (data + size, tmp_padding, FF_INPUT_BUFFER_PADDING_SIZE);
- }
+ gst_ffmpegauddec_frame (ffmpegdec, data, size, &have_data, dec_info,
+ &ret);
if (ret != GST_FLOW_OK) {
GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
/* ERRORS */
not_negotiated:
{
- oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
+ oclass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
- ("ffdec_%s: input format was not set before data start",
+ ("avdec_%s: input format was not set before data start",
oclass->in_plugin->name));
gst_buffer_unref (inbuf);
return GST_FLOW_NOT_NEGOTIATED;
{
ARG_0,
ARG_BIT_RATE,
- ARG_GOP_SIZE,
- ARG_ME_METHOD,
ARG_BUFSIZE,
ARG_RTP_PAYLOAD_SIZE,
- ARG_CFG_BASE
};
- #define GST_TYPE_ME_METHOD (gst_ffmpegenc_me_method_get_type())
- static GType
- gst_ffmpegenc_me_method_get_type (void)
- {
- static GType ffmpegenc_me_method_type = 0;
- static GEnumValue ffmpegenc_me_methods[] = {
- {ME_ZERO, "None (Very low quality)", "zero"},
- {ME_FULL, "Full (Slow, unmaintained)", "full"},
- {ME_LOG, "Logarithmic (Low quality, unmaintained)", "logarithmic"},
- {ME_PHODS, "phods (Low quality, unmaintained)", "phods"},
- {ME_EPZS, "EPZS (Best quality, Fast)", "epzs"},
- {ME_X1, "X1 (Experimental)", "x1"},
- {0, NULL, NULL},
- };
- if (!ffmpegenc_me_method_type) {
- ffmpegenc_me_method_type =
- g_enum_register_static ("GstLibAVEncMeMethod", ffmpegenc_me_methods);
- }
- return ffmpegenc_me_method_type;
- }
--
/* A number of function prototypes are given so we can refer to them later. */
- static void gst_ffmpegenc_class_init (GstFFMpegEncClass * klass);
- static void gst_ffmpegenc_base_init (GstFFMpegEncClass * klass);
- static void gst_ffmpegenc_init (GstFFMpegEnc * ffmpegenc);
- static void gst_ffmpegenc_finalize (GObject * object);
+ static void gst_ffmpegaudenc_class_init (GstFFMpegAudEncClass * klass);
+ static void gst_ffmpegaudenc_base_init (GstFFMpegAudEncClass * klass);
+ static void gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc);
+ static void gst_ffmpegaudenc_finalize (GObject * object);
- static gboolean gst_ffmpegenc_setcaps (GstFFMpegEnc * ffmpegenc,
-static gboolean gst_ffmpegaudenc_setcaps (GstPad * pad, GstCaps * caps);
-static GstCaps *gst_ffmpegaudenc_getcaps (GstPad * pad);
++static gboolean gst_ffmpegaudenc_setcaps (GstFFMpegAudEnc * ffmpegenc,
+ GstCaps * caps);
- static GstCaps *gst_ffmpegenc_getcaps (GstPad * pad, GstCaps * filter);
- static GstFlowReturn gst_ffmpegenc_chain_video (GstPad * pad,
- GstObject * parent, GstBuffer * buffer);
- static GstFlowReturn gst_ffmpegenc_chain_audio (GstPad * pad,
++static GstCaps *gst_ffmpegaudenc_getcaps (GstFFMpegAudEnc * ffmpegenc,
++ GstCaps * filter);
+ static GstFlowReturn gst_ffmpegaudenc_chain_audio (GstPad * pad,
- GstBuffer * buffer);
+ GstObject * parent, GstBuffer * buffer);
- static gboolean gst_ffmpegenc_event_sink (GstPad * pad, GstObject * parent,
- GstEvent * event);
- static gboolean gst_ffmpegenc_event_src (GstPad * pad, GstObject * parent,
- GstEvent * event);
- static gboolean gst_ffmpegenc_query_sink (GstPad * pad, GstObject * parent,
++static gboolean gst_ffmpegaudenc_query_sink (GstPad * pad, GstObject * parent,
+ GstQuery * query);
++static gboolean gst_ffmpegaudenc_event_sink (GstPad * pad, GstObject * parent,
++ GstEvent * event);
- static void gst_ffmpegenc_set_property (GObject * object,
+ static void gst_ffmpegaudenc_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec);
- static void gst_ffmpegenc_get_property (GObject * object,
+ static void gst_ffmpegaudenc_get_property (GObject * object,
guint prop_id, GValue * value, GParamSpec * pspec);
- static GstStateChangeReturn gst_ffmpegenc_change_state (GstElement * element,
+ static GstStateChangeReturn gst_ffmpegaudenc_change_state (GstElement * element,
GstStateChange transition);
-#define GST_FFENC_PARAMS_QDATA g_quark_from_static_string("ffenc-params")
+#define GST_FFENC_PARAMS_QDATA g_quark_from_static_string("avenc-params")
static GstElementClass *parent_class = NULL;
g_assert (in_plugin != NULL);
/* construct the element details struct */
- longname = g_strdup_printf ("FFmpeg %s encoder", in_plugin->long_name);
- description = g_strdup_printf ("FFmpeg %s encoder", in_plugin->name);
- gst_element_class_set_details_simple (element_class, longname,
+ longname = g_strdup_printf ("libav %s encoder", in_plugin->long_name);
- classification = g_strdup_printf ("Codec/Encoder/%s",
- (in_plugin->type == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio");
+ description = g_strdup_printf ("libav %s encoder", in_plugin->name);
+ gst_element_class_set_metadata (element_class, longname,
- classification, description,
+ "Codec/Encoder/Audio", description,
"Wim Taymans <wim.taymans@gmail.com>, "
"Ronald Bultje <rbultje@ronald.bitfreak.net>");
g_free (longname);
if (!(srccaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, TRUE))) {
GST_DEBUG ("Couldn't get source caps for encoder '%s'", in_plugin->name);
- srccaps = gst_caps_new_simple ("unknown/unknown", NULL);
+ srccaps = gst_caps_new_empty_simple ("unknown/unknown");
}
- if (in_plugin->type == AVMEDIA_TYPE_VIDEO) {
- sinkcaps = gst_caps_from_string ("video/x-raw");
- } else {
- sinkcaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
- in_plugin->id, TRUE, in_plugin);
- }
+ sinkcaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
+ in_plugin->id, TRUE, in_plugin);
if (!sinkcaps) {
GST_DEBUG ("Couldn't get sink caps for encoder '%s'", in_plugin->name);
- sinkcaps = gst_caps_new_simple ("unknown/unknown", NULL);
+ sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
}
/* pad templates */
parent_class = g_type_class_peek_parent (klass);
- gobject_class->set_property = gst_ffmpegenc_set_property;
- gobject_class->get_property = gst_ffmpegenc_get_property;
-
- if (klass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
- /* FIXME: could use -1 for a sensible per-codec default based on
- * e.g. input resolution and framerate */
- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BIT_RATE,
- g_param_spec_int ("bitrate", "Bit Rate",
- "Target Video Bitrate", 0, G_MAXINT, DEFAULT_VIDEO_BITRATE,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_GOP_SIZE,
- g_param_spec_int ("gop-size", "GOP Size",
- "Number of frames within one GOP", 0, G_MAXINT,
- DEFAULT_VIDEO_GOP_SIZE,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_ME_METHOD,
- g_param_spec_enum ("me-method", "ME Method", "Motion Estimation Method",
- GST_TYPE_ME_METHOD, ME_EPZS,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BUFSIZE,
- g_param_spec_int ("buffer-size", "Buffer Size",
- "Size of the video buffers (read-only)", 0, G_MAXINT, 0,
- G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
- g_object_class_install_property (G_OBJECT_CLASS (klass),
- ARG_RTP_PAYLOAD_SIZE, g_param_spec_int ("rtp-payload-size",
- "RTP Payload Size", "Target GOB length", 0, G_MAXINT, 0,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-
- /* register additional properties, possibly dependent on the exact CODEC */
- gst_ffmpeg_cfg_install_property (klass, ARG_CFG_BASE);
- } else if (klass->in_plugin->type == AVMEDIA_TYPE_AUDIO) {
- /* FIXME: could use -1 for a sensible per-codec defaults */
- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BIT_RATE,
- g_param_spec_int ("bitrate", "Bit Rate",
- "Target Audio Bitrate", 0, G_MAXINT, DEFAULT_AUDIO_BITRATE,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- }
+ gobject_class->set_property = gst_ffmpegaudenc_set_property;
+ gobject_class->get_property = gst_ffmpegaudenc_get_property;
+
- if (klass->in_plugin->type == AVMEDIA_TYPE_AUDIO) {
- g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BIT_RATE,
- g_param_spec_ulong ("bitrate", "Bit Rate",
- "Target Audio Bitrate", 0, G_MAXULONG, DEFAULT_AUDIO_BITRATE,
- G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- }
++  /* FIXME: could use -1 for a sensible per-codec default */
++ g_object_class_install_property (G_OBJECT_CLASS (klass), ARG_BIT_RATE,
++ g_param_spec_int ("bitrate", "Bit Rate",
++ "Target Audio Bitrate", 0, G_MAXINT, DEFAULT_AUDIO_BITRATE,
++ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- gstelement_class->change_state = gst_ffmpegenc_change_state;
+ gstelement_class->change_state = gst_ffmpegaudenc_change_state;
- gobject_class->finalize = gst_ffmpegenc_finalize;
+ gobject_class->finalize = gst_ffmpegaudenc_finalize;
}
static void
- gst_ffmpegenc_init (GstFFMpegEnc * ffmpegenc)
+ gst_ffmpegaudenc_init (GstFFMpegAudEnc * ffmpegaudenc)
{
- GstFFMpegEncClass *oclass =
- (GstFFMpegEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
+ GstFFMpegAudEncClass *oclass =
+ (GstFFMpegAudEncClass *) (G_OBJECT_GET_CLASS (ffmpegaudenc));
/* setup pads */
- ffmpegenc->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
- gst_pad_set_query_function (ffmpegenc->sinkpad, gst_ffmpegenc_query_sink);
- ffmpegenc->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
- gst_pad_use_fixed_caps (ffmpegenc->srcpad);
-
- /* ffmpeg objects */
- ffmpegenc->context = avcodec_alloc_context ();
- ffmpegenc->picture = avcodec_alloc_frame ();
- ffmpegenc->opened = FALSE;
-
- ffmpegenc->file = NULL;
- ffmpegenc->delay = g_queue_new ();
-
- gst_pad_set_event_function (ffmpegenc->sinkpad, gst_ffmpegenc_event_sink);
-
- if (oclass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
- gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain_video);
- /* so we know when to flush the buffers on EOS */
- gst_pad_set_event_function (ffmpegenc->srcpad, gst_ffmpegenc_event_src);
-
- ffmpegenc->bitrate = DEFAULT_VIDEO_BITRATE;
- ffmpegenc->me_method = ME_EPZS;
- ffmpegenc->buffer_size = 512 * 1024;
- ffmpegenc->gop_size = DEFAULT_VIDEO_GOP_SIZE;
- ffmpegenc->rtp_payload_size = 0;
+ ffmpegaudenc->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
- gst_pad_set_setcaps_function (ffmpegaudenc->sinkpad,
- gst_ffmpegaudenc_setcaps);
- gst_pad_set_getcaps_function (ffmpegaudenc->sinkpad,
- gst_ffmpegaudenc_getcaps);
++ gst_pad_set_event_function (ffmpegaudenc->sinkpad,
++ gst_ffmpegaudenc_event_sink);
++ gst_pad_set_query_function (ffmpegaudenc->sinkpad,
++ gst_ffmpegaudenc_query_sink);
++ gst_pad_set_chain_function (ffmpegaudenc->sinkpad,
++ gst_ffmpegaudenc_chain_audio);
+
- ffmpegenc->lmin = 2;
- ffmpegenc->lmax = 31;
- ffmpegenc->max_key_interval = 0;
+ ffmpegaudenc->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
+ gst_pad_use_fixed_caps (ffmpegaudenc->srcpad);
- gst_ffmpeg_cfg_set_defaults (ffmpegenc);
- } else if (oclass->in_plugin->type == AVMEDIA_TYPE_AUDIO) {
- gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain_audio);
-
- ffmpegenc->bitrate = DEFAULT_AUDIO_BITRATE;
- }
+ /* ffmpeg objects */
+ ffmpegaudenc->context = avcodec_alloc_context ();
+ ffmpegaudenc->opened = FALSE;
- gst_element_add_pad (GST_ELEMENT (ffmpegenc), ffmpegenc->sinkpad);
- gst_element_add_pad (GST_ELEMENT (ffmpegenc), ffmpegenc->srcpad);
- if (oclass->in_plugin->type == AVMEDIA_TYPE_AUDIO) {
- gst_pad_set_chain_function (ffmpegaudenc->sinkpad,
- gst_ffmpegaudenc_chain_audio);
-
- ffmpegaudenc->bitrate = DEFAULT_AUDIO_BITRATE;
- }
-
+ gst_element_add_pad (GST_ELEMENT (ffmpegaudenc), ffmpegaudenc->sinkpad);
+ gst_element_add_pad (GST_ELEMENT (ffmpegaudenc), ffmpegaudenc->srcpad);
- ffmpegenc->adapter = gst_adapter_new ();
+ ffmpegaudenc->adapter = gst_adapter_new ();
}
static void
}
static GstCaps *
- gst_ffmpegenc_get_possible_sizes (GstFFMpegEnc * ffmpegenc, GstPad * pad,
- GstCaps * caps)
- {
- GstCaps *templ, *othercaps = NULL;
- GstCaps *tmpcaps = NULL;
- GstCaps *intersect = NULL;
- guint i;
-
- othercaps = gst_pad_peer_query_caps (ffmpegenc->srcpad, NULL);
-
- if (!othercaps)
- return gst_caps_ref (caps);
-
- templ = gst_pad_get_pad_template_caps (ffmpegenc->srcpad);
- intersect = gst_caps_intersect (othercaps, templ);
- gst_caps_unref (othercaps);
- gst_caps_unref (templ);
-
- if (gst_caps_is_empty (intersect))
- return intersect;
-
- if (gst_caps_is_any (intersect))
- return gst_caps_ref (caps);
-
- tmpcaps = gst_caps_new_empty ();
-
- for (i = 0; i < gst_caps_get_size (intersect); i++) {
- GstStructure *s = gst_caps_get_structure (intersect, i);
- const GValue *height = NULL;
- const GValue *width = NULL;
- const GValue *framerate = NULL;
- GstStructure *tmps;
-
- height = gst_structure_get_value (s, "height");
- width = gst_structure_get_value (s, "width");
- framerate = gst_structure_get_value (s, "framerate");
-
- tmps = gst_structure_new_empty ("video/x-raw");
- if (width)
- gst_structure_set_value (tmps, "width", width);
- if (height)
- gst_structure_set_value (tmps, "height", height);
- if (framerate)
- gst_structure_set_value (tmps, "framerate", framerate);
-
- tmpcaps = gst_caps_merge_structure (tmpcaps, tmps);
- }
- gst_caps_unref (intersect);
-
- intersect = gst_caps_intersect (caps, tmpcaps);
- gst_caps_unref (tmpcaps);
-
- return intersect;
- }
-
-
- static GstCaps *
- gst_ffmpegenc_getcaps (GstPad * pad, GstCaps * filter)
-gst_ffmpegaudenc_getcaps (GstPad * pad)
++gst_ffmpegaudenc_getcaps (GstFFMpegAudEnc * ffmpegaudenc, GstCaps * filter)
{
- GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) GST_PAD_PARENT (pad);
- GstFFMpegEncClass *oclass =
- (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);
- AVCodecContext *ctx = NULL;
- enum PixelFormat pixfmt;
- GstCaps *templ, *caps = NULL;
- GstCaps *finalcaps = NULL;
- gint i;
- GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) GST_PAD_PARENT (pad);
+ GstCaps *caps = NULL;
- GST_DEBUG_OBJECT (ffmpegenc, "getting caps, filter %" GST_PTR_FORMAT, filter);
+ GST_DEBUG_OBJECT (ffmpegaudenc, "getting caps");
/* audio needs no special care */
- if (oclass->in_plugin->type == AVMEDIA_TYPE_AUDIO) {
- templ = gst_pad_get_pad_template_caps (pad);
- if (filter) {
- caps = gst_caps_intersect_full (filter, templ, GST_CAPS_INTERSECT_FIRST);
- gst_caps_unref (templ);
- } else
- caps = templ;
-
- GST_DEBUG_OBJECT (ffmpegenc, "audio caps, return intersected template %"
- GST_PTR_FORMAT, caps);
-
- return caps;
- }
-
- /* cached */
- if (oclass->sinkcaps) {
- caps = gst_ffmpegenc_get_possible_sizes (ffmpegenc, pad, oclass->sinkcaps);
- if (filter) {
- finalcaps =
- gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
- gst_caps_unref (caps);
- } else {
- finalcaps = caps;
- }
- GST_DEBUG_OBJECT (ffmpegenc,
- "return intersected cached caps %" GST_PTR_FORMAT, finalcaps);
- return finalcaps;
- }
-
- /* create cache etc. */
-
- /* shut up the logging while we autoprobe; we don't want warnings and
- * errors about unsupported formats */
- /* FIXME: if someone cares about this disabling the logging for other
- * instances/threads/..., one could investigate if there is a way to
- * set this as a struct member on the av context, and check it from the
- * log handler */
- #ifndef GST_DISABLE_GST_DEBUG
- _shut_up_I_am_probing = TRUE;
- #endif
- GST_DEBUG_OBJECT (ffmpegenc, "probing caps");
- i = pixfmt = 0;
- /* check pixfmt until deemed finished */
- for (pixfmt = 0;; pixfmt++) {
- GstCaps *tmpcaps;
-
- /* override looping all pixfmt if codec declares pixfmts;
- * these may not properly check and report supported pixfmt during _init */
- if (oclass->in_plugin->pix_fmts) {
- if ((pixfmt = oclass->in_plugin->pix_fmts[i++]) == PIX_FMT_NONE) {
- GST_DEBUG_OBJECT (ffmpegenc,
- "At the end of official pixfmt for this codec, breaking out");
- break;
- }
- GST_DEBUG_OBJECT (ffmpegenc,
- "Got an official pixfmt [%d], attempting to get caps", pixfmt);
- tmpcaps = gst_ffmpeg_pixfmt_to_caps (pixfmt, NULL, oclass->in_plugin->id);
- if (tmpcaps) {
- GST_DEBUG_OBJECT (ffmpegenc, "Got caps, breaking out");
- if (!caps)
- caps = gst_caps_new_empty ();
- gst_caps_append (caps, tmpcaps);
- continue;
- }
- GST_DEBUG_OBJECT (ffmpegenc,
- "Couldn't figure out caps without context, trying again with a context");
- }
-
- GST_DEBUG_OBJECT (ffmpegenc, "pixfmt :%d", pixfmt);
- if (pixfmt >= PIX_FMT_NB) {
- GST_WARNING ("Invalid pixfmt, breaking out");
- break;
- }
-
- /* need to start with a fresh codec_context each time around, since
- * codec_close may have released stuff causing the next pass to segfault */
- ctx = avcodec_alloc_context ();
- if (!ctx) {
- GST_DEBUG_OBJECT (ffmpegenc, "no context");
- break;
- }
-
- /* set some default properties */
- ctx->width = DEFAULT_WIDTH;
- ctx->height = DEFAULT_HEIGHT;
- ctx->time_base.num = 1;
- ctx->time_base.den = 25;
- ctx->ticks_per_frame = 1;
- ctx->bit_rate = DEFAULT_VIDEO_BITRATE;
- /* makes it silent */
- ctx->strict_std_compliance = -1;
-
- ctx->pix_fmt = pixfmt;
-
- GST_DEBUG ("Attempting to open codec");
- if (gst_ffmpeg_avcodec_open (ctx, oclass->in_plugin) >= 0 &&
- ctx->pix_fmt == pixfmt) {
- ctx->width = -1;
- if (!caps)
- caps = gst_caps_new_empty ();
- tmpcaps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type, ctx,
- oclass->in_plugin->id, TRUE);
- if (tmpcaps)
- gst_caps_append (caps, tmpcaps);
- else
- GST_LOG_OBJECT (ffmpegenc,
- "Couldn't get caps for oclass->in_plugin->name:%s",
- oclass->in_plugin->name);
- gst_ffmpeg_avcodec_close (ctx);
- } else {
- GST_DEBUG_OBJECT (ffmpegenc, "Opening codec failed with pixfmt : %d",
- pixfmt);
- }
- if (ctx->priv_data)
- gst_ffmpeg_avcodec_close (ctx);
- av_free (ctx);
- }
- #ifndef GST_DISABLE_GST_DEBUG
- _shut_up_I_am_probing = FALSE;
- #endif
-
- /* make sure we have something */
- if (!caps) {
- templ = gst_pad_get_pad_template_caps (pad);
- caps = gst_ffmpegenc_get_possible_sizes (ffmpegenc, pad, templ);
- gst_caps_unref (templ);
- if (filter) {
- finalcaps =
- gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
- gst_caps_unref (caps);
- } else {
- finalcaps = caps;
- }
- GST_DEBUG_OBJECT (ffmpegenc, "probing gave nothing, "
- "return intersected template %" GST_PTR_FORMAT, finalcaps);
- return finalcaps;
- }
-
- GST_DEBUG_OBJECT (ffmpegenc, "probed caps gave %" GST_PTR_FORMAT, caps);
- oclass->sinkcaps = caps;
-
- finalcaps =
- gst_ffmpegenc_get_possible_sizes (ffmpegenc, pad, oclass->sinkcaps);
- caps = gst_caps_copy (gst_pad_get_pad_template_caps (pad));
++ caps = gst_pad_get_pad_template_caps (ffmpegaudenc->sinkpad);
+
+ if (filter) {
- caps = finalcaps;
- finalcaps =
- gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
++ GstCaps *tmp;
++ tmp = gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (caps);
++ caps = tmp;
+ }
- return finalcaps;
+ GST_DEBUG_OBJECT (ffmpegaudenc,
+ "audio caps, return template %" GST_PTR_FORMAT, caps);
+
+ return caps;
}
static gboolean
- gst_ffmpegenc_setcaps (GstFFMpegEnc * ffmpegenc, GstCaps * caps)
-gst_ffmpegaudenc_setcaps (GstPad * pad, GstCaps * caps)
++gst_ffmpegaudenc_setcaps (GstFFMpegAudEnc * ffmpegaudenc, GstCaps * caps)
{
GstCaps *other_caps;
GstCaps *allowed_caps;
GstCaps *icaps;
- enum PixelFormat pix_fmt;
- GstFFMpegEncClass *oclass =
- (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);
- GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) GST_PAD_PARENT (pad);
+ GstFFMpegAudEncClass *oclass =
+ (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);
/* close old session */
- if (ffmpegenc->opened) {
- gst_ffmpeg_avcodec_close (ffmpegenc->context);
- ffmpegenc->opened = FALSE;
+ if (ffmpegaudenc->opened) {
+ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+ ffmpegaudenc->opened = FALSE;
+ /* fixed src caps;
+ * so clear src caps for proper (re-)negotiation */
+ gst_pad_set_caps (ffmpegaudenc->srcpad, NULL);
}
/* set defaults */
/* fetch pix_fmt and so on */
gst_ffmpeg_caps_with_codectype (oclass->in_plugin->type,
- caps, ffmpegenc->context);
- if (!ffmpegenc->context->time_base.den) {
- ffmpegenc->context->time_base.den = 25;
- ffmpegenc->context->time_base.num = 1;
- ffmpegenc->context->ticks_per_frame = 1;
- } else if ((oclass->in_plugin->id == CODEC_ID_MPEG4)
- && (ffmpegenc->context->time_base.den > 65535)) {
- /* MPEG4 Standards do not support time_base denominator greater than
- * (1<<16) - 1 . We therefore scale them down.
- * Agreed, it will not be the exact framerate... but the difference
- * shouldn't be that noticeable */
- ffmpegenc->context->time_base.num =
- (gint) gst_util_uint64_scale_int (ffmpegenc->context->time_base.num,
- 65535, ffmpegenc->context->time_base.den);
- ffmpegenc->context->time_base.den = 65535;
- GST_LOG_OBJECT (ffmpegenc, "MPEG4 : scaled down framerate to %d / %d",
- ffmpegenc->context->time_base.den, ffmpegenc->context->time_base.num);
- }
-
- pix_fmt = ffmpegenc->context->pix_fmt;
-
- /* max-key-interval may need the framerate set above */
- if (ffmpegenc->max_key_interval) {
- AVCodecContext *ctx;
-
- /* override gop-size */
- ctx = ffmpegenc->context;
- ctx->gop_size = (ffmpegenc->max_key_interval < 0) ?
- (-ffmpegenc->max_key_interval
- * (ctx->time_base.den * ctx->ticks_per_frame / ctx->time_base.num))
- : ffmpegenc->max_key_interval;
+ caps, ffmpegaudenc->context);
+ if (!ffmpegaudenc->context->time_base.den) {
+ ffmpegaudenc->context->time_base.den = 25;
+ ffmpegaudenc->context->time_base.num = 1;
+ ffmpegaudenc->context->ticks_per_frame = 1;
- } else if ((oclass->in_plugin->id == CODEC_ID_MPEG4)
- && (ffmpegaudenc->context->time_base.den > 65535)) {
- /* MPEG4 Standards do not support time_base denominator greater than
- * (1<<16) - 1 . We therefore scale them down.
- * Agreed, it will not be the exact framerate... but the difference
- * shouldn't be that noticeable */
- ffmpegaudenc->context->time_base.num =
- (gint) gst_util_uint64_scale_int (ffmpegaudenc->context->time_base.num,
- 65535, ffmpegaudenc->context->time_base.den);
- ffmpegaudenc->context->time_base.den = 65535;
- GST_LOG_OBJECT (ffmpegaudenc, "MPEG4 : scaled down framerate to %d / %d",
- ffmpegaudenc->context->time_base.den,
- ffmpegaudenc->context->time_base.num);
}
/* open codec */
- if (gst_ffmpeg_avcodec_open (ffmpegenc->context, oclass->in_plugin) < 0) {
- if (ffmpegenc->context->priv_data)
- gst_ffmpeg_avcodec_close (ffmpegenc->context);
- if (ffmpegenc->context->stats_in)
- g_free (ffmpegenc->context->stats_in);
- GST_DEBUG_OBJECT (ffmpegenc, "avenc_%s: Failed to open libav codec",
+ if (gst_ffmpeg_avcodec_open (ffmpegaudenc->context, oclass->in_plugin) < 0) {
+ if (ffmpegaudenc->context->priv_data)
+ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
+ if (ffmpegaudenc->context->stats_in)
+ g_free (ffmpegaudenc->context->stats_in);
- GST_DEBUG_OBJECT (ffmpegaudenc, "ffenc_%s: Failed to open FFMPEG codec",
++ GST_DEBUG_OBJECT (ffmpegaudenc, "avenc_%s: Failed to open FFMPEG codec",
oclass->in_plugin->name);
return FALSE;
}
/* second pass stats buffer no longer needed */
- if (ffmpegenc->context->stats_in)
- g_free (ffmpegenc->context->stats_in);
-
- /* is the colourspace correct? */
- if (pix_fmt != ffmpegenc->context->pix_fmt) {
- gst_ffmpeg_avcodec_close (ffmpegenc->context);
- GST_DEBUG_OBJECT (ffmpegenc,
- "avenc_%s: AV wants different colourspace (%d given, %d wanted)",
- oclass->in_plugin->name, pix_fmt, ffmpegenc->context->pix_fmt);
- return FALSE;
- }
- /* we may have failed mapping caps to a pixfmt,
- * and quite some codecs do not make up their own mind about that
- * in any case, _NONE can never work out later on */
- if (oclass->in_plugin->type == AVMEDIA_TYPE_VIDEO && pix_fmt == PIX_FMT_NONE) {
- GST_DEBUG_OBJECT (ffmpegenc, "avenc_%s: Failed to determine input format",
- oclass->in_plugin->name);
- return FALSE;
- }
+ if (ffmpegaudenc->context->stats_in)
+ g_free (ffmpegaudenc->context->stats_in);
/* some codecs support more than one format, first auto-choose one */
- GST_DEBUG_OBJECT (ffmpegenc, "picking an output format ...");
- allowed_caps = gst_pad_get_allowed_caps (ffmpegenc->srcpad);
+ GST_DEBUG_OBJECT (ffmpegaudenc, "picking an output format ...");
+ allowed_caps = gst_pad_get_allowed_caps (ffmpegaudenc->srcpad);
if (!allowed_caps) {
- GST_DEBUG_OBJECT (ffmpegenc, "... but no peer, using template caps");
+ GST_DEBUG_OBJECT (ffmpegaudenc, "... but no peer, using template caps");
    /* no copy needed: in 1.0 get_pad_template_caps returns a ref,
     * just like get_allowed_caps */
- allowed_caps = gst_pad_get_pad_template_caps (ffmpegenc->srcpad);
- allowed_caps =
- gst_caps_copy (gst_pad_get_pad_template_caps (ffmpegaudenc->srcpad));
++ allowed_caps = gst_pad_get_pad_template_caps (ffmpegaudenc->srcpad);
}
- GST_DEBUG_OBJECT (ffmpegenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
+ GST_DEBUG_OBJECT (ffmpegaudenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
- oclass->in_plugin->type, allowed_caps, ffmpegenc->context);
+ oclass->in_plugin->type, allowed_caps, ffmpegaudenc->context);
/* try to set this caps on the other side */
other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
- ffmpegenc->context, TRUE);
+ ffmpegaudenc->context, TRUE);
if (!other_caps) {
- gst_ffmpeg_avcodec_close (ffmpegenc->context);
+ gst_caps_unref (allowed_caps);
+ gst_ffmpeg_avcodec_close (ffmpegaudenc->context);
GST_DEBUG ("Unsupported codec - no caps found");
return FALSE;
}
  /* We need to provide at least ffmpeg's minimal buffer size */
outbuf = gst_buffer_new_and_alloc (max_size + FF_MIN_BUFFER_SIZE);
- audio_out = GST_BUFFER_DATA (outbuf);
+ gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
- GST_LOG_OBJECT (ffmpegenc, "encoding buffer of max size %d", max_size);
- if (ffmpegenc->buffer_size != max_size)
- ffmpegenc->buffer_size = max_size;
+ GST_LOG_OBJECT (ffmpegaudenc, "encoding buffer of max size %d", max_size);
+ if (ffmpegaudenc->buffer_size != max_size)
+ ffmpegaudenc->buffer_size = max_size;
- res = avcodec_encode_audio (ctx, audio_out, max_size, (short *) audio_in);
+ res = avcodec_encode_audio (ctx, map.data, max_size, (short *) audio_in);
if (res < 0) {
- GST_ERROR_OBJECT (ffmpegenc, "Failed to encode buffer: %d", res);
+ gst_buffer_unmap (outbuf, &map);
+ GST_ERROR_OBJECT (ffmpegaudenc, "Failed to encode buffer: %d", res);
gst_buffer_unref (outbuf);
return GST_FLOW_OK;
}
- GST_LOG_OBJECT (ffmpegenc, "got output size %d", res);
+ GST_LOG_OBJECT (ffmpegaudenc, "got output size %d", res);
+ gst_buffer_unmap (outbuf, &map);
+ gst_buffer_resize (outbuf, 0, res);
- GST_BUFFER_SIZE (outbuf) = res;
GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
GST_BUFFER_DURATION (outbuf) = duration;
if (discont)
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
- gst_buffer_set_caps (outbuf, GST_PAD_CAPS (ffmpegaudenc->srcpad));
- GST_LOG_OBJECT (ffmpegenc, "pushing size %d, timestamp %" GST_TIME_FORMAT,
+ GST_LOG_OBJECT (ffmpegaudenc, "pushing size %d, timestamp %" GST_TIME_FORMAT,
res, GST_TIME_ARGS (timestamp));
- ret = gst_pad_push (ffmpegenc->srcpad, outbuf);
+ ret = gst_pad_push (ffmpegaudenc->srcpad, outbuf);
return ret;
}
static GstFlowReturn
- gst_ffmpegenc_chain_audio (GstPad * pad, GstObject * parent, GstBuffer * inbuf)
-gst_ffmpegaudenc_chain_audio (GstPad * pad, GstBuffer * inbuf)
++gst_ffmpegaudenc_chain_audio (GstPad * pad, GstObject * parent,
++ GstBuffer * inbuf)
{
- GstFFMpegEnc *ffmpegenc;
- GstFFMpegEncClass *oclass;
+ GstFFMpegAudEnc *ffmpegaudenc;
+ GstFFMpegAudEncClass *oclass;
AVCodecContext *ctx;
GstClockTime timestamp, duration;
- guint size, frame_size;
+ gsize size, frame_size;
gint osize;
GstFlowReturn ret;
gint out_size;
gboolean discont;
guint8 *in_data;
- ffmpegenc = (GstFFMpegEnc *) parent;
- oclass = (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);
- ffmpegaudenc = (GstFFMpegAudEnc *) (GST_OBJECT_PARENT (pad));
++ ffmpegaudenc = (GstFFMpegAudEnc *) parent;
+ oclass = (GstFFMpegAudEncClass *) G_OBJECT_GET_CLASS (ffmpegaudenc);
- if (G_UNLIKELY (!ffmpegenc->opened))
++ if (G_UNLIKELY (!ffmpegaudenc->opened))
+ goto not_negotiated;
+
- ctx = ffmpegenc->context;
+ ctx = ffmpegaudenc->context;
- size = GST_BUFFER_SIZE (inbuf);
+ size = gst_buffer_get_size (inbuf);
timestamp = GST_BUFFER_TIMESTAMP (inbuf);
duration = GST_BUFFER_DURATION (inbuf);
discont = GST_BUFFER_IS_DISCONT (inbuf);
- GST_DEBUG_OBJECT (ffmpegenc,
+ GST_DEBUG_OBJECT (ffmpegaudenc,
"Received time %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT
- ", size %d", GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration), size);
+ ", size %" G_GSIZE_FORMAT, GST_TIME_ARGS (timestamp),
+ GST_TIME_ARGS (duration), size);
frame_size = ctx->frame_size;
osize = av_get_bits_per_sample_format (ctx->sample_fmt) / 8;
* or samplesize to divide by the samplerate */
/* take an audio buffer out of the adapter */
- in_data = (guint8 *) gst_adapter_map (ffmpegenc->adapter, frame_bytes);
- ffmpegenc->adapter_consumed += frame_size;
- in_data =
- (guint8 *) gst_adapter_peek (ffmpegaudenc->adapter, frame_bytes);
++ in_data = (guint8 *) gst_adapter_map (ffmpegaudenc->adapter, frame_bytes);
+ ffmpegaudenc->adapter_consumed += frame_size;
/* calculate timestamp and duration relative to start of adapter and to
* the amount of samples we consumed */
out_size = frame_bytes * 4;
ret =
- gst_ffmpegenc_encode_audio (ffmpegenc, in_data, frame_bytes, out_size,
- timestamp, duration, ffmpegenc->discont);
+ gst_ffmpegaudenc_encode_audio (ffmpegaudenc, in_data, frame_bytes,
+ out_size, timestamp, duration, ffmpegaudenc->discont);
- gst_adapter_unmap (ffmpegenc->adapter);
- gst_adapter_flush (ffmpegenc->adapter, frame_bytes);
++ gst_adapter_unmap (ffmpegaudenc->adapter);
+ gst_adapter_flush (ffmpegaudenc->adapter, frame_bytes);
if (ret != GST_FLOW_OK)
goto push_failed;
/* advance the adapter timestamp with the duration */
timestamp += duration;
- ffmpegenc->discont = FALSE;
- avail = gst_adapter_available (ffmpegenc->adapter);
+ ffmpegaudenc->discont = FALSE;
+ avail = gst_adapter_available (ffmpegaudenc->adapter);
}
- GST_LOG_OBJECT (ffmpegenc, "%u bytes left in the adapter", avail);
+ GST_LOG_OBJECT (ffmpegaudenc, "%u bytes left in the adapter", avail);
} else {
+ GstMapInfo map;
/* we have no frame_size, feed the encoder all the data and expect a fixed
* output size */
int coded_bps = av_get_bits_per_sample (oclass->in_plugin->id);
if (coded_bps)
out_size = (out_size * coded_bps) / 8;
- in_data = (guint8 *) GST_BUFFER_DATA (inbuf);
+ gst_buffer_map (inbuf, &map, GST_MAP_READ);
+ in_data = map.data;
+ size = map.size;
- ret = gst_ffmpegenc_encode_audio (ffmpegenc, in_data, size, out_size,
+ ret = gst_ffmpegaudenc_encode_audio (ffmpegaudenc, in_data, size, out_size,
timestamp, duration, discont);
+ gst_buffer_unmap (inbuf, &map);
gst_buffer_unref (inbuf);
if (ret != GST_FLOW_OK)
return GST_FLOW_OK;
/* ERRORS */
- GST_ELEMENT_ERROR (ffmpegenc, CORE, NEGOTIATION, (NULL),
+not_negotiated:
+ {
++ GST_ELEMENT_ERROR (ffmpegaudenc, CORE, NEGOTIATION, (NULL),
+ ("not configured to input format before data start"));
+ gst_buffer_unref (inbuf);
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
push_failed:
{
- GST_DEBUG_OBJECT (ffmpegenc, "Failed to push buffer %d (%s)", ret,
+ GST_DEBUG_OBJECT (ffmpegaudenc, "Failed to push buffer %d (%s)", ret,
gst_flow_get_name (ret));
return ret;
}
}
- static void
- gst_ffmpegenc_flush_buffers (GstFFMpegEnc * ffmpegenc, gboolean send)
- {
- GstBuffer *outbuf, *inbuf;
- gint ret_size;
-
- GST_DEBUG_OBJECT (ffmpegenc, "flushing buffers with sending %d", send);
-
- /* no need to empty codec if there is none */
- if (!ffmpegenc->opened)
- goto flush;
-
- while (!g_queue_is_empty (ffmpegenc->delay)) {
-
- ffmpegenc_setup_working_buf (ffmpegenc);
-
- ret_size = avcodec_encode_video (ffmpegenc->context,
- ffmpegenc->working_buf, ffmpegenc->working_buf_size, NULL);
-
- if (ret_size < 0) { /* there should be something, notify and give up */
- #ifndef GST_DISABLE_GST_DEBUG
- GstFFMpegEncClass *oclass =
- (GstFFMpegEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
- GST_WARNING_OBJECT (ffmpegenc,
- "avenc_%s: failed to flush buffer", oclass->in_plugin->name);
- #endif /* GST_DISABLE_GST_DEBUG */
- break;
- }
-
- /* save stats info if there is some as well as a stats file */
- if (ffmpegenc->file && ffmpegenc->context->stats_out)
- if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
- GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
- (("Could not write to file \"%s\"."), ffmpegenc->filename),
- GST_ERROR_SYSTEM);
-
- /* handle b-frame delay when no output, so we don't output empty frames */
- inbuf = g_queue_pop_head (ffmpegenc->delay);
-
- outbuf = gst_buffer_new_and_alloc (ret_size);
- gst_buffer_fill (outbuf, 0, ffmpegenc->working_buf, ret_size);
- GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);
- GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (inbuf);
-
- if (!ffmpegenc->context->coded_frame->key_frame)
- GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
-
- gst_buffer_unref (inbuf);
-
- if (send)
- gst_pad_push (ffmpegenc->srcpad, outbuf);
- else
- gst_buffer_unref (outbuf);
- }
-
- flush:
- {
- /* make sure that we empty the queue, is still needed if we had to break */
- while (!g_queue_is_empty (ffmpegenc->delay))
- gst_buffer_unref (g_queue_pop_head (ffmpegenc->delay));
- }
- }
-
+static gboolean
- gst_ffmpegenc_event_sink (GstPad * pad, GstObject * parent, GstEvent * event)
++gst_ffmpegaudenc_event_sink (GstPad * pad, GstObject * parent, GstEvent * event)
+{
- GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) parent;
++ GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) parent;
+
+ switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_EOS:
- gst_ffmpegenc_flush_buffers (ffmpegenc, TRUE);
- break;
- /* no flushing if flush received,
- * buffers in encoder are considered (in the) past */
- case GST_EVENT_CUSTOM_DOWNSTREAM:
- {
- const GstStructure *s;
-
- s = gst_event_get_structure (event);
- if (gst_structure_has_name (s, "GstForceKeyUnit")) {
- ffmpegenc->picture->pict_type = FF_I_TYPE;
- }
- break;
- }
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+ gboolean ret;
+
+ gst_event_parse_caps (event, &caps);
- ret = gst_ffmpegenc_setcaps (ffmpegenc, caps);
++ ret = gst_ffmpegaudenc_setcaps (ffmpegaudenc, caps);
+ gst_event_unref (event);
+ return ret;
+ }
+ default:
+ break;
+ }
+
- return gst_pad_push_event (ffmpegenc->srcpad, event);
- }
-
- static gboolean
- gst_ffmpegenc_event_src (GstPad * pad, GstObject * parent, GstEvent * event)
- {
- GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) parent;
- gboolean forward = TRUE;
-
- switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_CUSTOM_UPSTREAM:{
- const GstStructure *s;
- s = gst_event_get_structure (event);
- if (gst_structure_has_name (s, "GstForceKeyUnit")) {
- GST_OBJECT_LOCK (ffmpegenc);
- ffmpegenc->force_keyframe = TRUE;
- GST_OBJECT_UNLOCK (ffmpegenc);
- forward = FALSE;
- gst_event_unref (event);
- }
- break;
- }
-
- default:
- break;
- }
-
- if (forward)
- return gst_pad_push_event (ffmpegenc->sinkpad, event);
- else
- return TRUE;
++ return gst_pad_event_default (pad, parent, event);
+}
+
+static gboolean
- gst_ffmpegenc_query_sink (GstPad * pad, GstObject * parent, GstQuery * query)
++gst_ffmpegaudenc_query_sink (GstPad * pad, GstObject * parent, GstQuery * query)
+{
++ GstFFMpegAudEnc *ffmpegaudenc = (GstFFMpegAudEnc *) parent;
+ gboolean res = FALSE;
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_CAPS:
+ {
+ GstCaps *filter, *caps;
+
+ gst_query_parse_caps (query, &filter);
- caps = gst_ffmpegenc_getcaps (pad, filter);
++ caps = gst_ffmpegaudenc_getcaps (ffmpegaudenc, filter);
+ gst_query_set_caps_result (query, caps);
+ gst_caps_unref (caps);
+ res = TRUE;
+ break;
+ }
+ default:
+ res = gst_pad_query_default (pad, parent, query);
+ break;
+ }
+
+ return res;
+}
static void
- gst_ffmpegenc_set_property (GObject * object,
+ gst_ffmpegaudenc_set_property (GObject * object,
guint prop_id, const GValue * value, GParamSpec * pspec)
{
- GstFFMpegEnc *ffmpegenc;
+ GstFFMpegAudEnc *ffmpegaudenc;
/* Get a pointer of the right type. */
- ffmpegenc = (GstFFMpegEnc *) (object);
+ ffmpegaudenc = (GstFFMpegAudEnc *) (object);
- if (ffmpegenc->opened) {
- GST_WARNING_OBJECT (ffmpegenc,
+ if (ffmpegaudenc->opened) {
+ GST_WARNING_OBJECT (ffmpegaudenc,
"Can't change properties once decoder is setup !");
return;
}
/* Check the argument id to see which argument we're setting. */
switch (prop_id) {
case ARG_BIT_RATE:
- ffmpegenc->bitrate = g_value_get_int (value);
- break;
- case ARG_GOP_SIZE:
- ffmpegenc->gop_size = g_value_get_int (value);
- break;
- case ARG_ME_METHOD:
- ffmpegenc->me_method = g_value_get_enum (value);
- ffmpegaudenc->bitrate = g_value_get_ulong (value);
++ ffmpegaudenc->bitrate = g_value_get_int (value);
break;
case ARG_BUFSIZE:
break;
case ARG_RTP_PAYLOAD_SIZE:
- ffmpegenc->rtp_payload_size = g_value_get_int (value);
- ffmpegaudenc->rtp_payload_size = g_value_get_ulong (value);
++ ffmpegaudenc->rtp_payload_size = g_value_get_int (value);
break;
default:
- if (!gst_ffmpeg_cfg_set_property (object, value, pspec))
- G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
switch (prop_id) {
case ARG_BIT_RATE:
- g_value_set_int (value, ffmpegenc->bitrate);
- g_value_set_ulong (value, ffmpegaudenc->bitrate);
++ g_value_set_int (value, ffmpegaudenc->bitrate);
+ break;
- case ARG_GOP_SIZE:
- g_value_set_int (value, ffmpegenc->gop_size);
- break;
- case ARG_ME_METHOD:
- g_value_set_enum (value, ffmpegenc->me_method);
break;
case ARG_BUFSIZE:
- g_value_set_int (value, ffmpegenc->buffer_size);
- g_value_set_ulong (value, ffmpegaudenc->buffer_size);
++ g_value_set_int (value, ffmpegaudenc->buffer_size);
break;
case ARG_RTP_PAYLOAD_SIZE:
- g_value_set_int (value, ffmpegenc->rtp_payload_size);
- g_value_set_ulong (value, ffmpegaudenc->rtp_payload_size);
++ g_value_set_int (value, ffmpegaudenc->rtp_payload_size);
break;
default:
- if (!gst_ffmpeg_cfg_get_property (object, value, pspec))
- G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
goto next;
/* no quasi codecs, please */
-- if (in_plugin->id == CODEC_ID_RAWVIDEO ||
-- in_plugin->id == CODEC_ID_V210 ||
-- in_plugin->id == CODEC_ID_V210X ||
-- in_plugin->id == CODEC_ID_R210 ||
-- in_plugin->id == CODEC_ID_ZLIB ||
-- (in_plugin->id >= CODEC_ID_PCM_S16LE &&
++ if ((in_plugin->id >= CODEC_ID_PCM_S16LE &&
in_plugin->id <= CODEC_ID_PCM_BLURAY)) {
goto next;
}
GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
/* no codecs for which we're GUARANTEED to have better alternatives */
-- if (!strcmp (in_plugin->name, "vorbis") ||
-- !strcmp (in_plugin->name, "gif") || !strcmp (in_plugin->name, "flac")) {
++ if (!strcmp (in_plugin->name, "vorbis")
++ || !strcmp (in_plugin->name, "flac")) {
GST_LOG ("Ignoring encoder %s", in_plugin->name);
goto next;
}