AVCodecContext *context;
AVFrame *picture;
gboolean opened;
+ GstBuffer *cache;
/* cache */
gulong bitrate;
static GstPadLinkReturn
gst_ffmpegenc_connect (GstPad *pad, GstCaps *caps);
-static void gst_ffmpegenc_chain (GstPad *pad, GstData *_data);
+static void gst_ffmpegenc_chain_video (GstPad *pad, GstData *_data);
+static void gst_ffmpegenc_chain_audio (GstPad *pad, GstData *_data);
static void gst_ffmpegenc_set_property (GObject *object,
guint prop_id,
/* setup pads */
ffmpegenc->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
gst_pad_set_link_function (ffmpegenc->sinkpad, gst_ffmpegenc_connect);
- gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain);
ffmpegenc->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
gst_element_add_pad (GST_ELEMENT (ffmpegenc), ffmpegenc->sinkpad);
ffmpegenc->context = avcodec_alloc_context();
ffmpegenc->picture = avcodec_alloc_frame();
ffmpegenc->opened = FALSE;
+ ffmpegenc->cache = NULL;
if (oclass->in_plugin->type == CODEC_TYPE_VIDEO) {
+ gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain_video);
+
ffmpegenc->bitrate = 300000;
ffmpegenc->buffer_size = 512 * 1024;
ffmpegenc->gop_size = 15;
} else if (oclass->in_plugin->type == CODEC_TYPE_AUDIO) {
+ gst_pad_set_chain_function (ffmpegenc->sinkpad, gst_ffmpegenc_chain_audio);
+
ffmpegenc->bitrate = 128000;
}
}
/* no edges */
ffmpegenc->context->flags |= CODEC_FLAG_EMU_EDGE;
- /* FIXME: we actually need to request the framerate
- * from the previous element - we currently use the
- * default (25.0), which is just plain wrong */
- ffmpegenc->context->frame_rate = 25 * DEFAULT_FRAME_RATE_BASE;
- ffmpegenc->context->frame_rate_base = DEFAULT_FRAME_RATE_BASE;
-
for (ret_caps = caps; ret_caps != NULL; ret_caps = ret_caps->next) {
+ enum PixelFormat pix_fmt;
+
/* fetch pix_fmt and so on */
gst_ffmpeg_caps_to_codectype (oclass->in_plugin->type,
caps, ffmpegenc->context);
+ pix_fmt = ffmpegenc->context->pix_fmt;
+
/* open codec */
if (avcodec_open (ffmpegenc->context, oclass->in_plugin) < 0) {
- GST_DEBUG (
- "ffenc_%s: Failed to open FFMPEG codec",
+ GST_DEBUG ("ffenc_%s: Failed to open FFMPEG codec",
oclass->in_plugin->name);
continue;
}
+ /* is the colourspace correct? */
+ if (pix_fmt != ffmpegenc->context->pix_fmt) {
+ avcodec_close (ffmpegenc->context);
+ GST_DEBUG ("ffenc_%s: AV wants different colourspace (%d given, %d wanted)",
+ oclass->in_plugin->name, pix_fmt, ffmpegenc->context->pix_fmt);
+ continue;
+ }
+
break;
}
ffmpegenc->context);
if (!ret_caps) {
avcodec_close (ffmpegenc->context);
- GST_DEBUG (
- "Unsupported codec - no caps found");
+ GST_DEBUG ("Unsupported codec - no caps found");
return GST_PAD_LINK_REFUSED;
}
if ((ret = gst_pad_try_set_caps (ffmpegenc->srcpad, ret_caps)) <= 0) {
avcodec_close (ffmpegenc->context);
- GST_DEBUG (
- "Failed to set caps on next element for ffmpeg encoder (%s)",
+ GST_DEBUG ("Failed to set caps on next element for ffmpeg encoder (%s)",
oclass->in_plugin->name);
return ret;
}
}
static void
-gst_ffmpegenc_chain (GstPad *pad,
-                     GstData *_data)
+gst_ffmpegenc_chain_video (GstPad *pad,
+                           GstData *_data)
{
  GstBuffer *inbuf = GST_BUFFER (_data);
  GstBuffer *outbuf = NULL;
  GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *)(gst_pad_get_parent (pad));
  GstFFMpegEncClass *oclass = (GstFFMpegEncClass*)(G_OBJECT_GET_CLASS(ffmpegenc));
-  gpointer data;
-  gint size, ret_size = 0;
-
-  data = GST_BUFFER_DATA (inbuf);
-  size = GST_BUFFER_SIZE (inbuf);
+  gint ret_size = 0;

  /* FIXME: events (discont (flush!) and eos (close down) etc.) */

-  switch (oclass->in_plugin->type) {
-    case CODEC_TYPE_VIDEO:
-      outbuf = gst_buffer_new_and_alloc (ffmpegenc->buffer_size);
-      avpicture_fill ((AVPicture *) ffmpegenc->picture,
-                      GST_BUFFER_DATA (inbuf),
-                      ffmpegenc->context->pix_fmt,
-                      ffmpegenc->context->width,
-                      ffmpegenc->context->height);
-      ret_size = avcodec_encode_video (ffmpegenc->context,
-                                       GST_BUFFER_DATA (outbuf),
-                                       GST_BUFFER_MAXSIZE (outbuf),
-                                       ffmpegenc->picture);
-      break;
-
-    case CODEC_TYPE_AUDIO:
-      ffmpegenc->context->frame_size = GST_BUFFER_SIZE (inbuf) /
-        (2 * ffmpegenc->context->channels);
-      outbuf = gst_buffer_new_and_alloc (GST_BUFFER_SIZE (inbuf));
-      ret_size = avcodec_encode_audio (ffmpegenc->context,
-                                       GST_BUFFER_DATA (outbuf),
-                                       GST_BUFFER_MAXSIZE (outbuf),
-                                       (const short int *)
-                                       GST_BUFFER_DATA (inbuf));
-      break;
-
-    default:
-      g_assert(0);
-      break;
-  }
+  /* video path: point the AVPicture planes at the raw input frame (no copy)
+   * and encode it into a freshly allocated output buffer */
+  outbuf = gst_buffer_new_and_alloc (ffmpegenc->buffer_size);
+  avpicture_fill ((AVPicture *) ffmpegenc->picture,
+                  GST_BUFFER_DATA (inbuf),
+                  ffmpegenc->context->pix_fmt,
+                  ffmpegenc->context->width,
+                  ffmpegenc->context->height);
+  /* NOTE(review): GST_BUFFER_TIMESTAMP is in nanoseconds; dividing by 1000
+   * yields microseconds for picture->pts - confirm this matches the pts
+   * units the codec context actually expects */
+  ffmpegenc->picture->pts = GST_BUFFER_TIMESTAMP (inbuf) / 1000;
+  ret_size = avcodec_encode_video (ffmpegenc->context,
+                                   GST_BUFFER_DATA (outbuf),
+                                   GST_BUFFER_MAXSIZE (outbuf),
+                                   ffmpegenc->picture);
  if (ret_size < 0) {
    g_warning("ffenc_%s: failed to encode buffer",
    return;
  }
-  /* bla */
  GST_BUFFER_SIZE (outbuf) = ret_size;
  GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (inbuf);
  GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (inbuf);
}
static void
+gst_ffmpegenc_chain_audio (GstPad *pad,
+                           GstData *_data)
+{
+  GstBuffer *inbuf = GST_BUFFER (_data);
+  GstBuffer *outbuf = NULL, *subbuf;
+  GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *)(gst_pad_get_parent (pad));
+  GstFFMpegEncClass *oclass = (GstFFMpegEncClass*)(G_OBJECT_GET_CLASS(ffmpegenc));
+  gint size, ret_size = 0, in_size, frame_size;
+
+  size = GST_BUFFER_SIZE (inbuf);
+
+  /* FIXME: events (discont (flush!) and eos (close down) etc.) */
+
+  /* Audio codecs consume fixed-size frames (frame_size samples per channel,
+   * 2 bytes per sample).  The incoming buffer is chopped into such frames;
+   * any leftover tail is kept in ffmpegenc->cache for the next chain call
+   * and prepended to the following buffer. */
+  frame_size = ffmpegenc->context->frame_size * 2 *
+                 ffmpegenc->context->channels;
+  in_size = size;
+  if (ffmpegenc->cache)
+    in_size += GST_BUFFER_SIZE (ffmpegenc->cache);
+
+  while (1) {
+    /* do we have enough data for one frame? */
+    if (in_size / (2 * ffmpegenc->context->channels) <
+          ffmpegenc->context->frame_size) {
+      if (in_size > size) {
+        /* this is panic! we got a buffer, but still don't have enough
+         * data. Merge them and retry in the next cycle... */
+        /* NOTE(review): gst_buffer_merge() creates a new buffer and does not
+         * consume its arguments - the old cache and inbuf refs appear to be
+         * leaked here; verify against GstBuffer ownership rules */
+        ffmpegenc->cache = gst_buffer_merge (ffmpegenc->cache, inbuf);
+      } else if (in_size == size) {
+        /* exactly the same! how wonderful */
+        /* cache takes over the inbuf reference */
+        ffmpegenc->cache = inbuf;
+      } else if (in_size > 0) {
+        /* cache the unconsumed tail of inbuf, scaling timestamp/duration
+         * linearly by byte position within the buffer */
+        ffmpegenc->cache = gst_buffer_create_sub (inbuf, size - in_size,
+                                                  in_size);
+        GST_BUFFER_DURATION (ffmpegenc->cache) =
+            GST_BUFFER_DURATION (inbuf) * GST_BUFFER_SIZE (ffmpegenc->cache) / size;
+        GST_BUFFER_TIMESTAMP (ffmpegenc->cache) =
+            GST_BUFFER_TIMESTAMP (inbuf) + (GST_BUFFER_DURATION (inbuf) *
+                (size - in_size) / size);
+        gst_buffer_unref (inbuf);
+      } else {
+        /* everything consumed - drop our reference */
+        gst_buffer_unref (inbuf);
+      }
+
+      return;
+    }
+
+    /* create the frame */
+    if (in_size > size) {
+      /* merge */
+      /* frame straddles the cache/inbuf boundary: take the head of inbuf
+       * and glue it behind the cached tail */
+      subbuf = gst_buffer_create_sub (inbuf, 0, frame_size - (in_size - size));
+      GST_BUFFER_DURATION (subbuf) =
+          GST_BUFFER_DURATION (inbuf) * GST_BUFFER_SIZE (subbuf) / size;
+      subbuf = gst_buffer_merge (ffmpegenc->cache, subbuf);
+      ffmpegenc->cache = NULL;
+    } else {
+      /* frame lies entirely inside inbuf at the current offset */
+      subbuf = gst_buffer_create_sub (inbuf, size - in_size, frame_size);
+      GST_BUFFER_DURATION (subbuf) =
+          GST_BUFFER_DURATION (inbuf) * GST_BUFFER_SIZE (subbuf) / size;
+      GST_BUFFER_TIMESTAMP (subbuf) =
+          GST_BUFFER_TIMESTAMP (inbuf) + (GST_BUFFER_DURATION (inbuf) *
+              (size - in_size) / size);
+    }
+
+    /* NOTE(review): outbuf is sized from the whole input buffer rather than
+     * from frame_size, and subbuf is never unreffed after encoding - both
+     * look suspicious; verify */
+    outbuf = gst_buffer_new_and_alloc (GST_BUFFER_SIZE (inbuf));
+    ret_size = avcodec_encode_audio (ffmpegenc->context,
+                                     GST_BUFFER_DATA (outbuf),
+                                     GST_BUFFER_MAXSIZE (outbuf),
+                                     (const short int *)
+                                         GST_BUFFER_DATA (subbuf));
+
+    if (ret_size < 0) {
+      g_warning("ffenc_%s: failed to encode buffer",
+                oclass->in_plugin->name);
+      gst_buffer_unref (inbuf);
+      return;
+    }
+
+    /* push out one encoded frame, stamped from the sub-buffer it came from */
+    GST_BUFFER_SIZE (outbuf) = ret_size;
+    GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (subbuf);
+    GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (subbuf);
+    gst_pad_push (ffmpegenc->srcpad, GST_DATA (outbuf));
+
+    in_size -= frame_size;
+  }
+}
+
+static void
gst_ffmpegenc_set_property (GObject *object,
guint prop_id,
const GValue *value,
avcodec_close (ffmpegenc->context);
ffmpegenc->opened = FALSE;
}
+ if (ffmpegenc->cache) {
+ gst_buffer_unref (ffmpegenc->cache);
+ ffmpegenc->cache = NULL;
+ }
break;
}