AVCodecContext *context;
AVFrame *picture;
gboolean opened;
+ GstBufferPool *pool;
union
{
struct
/* reverse playback queue */
GList *queued;
-
- /* Can downstream allocate 16bytes aligned data. */
- gboolean can_allocate_aligned;
};
typedef struct _GstFFMpegDecClass GstFFMpegDecClass;
static gboolean gst_ffmpegdec_query (GstPad * pad, GstQuery * query);
static gboolean gst_ffmpegdec_src_event (GstPad * pad, GstEvent * event);
-static gboolean gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps);
static gboolean gst_ffmpegdec_sink_event (GstPad * pad, GstEvent * event);
static GstFlowReturn gst_ffmpegdec_chain (GstPad * pad, GstBuffer * buf);
sinkcaps = gst_caps_from_string ("unknown/unknown");
}
if (in_plugin->type == AVMEDIA_TYPE_VIDEO) {
- srccaps = gst_caps_from_string ("video/x-raw-rgb; video/x-raw-yuv");
+ srccaps = gst_caps_from_string ("video/x-raw");
} else {
srccaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
in_plugin->id, FALSE, in_plugin);
/* setup pads */
ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
- gst_pad_set_setcaps_function (ffmpegdec->sinkpad,
- GST_DEBUG_FUNCPTR (gst_ffmpegdec_setcaps));
gst_pad_set_event_function (ffmpegdec->sinkpad,
GST_DEBUG_FUNCPTR (gst_ffmpegdec_sink_event));
gst_pad_set_chain_function (ffmpegdec->sinkpad,
ffmpegdec->format.video.fps_n = -1;
ffmpegdec->format.video.old_fps_n = -1;
gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
-
- /* We initially assume downstream can allocate 16 bytes aligned buffers */
- ffmpegdec->can_allocate_aligned = TRUE;
}
static void
{
GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
- if (ffmpegdec->context != NULL) {
+ if (ffmpegdec->context != NULL)
av_free (ffmpegdec->context);
- ffmpegdec->context = NULL;
- }
- if (ffmpegdec->picture != NULL) {
+ if (ffmpegdec->picture != NULL)
av_free (ffmpegdec->picture);
- ffmpegdec->picture = NULL;
- }
+
+ if (ffmpegdec->pool)
+ gst_object_unref (ffmpegdec->pool);
G_OBJECT_CLASS (parent_class)->finalize (object);
}
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_QOS:
{
+ GstQOSType type;
gdouble proportion;
GstClockTimeDiff diff;
GstClockTime timestamp;
- gst_event_parse_qos (event, &proportion, &diff, ×tamp);
+ gst_event_parse_qos (event, &type, &proportion, &diff, ×tamp);
/* update our QoS values */
gst_ffmpegdec_update_qos (ffmpegdec, proportion, timestamp + diff);
}
static gboolean
-gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps)
+gst_ffmpegdec_setcaps (GstFFMpegDec * ffmpegdec, GstCaps * caps)
{
- GstFFMpegDec *ffmpegdec;
GstFFMpegDecClass *oclass;
GstStructure *structure;
const GValue *par;
const GValue *fps;
gboolean ret = TRUE;
- ffmpegdec = (GstFFMpegDec *) (gst_pad_get_parent (pad));
oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
- GST_DEBUG_OBJECT (pad, "setcaps called");
+ GST_DEBUG_OBJECT (ffmpegdec, "setcaps called");
GST_OBJECT_LOCK (ffmpegdec);
gst_structure_get_int (structure, "height",
&ffmpegdec->format.video.clip_height);
- GST_DEBUG_OBJECT (pad, "clipping to %dx%d",
+ GST_DEBUG_OBJECT (ffmpegdec, "clipping to %dx%d",
ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
/* take into account the lowres property */
if (ffmpegdec->format.video.clip_height != -1)
ffmpegdec->format.video.clip_height >>= ffmpegdec->lowres;
- GST_DEBUG_OBJECT (pad, "final clipping to %dx%d",
+ GST_DEBUG_OBJECT (ffmpegdec, "final clipping to %dx%d",
ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
done:
GST_OBJECT_UNLOCK (ffmpegdec);
- gst_object_unref (ffmpegdec);
-
return ret;
/* ERRORS */
fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
width, height);
- if (!ffmpegdec->context->palctrl && ffmpegdec->can_allocate_aligned) {
- GST_LOG_OBJECT (ffmpegdec, "calling pad_alloc");
+ if (!ffmpegdec->context->palctrl) {
+ GST_LOG_OBJECT (ffmpegdec, "doing alloc from pool");
/* no pallete, we can use the buffer size to alloc */
- ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
- GST_BUFFER_OFFSET_NONE, fsize,
- GST_PAD_CAPS (ffmpegdec->srcpad), outbuf);
+ ret = gst_buffer_pool_acquire_buffer (ffmpegdec->pool, outbuf, NULL);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto alloc_failed;
-
- /* If buffer isn't 128-bit aligned, create a memaligned one ourselves */
- if (((uintptr_t) GST_BUFFER_DATA (*outbuf)) % 16) {
- GST_DEBUG_OBJECT (ffmpegdec,
- "Downstream can't allocate aligned buffers.");
- ffmpegdec->can_allocate_aligned = FALSE;
- gst_buffer_unref (*outbuf);
- *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
- }
} else {
GST_LOG_OBJECT (ffmpegdec,
"not calling pad_alloc, we have a pallete or downstream can't give 16 byte aligned buffers.");
* fsize contains the size of the palette, so the overall size
* is bigger than ffmpegcolorspace's unit size, which will
* prompt GstBaseTransform to complain endlessly ... */
- *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
+ *outbuf = new_aligned_buffer (fsize);
ret = GST_FLOW_OK;
}
- /* set caps, we do this here because the buffer is still writable here and we
- * are sure to be negotiated */
- gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
return ret;
{
GstFlowReturn ret;
gint clip_width, clip_height;
+ guint8 *data;
+ gsize size;
/* take final clipped output size */
if ((clip_width = ffmpegdec->format.video.clip_width) == -1)
return avcodec_default_get_buffer (context, picture);
}
+ /* FIXME, unmap me later */
+ data = gst_buffer_map (buf, &size, NULL, GST_MAP_WRITE);
+ GST_LOG_OBJECT (ffmpegdec, "buffer data %p, size %" G_GSIZE_FORMAT, data,
+ size);
+
/* copy the right pointers and strides in the picture object */
gst_ffmpeg_avpicture_fill ((AVPicture *) picture,
- GST_BUFFER_DATA (buf), context->pix_fmt, width, height);
+ data, context->pix_fmt, width, height);
break;
}
case AVMEDIA_TYPE_AUDIO:
#else
gst_buffer_unref (buf);
#endif
+ /* FIXME, unmap buffer data */
/* zero out the reference in ffmpeg */
for (i = 0; i < 4; i++) {
}
+/* gst_ffmpegdec_bufferpool:
+ *
+ * Negotiate a GstBufferPool for decoded frames on @caps.  Downstream is
+ * asked first with an ALLOCATION query; when it does not answer, a
+ * default pool is created, sized for one decoded picture.  The active
+ * pool is stored in ffmpegdec->pool, replacing any previous one.
+ *
+ * Returns: FALSE when the pool could not be configured or activated.
+ */
static gboolean
+gst_ffmpegdec_bufferpool (GstFFMpegDec * ffmpegdec, GstCaps * caps)
+{
+  GstQuery *query;
+  GstBufferPool *pool = NULL;
+  guint size, min, max, prefix, alignment;
+  GstStructure *config;
+
+  /* find a pool for the negotiated caps now */
+  query = gst_query_new_allocation (caps, TRUE);
+
+  if (gst_pad_peer_query (ffmpegdec->srcpad, query)) {
+    /* we got configuration from our peer, parse them */
+    gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
+        &alignment, &pool);
+  } else {
+    size = 0;
+    min = max = 0;
+    prefix = 0;
+    alignment = 15;
+  }
+
+  gst_query_unref (query);
+
+  if (size == 0) {
+    /* no usable size from downstream, fall back to the size of one
+     * decoded picture */
+    size = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
+        ffmpegdec->context->width, ffmpegdec->context->height);
+  }
+
+  if (pool == NULL) {
+    /* we did not get a pool, make one ourselves then */
+    pool = gst_buffer_pool_new ();
+  }
+
+  config = gst_buffer_pool_get_config (pool);
+  /* ffmpeg wants 16 byte aligned data, make sure the alignment mask
+   * requests at least that (15 == 16 byte alignment mask) */
+  gst_buffer_pool_config_set (config, caps, size, min, max, prefix,
+      alignment | 15);
+  if (!gst_buffer_pool_set_config (pool, config))
+    goto config_failed;
+
+  /* activate before exposing the pool so acquire works right away */
+  if (!gst_buffer_pool_set_active (pool, TRUE))
+    goto activate_failed;
+
+  if (ffmpegdec->pool)
+    gst_object_unref (ffmpegdec->pool);
+  ffmpegdec->pool = pool;
+
+  return TRUE;
+
+  /* ERRORS */
+config_failed:
+  {
+    GST_WARNING_OBJECT (ffmpegdec, "failed to set config on bufferpool");
+    gst_object_unref (pool);
+    return FALSE;
+  }
+activate_failed:
+  {
+    GST_WARNING_OBJECT (ffmpegdec, "failed to activate bufferpool");
+    gst_object_unref (pool);
+    return FALSE;
+  }
+}
+
+static gboolean
gst_ffmpegdec_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
{
GstFFMpegDecClass *oclass;
if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
goto caps_failed;
+ /* now figure out a bufferpool */
+ if (!gst_ffmpegdec_bufferpool (ffmpegdec, caps))
+ goto no_bufferpool;
+
gst_caps_unref (caps);
return TRUE;
return FALSE;
}
+no_bufferpool:
+ {
+ GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
+        ("Could not create bufferpool for ffmpeg decoder (%s)",
+ oclass->in_plugin->name));
+ gst_caps_unref (caps);
+
+ return FALSE;
+ }
}
/* perform qos calculations before decoding the next frame.
GstClockTime in_dur)
{
gboolean res = TRUE;
- gint64 cstart, cstop;
+ guint64 cstart, cstop;
GstClockTime stop;
GST_LOG_OBJECT (dec,
} else {
AVPicture pic, *outpic;
gint width, height;
+ guint8 *data;
+ gsize size;
GST_LOG_OBJECT (ffmpegdec, "get output buffer");
/* original ffmpeg code does not handle odd sizes correctly.
* This patched up version does */
- gst_ffmpeg_avpicture_fill (&pic, GST_BUFFER_DATA (*outbuf),
+ data = gst_buffer_map (*outbuf, &size, NULL, GST_MAP_WRITE);
+
+ gst_ffmpeg_avpicture_fill (&pic, data,
ffmpegdec->context->pix_fmt, width, height);
outpic = (AVPicture *) ffmpegdec->picture;
(guint) (outpic->data[2] - outpic->data[0]));
av_picture_copy (&pic, outpic, ffmpegdec->context->pix_fmt, width, height);
+ gst_buffer_unmap (*outbuf, data, size);
}
ffmpegdec->picture->reordered_opaque = -1;
/* special cases */
alloc_failed:
{
- GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed");
+ GST_DEBUG_OBJECT (ffmpegdec, "buffer alloc failed");
return ret;
}
}
/* palette is not part of raw video frame in gst and the size
* of the outgoing buffer needs to be adjusted accordingly */
- if (ffmpegdec->context->palctrl != NULL)
- GST_BUFFER_SIZE (*outbuf) -= AVPALETTE_SIZE;
+ if (ffmpegdec->context->palctrl != NULL) {
+
+ gst_buffer_resize (*outbuf, 0,
+ gst_buffer_get_size (*outbuf) - AVPALETTE_SIZE);
+ }
/* now see if we need to clip the buffer against the segment boundaries. */
if (G_UNLIKELY (!clip_video_buffer (ffmpegdec, *outbuf, out_timestamp,
GstClockTime in_dur)
{
GstClockTime stop;
- gint64 diff, ctime, cstop;
+ gint64 diff;
+ guint64 ctime, cstop;
gboolean res = TRUE;
+ gsize size, offset;
+
+ size = gst_buffer_get_size (buf);
+ offset = 0;
GST_LOG_OBJECT (dec,
"timestamp:%" GST_TIME_FORMAT ", duration:%" GST_TIME_FORMAT
- ", size %u", GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
- GST_BUFFER_SIZE (buf));
+ ", size %" G_GSIZE_FORMAT, GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
+ size);
/* can't clip without TIME segment */
if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
GST_DEBUG_OBJECT (dec, "clipping start to %" GST_TIME_FORMAT " %"
G_GINT64_FORMAT " bytes", GST_TIME_ARGS (ctime), diff);
- GST_BUFFER_SIZE (buf) -= diff;
- GST_BUFFER_DATA (buf) += diff;
+ offset += diff;
+ size -= diff;
}
if (G_UNLIKELY ((diff = stop - cstop) > 0)) {
/* bring clipped time to bytes */
GST_DEBUG_OBJECT (dec, "clipping stop to %" GST_TIME_FORMAT " %"
G_GINT64_FORMAT " bytes", GST_TIME_ARGS (cstop), diff);
- GST_BUFFER_SIZE (buf) -= diff;
+ size -= diff;
}
+ gst_buffer_resize (buf, offset, size);
GST_BUFFER_TIMESTAMP (buf) = ctime;
GST_BUFFER_DURATION (buf) = cstop - ctime;
gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
GstClockTime out_timestamp, out_duration;
gint64 out_offset;
+ int16_t *odata;
AVPacket packet;
GST_DEBUG_OBJECT (ffmpegdec,
dec_info->offset, GST_TIME_ARGS (dec_info->timestamp),
GST_TIME_ARGS (dec_info->duration), GST_TIME_ARGS (ffmpegdec->next_out));
- *outbuf =
- new_aligned_buffer (AVCODEC_MAX_AUDIO_FRAME_SIZE,
- GST_PAD_CAPS (ffmpegdec->srcpad));
+ *outbuf = new_aligned_buffer (AVCODEC_MAX_AUDIO_FRAME_SIZE);
+
+ odata = gst_buffer_map (*outbuf, NULL, NULL, GST_MAP_WRITE);
gst_avpacket_init (&packet, data, size);
- len = avcodec_decode_audio3 (ffmpegdec->context,
- (int16_t *) GST_BUFFER_DATA (*outbuf), &have_data, &packet);
+ len = avcodec_decode_audio3 (ffmpegdec->context, odata, &have_data, &packet);
+
GST_DEBUG_OBJECT (ffmpegdec,
"Decode audio: len=%d, have_data=%d", len, have_data);
if (len >= 0 && have_data > 0) {
+ /* Buffer size */
+ gst_buffer_unmap (*outbuf, odata, have_data);
+
GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
if (!gst_ffmpegdec_negotiate (ffmpegdec, FALSE)) {
gst_buffer_unref (*outbuf);
goto beach;
}
- /* Buffer size */
- GST_BUFFER_SIZE (*outbuf) = have_data;
-
/*
* Timestamps:
*
GST_BUFFER_TIMESTAMP (*outbuf) = out_timestamp;
GST_BUFFER_DURATION (*outbuf) = out_duration;
GST_BUFFER_OFFSET (*outbuf) = out_offset;
- gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
/* the next timestamp we'll use when interpolating */
if (GST_CLOCK_TIME_IS_VALID (out_timestamp))
goto clipped;
} else {
+ gst_buffer_unmap (*outbuf, odata, 0);
gst_buffer_unref (*outbuf);
*outbuf = NULL;
}
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
ffmpegdec->discont = FALSE;
}
-
if (ffmpegdec->segment.rate > 0.0) {
/* and off we go */
*ret = gst_pad_push (ffmpegdec->srcpad, outbuf);
clear_queued (ffmpegdec);
break;
}
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+
+ ret = gst_ffmpegdec_setcaps (ffmpegdec, caps);
+
+ gst_event_unref (event);
+ goto done;
+ }
+ case GST_EVENT_SEGMENT:
{
- gboolean update;
- GstFormat fmt;
- gint64 start, stop, time;
- gdouble rate, arate;
+ GstSegment segment;
- gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
- &start, &stop, &time);
+ gst_event_copy_segment (event, &segment);
- switch (fmt) {
+ switch (segment.format) {
case GST_FORMAT_TIME:
/* fine, our native segment format */
break;
GST_DEBUG_OBJECT (ffmpegdec, "bitrate: %d", bit_rate);
/* convert values to TIME */
- if (start != -1)
- start = gst_util_uint64_scale_int (start, GST_SECOND, bit_rate);
- if (stop != -1)
- stop = gst_util_uint64_scale_int (stop, GST_SECOND, bit_rate);
- if (time != -1)
- time = gst_util_uint64_scale_int (time, GST_SECOND, bit_rate);
+ if (segment.start != -1)
+ segment.start =
+ gst_util_uint64_scale_int (segment.start, GST_SECOND, bit_rate);
+ if (segment.stop != -1)
+ segment.stop =
+ gst_util_uint64_scale_int (segment.stop, GST_SECOND, bit_rate);
+ if (segment.time != -1)
+ segment.time =
+ gst_util_uint64_scale_int (segment.time, GST_SECOND, bit_rate);
/* unref old event */
gst_event_unref (event);
/* create new converted time segment */
- fmt = GST_FORMAT_TIME;
+ segment.format = GST_FORMAT_TIME;
/* FIXME, bitrate is not good enough too find a good stop, let's
* hope start and time were 0... meh. */
- stop = -1;
- event = gst_event_new_new_segment (update, rate, fmt,
- start, stop, time);
+ segment.stop = -1;
+ event = gst_event_new_segment (&segment);
break;
}
default:
if (ffmpegdec->context->codec)
gst_ffmpegdec_drain (ffmpegdec);
- GST_DEBUG_OBJECT (ffmpegdec,
- "NEWSEGMENT in time start %" GST_TIME_FORMAT " -- stop %"
- GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop));
+ GST_DEBUG_OBJECT (ffmpegdec, "SEGMENT in time %" GST_SEGMENT_FORMAT,
+ &segment);
/* and store the values */
- gst_segment_set_newsegment_full (&ffmpegdec->segment, update,
- rate, arate, fmt, start, stop, time);
+ gst_segment_copy_into (&segment, &ffmpegdec->segment);
break;
}
default:
GstFFMpegDec *ffmpegdec;
GstFFMpegDecClass *oclass;
guint8 *data, *bdata;
+ guint8 *odata;
+ gsize osize;
gint size, bsize, len, have_data;
GstFlowReturn ret = GST_FLOW_OK;
GstClockTime in_timestamp;
ffmpegdec->last_frames = 0;
}
- GST_LOG_OBJECT (ffmpegdec,
- "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
- GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
- GST_BUFFER_SIZE (inbuf), GST_BUFFER_OFFSET (inbuf),
- GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration), in_info->idx);
-
/* workarounds, functions write to buffers:
* libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
* libavcodec/svq3.c:svq3_decode_slice_header too.
inbuf = gst_buffer_make_writable (inbuf);
}
- bdata = GST_BUFFER_DATA (inbuf);
- bsize = GST_BUFFER_SIZE (inbuf);
+ odata = gst_buffer_map (inbuf, &osize, NULL, GST_MAP_READ);
+
+ bdata = odata;
+ bsize = osize;
+
+ GST_LOG_OBJECT (ffmpegdec,
+ "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
+ GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
+ bsize, in_offset, GST_TIME_ARGS (in_timestamp),
+ GST_TIME_ARGS (in_duration), in_info->idx);
if (ffmpegdec->do_padding) {
/* add padding */
bsize, bdata);
} while (bsize > 0);
+ gst_buffer_unmap (inbuf, odata, osize);
+
/* keep left-over */
if (ffmpegdec->pctx && bsize > 0) {
in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
"Keeping %d bytes of data with offset %" G_GINT64_FORMAT ", timestamp %"
GST_TIME_FORMAT, bsize, in_offset, GST_TIME_ARGS (in_timestamp));
- ffmpegdec->pcache = gst_buffer_create_sub (inbuf,
- GST_BUFFER_SIZE (inbuf) - bsize, bsize);
+ ffmpegdec->pcache = gst_buffer_copy_region (inbuf, GST_BUFFER_COPY_ALL,
+ gst_buffer_get_size (inbuf) - bsize, bsize);
/* we keep timestamp, even though all we really know is that the correct
* timestamp is not below the one from inbuf */
GST_BUFFER_TIMESTAMP (ffmpegdec->pcache) = in_timestamp;
g_free (ffmpegdec->padded);
ffmpegdec->padded = NULL;
ffmpegdec->padded_size = 0;
- ffmpegdec->can_allocate_aligned = TRUE;
+ if (ffmpegdec->pool) {
+ gst_buffer_pool_set_active (ffmpegdec->pool, FALSE);
+ gst_object_unref (ffmpegdec->pool);
+ }
+ ffmpegdec->pool = NULL;
break;
default:
break;