AV_CH_STEREO_RIGHT, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT}
};
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)       /* ffmpeg >= 5.1: guint64 masks replaced by AVChannelLayout */
+static void
+gst_ffmpeg_channel_positions_to_layout (const GstAudioChannelPosition *
+    const pos, gint channels, AVChannelLayout * layout)
+#else
 static guint64
-gst_ffmpeg_channel_positions_to_layout (GstAudioChannelPosition * pos,
-    gint channels)
+gst_ffmpeg_channel_positions_to_layout (const GstAudioChannelPosition *
+    const pos, gint channels)
+#endif
 {
   gint i, j;
   guint64 ret = 0;
   gint channels_found = 0;
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+  g_assert (layout);            /* new API returns the result through @layout instead of a guint64 */
+
+  if (!pos) {
+    memset (layout, 0, sizeof (AVChannelLayout));       /* all-zero AVChannelLayout == "unspecified" (fails av_channel_layout_check) */
+    return;
+  }
+
+  if (channels == 1 && pos[0] == GST_AUDIO_CHANNEL_POSITION_MONO) {
+    *layout = (AVChannelLayout) AV_CHANNEL_LAYOUT_MONO;
+    return;
+  }
+#else
   if (!pos)
     return 0;
   if (channels == 1 && pos[0] == GST_AUDIO_CHANNEL_POSITION_MONO)
     return AV_CH_LAYOUT_MONO;
+#endif
   for (i = 0; i < channels; i++) {
     for (j = 0; j < G_N_ELEMENTS (_ff_to_gst_layout); j++) {
     }
   }
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+  if (channels_found != channels && av_channel_layout_check (layout)) {  /* NOTE(review): unlike the legacy branch below, the mismatch is only fatal when @layout already holds a valid layout; an unset/garbage @layout falls through and gets populated with a partial mask - confirm this is intended */
+    memset (layout, 0, sizeof (AVChannelLayout));
+    return;
+  }
+
+  layout->u.mask = ret;
+  layout->nb_channels = channels_found;
+  layout->order = AV_CHANNEL_ORDER_NATIVE;      /* mask-based layout, equivalent to the legacy guint64 return value */
+#else
   if (channels_found != channels)
     return 0;
   return ret;
+#endif
 }
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+gboolean
+gst_ffmpeg_channel_layout_to_gst (const AVChannelLayout * channel_layout,
+    gint channels, GstAudioChannelPosition * pos)
+#else
 gboolean
 gst_ffmpeg_channel_layout_to_gst (guint64 channel_layout, gint channels,
     GstAudioChannelPosition * pos)
+#endif
 {
   guint nchannels = 0;
   gboolean none_layout = FALSE;
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+  g_assert (channel_layout);
+
+  if (channel_layout->nb_channels == 0 || channels > 64) {      /* nb_channels == 0 is the "unspecified" layout in the new API */
+#else
   if (channel_layout == 0 || channels > 64) {
+#endif
     nchannels = channels;
     none_layout = TRUE;
   } else {
     * as FRONT_CENTER but we distinguish between the two in
      * GStreamer
      */
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+    static const AVChannelLayout mono = AV_CHANNEL_LAYOUT_MONO;  /* no integer MONO constant in the new API; compare against a static layout */
+    if (channels == 1
+        && (av_channel_layout_compare (channel_layout, &mono) == 0)) {
+#else
     if (channels == 1 && channel_layout == AV_CH_LAYOUT_MONO) {
+#endif
       pos[0] = GST_AUDIO_CHANNEL_POSITION_MONO;
       return TRUE;
     }
-
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+    nchannels = channel_layout->nb_channels;    /* AVChannelLayout carries the count; no manual popcount needed */
+#else
     for (i = 0; i < 64; i++) {
       if ((channel_layout & (G_GUINT64_CONSTANT (1) << i)) != 0) {
         nchannels++;
       }
     }
+#endif
     if (nchannels != channels) {
       GST_ERROR ("Number of channels is different (%u != %u)", channels,
     } else {
       for (i = 0, j = 0; i < G_N_ELEMENTS (_ff_to_gst_layout); i++) {
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+        if (channel_layout->order == AV_CHANNEL_ORDER_NATIVE) {
+          if ((channel_layout->u.mask & _ff_to_gst_layout[i].ff) != 0) {
+            pos[j++] = _ff_to_gst_layout[i].gst;
+
+            if (_ff_to_gst_layout[i].gst == GST_AUDIO_CHANNEL_POSITION_NONE)
+              none_layout = TRUE;
+          }
+        } else if (channel_layout->order == AV_CHANNEL_ORDER_CUSTOM) {
+          if (_ff_to_gst_layout[i].ff == (1ULL << channel_layout->u.map[i].id)) {  /* NOTE(review): u.map[] has only nb_channels entries but i ranges over the whole _ff_to_gst_layout table - possible out-of-bounds read when nb_channels < G_N_ELEMENTS (_ff_to_gst_layout); verify */
+            pos[j++] = _ff_to_gst_layout[i].gst;
+
+            if (_ff_to_gst_layout[i].gst == GST_AUDIO_CHANNEL_POSITION_NONE)
+              none_layout = TRUE;
+          }
+        }
+#else
       if ((channel_layout & _ff_to_gst_layout[i].ff) != 0) {
         pos[j++] = _ff_to_gst_layout[i].gst;
         if (_ff_to_gst_layout[i].gst == GST_AUDIO_CHANNEL_POSITION_NONE)
           none_layout = TRUE;
       }
+#endif
     }
     if (j != nchannels) {
     if (!none_layout
         && !gst_audio_check_valid_channel_positions (pos, nchannels, FALSE)) {
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+      GST_ERROR ("Invalid channel layout %" G_GUINT64_FORMAT
+          " - assuming NONE layout", channel_layout->u.mask);   /* NOTE(review): u.mask is only meaningful for AV_CHANNEL_ORDER_NATIVE; for CUSTOM order this logs an unrelated union member */
+#else
       GST_ERROR ("Invalid channel layout %" G_GUINT64_FORMAT
           " - assuming NONE layout", channel_layout);
+#endif
       none_layout = TRUE;
     }
va_list var_args;
/* fixed, non-probing context */
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ if (context != NULL && context->ch_layout.nb_channels > 0) {
+#else
if (context != NULL && context->channels != -1) {
+#endif
GstAudioChannelPosition pos[64];
guint64 mask;
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ caps = gst_caps_new_simple (mimetype,
+ "rate", G_TYPE_INT, context->sample_rate,
+ "channels", G_TYPE_INT, context->ch_layout.nb_channels, NULL);
+
+ static const AVChannelLayout mono = AV_CHANNEL_LAYOUT_MONO;
+ const gboolean needs_mask = (context->ch_layout.nb_channels == 1 &&
+ av_channel_layout_compare (&context->ch_layout, &mono) != 0)
+ || (context->ch_layout.nb_channels > 1
+ && gst_ffmpeg_channel_layout_to_gst (&context->ch_layout,
+ context->ch_layout.nb_channels, pos));
+
+ if (needs_mask &&
+ gst_audio_channel_positions_to_mask (pos,
+ context->ch_layout.nb_channels, FALSE, &mask)) {
+ gst_caps_set_simple (caps, "channel-mask", GST_TYPE_BITMASK, mask, NULL);
+ }
+#else
caps = gst_caps_new_simple (mimetype,
"rate", G_TYPE_INT, context->sample_rate,
"channels", G_TYPE_INT, context->channels, NULL);
&mask)) {
gst_caps_set_simple (caps, "channel-mask", GST_TYPE_BITMASK, mask, NULL);
}
+#endif
} else if (encode) {
gint maxchannels = 2;
const gint *rates = NULL;
break;
}
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ if (codec && codec->ch_layouts) {
+ const AVChannelLayout *layouts = codec->ch_layouts;
+#else
if (codec && codec->channel_layouts) {
const uint64_t *layouts = codec->channel_layouts;
+#endif
GstAudioChannelPosition pos[64];
caps = gst_caps_new_empty ();
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ // Layout array is terminated with a zeroed layout.
+ AVChannelLayout zero;
+ memset (&zero, 0, sizeof (AVChannelLayout));
+ while (av_channel_layout_compare (layouts, &zero) != 0) {
+ const gint nbits_set = layouts->nb_channels;
+
+ if (gst_ffmpeg_channel_layout_to_gst (layouts, nbits_set, pos)) {
+#else
while (*layouts) {
gint nbits_set = get_nbits_set (*layouts);
if (gst_ffmpeg_channel_layout_to_gst (*layouts, nbits_set, pos)) {
+#endif
guint64 mask;
if (gst_audio_channel_positions_to_mask (pos, nbits_set, FALSE,
structure = gst_caps_get_structure (caps, 0);
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ gst_structure_get_int (structure, "channels",
+ &context->ch_layout.nb_channels);
+#else
gst_structure_get_int (structure, "channels", &context->channels);
+#endif
gst_structure_get_int (structure, "rate", &context->sample_rate);
gst_structure_get_int (structure, "block_align", &context->block_align);
if (gst_structure_get_int (structure, "bitrate", &bitrate))
const enum AVSampleFormat *smpl_fmts;
enum AVSampleFormat smpl_fmt = -1;
- context->channels = info->channels;
context->sample_rate = info->rate;
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ gst_ffmpeg_channel_positions_to_layout (info->position, info->channels,
+ &context->ch_layout);
+#else
+ context->channels = info->channels;
context->channel_layout =
gst_ffmpeg_channel_positions_to_layout (info->position, info->channels);
+#endif
codec = context->codec;
if ((layout = gst_structure_get_string (str, "layout"))) {
if (!strcmp (layout, "g721")) {
context->sample_rate = 8000;
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ context->ch_layout = (AVChannelLayout) AV_CHANNEL_LAYOUT_MONO;
+#else
context->channels = 1;
+#endif
context->bit_rate = 32000;
}
}
switch (codec_id) {
case AV_CODEC_ID_QCELP:
/* QCELP is always mono, no matter what the caps say */
+#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(57, 28, 100)
+ context->ch_layout = (AVChannelLayout) AV_CHANNEL_LAYOUT_MONO;
+#else
context->channels = 1;
+#endif
break;
case AV_CODEC_ID_ADPCM_G726:
if (context->sample_rate && context->bit_rate)
AV_CODEC_CAP_DR1);
}
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT (60, 31, 100)     /* ffmpeg >= 6.1: frames tracked via AVPacket/AVFrame opaque_ref */
+static void
+gst_ffmpeg_opaque_free (void *opaque, guint8 * data)           /* AVBuffer free callback; @data is unused, the signature is fixed by av_buffer_create () */
+{
+  GstVideoCodecFrame *frame = (GstVideoCodecFrame *) opaque;
+
+  GST_DEBUG ("Releasing frame %p", frame);
+
+  gst_video_codec_frame_unref (frame);                         /* balances the gst_video_codec_frame_ref () taken when the packet's opaque_ref was created */
+}
+#endif
+
/* called when ffmpeg wants us to allocate a buffer to write the decoded frame
* into. We try to give it memory from our pool */
static int
guint c;
GstFlowReturn ret;
int create_buffer_flags = 0;
+ gint system_frame_number = 0;
ffmpegdec = GST_FFMPEGVIDDEC (context->opaque);
/* apply the last info we have seen to this picture, when we get the
* picture back from ffmpeg we can use this to correctly timestamp the output
* buffer */
- GST_DEBUG_OBJECT (ffmpegdec, "opaque value SN %d",
- (gint32) picture->reordered_opaque);
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT (60, 31, 100)
+ {
+ GstVideoCodecFrame *input_frame =
+ av_buffer_get_opaque (picture->opaque_ref);
+ g_assert (input_frame != NULL);
+
+ GST_DEBUG_OBJECT (ffmpegdec, "input_frame %p", input_frame);
+ /* ******************************* */
+ /* Test if the stored frame in the opaque matches the one video decoder has for that ref ! */
+ system_frame_number = input_frame->system_frame_number;
+ }
+#else
+ system_frame_number = (gint) picture->reordered_opaque;
+#endif
+ GST_DEBUG_OBJECT (ffmpegdec, "opaque value SN %d", system_frame_number);
frame =
gst_video_decoder_get_frame (GST_VIDEO_DECODER (ffmpegdec),
- picture->reordered_opaque);
+ system_frame_number);
+
if (G_UNLIKELY (frame == NULL))
goto no_frame;
gst_video_frame_unmap (&vframe);
+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT (60, 31, 100)
ffmpegdec->picture->reordered_opaque = -1;
+#endif
return ret;
*/
static gboolean
gst_ffmpegviddec_video_frame (GstFFMpegVidDec * ffmpegdec,
- GstVideoCodecFrame * frame, GstFlowReturn * ret)
+ GstVideoCodecFrame * input_frame, GstFlowReturn * ret)
{
gint res;
gboolean got_frame = FALSE;
gboolean mode_switch;
- GstVideoCodecFrame *out_frame;
+ GstVideoCodecFrame *output_frame;
GstFFMpegVidDecVideoFrame *out_dframe;
GstBufferPool *pool;
+ if (G_UNLIKELY (!ffmpegdec->context))
+ goto no_codec;
+
+#if LIBAVCODEC_VERSION_MAJOR >= 60
+ ffmpegdec->context->frame_num++;
+#else
+ ffmpegdec->context->frame_number++;
+#endif
+
*ret = GST_FLOW_OK;
/* in case we skip frames */
/* run QoS code, we don't stop decoding the frame when we are late because
* else we might skip a reference frame */
- gst_ffmpegviddec_do_qos (ffmpegdec, frame, &mode_switch);
+ gst_ffmpegviddec_do_qos (ffmpegdec, input_frame, &mode_switch);
/* FFmpeg might request new buffer from other threads.
* Release lock here */
/* get the output picture timing info again */
out_dframe = ffmpegdec->picture->opaque;
- out_frame = gst_video_codec_frame_ref (out_dframe->frame);
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT (60, 31, 100)
+ output_frame =
+ gst_video_codec_frame_ref (av_buffer_get_opaque (ffmpegdec->
+ picture->opaque_ref));
+#else
+ g_assert (out_dframe);
+ output_frame = gst_video_codec_frame_ref (out_dframe->frame);
+#endif
/* also give back a buffer allocated by the frame, if any */
- gst_buffer_replace (&out_frame->output_buffer, out_dframe->buffer);
- gst_buffer_replace (&out_dframe->buffer, NULL);
+ if (out_dframe) {
+ gst_buffer_replace (&output_frame->output_buffer, out_dframe->buffer);
+ gst_buffer_replace (&out_dframe->buffer, NULL);
+ }
/* Extract auxilliary info not stored in the main AVframe */
{
GST_DEBUG_OBJECT (ffmpegdec,
"pts %" G_GUINT64_FORMAT " duration %" G_GUINT64_FORMAT,
- out_frame->pts, out_frame->duration);
+ output_frame->pts, output_frame->duration);
GST_DEBUG_OBJECT (ffmpegdec, "picture: pts %" G_GUINT64_FORMAT,
(guint64) ffmpegdec->picture->pts);
+#if LIBAVUTIL_VERSION_MAJOR < 58
GST_DEBUG_OBJECT (ffmpegdec, "picture: num %d",
ffmpegdec->picture->coded_picture_number);
GST_DEBUG_OBJECT (ffmpegdec, "picture: display %d",
ffmpegdec->picture->display_picture_number);
- GST_DEBUG_OBJECT (ffmpegdec, "picture: opaque %p",
- ffmpegdec->picture->opaque);
+#endif
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT (60, 31, 100)
+ GST_DEBUG_OBJECT (ffmpegdec, "picture: opaque_ref %p",
+ ffmpegdec->picture->opaque_ref);
+#else
GST_DEBUG_OBJECT (ffmpegdec, "picture: reordered opaque %" G_GUINT64_FORMAT,
(guint64) ffmpegdec->picture->reordered_opaque);
+#endif
+ GST_DEBUG_OBJECT (ffmpegdec, "picture: opaque %p",
+ ffmpegdec->picture->opaque);
GST_DEBUG_OBJECT (ffmpegdec, "repeat_pict:%d",
ffmpegdec->picture->repeat_pict);
GST_DEBUG_OBJECT (ffmpegdec, "corrupted frame: %d",
- ! !(ffmpegdec->picture->flags & AV_FRAME_FLAG_CORRUPT));
+ !!(ffmpegdec->picture->flags & AV_FRAME_FLAG_CORRUPT));
if (!gst_ffmpegviddec_negotiate (ffmpegdec, ffmpegdec->context,
- ffmpegdec->picture, GST_BUFFER_FLAGS (out_frame->input_buffer)))
+ ffmpegdec->picture, GST_BUFFER_FLAGS (output_frame->input_buffer)))
goto negotiation_error;
pool = gst_video_decoder_get_buffer_pool (GST_VIDEO_DECODER (ffmpegdec));
- if (G_UNLIKELY (out_frame->output_buffer == NULL)) {
- *ret = get_output_buffer (ffmpegdec, out_frame);
- } else if (G_UNLIKELY (out_frame->output_buffer->pool != pool)) {
- GstBuffer *tmp = out_frame->output_buffer;
- out_frame->output_buffer = NULL;
- *ret = get_output_buffer (ffmpegdec, out_frame);
+ if (G_UNLIKELY (output_frame->output_buffer == NULL)) {
+ *ret = get_output_buffer (ffmpegdec, output_frame);
+ } else if (G_UNLIKELY (output_frame->output_buffer->pool != pool)) {
+ GstBuffer *tmp = output_frame->output_buffer;
+ output_frame->output_buffer = NULL;
+ *ret = get_output_buffer (ffmpegdec, output_frame);
gst_buffer_unref (tmp);
}
#ifndef G_DISABLE_ASSERT
else {
- GstVideoMeta *vmeta = gst_buffer_get_video_meta (out_frame->output_buffer);
+ GstVideoMeta *vmeta =
+ gst_buffer_get_video_meta (output_frame->output_buffer);
if (vmeta) {
GstVideoInfo *info = &ffmpegdec->output_state->info;
g_assert ((gint) vmeta->width == GST_VIDEO_INFO_WIDTH (info));
/* Mark corrupted frames as corrupted */
if (ffmpegdec->picture->flags & AV_FRAME_FLAG_CORRUPT)
- GST_BUFFER_FLAG_SET (out_frame->output_buffer, GST_BUFFER_FLAG_CORRUPTED);
+ GST_BUFFER_FLAG_SET (output_frame->output_buffer,
+ GST_BUFFER_FLAG_CORRUPTED);
if (ffmpegdec->pic_interlaced) {
/* set interlaced flags */
if (ffmpegdec->picture->repeat_pict)
- GST_BUFFER_FLAG_SET (out_frame->output_buffer, GST_VIDEO_BUFFER_FLAG_RFF);
+ GST_BUFFER_FLAG_SET (output_frame->output_buffer,
+ GST_VIDEO_BUFFER_FLAG_RFF);
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(60, 31, 100)
+ if (ffmpegdec->picture->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST)
+#else
if (ffmpegdec->picture->top_field_first)
- GST_BUFFER_FLAG_SET (out_frame->output_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
+#endif
+ GST_BUFFER_FLAG_SET (output_frame->output_buffer,
+ GST_VIDEO_BUFFER_FLAG_TFF);
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(60, 31, 100)
+ if (ffmpegdec->picture->flags & AV_FRAME_FLAG_INTERLACED)
+#else
if (ffmpegdec->picture->interlaced_frame)
- GST_BUFFER_FLAG_SET (out_frame->output_buffer,
+#endif
+ GST_BUFFER_FLAG_SET (output_frame->output_buffer,
GST_VIDEO_BUFFER_FLAG_INTERLACED);
}
GST_MEMDUMP ("A53 CC", side_data->data, side_data->size);
/* do not add closed caption meta if it already exists */
- if (!gst_buffer_get_meta (out_frame->input_buffer,
+ if (!gst_buffer_get_meta (output_frame->input_buffer,
GST_VIDEO_CAPTION_META_API_TYPE)) {
- out_frame->output_buffer =
- gst_buffer_make_writable (out_frame->output_buffer);
- gst_buffer_add_video_caption_meta (out_frame->output_buffer,
+ output_frame->output_buffer =
+ gst_buffer_make_writable (output_frame->output_buffer);
+ gst_buffer_add_video_caption_meta (output_frame->output_buffer,
GST_VIDEO_CAPTION_TYPE_CEA708_RAW, side_data->data,
side_data->size);
} else {
while (l) {
GstVideoCodecFrame *tmp = l->data;
- if (tmp == frame)
+ if (tmp == output_frame)
old = FALSE;
if (old && GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (tmp)) {
av_frame_unref (ffmpegdec->picture);
- if (frame)
- GST_VIDEO_CODEC_FRAME_FLAG_UNSET (frame,
+ if (input_frame)
+ GST_VIDEO_CODEC_FRAME_FLAG_UNSET (input_frame,
GST_FFMPEG_VIDEO_CODEC_FRAME_FLAG_ALLOCATED);
if (gst_video_decoder_get_subframe_mode (GST_VIDEO_DECODER (ffmpegdec)))
gst_video_decoder_have_last_subframe (GST_VIDEO_DECODER (ffmpegdec),
- out_frame);
+ output_frame);
/* FIXME: Ideally we would remap the buffer read-only now before pushing but
* libav might still have a reference to it!
*/
- if (GST_BUFFER_FLAG_IS_SET (out_frame->input_buffer,
+ if (GST_BUFFER_FLAG_IS_SET (output_frame->input_buffer,
GST_VIDEO_BUFFER_FLAG_ONEFIELD)) {
- GST_BUFFER_FLAG_SET (out_frame->output_buffer,
+ GST_BUFFER_FLAG_SET (output_frame->output_buffer,
GST_VIDEO_BUFFER_FLAG_ONEFIELD);
- if (GST_BUFFER_FLAG_IS_SET (out_frame->input_buffer,
+ if (GST_BUFFER_FLAG_IS_SET (output_frame->input_buffer,
GST_VIDEO_BUFFER_FLAG_TFF)) {
- GST_BUFFER_FLAG_SET (out_frame->output_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
+ GST_BUFFER_FLAG_SET (output_frame->output_buffer,
+ GST_VIDEO_BUFFER_FLAG_TFF);
}
}
#ifdef TIZEN_FEATURE_LIBAV_VIDEODECODER_ADD_VIDEOMETA
{
- GstVideoMeta *vmeta = gst_buffer_get_video_meta (out_frame->output_buffer);
+ GstVideoMeta *vmeta = gst_buffer_get_video_meta (output_frame->output_buffer);
if (!vmeta) {
GstVideoInfo *out_info = &ffmpegdec->output_state->info;
GST_DEBUG_OBJECT (ffmpegdec, "add videometa for output buffer");
- gst_buffer_add_video_meta_full (out_frame->output_buffer, GST_VIDEO_FRAME_FLAG_NONE,
+ gst_buffer_add_video_meta_full (output_frame->output_buffer, GST_VIDEO_FRAME_FLAG_NONE,
GST_VIDEO_INFO_FORMAT (out_info),
GST_VIDEO_INFO_WIDTH (out_info), GST_VIDEO_INFO_HEIGHT (out_info),
GST_VIDEO_INFO_N_PLANES (out_info), out_info->offset, out_info->stride);
}
}
#endif /* TIZEN_FEATURE_LIBAV_VIDEODECODER_ADD_VIDEOMETA */
+
+ /* Temporarily release the video decoder stream lock so that other
+ * threads can continue decoding (e.g. call get_frame()) while data
+ * is being pushed downstream.
+ */
+ GST_VIDEO_DECODER_STREAM_UNLOCK (ffmpegdec);
*ret =
- gst_video_decoder_finish_frame (GST_VIDEO_DECODER (ffmpegdec), out_frame);
+ gst_video_decoder_finish_frame (GST_VIDEO_DECODER (ffmpegdec),
+ output_frame);
+ GST_VIDEO_DECODER_STREAM_LOCK (ffmpegdec);
beach:
GST_DEBUG_OBJECT (ffmpegdec, "return flow %s, got frame: %d",
no_output:
{
GST_DEBUG_OBJECT (ffmpegdec, "no output buffer");
- GST_VIDEO_CODEC_FRAME_FLAG_UNSET (frame,
+ GST_VIDEO_CODEC_FRAME_FLAG_UNSET (input_frame,
GST_FFMPEG_VIDEO_CODEC_FRAME_FLAG_ALLOCATED);
- gst_video_decoder_drop_frame (GST_VIDEO_DECODER (ffmpegdec), out_frame);
+ gst_video_decoder_drop_frame (GST_VIDEO_DECODER (ffmpegdec), output_frame);
goto beach;
}
negotiation_error:
{
- gst_video_decoder_drop_frame (GST_VIDEO_DECODER (ffmpegdec), out_frame);
+ gst_video_decoder_drop_frame (GST_VIDEO_DECODER (ffmpegdec), output_frame);
if (GST_PAD_IS_FLUSHING (GST_VIDEO_DECODER_SRC_PAD (ffmpegdec))) {
*ret = GST_FLOW_FLUSHING;
goto beach;
*ret = GST_FLOW_NOT_NEGOTIATED;
goto beach;
}
-}
-
-
- /* Returns: Whether a frame was decoded */
-static gboolean
-gst_ffmpegviddec_frame (GstFFMpegVidDec * ffmpegdec, GstVideoCodecFrame * frame,
- GstFlowReturn * ret)
-{
- gboolean got_frame = FALSE;
-
- if (G_UNLIKELY (ffmpegdec->context->codec == NULL))
- goto no_codec;
-
- *ret = GST_FLOW_OK;
- ffmpegdec->context->frame_number++;
-
- got_frame = gst_ffmpegviddec_video_frame (ffmpegdec, frame, ret);
-
- return got_frame;
- /* ERRORS */
no_codec:
{
GST_ERROR_OBJECT (ffmpegdec, "no codec context");
*ret = GST_FLOW_NOT_NEGOTIATED;
- return -1;
+ goto beach;
}
}
+
static GstFlowReturn
gst_ffmpegviddec_drain (GstVideoDecoder * decoder)
{
GST_VIDEO_DECODER_STREAM_LOCK (ffmpegdec);
do {
- got_frame = gst_ffmpegviddec_frame (ffmpegdec, NULL, &ret);
+ got_frame = gst_ffmpegviddec_video_frame (ffmpegdec, NULL, &ret);
} while (got_frame && ret == GST_FLOW_OK);
GST_VIDEO_DECODER_STREAM_UNLOCK (ffmpegdec);
gboolean got_frame;
GstMapInfo minfo;
GstFlowReturn ret = GST_FLOW_OK;
- AVPacket packet;
+ AVPacket *packet;
+
+ if (G_UNLIKELY (!ffmpegdec->context)) {
+ gst_video_codec_frame_unref (frame);
+ GST_ERROR_OBJECT (ffmpegdec, "no codec context");
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
GST_LOG_OBJECT (ffmpegdec,
"Received new data of size %" G_GSIZE_FORMAT ", dts %" GST_TIME_FORMAT
if (!gst_buffer_map (frame->input_buffer, &minfo, GST_MAP_READ)) {
GST_ELEMENT_ERROR (ffmpegdec, STREAM, DECODE, ("Decoding problem"),
("Failed to map buffer for reading"));
+ gst_video_codec_frame_unref (frame);
return GST_FLOW_ERROR;
}
+ if (minfo.size == 0)
+ goto done;
+
/* treat frame as void until a buffer is requested for it */
if (!GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
GST_FFMPEG_VIDEO_CODEC_FRAME_FLAG_ALLOCATED))
data = ffmpegdec->padded;
}
- /* now decode the frame */
- gst_avpacket_init (&packet, data, size);
+ /* Note: We use `av_packet_alloc()` so that it is properly initialized by
+ * FFmpeg and it can be properly cleaned-up (via `av_packet_unref()`) by
+ * FFmpeg also. */
+ packet = av_packet_alloc ();
+ packet->data = data;
+ packet->size = size;
- if (!packet.size)
- goto done;
+ /* Store a reference to the input frame. This will be carried along by FFmpeg
+ * to the resulting AVPicture */
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT (60, 31, 100)
+ {
+ packet->opaque_ref =
+ av_buffer_create (NULL, 0, gst_ffmpeg_opaque_free,
+ gst_video_codec_frame_ref (frame), 0);
+ GST_DEBUG_OBJECT (ffmpegdec,
+ "Store incoming frame # %u (%p) on AVPacket opaque",
+ frame->system_frame_number, frame);
+ }
+#else
+ ffmpegdec->context->reordered_opaque = (gint64) frame->system_frame_number;
+ ffmpegdec->picture->reordered_opaque = (gint64) frame->system_frame_number;
+ GST_DEBUG_OBJECT (ffmpegdec, "stored opaque values idx %u",
+ frame->system_frame_number);
+#endif
if (ffmpegdec->palette) {
guint8 *pal;
- pal = av_packet_new_side_data (&packet, AV_PKT_DATA_PALETTE,
- AVPALETTE_SIZE);
+ pal = av_packet_new_side_data (packet, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
gst_buffer_extract (ffmpegdec->palette, 0, pal, AVPALETTE_SIZE);
GST_DEBUG_OBJECT (ffmpegdec, "copy pal %p %p", &packet, pal);
}
- /* save reference to the timing info */
- ffmpegdec->context->reordered_opaque = (gint64) frame->system_frame_number;
- ffmpegdec->picture->reordered_opaque = (gint64) frame->system_frame_number;
-
- GST_DEBUG_OBJECT (ffmpegdec, "stored opaque values idx %d",
- frame->system_frame_number);
-
/* This might call into get_buffer() from another thread,
* which would cause a deadlock. Release the lock here
* and taking it again later seems safe
* See https://bugzilla.gnome.org/show_bug.cgi?id=726020
*/
GST_VIDEO_DECODER_STREAM_UNLOCK (ffmpegdec);
- if (avcodec_send_packet (ffmpegdec->context, &packet) < 0) {
+ if (avcodec_send_packet (ffmpegdec->context, packet) < 0) {
GST_VIDEO_DECODER_STREAM_LOCK (ffmpegdec);
- av_packet_free_side_data (&packet);
+ av_packet_free (&packet);
goto send_packet_failed;
}
- av_packet_free_side_data (&packet);
+ av_packet_free (&packet);
GST_VIDEO_DECODER_STREAM_LOCK (ffmpegdec);
do {
/* decode a frame of audio/video now */
- got_frame = gst_ffmpegviddec_frame (ffmpegdec, frame, &ret);
+ got_frame = gst_ffmpegviddec_video_frame (ffmpegdec, frame, &ret);
if (ret != GST_FLOW_OK) {
GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
in_plugin->id == AV_CODEC_ID_V210X ||
in_plugin->id == AV_CODEC_ID_V308 ||
in_plugin->id == AV_CODEC_ID_V408 ||
- in_plugin->id == AV_CODEC_ID_V410 ||
- in_plugin->id == AV_CODEC_ID_R210
+ in_plugin->id == AV_CODEC_ID_V410 || in_plugin->id == AV_CODEC_ID_R210
+#if LIBAVCODEC_VERSION_MAJOR < 61
|| in_plugin->id == AV_CODEC_ID_AYUV
+#endif
|| in_plugin->id == AV_CODEC_ID_Y41P
|| in_plugin->id == AV_CODEC_ID_012V
|| in_plugin->id == AV_CODEC_ID_YUV4