*
* * Each input frame is provided in turn to the subclass' @handle_frame
* callback.
- * The ownership of the frame is given to the @handle_frame callback.
+ * * When the subclass enables the subframe mode with `gst_video_decoder_set_subframe_mode`,
+ * the base class will provide to the subclass the same input frame with
+ * different input buffers to the subclass @handle_frame
+ * callback. During this call, the subclass needs to take
+ * ownership of the input_buffer as @GstVideoCodecFrame.input_buffer
+ * will have been changed before the next subframe buffer is received.
+ * The subclass will call `gst_video_decoder_have_last_subframe`
+ * once the last subframe has been received, so that a new input frame
+ * can be created by the base class.
+ * Every subframe will share the same @GstVideoCodecFrame.output_buffer
+ * to write the decoding result. The subclass is responsible for
+ * protecting concurrent access to it.
*
* * If codec processing results in decoded data, the subclass should call
- * @gst_video_decoder_finish_frame to have decoded data pushed.
- * downstream. Otherwise, the subclass must call
- * @gst_video_decoder_drop_frame, to allow the base class to do timestamp
- * and offset tracking, and possibly to requeue the frame for a later
- * attempt in the case of reverse playback.
+ * @gst_video_decoder_finish_frame to have decoded data pushed
+ * downstream. In subframe mode
+ * the subclass should call @gst_video_decoder_finish_subframe until the
+ * last subframe where it should call @gst_video_decoder_finish_frame.
+ * The subclass can detect the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER
+ * on buffers or using its own logic to collect the subframes.
+ * In case of decoding failure, the subclass must call
+ * @gst_video_decoder_drop_frame or @gst_video_decoder_drop_subframe,
+ * to allow the base class to do timestamp and offset tracking, and possibly
+ * to requeue the frame for a later attempt in the case of reverse playback.
*
* ## Shutdown phase
*
/* Whether input is considered packetized or not */
gboolean packetized;
+ /* whether input is considered as subframes */
+ gboolean subframe_mode;
+
/* Error handling */
gint max_errors;
gint error_count;
GstSegmentFlags decode_flags;
/* ... being tracked here;
- * only available during parsing */
+ * only available during parsing or when doing subframe decoding */
GstVideoCodecFrame *current_frame;
/* events that should apply to the current frame */
/* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
decoder, GstVideoCodecFrame * frame, GstMeta * meta);
+static void gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
+ GstVideoCodecFrame * frame, GstBuffer * src_buffer,
+ GstBuffer * dest_buffer);
+
/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
* method to get to the padtemplates */
GType
ts->flags = GST_BUFFER_FLAGS (buffer);
g_queue_push_tail (&priv->timestamps, ts);
+
+ if (g_queue_get_length (&priv->timestamps) > 40) {
+ GST_WARNING_OBJECT (decoder,
+ "decoder timestamp list getting long: %d timestamps,"
+ "possible internal leaking?", g_queue_get_length (&priv->timestamps));
+ }
}
static void
priv->input_offset += gst_buffer_get_size (buf);
if (priv->packetized) {
+ GstVideoCodecFrame *frame;
gboolean was_keyframe = FALSE;
+
+ frame = priv->current_frame;
+
+ frame->abidata.ABI.num_subframes++;
+ if (gst_video_decoder_get_subframe_mode (decoder)) {
+ /* End the frame if the marker flag is set */
+ if (!GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_FLAG_MARKER)
+ && (decoder->input_segment.rate > 0.0))
+ priv->current_frame = gst_video_codec_frame_ref (frame);
+ else
+ priv->current_frame = NULL;
+ } else {
+ priv->current_frame = frame;
+ }
+
if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
was_keyframe = TRUE;
GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
- GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
+ GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
}
- priv->current_frame->input_buffer = buf;
+ if (frame->input_buffer) {
+ gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer, buf);
+ gst_buffer_unref (frame->input_buffer);
+ }
+ frame->input_buffer = buf;
if (decoder->input_segment.rate < 0.0) {
- priv->parse_gather =
- g_list_prepend (priv->parse_gather, priv->current_frame);
+ priv->parse_gather = g_list_prepend (priv->parse_gather, frame);
+ priv->current_frame = NULL;
} else {
- ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
+ ret = gst_video_decoder_decode_frame (decoder, frame);
+ if (!gst_video_decoder_get_subframe_mode (decoder))
+ priv->current_frame = NULL;
}
- priv->current_frame = NULL;
/* If in trick mode and it was a keyframe, drain decoder to avoid extra
* latency. Only do this for forwards playback as reverse playback handles
* draining on keyframes in flush_parse(), and would otherwise call back
GstVideoDecoderPrivate *priv = dec->priv;
GstFlowReturn res = GST_FLOW_OK;
GList *walk;
-
+ GstVideoCodecFrame *current_frame = NULL;
+ gboolean last_subframe;
GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
walk = priv->decode;
while (walk) {
GList *next;
GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
+ last_subframe = TRUE;
+    /* In subframe mode, we need to get rid of intermediary frames
+     * created during the buffer gather stage. That's why we keep a current
+     * frame as the main frame and drop all the following frames until the
+     * end of the subframes batch.
+     */
+ if (gst_video_decoder_get_subframe_mode (dec)) {
+ if (current_frame == NULL) {
+ current_frame = gst_video_codec_frame_ref (frame);
+ } else {
+ if (current_frame->input_buffer) {
+ gst_video_decoder_copy_metas (dec, current_frame,
+ current_frame->input_buffer, current_frame->output_buffer);
+ gst_buffer_unref (current_frame->input_buffer);
+ }
+ current_frame->input_buffer = gst_buffer_ref (frame->input_buffer);
+ gst_video_codec_frame_unref (frame);
+ }
+ last_subframe = GST_BUFFER_FLAG_IS_SET (current_frame->input_buffer,
+ GST_VIDEO_BUFFER_FLAG_MARKER);
+ } else {
+ current_frame = frame;
+ }
GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
priv->decode = g_list_delete_link (priv->decode, walk);
/* decode buffer, resulting data prepended to queue */
- res = gst_video_decoder_decode_frame (dec, frame);
+ res = gst_video_decoder_decode_frame (dec, current_frame);
if (res != GST_FLOW_OK)
break;
-
+ if (!gst_video_decoder_get_subframe_mode (dec)
+ || last_subframe)
+ current_frame = NULL;
walk = next;
}
sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
GST_LOG_OBJECT (decoder,
- "finish frame %p (#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
+ "finish frame %p (#%d)(sub=#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
GST_TIME_FORMAT,
- frame, frame->system_frame_number,
+ frame, frame->system_frame_number, frame->abidata.ABI.num_subframes,
sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
/* Push all pending events that arrived before this frame */
{
GST_LOG_OBJECT (dec, "drop frame %p", frame);
+ if (gst_video_decoder_get_subframe_mode (dec))
+ GST_DEBUG_OBJECT (dec, "Drop subframe %d. Must be the last one.",
+ frame->abidata.ABI.num_subframes);
+
GST_VIDEO_DECODER_STREAM_LOCK (dec);
gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);
return GST_FLOW_OK;
}
+/**
+ * gst_video_decoder_drop_subframe:
+ * @dec: a #GstVideoDecoder
+ * @frame: (transfer full): the #GstVideoCodecFrame
+ *
+ * Drops the input data of one subframe.
+ * The frame is not considered finished until the whole frame
+ * is finished or dropped by the subclass.
+ *
+ * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
+ *
+ * Since: 1.20
+ */
+GstFlowReturn
+gst_video_decoder_drop_subframe (GstVideoDecoder * dec,
+    GstVideoCodecFrame * frame)
+{
+  /* Only meaningful when the subclass has enabled subframe mode */
+  g_return_val_if_fail (gst_video_decoder_get_subframe_mode (dec),
+      GST_FLOW_NOT_SUPPORTED);
+  g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);
+
+  GST_LOG_OBJECT (dec, "drop subframe %p num=%d", frame->input_buffer,
+      gst_video_decoder_get_input_subframe_index (dec, frame));
+
+  GST_VIDEO_DECODER_STREAM_LOCK (dec);
+
+  /* Release the reference the subclass received in @handle_frame; the
+   * frame itself stays alive until its last subframe is finished or
+   * dropped. */
+  gst_video_codec_frame_unref (frame);
+
+  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
+
+  return GST_FLOW_OK;
+}
+
static gboolean
gst_video_decoder_transform_meta_default (GstVideoDecoder *
decoder, GstVideoCodecFrame * frame, GstMeta * meta)
{
GstVideoDecoder *decoder;
GstVideoCodecFrame *frame;
+ GstBuffer *buffer;
} CopyMetaData;
static gboolean
GstVideoDecoder *decoder = data->decoder;
GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
GstVideoCodecFrame *frame = data->frame;
+ GstBuffer *buffer = data->buffer;
const GstMetaInfo *info = (*meta)->info;
gboolean do_copy = FALSE;
GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
/* simply copy then */
- info->transform_func (frame->output_buffer, *meta, inbuf,
- _gst_meta_transform_copy, ©_data);
+
+ info->transform_func (buffer, *meta, inbuf, _gst_meta_transform_copy,
+ ©_data);
}
return TRUE;
}
+/* Copy the metas of @src_buffer onto @dest_buffer, letting the subclass'
+ * transform_meta vfunc decide (via foreach_metadata) which metas to copy.
+ * No-op when the subclass does not implement transform_meta. @frame may
+ * be NULL if the input frame disappeared; nothing is copied then. */
+static void
+gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
+    GstVideoCodecFrame * frame, GstBuffer * src_buffer, GstBuffer * dest_buffer)
+{
+  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
+
+  if (decoder_class->transform_meta) {
+    if (G_LIKELY (frame)) {
+      CopyMetaData data;
+
+      data.decoder = decoder;
+      data.frame = frame;
+      data.buffer = dest_buffer;
+      gst_buffer_foreach_meta (src_buffer, foreach_metadata, &data);
+    } else {
+      GST_WARNING_OBJECT (decoder,
+          "Can't copy metadata because input frame disappeared");
+    }
+  }
+}
+
/**
* gst_video_decoder_finish_frame:
* @decoder: a #GstVideoDecoder
GstVideoCodecFrame * frame)
{
GstFlowReturn ret = GST_FLOW_OK;
- GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
GstVideoDecoderPrivate *priv = decoder->priv;
GstBuffer *output_buffer;
gboolean needs_reconfigure = FALSE;
GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
}
- if (decoder_class->transform_meta) {
- if (G_LIKELY (frame->input_buffer)) {
- CopyMetaData data;
-
- data.decoder = decoder;
- data.frame = frame;
- gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
- } else {
- GST_WARNING_OBJECT (decoder,
- "Can't copy metadata because input frame disappeared");
- }
- }
+ gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer,
+ frame->output_buffer);
/* Get an additional ref to the buffer, which is going to be pushed
* downstream, the original ref is owned by the frame
return ret;
}
+/**
+ * gst_video_decoder_finish_subframe:
+ * @decoder: a #GstVideoDecoder
+ * @frame: (transfer full): the #GstVideoCodecFrame
+ *
+ * Indicate that a subframe has been finished to be decoded
+ * by the subclass. This method should be called for all subframes
+ * except the last subframe where @gst_video_decoder_finish_frame
+ * should be called instead.
+ *
+ * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
+ *
+ * Since: 1.20
+ */
+GstFlowReturn
+gst_video_decoder_finish_subframe (GstVideoDecoder * decoder,
+    GstVideoCodecFrame * frame)
+{
+  /* Only meaningful when the subclass has enabled subframe mode */
+  g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
+      GST_FLOW_NOT_SUPPORTED);
+  g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);
+
+  GST_LOG_OBJECT (decoder, "finish subframe %p num=%d", frame->input_buffer,
+      gst_video_decoder_get_input_subframe_index (decoder, frame));
+
+  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+  /* Account for the processed subframe and release the reference the
+   * subclass received in @handle_frame. */
+  frame->abidata.ABI.subframes_processed++;
+  gst_video_codec_frame_unref (frame);
+
+  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+  return GST_FLOW_OK;
+}
+
/* With stream lock, takes the frame reference */
static GstFlowReturn
gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
buffer = gst_buffer_new_and_alloc (0);
}
+ if (priv->current_frame->input_buffer) {
+ gst_video_decoder_copy_metas (decoder, priv->current_frame,
+ priv->current_frame->input_buffer, buffer);
+ gst_buffer_unref (priv->current_frame->input_buffer);
+ }
priv->current_frame->input_buffer = buffer;
gst_video_decoder_get_buffer_info_at_offset (decoder,
if (decoder->input_segment.rate < 0.0) {
priv->parse_gather =
g_list_prepend (priv->parse_gather, priv->current_frame);
+ priv->current_frame = NULL;
} else {
- /* Otherwise, decode the frame, which gives away our ref */
- ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
+ GstVideoCodecFrame *frame = priv->current_frame;
+ frame->abidata.ABI.num_subframes++;
+ /* In subframe mode, we keep a ref for ourselves
+ * as this frame will be kept during the data collection
+ * in parsed mode. The frame reference will be released by
+ * finish_(sub)frame or drop_(sub)frame.*/
+ if (gst_video_decoder_get_subframe_mode (decoder))
+ gst_video_codec_frame_ref (priv->current_frame);
+ else
+ priv->current_frame = NULL;
+
+ /* Decode the frame, which gives away our ref */
+ ret = gst_video_decoder_decode_frame (decoder, frame);
}
- /* Current frame is gone now, either way */
- priv->current_frame = NULL;
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* FIXME : This should only have to be checked once (either the subclass has an
* implementation, or it doesn't) */
g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
+ g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);
frame->pts = GST_BUFFER_PTS (frame->input_buffer);
frame->dts = GST_BUFFER_DTS (frame->input_buffer);
frame->distance_from_sync = priv->distance_from_sync;
- frame->abidata.ABI.ts = frame->dts;
- frame->abidata.ABI.ts2 = frame->pts;
+ if (frame->abidata.ABI.num_subframes == 1) {
+ frame->abidata.ABI.ts = frame->dts;
+ frame->abidata.ABI.ts2 = frame->pts;
+ }
- GST_LOG_OBJECT (decoder, "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT
- ", dist %d", GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
+ GST_LOG_OBJECT (decoder,
+ "frame %p PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dist %d",
+ frame, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
frame->distance_from_sync);
-
- g_queue_push_tail (&priv->frames, gst_video_codec_frame_ref (frame));
+ /* FIXME: suboptimal way to add a unique frame to the list, in case of subframe mode. */
+ if (!g_queue_find (&priv->frames, frame)) {
+ g_queue_push_tail (&priv->frames, gst_video_codec_frame_ref (frame));
+ } else {
+ GST_LOG_OBJECT (decoder,
+ "Do not add an existing frame used to decode subframes");
+ }
if (priv->frames.length > 10) {
GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames,"
}
/**
+ * gst_video_decoder_have_last_subframe:
+ * @decoder: a #GstVideoDecoder
+ * @frame: (transfer none): the #GstVideoCodecFrame being decoded
+ *
+ * Indicates that the last subframe has been processed by the decoder
+ * in @frame. This will release the current frame in video decoder
+ * allowing it to receive new frames from upstream elements. This method
+ * must be called in the subclass @handle_frame callback.
+ *
+ * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
+ *
+ * Since: 1.20
+ */
+GstFlowReturn
+gst_video_decoder_have_last_subframe (GstVideoDecoder * decoder,
+    GstVideoCodecFrame * frame)
+{
+  g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
+      GST_FLOW_OK);
+  /* Drop the extra ref the base class kept on the current frame while
+   * collecting subframes, so that a new input frame can be started. */
+  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+  if (decoder->priv->current_frame == frame) {
+    gst_video_codec_frame_unref (decoder->priv->current_frame);
+    decoder->priv->current_frame = NULL;
+  }
+  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+  return GST_FLOW_OK;
+}
+
+/**
+ * gst_video_decoder_set_subframe_mode:
+ * @decoder: a #GstVideoDecoder
+ * @subframe_mode: whether the input data should be considered as subframes.
+ *
+ * If this is set to TRUE, it informs the base class that the subclass
+ * can receive the data at a granularity lower than one frame.
+ *
+ * Note that in this mode, the subclass has two options. It can either
+ * require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
+ * end of a frame. Or it can operate in such a way that it will decode
+ * a single frame at a time. In this second case, every buffer that
+ * arrives to the element is considered part of the same frame until
+ * gst_video_decoder_finish_frame() is called.
+ *
+ * In either case, the same #GstVideoCodecFrame will be passed to the
+ * GstVideoDecoderClass:handle_frame vmethod repeatedly with a
+ * different GstVideoCodecFrame:input_buffer every time until the end of the
+ * frame has been signaled using either method.
+ * This method must be called during the decoder subclass @set_format call.
+ *
+ * Since: 1.20
+ */
+void
+gst_video_decoder_set_subframe_mode (GstVideoDecoder * decoder,
+    gboolean subframe_mode)
+{
+  /* Plain write, no locking: documented above to be called from the
+   * subclass' @set_format. */
+  decoder->priv->subframe_mode = subframe_mode;
+}
+
+/**
+ * gst_video_decoder_get_subframe_mode:
+ * @decoder: a #GstVideoDecoder
+ *
+ * Queries whether input data is considered as subframes or not by the
+ * base class. If FALSE, each input buffer will be considered as a full
+ * frame.
+ *
+ * Returns: TRUE if input data is considered as sub frames.
+ *
+ * Since: 1.20
+ */
+gboolean
+gst_video_decoder_get_subframe_mode (GstVideoDecoder * decoder)
+{
+  /* Plain read; set once via gst_video_decoder_set_subframe_mode() */
+  return decoder->priv->subframe_mode;
+}
+
+/**
+ * gst_video_decoder_get_input_subframe_index:
+ * @decoder: a #GstVideoDecoder
+ * @frame: (transfer none): the #GstVideoCodecFrame to query
+ *
+ * Queries the number of the last subframe received by
+ * the decoder baseclass in the @frame.
+ *
+ * Returns: the current subframe index received in subframe mode, 1 otherwise.
+ *
+ * Since: 1.20
+ */
+guint
+gst_video_decoder_get_input_subframe_index (GstVideoDecoder * decoder,
+    GstVideoCodecFrame * frame)
+{
+  /* Counter is bumped by the base class for each input buffer of @frame */
+  return frame->abidata.ABI.num_subframes;
+}
+
+/**
+ * gst_video_decoder_get_processed_subframe_index:
+ * @decoder: a #GstVideoDecoder
+ * @frame: (transfer none): the #GstVideoCodecFrame to query
+ *
+ * Queries the number of subframes in the frame processed by
+ * the decoder baseclass.
+ *
+ * Returns: the number of subframes of @frame processed so far in
+ * subframe mode.
+ *
+ * Since: 1.20
+ */
+guint
+gst_video_decoder_get_processed_subframe_index (GstVideoDecoder * decoder,
+    GstVideoCodecFrame * frame)
+{
+  /* Counter is bumped by gst_video_decoder_finish_subframe() */
+  return frame->abidata.ABI.subframes_processed;
+}
+
+/**
* gst_video_decoder_set_estimate_rate:
* @dec: a #GstVideoDecoder
* @enabled: whether to enable byte to time conversion
guint64 last_buf_num;
guint64 last_kf_num;
gboolean set_output_state;
+ gboolean subframe_mode;
};
struct _GstVideoDecoderTesterClass
guint8 *data;
gint size;
GstMapInfo map;
+ gboolean last_subframe = GST_BUFFER_FLAG_IS_SET (frame->input_buffer,
+ GST_VIDEO_BUFFER_FLAG_MARKER);
+
+ if (gst_video_decoder_get_subframe_mode (dec) && !last_subframe) {
+ if (!GST_CLOCK_TIME_IS_VALID (frame->pts))
+ return gst_video_decoder_drop_subframe (dec, frame);
+ goto done;
+ }
gst_buffer_map (frame->input_buffer, &map, GST_MAP_READ);
if ((input_num == dectester->last_buf_num + 1
&& dectester->last_buf_num != -1)
|| !GST_BUFFER_FLAG_IS_SET (frame->input_buffer,
- GST_BUFFER_FLAG_DELTA_UNIT)) {
+ GST_BUFFER_FLAG_DELTA_UNIT) || last_subframe) {
/* the output is gray8 */
size = TEST_VIDEO_WIDTH * TEST_VIDEO_HEIGHT;
}
gst_buffer_unmap (frame->input_buffer, &map);
+ if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
+
+ if (gst_video_decoder_get_subframe_mode (dec) && last_subframe)
+ gst_video_decoder_have_last_subframe (dec, frame);
+
+ if (frame->output_buffer)
+ return gst_video_decoder_finish_frame (dec, frame);
+ } else {
+ return gst_video_decoder_drop_frame (dec, frame);
+
+ }
- if (frame->output_buffer)
- return gst_video_decoder_finish_frame (dec, frame);
+
+done:
gst_video_codec_frame_unref (frame);
+
return GST_FLOW_OK;
}
+/* Test parse vfunc: hand over all data currently available in the
+ * adapter as (part of) the current frame. @frame and @at_eos are
+ * intentionally unused. */
+static GstFlowReturn
+gst_video_decoder_tester_parse (GstVideoDecoder * decoder,
+    GstVideoCodecFrame * frame, GstAdapter * adapter, gboolean at_eos)
+{
+  gint av;
+
+  av = gst_adapter_available (adapter);
+
+  /* and pass along all */
+  gst_video_decoder_add_to_frame (decoder, av);
+  return gst_video_decoder_have_frame (decoder);
+}
+
static void
gst_video_decoder_tester_class_init (GstVideoDecoderTesterClass * klass)
{
videodecoder_class->flush = gst_video_decoder_tester_flush;
videodecoder_class->handle_frame = gst_video_decoder_tester_handle_frame;
videodecoder_class->set_format = gst_video_decoder_tester_set_format;
+ videodecoder_class->parse = gst_video_decoder_tester_parse;
}
static void
}
#define NUM_BUFFERS 1000
+#define NUM_SUB_BUFFERS 4
+
GST_START_TEST (videodecoder_playback)
{
GstSegment segment;
num = *(guint64 *) map.data;
fail_unless (i == num);
+
fail_unless (GST_BUFFER_PTS (buffer) == gst_util_uint64_scale_round (i,
GST_SECOND * TEST_VIDEO_FPS_D, TEST_VIDEO_FPS_N));
fail_unless (GST_BUFFER_DURATION (buffer) ==
GST_END_TEST;
-GST_START_TEST (videodecoder_backwards_playback)
+static void
+videodecoder_backwards_playback (gboolean subframe)
{
GstSegment segment;
GstBuffer *buffer;
guint64 i;
GList *iter;
+ guint num_subframes = 1;
+ guint num_buffers;
+
+ if (subframe)
+ num_subframes = 2;
+ num_buffers = NUM_BUFFERS / num_subframes;
setup_videodecodertester (NULL, NULL);
+ if (num_subframes > 1) {
+ gst_video_decoder_set_subframe_mode (GST_VIDEO_DECODER (dec), TRUE);
+ }
+
gst_pad_set_active (mysrcpad, TRUE);
gst_element_set_state (dec, GST_STATE_PLAYING);
gst_pad_set_active (mysinkpad, TRUE);
/* push a new segment with -1 rate */
gst_segment_init (&segment, GST_FORMAT_TIME);
segment.rate = -1.0;
- segment.stop = (NUM_BUFFERS + 1) * gst_util_uint64_scale_round (GST_SECOND,
+ segment.stop = (num_buffers + 1) * gst_util_uint64_scale_round (GST_SECOND,
TEST_VIDEO_FPS_D, TEST_VIDEO_FPS_N);
fail_unless (gst_pad_push_event (mysrcpad, gst_event_new_segment (&segment)));
/* push buffers, the data is actually a number so we can track them */
- i = NUM_BUFFERS;
+ i = num_buffers * num_subframes;
while (i > 0) {
gint target = i;
gint j;
* it pushes buffers from 'target - 10' up to target.
*/
for (j = MAX (target - 10, 0); j < target; j++) {
- GstBuffer *buffer = create_test_buffer (j);
-
+ GstBuffer *buffer = create_test_buffer (j / num_subframes);
+ if ((j + 1) % num_subframes == 0)
+ GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_MARKER);
if (j % 10 == 0)
GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
if (j % 20 != 0)
fail_unless (gst_pad_push_event (mysrcpad, gst_event_new_eos ()));
/* check that all buffers were received by our source pad */
- fail_unless (g_list_length (buffers) == NUM_BUFFERS);
- i = NUM_BUFFERS - 1;
+ fail_unless (g_list_length (buffers) == num_buffers);
+ i = num_buffers - 1;
for (iter = buffers; iter; iter = g_list_next (iter)) {
GstMapInfo map;
guint64 num;
cleanup_videodecodertest ();
}
+/* Reverse playback with one input buffer per frame */
+GST_START_TEST (videodecoder_backwards_playback_normal)
+{
+  videodecoder_backwards_playback (FALSE);
+}
+
GST_END_TEST;
+/* Reverse playback with two subframe buffers per frame */
+GST_START_TEST (videodecoder_backwards_playback_subframes)
+{
+  videodecoder_backwards_playback (TRUE);
+}
+
+GST_END_TEST;
GST_START_TEST (videodecoder_backwards_buffer_after_segment)
{
GST_END_TEST;
+/* Bitmask describing how the subframe playback tests configure and feed
+ * the decoder under test. */
+typedef enum
+{
+  MODE_NONE = 0,                /* parsed input, whole frames */
+  MODE_SUBFRAMES = 1,           /* enable subframe mode on the decoder */
+  MODE_PACKETIZED = 1 << 1,     /* mark the sink pad input as packetized */
+  MODE_META_ROI = 1 << 2,       /* attach a ROI meta to every input buffer */
+} SubframeMode;
+
+/* Push NUM_BUFFERS input buffers through the decoder in the configuration
+ * described by @mode and check that every output frame carries the expected
+ * content, timestamps and (when MODE_META_ROI is set) ROI metas. */
+static void
+videodecoder_playback_subframe_mode (SubframeMode mode)
+{
+  GstSegment segment;
+  GstBuffer *buffer;
+  guint i;
+  GList *iter;
+  gint num_buffers = NUM_BUFFERS;
+  gint num_subframes = 1;
+  GList *list;
+  gint num_roi_metas = 0;
+
+  setup_videodecodertester (NULL, NULL);
+
+  /* Allow testing any combination of the subframe and packetized
+   * configurations: neither, either one, or both together. */
+  if (mode & MODE_SUBFRAMES) {
+    gst_video_decoder_set_subframe_mode (GST_VIDEO_DECODER (dec), TRUE);
+    num_subframes = NUM_SUB_BUFFERS;
+  } else {
+    gst_video_decoder_set_subframe_mode (GST_VIDEO_DECODER (dec), FALSE);
+    num_subframes = 1;
+  }
+  gst_video_decoder_set_packetized (GST_VIDEO_DECODER (dec),
+      mode & MODE_PACKETIZED ? TRUE : FALSE);
+
+  gst_pad_set_active (mysrcpad, TRUE);
+  gst_element_set_state (dec, GST_STATE_PLAYING);
+  gst_pad_set_active (mysinkpad, TRUE);
+
+  send_startup_events ();
+
+  /* push a new segment */
+  gst_segment_init (&segment, GST_FORMAT_TIME);
+  fail_unless (gst_pad_push_event (mysrcpad, gst_event_new_segment (&segment)));
+
+  /* push header only in packetized subframe mode */
+  if (mode == (MODE_PACKETIZED | MODE_SUBFRAMES)) {
+    buffer = gst_buffer_new_and_alloc (0);
+    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_HEADER);
+    fail_unless (gst_pad_push (mysrcpad, buffer) == GST_FLOW_OK);
+  }
+
+  /* push buffers, the data is actually a number so we can track them */
+  for (i = 0; i < num_buffers; i++) {
+    buffer = create_test_buffer (i / num_subframes);
+    if ((i + 1) % num_subframes == 0)
+      GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_MARKER);
+    if (mode & MODE_META_ROI)
+      gst_buffer_add_video_region_of_interest_meta (buffer, "face", 0, 0, 10,
+          10);
+
+    fail_unless (gst_pad_push (mysrcpad, buffer) == GST_FLOW_OK);
+    fail_unless (gst_pad_push_event (mysrcpad,
+            gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
+                gst_structure_new_empty ("custom1"))));
+  }
+  /* Send EOS */
+  fail_unless (gst_pad_push_event (mysrcpad, gst_event_new_eos ()));
+
+  /* Test that no frames or pending events are remaining in the base class */
+  list = gst_video_decoder_get_frames (GST_VIDEO_DECODER (dec));
+  fail_unless (g_list_length (list) == 0);
+  g_list_free_full (list, (GDestroyNotify) gst_video_codec_frame_unref);
+
+  /* check that all buffers were received by our source pad 1 output buffer for 4 input buffer */
+  fail_unless (g_list_length (buffers) == num_buffers / num_subframes);
+
+  i = 0;
+  for (iter = buffers; iter; iter = g_list_next (iter)) {
+    GstMapInfo map;
+    guint num;
+    GstMeta *meta;
+    gpointer state = NULL;
+
+    buffer = iter->data;
+    while ((meta = gst_buffer_iterate_meta (buffer, &state))) {
+      if (meta->info->api == GST_VIDEO_REGION_OF_INTEREST_META_API_TYPE)
+        num_roi_metas++;
+    }
+    gst_buffer_map (buffer, &map, GST_MAP_READ);
+    /* Test that the buffer is carrying the expected value 'num' */
+    num = *(guint64 *) map.data;
+
+    fail_unless (i == num);
+    /* Test that the buffer metadata are correct */
+    fail_unless (GST_BUFFER_PTS (buffer) == gst_util_uint64_scale_round (i,
+            GST_SECOND * TEST_VIDEO_FPS_D, TEST_VIDEO_FPS_N));
+    fail_unless (GST_BUFFER_DURATION (buffer) ==
+        gst_util_uint64_scale_round (GST_SECOND, TEST_VIDEO_FPS_D,
+            TEST_VIDEO_FPS_N));
+
+
+    gst_buffer_unmap (buffer, &map);
+    i++;
+  }
+
+  /* Every input buffer carried one ROI meta and all subframe metas are
+   * accumulated on the output frame, so we expect num_buffers metas in
+   * total. Note: this was 'mode &= MODE_META_ROI', an assignment typo
+   * that also clobbered 'mode'. */
+  if (mode & MODE_META_ROI)
+    fail_unless (num_roi_metas == num_buffers);
+
+  g_list_free_full (buffers, (GDestroyNotify) gst_buffer_unref);
+  buffers = NULL;
+
+  cleanup_videodecodertest ();
+}
+
+/* Like the subframe playback test, but every input buffer carries an
+ * invalid (NONE) PTS: the tester subclass drops all frames, so no output
+ * buffer must be produced and no frame may remain in the base class. */
+static void
+videodecoder_playback_invalid_ts_subframe_mode (SubframeMode mode)
+{
+  GstSegment segment;
+  GstBuffer *buffer;
+  guint i;
+  gint num_buffers = NUM_BUFFERS;
+  gint num_subframes = 1;
+  GList *list;
+
+  setup_videodecodertester (NULL, NULL);
+
+  /* Allow to test combination of subframes and packetized configuration
+   * 0-0: no subframes not packetized.
+   * 0-1: subframes not packetized.
+   * 1-0: no subframes packetized.
+   * 1-1: subframes and packetized.
+   */
+  if (mode & MODE_SUBFRAMES) {
+    gst_video_decoder_set_subframe_mode (GST_VIDEO_DECODER (dec), TRUE);
+    num_subframes = NUM_SUB_BUFFERS;
+  }
+
+  gst_video_decoder_set_packetized (GST_VIDEO_DECODER (dec),
+      mode & MODE_PACKETIZED ? TRUE : FALSE);
+
+  gst_pad_set_active (mysrcpad, TRUE);
+  gst_element_set_state (dec, GST_STATE_PLAYING);
+  gst_pad_set_active (mysinkpad, TRUE);
+
+  send_startup_events ();
+
+  /* push a new segment */
+  gst_segment_init (&segment, GST_FORMAT_TIME);
+
+  fail_unless (gst_pad_push_event (mysrcpad, gst_event_new_segment (&segment)));
+
+  /* push header only in packetized subframe mode */
+  if (mode == (MODE_PACKETIZED | MODE_SUBFRAMES)) {
+    buffer = gst_buffer_new_and_alloc (0);
+    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_HEADER);
+    fail_unless (gst_pad_push (mysrcpad, buffer) == GST_FLOW_OK);
+  }
+
+  /* push buffers, the data is actually a number so we can track them */
+  for (i = 0; i < num_buffers; i++) {
+    buffer = create_test_buffer (i / num_subframes);
+    GST_BUFFER_PTS (buffer) = GST_CLOCK_TIME_NONE;
+    if ((i + 1) % num_subframes == 0)
+      GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_MARKER);
+
+    fail_unless (gst_pad_push (mysrcpad, buffer) == GST_FLOW_OK);
+    fail_unless (gst_pad_push_event (mysrcpad,
+            gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
+                gst_structure_new_empty ("custom1"))));
+  }
+  /* Send EOS */
+  fail_unless (gst_pad_push_event (mysrcpad, gst_event_new_eos ()));
+
+  /* Test that no frames or pending events are remaining in the base class */
+  list = gst_video_decoder_get_frames (GST_VIDEO_DECODER (dec));
+  fail_unless (g_list_length (list) == 0);
+  g_list_free_full (list, (GDestroyNotify) gst_video_codec_frame_unref);
+
+  /* check that NO buffer reached our source pad: every frame had an
+   * invalid timestamp and must have been dropped */
+  fail_unless (g_list_length (buffers) == 0);
+
+
+  cleanup_videodecodertest ();
+}
+
+/* Exercise the base class in every combination of subframe / packetized
+ * modes, plus the ROI-metadata and invalid-timestamp variants. */
+GST_START_TEST (videodecoder_playback_parsed)
+{
+  videodecoder_playback_subframe_mode (MODE_NONE);
+}
+
+GST_END_TEST;
+
+GST_START_TEST (videodecoder_playback_packetized)
+{
+  videodecoder_playback_subframe_mode (MODE_PACKETIZED);
+}
+
+GST_END_TEST;
+
+GST_START_TEST (videodecoder_playback_parsed_subframes)
+{
+  videodecoder_playback_subframe_mode (MODE_SUBFRAMES);
+}
+
+GST_END_TEST;
+
+GST_START_TEST (videodecoder_playback_packetized_subframes)
+{
+  videodecoder_playback_subframe_mode (MODE_SUBFRAMES | MODE_PACKETIZED);
+}
+
+GST_END_TEST;
+
+GST_START_TEST (videodecoder_playback_packetized_subframes_metadata)
+{
+  videodecoder_playback_subframe_mode (MODE_SUBFRAMES |
+      MODE_PACKETIZED | MODE_META_ROI);
+}
+
+GST_END_TEST;
+
+GST_START_TEST (videodecoder_playback_invalid_ts_packetized)
+{
+  videodecoder_playback_invalid_ts_subframe_mode (MODE_PACKETIZED);
+}
+
+GST_END_TEST;
+
+GST_START_TEST (videodecoder_playback_invalid_ts_packetized_subframes)
+{
+  videodecoder_playback_invalid_ts_subframe_mode (MODE_SUBFRAMES |
+      MODE_PACKETIZED);
+}
+
+GST_END_TEST;
+
+
+
static Suite *
gst_videodecoder_suite (void)
{
tcase_add_test (tc, videodecoder_buffer_after_segment);
tcase_add_test (tc, videodecoder_first_data_is_gap);
- tcase_add_test (tc, videodecoder_backwards_playback);
+ tcase_add_test (tc, videodecoder_backwards_playback_normal);
+ tcase_add_test (tc, videodecoder_backwards_playback_subframes);
tcase_add_test (tc, videodecoder_backwards_buffer_after_segment);
tcase_add_test (tc, videodecoder_flush_events);
G_N_ELEMENTS (test_default_caps));
tcase_add_test (tc, videodecoder_playback_event_order);
+ tcase_add_test (tc, videodecoder_playback_parsed);
+ tcase_add_test (tc, videodecoder_playback_packetized);
+ tcase_add_test (tc, videodecoder_playback_parsed_subframes);
+ tcase_add_test (tc, videodecoder_playback_packetized_subframes);
+ tcase_add_test (tc, videodecoder_playback_packetized_subframes_metadata);
+ tcase_add_test (tc, videodecoder_playback_invalid_ts_packetized);
+ tcase_add_test (tc, videodecoder_playback_invalid_ts_packetized_subframes);
return s;
}