...and also clear all existing frames when resetting the decoder or encoder.
static GstElementClass *parent_class = NULL;
+/* Register GstVideoFrameState as a boxed GType, using the new ref/unref
+ * pair as the boxed copy/free functions. */
+G_DEFINE_BOXED_TYPE (GstVideoFrameState, gst_video_frame_state,
+ (GBoxedCopyFunc) gst_video_frame_state_ref,
+ (GBoxedFreeFunc) gst_video_frame_state_unref)
+
/* NOTE (Edward): Do not use G_DEFINE_* because we need to have
* a GClassInitFunc called with the target class (which the macros
* don't handle). */
-static void gst_base_video_codec_class_init (GstBaseVideoCodecClass * klass);
-static void gst_base_video_codec_init (GstBaseVideoCodec * dec,
+ static void gst_base_video_codec_class_init (GstBaseVideoCodecClass *
+ klass);
+ static void gst_base_video_codec_init (GstBaseVideoCodec * dec,
GstBaseVideoCodecClass * klass);
GType
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_codec);
for (g = base_video_codec->frames; g; g = g_list_next (g)) {
- gst_base_video_codec_free_frame ((GstVideoFrameState *) g->data);
+ gst_video_frame_state_unref ((GstVideoFrameState *) g->data);
}
g_list_free (base_video_codec->frames);
base_video_codec->frames = NULL;
return ret;
}
-GstVideoFrameState *
-gst_base_video_codec_new_frame (GstBaseVideoCodec * base_video_codec)
+/* Append @frame to @codec's pending-frame list. Takes a reference on
+ * @frame; that reference is released by gst_base_video_codec_remove_frame().
+ * NOTE(review): no stream lock is taken here — presumably callers hold
+ * GST_BASE_VIDEO_CODEC_STREAM_LOCK; confirm at call sites. */
+void
+gst_base_video_codec_append_frame (GstBaseVideoCodec * codec,
+ GstVideoFrameState * frame)
{
- GstVideoFrameState *frame;
+ g_return_if_fail (frame != NULL);
- frame = g_slice_new0 (GstVideoFrameState);
+ gst_video_frame_state_ref (frame);
+ codec->frames = g_list_append (codec->frames, frame);
+}
- GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_codec);
- frame->system_frame_number = base_video_codec->system_frame_number;
- base_video_codec->system_frame_number++;
- GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_codec);
+/* Remove @frame from @codec's pending-frame list, dropping the reference
+ * the list took in gst_base_video_codec_append_frame(). Does nothing if
+ * @frame is not in the list. */
+void
+gst_base_video_codec_remove_frame (GstBaseVideoCodec * codec,
+ GstVideoFrameState * frame)
+{
+ GList *link;
- GST_LOG_OBJECT (base_video_codec, "Created new frame %p (sfn:%d)",
- frame, frame->system_frame_number);
+ g_return_if_fail (frame != NULL);
- return frame;
+ link = g_list_find (codec->frames, frame);
+ if (link) {
+ gst_video_frame_state_unref ((GstVideoFrameState *) link->data);
+ codec->frames = g_list_delete_link (codec->frames, link);
+ }
}
-void
-gst_base_video_codec_free_frame (GstVideoFrameState * frame)
+/* Actually free @frame. Now static: only reachable through
+ * gst_video_frame_state_unref() when the reference count hits zero. */
+static void
+_gst_video_frame_state_free (GstVideoFrameState * frame)
{
g_return_if_fail (frame != NULL);
g_slice_free (GstVideoFrameState, frame);
}
+
+/* Allocate a new zero-filled GstVideoFrameState with a single reference.
+ * A unique system_frame_number is assigned under the codec stream lock. */
+GstVideoFrameState *
+gst_base_video_codec_new_frame (GstBaseVideoCodec * base_video_codec)
+{
+ GstVideoFrameState *frame;
+
+ frame = g_slice_new0 (GstVideoFrameState);
+
+ /* Caller owns the initial reference. */
+ frame->ref_count = 1;
+
+ GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_codec);
+ frame->system_frame_number = base_video_codec->system_frame_number;
+ base_video_codec->system_frame_number++;
+ GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_codec);
+
+ GST_LOG_OBJECT (base_video_codec, "Created new frame %p (sfn:%d)",
+ frame, frame->system_frame_number);
+
+ return frame;
+}
+
+/* Atomically increase @frame's reference count. Returns @frame for
+ * convenient chaining (and for use as a GBoxedCopyFunc). */
+GstVideoFrameState *
+gst_video_frame_state_ref (GstVideoFrameState * frame)
+{
+ g_return_val_if_fail (frame != NULL, NULL);
+
+ g_atomic_int_inc (&frame->ref_count);
+
+ return frame;
+}
+
+void
+gst_video_frame_state_unref (GstVideoFrameState * frame)
+{
+ g_return_if_fail (frame != NULL);
+ g_return_if_fail (frame->ref_count > 0);
+
+ if (g_atomic_int_dec_and_test (&frame->ref_count)) {
+ _gst_video_frame_state_free (frame);
+ }
+}
struct _GstVideoFrameState
{
+ /* Reference count, manipulated only through the g_atomic_int_* helpers;
+ * the frame is freed when it drops to zero. */
+ gint ref_count;
+
GstClockTime decode_timestamp;
GstClockTime presentation_timestamp;
GstClockTime presentation_duration;
void *padding[GST_PADDING_LARGE];
};
+GType gst_video_frame_state_get_type (void);
GType gst_base_video_codec_get_type (void);
+/* Frame-list management: append takes a reference on the frame, remove
+ * drops it again. */
+void gst_base_video_codec_append_frame (GstBaseVideoCodec *codec, GstVideoFrameState *frame);
+void gst_base_video_codec_remove_frame (GstBaseVideoCodec *codec, GstVideoFrameState *frame);
+
GstVideoFrameState * gst_base_video_codec_new_frame (GstBaseVideoCodec *base_video_codec);
-void gst_base_video_codec_free_frame (GstVideoFrameState *frame);
+
+/* Reference counting for GstVideoFrameState; replaces _free_frame(). */
+GstVideoFrameState * gst_video_frame_state_ref (GstVideoFrameState * frame);
+void gst_video_frame_state_unref (GstVideoFrameState * frame);
G_END_DECLS
g_list_foreach (dec->gather, (GFunc) gst_mini_object_unref, NULL);
g_list_free (dec->gather);
dec->gather = NULL;
- g_list_foreach (dec->decode, (GFunc) gst_base_video_codec_free_frame, NULL);
+ g_list_foreach (dec->decode, (GFunc) gst_video_frame_state_unref, NULL);
g_list_free (dec->decode);
dec->decode = NULL;
g_list_foreach (dec->parse, (GFunc) gst_mini_object_unref, NULL);
g_list_free (dec->parse);
dec->parse = NULL;
- g_list_foreach (dec->parse_gather, (GFunc) gst_base_video_codec_free_frame,
- NULL);
+ g_list_foreach (dec->parse_gather, (GFunc) gst_video_frame_state_unref, NULL);
g_list_free (dec->parse_gather);
dec->parse_gather = NULL;
+ /* Also drop every frame still tracked by the base codec class so a
+ * decoder reset leaves no stale frame state behind. */
+ g_list_foreach (GST_BASE_VIDEO_CODEC (dec)->frames,
+ (GFunc) gst_video_frame_state_unref, NULL);
+ g_list_free (GST_BASE_VIDEO_CODEC (dec)->frames);
+ GST_BASE_VIDEO_CODEC (dec)->frames = NULL;
}
static void
base_video_decoder->timestamps = NULL;
if (base_video_decoder->current_frame) {
- gst_base_video_codec_free_frame (base_video_decoder->current_frame);
+ gst_video_frame_state_unref (base_video_decoder->current_frame);
base_video_decoder->current_frame = NULL;
}
next = g_list_next (walk);
if (dec->current_frame)
- gst_base_video_codec_free_frame (dec->current_frame);
+ gst_video_frame_state_unref (dec->current_frame);
dec->current_frame = frame;
+ gst_video_frame_state_ref (dec->current_frame);
+
/* decode buffer, resulting data prepended to queue */
res = gst_base_video_decoder_have_frame_2 (dec);
gst_base_video_decoder_do_finish_frame (GstBaseVideoDecoder * dec,
GstVideoFrameState * frame)
{
- GST_BASE_VIDEO_CODEC (dec)->frames =
- g_list_remove (GST_BASE_VIDEO_CODEC (dec)->frames, frame);
+ gst_base_video_codec_remove_frame (GST_BASE_VIDEO_CODEC (dec), frame);
if (frame->src_buffer)
gst_buffer_unref (frame->src_buffer);
- gst_base_video_codec_free_frame (frame);
+ gst_video_frame_state_unref (frame);
}
/**
GST_TIME_ARGS (frame->decode_timestamp));
GST_LOG_OBJECT (base_video_decoder, "dist %d", frame->distance_from_sync);
- GST_BASE_VIDEO_CODEC (base_video_decoder)->frames =
- g_list_append (GST_BASE_VIDEO_CODEC (base_video_decoder)->frames, frame);
+ gst_base_video_codec_append_frame (GST_BASE_VIDEO_CODEC (base_video_decoder),
+ frame);
frame->deadline =
gst_segment_to_running_time (&GST_BASE_VIDEO_CODEC
}
exit:
+ /* current frame has either been added to parse_gather or sent to
+ handle frame so there is no need to unref it */
+
/* create new frame */
base_video_decoder->current_frame =
gst_base_video_decoder_new_frame (base_video_decoder);
#define gst_base_video_encoder_parent_class parent_class
G_DEFINE_TYPE_WITH_CODE (GstBaseVideoEncoder, gst_base_video_encoder,
- GST_TYPE_BASE_VIDEO_CODEC, G_IMPLEMENT_INTERFACE (GST_TYPE_PRESET, NULL););
+ GST_TYPE_BASE_VIDEO_CODEC, G_IMPLEMENT_INTERFACE (GST_TYPE_PRESET, NULL);
+ );
static void
gst_base_video_encoder_class_init (GstBaseVideoEncoderClass * klass)
/* everything should be away now */
if (codec->frames) {
/* not fatal/impossible though if subclass/codec eats stuff */
- GST_WARNING_OBJECT (enc, "still %d frames left after draining",
- g_list_length (codec->frames));
-#if 0
- /* FIXME should do this, but subclass may come up with it later on ?
- * and would then need refcounting or so on frames */
- g_list_foreach (codec->frames,
- (GFunc) gst_base_video_codec_free_frame, NULL);
-#endif
+ /* Frames are refcounted now, so the leftover list references can be
+ * dropped safely even if the subclass still holds its own references.
+ * NOTE(review): the "still N frames left after draining" warning was
+ * removed here — consider keeping it for diagnostics. */
+ g_list_foreach (codec->frames, (GFunc) gst_video_frame_state_unref, NULL);
+ g_list_free (codec->frames);
+ codec->frames = NULL;
}
return ret;
GST_BASE_VIDEO_CODEC (base_video_encoder)->frames =
g_list_remove (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame);
- gst_base_video_codec_free_frame (frame);
+ gst_video_frame_state_unref (frame);
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder);