-SUBDIRS = interfaces signalprocessor codecparsers
+SUBDIRS = interfaces signalprocessor codecparsers video
noinst_HEADERS = gst-i18n-plugin.h gettext.h
DIST_SUBDIRS = interfaces signalprocessor video basecamerabinsrc codecparsers
static GstElementClass *parent_class = NULL;
+/* NOTE (Edward): Do not use G_DEFINE_* because we need to have
+ * a GClassInitFunc called with the target class (which the macros
+ * don't handle). */
static void gst_base_video_codec_class_init (GstBaseVideoCodecClass * klass);
static void gst_base_video_codec_init (GstBaseVideoCodec * dec,
GstBaseVideoCodecClass * klass);
gobject_class = G_OBJECT_CLASS (klass);
element_class = GST_ELEMENT_CLASS (klass);
+ parent_class = g_type_class_peek_parent (klass);
+
gobject_class->finalize = gst_base_video_codec_finalize;
element_class->change_state = gst_base_video_codec_change_state;
base_video_codec->system_frame_number++;
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_codec);
+ GST_LOG_OBJECT (base_video_codec, "Created new frame %p (sfn:%d)",
+ frame, frame->system_frame_number);
+
return frame;
}
{
g_return_if_fail (frame != NULL);
+ GST_LOG ("Freeing frame %p (sfn:%d)", frame, frame->system_frame_number);
+
if (frame->sink_buffer) {
gst_buffer_unref (frame->sink_buffer);
}
#include <gst/gst.h>
#include <gst/base/gstadapter.h>
#include <gst/video/video.h>
+#include <gst/video/gstvideopool.h>
+#include <gst/video/gstmetavideo.h>
G_BEGIN_DECLS
guint64 system_frame_number;
GList *frames; /* Protected with OBJECT_LOCK */
- GstVideoState state;
+ GstVideoState state; /* Compressed video pad */
+ GstVideoInfo info; /* Raw video pad */
GstSegment segment;
gdouble proportion;
static void gst_base_video_decoder_finalize (GObject * object);
-static gboolean gst_base_video_decoder_sink_setcaps (GstPad * pad,
+static gboolean gst_base_video_decoder_setcaps (GstBaseVideoDecoder * vdec,
GstCaps * caps);
static gboolean gst_base_video_decoder_sink_event (GstPad * pad,
GstEvent * event);
}
static gboolean
-gst_base_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps)
+gst_base_video_decoder_setcaps (GstBaseVideoDecoder * base_video_decoder,
+ GstCaps * caps)
{
- GstBaseVideoDecoder *base_video_decoder;
GstBaseVideoDecoderClass *base_video_decoder_class;
GstStructure *structure;
const GValue *codec_data;
GstVideoState state;
gboolean ret = TRUE;
- base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad));
base_video_decoder_class =
GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder);
structure = gst_caps_get_structure (caps, 0);
- gst_video_format_parse_caps (caps, NULL, &state.width, &state.height);
+ /* FIXME : Add have_{width_height|framerate|par} fields to
+ * GstVideoState so we can make better decisions
+ */
- /* this one fails if no framerate in caps */
- if (!gst_video_parse_caps_framerate (caps, &state.fps_n, &state.fps_d)) {
+ gst_structure_get_int (structure, "width", &state.width);
+ gst_structure_get_int (structure, "height", &state.height);
+
+ if (!gst_structure_get_fraction (structure, "framerate", &state.fps_n,
+ &state.fps_d)) {
state.fps_n = 0;
state.fps_d = 1;
}
- /* but the p-a-r sets 1/1 instead, which is not quite informative ... */
- if (!gst_structure_has_field (structure, "pixel-aspect-ratio") ||
- !gst_video_parse_caps_pixel_aspect_ratio (caps,
+
+ if (!gst_structure_get_fraction (structure, "pixel-aspect-ratio",
&state.par_n, &state.par_d)) {
state.par_n = 0;
state.par_d = 1;
}
state.have_interlaced =
- gst_video_format_parse_caps_interlaced (caps, &state.interlaced);
+ gst_structure_get_boolean (structure, "interlaced", &state.interlaced);
codec_data = gst_structure_get_value (structure, "codec_data");
if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER) {
- state.codec_data = GST_BUFFER (gst_value_dup_mini_object (codec_data));
+ state.codec_data = GST_BUFFER (gst_value_get_buffer (codec_data));
}
if (base_video_decoder_class->set_format) {
+ GST_LOG_OBJECT (base_video_decoder, "Calling ::set_format()");
ret = base_video_decoder_class->set_format (base_video_decoder, &state);
}
if (ret) {
- gst_buffer_replace (&GST_BASE_VIDEO_CODEC (base_video_decoder)->state.
- codec_data, NULL);
+ gst_buffer_replace (&GST_BASE_VIDEO_CODEC (base_video_decoder)->
+ state.codec_data, NULL);
gst_caps_replace (&GST_BASE_VIDEO_CODEC (base_video_decoder)->state.caps,
NULL);
GST_BASE_VIDEO_CODEC (base_video_decoder)->state = state;
}
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder);
- g_object_unref (base_video_decoder);
return ret;
}
base_video_decoder->output_adapter = NULL;
}
+ if (base_video_decoder->pool) {
+ g_object_unref (base_video_decoder->pool);
+ base_video_decoder->pool = NULL;
+ }
+
G_OBJECT_CLASS (parent_class)->finalize (object);
}
GstCaps *caps;
gst_event_parse_caps (event, &caps);
- ret = gst_base_video_decoder_sink_setcaps (pad, caps);
+ ret = gst_base_video_decoder_setcaps (base_video_decoder, caps);
gst_event_unref (event);
break;
}
}
do {
+ GST_LOG_OBJECT (base_video_decoder, "Calling ::parse_data()");
ret = klass->parse_data (base_video_decoder, FALSE);
} while (ret == GST_FLOW_OK);
GST_FORMAT_UNDEFINED) {
GstEvent *event;
GstFlowReturn ret;
+ GstSegment *segment = &GST_BASE_VIDEO_CODEC (base_video_decoder)->segment;
GST_WARNING_OBJECT (base_video_decoder,
"Received buffer without a new-segment. "
"Assuming timestamps start from 0.");
- gst_segment_set_newsegment_full (&GST_BASE_VIDEO_CODEC
- (base_video_decoder)->segment, FALSE, 1.0, 1.0, GST_FORMAT_TIME, 0,
- GST_CLOCK_TIME_NONE, 0);
+ gst_segment_init (segment, GST_FORMAT_TIME);
- event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0,
- GST_CLOCK_TIME_NONE, 0);
+ event = gst_event_new_segment (segment);
ret = gst_base_video_decoder_push_src_event (base_video_decoder, event);
if (!ret) {
tff ^= 1;
}
if (tff) {
- GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_TFF);
+ GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
} else {
- GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_TFF);
+ GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
}
- GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_RFF);
- GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD);
+ GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_FLAG_RFF);
+ GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_FLAG_ONEFIELD);
if (frame->n_fields == 3) {
- GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_RFF);
+ GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_FLAG_RFF);
} else if (frame->n_fields == 1) {
- GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD);
+ GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_FLAG_ONEFIELD);
}
}
if (GST_BASE_VIDEO_CODEC (base_video_decoder)->discont) {
GST_BASE_VIDEO_CODEC (base_video_decoder)->time = GST_CLOCK_TIME_NONE;
}
- gst_buffer_set_caps (src_buffer,
- GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder)));
-
GST_LOG_OBJECT (base_video_decoder, "pushing frame ts %" GST_TIME_FORMAT
", duration %" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)),
GST_TIME_ARGS (GST_BUFFER_DURATION (src_buffer)));
if (base_video_decoder->sink_clipping) {
- gint64 start = GST_BUFFER_TIMESTAMP (src_buffer);
- gint64 stop = GST_BUFFER_TIMESTAMP (src_buffer) +
+ guint64 start = GST_BUFFER_TIMESTAMP (src_buffer);
+ guint64 stop = GST_BUFFER_TIMESTAMP (src_buffer) +
GST_BUFFER_DURATION (src_buffer);
GstSegment *segment = &GST_BASE_VIDEO_CODEC (base_video_decoder)->segment;
frame->presentation_timestamp);
/* do something with frame */
+ GST_LOG_OBJECT (base_video_decoder, "Calling ::handle_frame()");
ret = base_video_decoder_class->handle_frame (base_video_decoder, frame);
if (ret != GST_FLOW_OK) {
GST_DEBUG_OBJECT (base_video_decoder, "flow error %s",
*
* Sets src pad caps according to currently configured #GstVideoState.
*
+ * The #GstVideoInfo and #GstBufferPool will be created and negotiated
+ * according to those values.
+ *
+ * Returns: %TRUE if the format was properly negotiated, else %FALSE.
*/
gboolean
gst_base_video_decoder_set_src_caps (GstBaseVideoDecoder * base_video_decoder)
{
GstCaps *caps;
- GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state;
+ GstBaseVideoCodec *codec = GST_BASE_VIDEO_CODEC (base_video_decoder);
+ GstVideoState *state = &codec->state;
+ GstVideoInfo *info = &codec->info;
+ GstQuery *query;
+ GstBufferPool *pool = NULL;
+ GstStructure *config;
+ guint size, min, max, prefix, alignment;
gboolean ret;
/* minimum sense */
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder);
+ gst_video_info_set_format (info, state->format, state->width, state->height);
+
/* sanitize */
if (state->fps_n == 0 || state->fps_d == 0) {
state->fps_n = 0;
state->par_d = 1;
}
- caps = gst_video_format_new_caps (state->format,
- state->width, state->height,
- state->fps_n, state->fps_d, state->par_n, state->par_d);
- gst_caps_set_simple (caps, "interlaced",
- G_TYPE_BOOLEAN, state->interlaced, NULL);
+ info->par_n = state->par_n;
+ info->par_d = state->par_d;
+ info->fps_n = state->fps_n;
+ info->fps_d = state->fps_d;
+
+ if (state->have_interlaced) {
+ if (state->interlaced)
+ GST_VIDEO_INFO_FLAG_SET (info, GST_VIDEO_FLAG_INTERLACED);
+ if (state->top_field_first)
+ GST_VIDEO_INFO_FLAG_SET (info, GST_VIDEO_FLAG_TFF);
+ }
+
+ /* FIXME : Handle chroma site */
+ /* FIXME : Handle colorimetry */
+
+ caps = gst_video_info_to_caps (info);
GST_DEBUG_OBJECT (base_video_decoder, "setting caps %" GST_PTR_FORMAT, caps);
caps);
gst_caps_unref (caps);
- /* arrange for derived info */
- state->bytes_per_picture =
- gst_video_format_get_size (state->format, state->width, state->height);
+ /* Negotiate pool */
+ query = gst_query_new_allocation (caps, TRUE);
+
+ if (gst_pad_peer_query (codec->srcpad, query)) {
+ GST_DEBUG_OBJECT (codec, "got downstream ALLOCATION hints");
+ /* we got configuration from our peer, parse them */
+ gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
+ &alignment, &pool);
+ size = MAX (size, info->size);
+ } else {
+ GST_DEBUG_OBJECT (codec, "didn't get downstream ALLOCATION hints");
+ size = info->size;
+ min = max = 0;
+ prefix = 0;
+ alignment = 0;
+ }
+
+ if (pool == NULL) {
+ /* we did not get a pool, make one ourselves then */
+ pool = gst_buffer_pool_new ();
+ }
+
+ if (base_video_decoder->pool)
+ gst_object_unref (base_video_decoder->pool);
+ base_video_decoder->pool = pool;
+
+ config = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_set (config, caps, size, min, max, prefix, alignment);
+ state->bytes_per_picture = size;
+
+ /* just set the option, if the pool can support it we will transparently use
+ * it through the video info API. We could also see if the pool supports this
+ * option and only activate it then. */
+ gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META);
+
+ /* check if downstream supports cropping */
+ base_video_decoder->use_cropping =
+ gst_query_has_allocation_meta (query, GST_META_API_VIDEO_CROP);
+
+ gst_buffer_pool_set_config (pool, config);
+ /* and activate */
+ gst_buffer_pool_set_active (pool, TRUE);
+
+ gst_query_unref (query);
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder);
* gst_base_video_decoder_alloc_src_buffer:
* @base_video_decoder: a #GstBaseVideoDecoder
*
- * Helper function that uses gst_pad_alloc_buffer_and_set_caps
- * to allocate a buffer to hold a video frame for @base_video_decoder's
- * current #GstVideoState.
+ * Helper function that returns a buffer from the decoder's configured
+ * #GstBufferPool.
*
* Returns: allocated buffer
*/
gst_base_video_decoder_alloc_src_buffer (GstBaseVideoDecoder *
base_video_decoder)
{
- GstBuffer *buffer;
- GstFlowReturn flow_ret;
- GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state;
- int num_bytes = state->bytes_per_picture;
-
- GST_DEBUG ("alloc src buffer caps=%" GST_PTR_FORMAT,
- GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder)));
+ GstBuffer *buffer = NULL;
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder);
- flow_ret =
- gst_pad_alloc_buffer_and_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD
- (base_video_decoder), GST_BUFFER_OFFSET_NONE, num_bytes,
- GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder)),
- &buffer);
-
- if (flow_ret != GST_FLOW_OK) {
- buffer = gst_buffer_new_and_alloc (num_bytes);
- gst_buffer_set_caps (buffer,
- GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder)));
- }
+ gst_buffer_pool_acquire_buffer (base_video_decoder->pool, &buffer, NULL);
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_decoder);
+
return buffer;
}
base_video_decoder, GstVideoFrameState * frame)
{
GstFlowReturn flow_ret;
- GstVideoState *state = &GST_BASE_VIDEO_CODEC (base_video_decoder)->state;
- int num_bytes = state->bytes_per_picture;
- g_return_val_if_fail (state->bytes_per_picture != 0, GST_FLOW_ERROR);
- g_return_val_if_fail (GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD
- (base_video_decoder)) != NULL, GST_FLOW_ERROR);
+ GST_LOG_OBJECT (base_video_decoder, "alloc buffer");
- GST_LOG_OBJECT (base_video_decoder, "alloc buffer size %d", num_bytes);
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_decoder);
flow_ret =
- gst_pad_alloc_buffer_and_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD
- (base_video_decoder), GST_BUFFER_OFFSET_NONE, num_bytes,
- GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder)),
- &frame->src_buffer);
+ gst_buffer_pool_acquire_buffer (base_video_decoder->pool,
+ &frame->src_buffer, NULL);
if (flow_ret != GST_FLOW_OK) {
GST_WARNING_OBJECT (base_video_decoder, "failed to get buffer %s",
* enclosed in parentheses)
* @ret: variable to receive return value
*
- * Utility function that audio decoder elements can use in case they encountered
+ * Utility function that video decoder elements can use in case they encountered
* a data processing error that may be fatal for the current "data unit" but
* need not prevent subsequent decoding. Such errors are counted and if there
* are too many, as configured in the context's max_errors, the pipeline will
* is logged. In either case, @ret is set to the proper value to
* return to upstream/caller (indicating either GST_FLOW_ERROR or GST_FLOW_OK).
*/
-#define GST_BASE_AUDIO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \
+#define GST_BASE_VIDEO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \
G_STMT_START { \
gchar *__txt = _gst_element_error_printf text; \
gchar *__dbg = _gst_element_error_printf debug; \
int reorder_depth;
int distance_from_sync;
+ /* Raw video bufferpool */
+ GstBufferPool *pool;
+ /* Indicates whether downstream can handle
+ * GST_META_API_VIDEO_CROP */
+ gboolean use_cropping;
+
/* FIXME before moving to base */
void *padding[GST_PADDING_LARGE];
};
/**
- * GstBaseAudioDecoderClass:
+ * GstBaseVideoDecoderClass:
* @start: Optional.
* Called when the element starts processing.
* Allows opening external resources.
static void gst_base_video_encoder_finalize (GObject * object);
-static gboolean gst_base_video_encoder_sink_setcaps (GstPad * pad,
- GstCaps * caps);
-static GstCaps *gst_base_video_encoder_sink_getcaps (GstPad * pad);
+static GstCaps *gst_base_video_encoder_sink_getcaps (GstPad * pad,
+ GstCaps * filter);
static gboolean gst_base_video_encoder_src_event (GstPad * pad,
GstEvent * event);
static gboolean gst_base_video_encoder_sink_event (GstPad * pad,
static gboolean gst_base_video_encoder_src_query (GstPad * pad,
GstQuery * query);
-
-static void
-_do_init (GType object_type)
-{
- const GInterfaceInfo preset_interface_info = {
- NULL, /* interface_init */
- NULL, /* interface_finalize */
- NULL /* interface_data */
- };
-
- g_type_add_interface_static (object_type, GST_TYPE_PRESET,
- &preset_interface_info);
-}
-
-GST_BOILERPLATE_FULL (GstBaseVideoEncoder, gst_base_video_encoder,
- GstBaseVideoCodec, GST_TYPE_BASE_VIDEO_CODEC, _do_init);
-
-static void
-gst_base_video_encoder_base_init (gpointer g_class)
-{
- GST_DEBUG_CATEGORY_INIT (basevideoencoder_debug, "basevideoencoder", 0,
- "Base Video Encoder");
-
-}
+#define gst_base_video_encoder_parent_class parent_class
+G_DEFINE_TYPE_WITH_CODE (GstBaseVideoEncoder, gst_base_video_encoder,
+ GST_TYPE_BASE_VIDEO_CODEC, G_IMPLEMENT_INTERFACE (GST_TYPE_PRESET, NULL);
+ );
static void
gst_base_video_encoder_class_init (GstBaseVideoEncoderClass * klass)
GObjectClass *gobject_class;
GstElementClass *gstelement_class;
+ GST_DEBUG_CATEGORY_INIT (basevideoencoder_debug, "basevideoencoder", 0,
+ "Base Video Encoder");
+
gobject_class = G_OBJECT_CLASS (klass);
gstelement_class = GST_ELEMENT_CLASS (klass);
}
static void
-gst_base_video_encoder_init (GstBaseVideoEncoder * base_video_encoder,
- GstBaseVideoEncoderClass * klass)
+gst_base_video_encoder_init (GstBaseVideoEncoder * base_video_encoder)
{
GstPad *pad;
GST_DEBUG_FUNCPTR (gst_base_video_encoder_chain));
gst_pad_set_event_function (pad,
GST_DEBUG_FUNCPTR (gst_base_video_encoder_sink_event));
- gst_pad_set_setcaps_function (pad,
- GST_DEBUG_FUNCPTR (gst_base_video_encoder_sink_setcaps));
gst_pad_set_getcaps_function (pad,
GST_DEBUG_FUNCPTR (gst_base_video_encoder_sink_getcaps));
}
static gboolean
-gst_base_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps)
+gst_base_video_encoder_sink_setcaps (GstBaseVideoEncoder * base_video_encoder,
+ GstCaps * caps)
{
- GstBaseVideoEncoder *base_video_encoder;
GstBaseVideoEncoderClass *base_video_encoder_class;
- GstStructure *structure;
+ GstBaseVideoCodec *codec = GST_BASE_VIDEO_CODEC (base_video_encoder);
+ GstVideoInfo *info, tmp_info;
GstVideoState *state, tmp_state;
gboolean ret;
- gboolean changed = FALSE;
+ gboolean changed = TRUE;
+
+ GST_DEBUG_OBJECT (base_video_encoder, "setcaps %" GST_PTR_FORMAT, caps);
- base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad));
base_video_encoder_class =
GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);
/* subclass should do something here ... */
g_return_val_if_fail (base_video_encoder_class->set_format != NULL, FALSE);
- GST_DEBUG_OBJECT (base_video_encoder, "setcaps %" GST_PTR_FORMAT, caps);
-
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder);
- state = &GST_BASE_VIDEO_CODEC (base_video_encoder)->state;
+ /* Get GstVideoInfo from upstream caps */
+ info = &codec->info;
+ if (!gst_video_info_from_caps (&tmp_info, caps))
+ goto exit;
+
+ state = &codec->state;
memset (&tmp_state, 0, sizeof (tmp_state));
tmp_state.caps = gst_caps_ref (caps);
- structure = gst_caps_get_structure (caps, 0);
-
- ret =
- gst_video_format_parse_caps (caps, &tmp_state.format, &tmp_state.width,
- &tmp_state.height);
- if (!ret)
- goto exit;
- changed = (tmp_state.format != state->format
- || tmp_state.width != state->width || tmp_state.height != state->height);
-
- if (!gst_video_parse_caps_framerate (caps, &tmp_state.fps_n,
- &tmp_state.fps_d)) {
- tmp_state.fps_n = 0;
- tmp_state.fps_d = 1;
+ /* Check if input caps changed */
+ if (info->finfo) {
+ /* Check if anything changed */
+ changed = GST_VIDEO_INFO_FORMAT (&tmp_info) != GST_VIDEO_INFO_FORMAT (info);
+ changed |= GST_VIDEO_INFO_FLAGS (&tmp_info) != GST_VIDEO_INFO_FLAGS (info);
+ changed |= GST_VIDEO_INFO_WIDTH (&tmp_info) != GST_VIDEO_INFO_WIDTH (info);
+ changed |=
+ GST_VIDEO_INFO_HEIGHT (&tmp_info) != GST_VIDEO_INFO_HEIGHT (info);
+ changed |= GST_VIDEO_INFO_SIZE (&tmp_info) != GST_VIDEO_INFO_SIZE (info);
+ changed |= GST_VIDEO_INFO_VIEWS (&tmp_info) != GST_VIDEO_INFO_VIEWS (info);
+ changed |= GST_VIDEO_INFO_FPS_N (&tmp_info) != GST_VIDEO_INFO_FPS_N (info);
+ changed |= GST_VIDEO_INFO_FPS_D (&tmp_info) != GST_VIDEO_INFO_FPS_D (info);
+ changed |= GST_VIDEO_INFO_PAR_N (&tmp_info) != GST_VIDEO_INFO_PAR_N (info);
+ changed |= GST_VIDEO_INFO_PAR_D (&tmp_info) != GST_VIDEO_INFO_PAR_D (info);
}
- changed = changed || (tmp_state.fps_n != state->fps_n
- || tmp_state.fps_d != state->fps_d);
- if (!gst_video_parse_caps_pixel_aspect_ratio (caps, &tmp_state.par_n,
- &tmp_state.par_d)) {
- tmp_state.par_n = 1;
- tmp_state.par_d = 1;
- }
- changed = changed || (tmp_state.par_n != state->par_n
- || tmp_state.par_d != state->par_d);
-
- tmp_state.have_interlaced =
- gst_structure_get_boolean (structure, "interlaced",
- &tmp_state.interlaced);
- changed = changed || (tmp_state.have_interlaced != state->have_interlaced
- || tmp_state.interlaced != state->interlaced);
-
- tmp_state.bytes_per_picture =
- gst_video_format_get_size (tmp_state.format, tmp_state.width,
- tmp_state.height);
- tmp_state.clean_width = tmp_state.width;
- tmp_state.clean_height = tmp_state.height;
+ /* Copy over info from input GstVideoInfo into output GstVideoState */
+ tmp_state.format = GST_VIDEO_INFO_FORMAT (&tmp_info);
+ tmp_state.bytes_per_picture = tmp_info.size;
+ tmp_state.width = tmp_info.width;
+ tmp_state.height = tmp_info.height;
+ tmp_state.fps_n = tmp_info.fps_n;
+ tmp_state.fps_d = tmp_info.fps_d;
+ tmp_state.par_n = tmp_info.par_n;
+ tmp_state.par_d = tmp_info.par_d;
+ tmp_state.clean_width = tmp_info.width;
+ tmp_state.clean_height = tmp_info.height;
tmp_state.clean_offset_left = 0;
tmp_state.clean_offset_top = 0;
+ /* FIXME (Edward): We need flags in GstVideoInfo to know whether the
+ * interlaced field was present in the input caps */
+ tmp_state.have_interlaced = tmp_state.interlaced =
+ GST_VIDEO_INFO_FLAG_IS_SET (&tmp_info, GST_VIDEO_FLAG_INTERLACED);
+ tmp_state.top_field_first =
+ GST_VIDEO_INFO_FLAG_IS_SET (&tmp_info, GST_VIDEO_FLAG_TFF);
if (changed) {
/* arrange draining pending frames */
/* and subclass should be ready to configure format at any time around */
if (base_video_encoder_class->set_format)
ret =
- base_video_encoder_class->set_format (base_video_encoder, &tmp_state);
+ base_video_encoder_class->set_format (base_video_encoder, &tmp_info);
if (ret) {
gst_caps_replace (&state->caps, NULL);
*state = tmp_state;
+ *info = tmp_info;
}
} else {
/* no need to stir things up */
caps);
}
- g_object_unref (base_video_encoder);
-
return ret;
}
static GstCaps *
-gst_base_video_encoder_sink_getcaps (GstPad * pad)
+gst_base_video_encoder_sink_getcaps (GstPad * pad, GstCaps * filter)
{
GstBaseVideoEncoder *base_video_encoder;
const GstCaps *templ_caps;
const GValue *val;
GstStructure *s;
- s = gst_structure_id_empty_new (q_name);
+ s = gst_structure_new_id_empty (q_name);
if ((val = gst_structure_get_value (allowed_s, "width")))
gst_structure_set_value (s, "width", val);
if ((val = gst_structure_get_value (allowed_s, "height")))
}
}
+ GST_LOG_OBJECT (base_video_encoder, "filtered caps (first) %" GST_PTR_FORMAT,
+ filter_caps);
+
fcaps = gst_caps_intersect (filter_caps, templ_caps);
gst_caps_unref (filter_caps);
+ if (filter) {
+ GST_LOG_OBJECT (base_video_encoder, "intersecting with %" GST_PTR_FORMAT,
+ filter);
+ filter_caps = gst_caps_intersect (fcaps, filter);
+ gst_caps_unref (fcaps);
+ fcaps = filter_caps;
+ }
+
done:
gst_caps_replace (&allowed, NULL);
GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);
switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+ ret = gst_base_video_encoder_sink_setcaps (base_video_encoder, caps);
+ gst_event_unref (event);
+ }
+ break;
case GST_EVENT_EOS:
{
GstFlowReturn flow_ret;
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder);
break;
}
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- gboolean update;
- double rate;
- double applied_rate;
- GstFormat format;
- gint64 start;
- gint64 stop;
- gint64 position;
+ const GstSegment *segment;
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder);
- gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
- &format, &start, &stop, &position);
+ gst_event_parse_segment (event, &segment);
GST_DEBUG_OBJECT (base_video_encoder, "newseg rate %g, applied rate %g, "
"format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT
- ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format,
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
- GST_TIME_ARGS (position));
+ ", pos = %" GST_TIME_FORMAT, segment->rate, segment->applied_rate,
+ segment->format, GST_TIME_ARGS (segment->start),
+ GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->position));
- if (format != GST_FORMAT_TIME) {
+ if (segment->format != GST_FORMAT_TIME) {
GST_DEBUG_OBJECT (base_video_encoder, "received non TIME newsegment");
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder);
break;
base_video_encoder->a.at_eos = FALSE;
- gst_segment_set_newsegment_full (&GST_BASE_VIDEO_CODEC
- (base_video_encoder)->segment, update, rate, applied_rate, format,
- start, stop, position);
+ gst_segment_copy_into (segment, &GST_BASE_VIDEO_CODEC
+ (base_video_encoder)->segment);
GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder);
break;
}
GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder);
- if (!GST_PAD_CAPS (pad)) {
- ret = GST_FLOW_NOT_NEGOTIATED;
- goto done;
- }
-
GST_LOG_OBJECT (base_video_encoder,
"received buffer of size %d with ts %" GST_TIME_FORMAT
- ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf),
+ ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
}
if (base_video_encoder->sink_clipping) {
- gint64 start = GST_BUFFER_TIMESTAMP (buf);
- gint64 stop = start + GST_BUFFER_DURATION (buf);
- gint64 clip_start;
- gint64 clip_stop;
+ guint64 start = GST_BUFFER_TIMESTAMP (buf);
+ guint64 stop = start + GST_BUFFER_DURATION (buf);
+ guint64 clip_start;
+ guint64 clip_stop;
if (!gst_segment_clip (&GST_BASE_VIDEO_CODEC (base_video_encoder)->segment,
GST_FORMAT_TIME, start, stop, &clip_start, &clip_stop)) {
base_video_encoder->force_keyunit_event = NULL;
} else {
ev = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
- gst_structure_new ("GstForceKeyUnit", NULL));
+ gst_structure_new_empty ("GstForceKeyUnit"));
}
GST_OBJECT_UNLOCK (base_video_encoder);
- gst_structure_set (ev->structure,
+ gst_structure_set (gst_event_writable_structure (ev),
"timestamp", G_TYPE_UINT64, frame->presentation_timestamp,
"stream-time", G_TYPE_UINT64, stream_time,
"running-time", G_TYPE_UINT64, running_time, NULL);
/* update rate estimate */
GST_BASE_VIDEO_CODEC (base_video_encoder)->bytes +=
- GST_BUFFER_SIZE (frame->src_buffer);
+ gst_buffer_get_size (frame->src_buffer);
if (GST_CLOCK_TIME_IS_VALID (frame->presentation_duration)) {
GST_BASE_VIDEO_CODEC (base_video_encoder)->time +=
frame->presentation_duration;
GST_BASE_VIDEO_CODEC (base_video_encoder)->discont = FALSE;
}
- gst_buffer_set_caps (GST_BUFFER (frame->src_buffer),
- GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_encoder)));
-
if (base_video_encoder_class->shape_output) {
ret = base_video_encoder_class->shape_output (base_video_encoder, frame);
} else {
* Allows closing external resources.
* @set_format: Optional.
* Notifies subclass of incoming data format.
- * GstVideoState fields have already been
+ * GstVideoInfo fields have already been
* set according to provided caps.
* @handle_frame: Provides input frame to subclass.
* @finish: Optional.
gboolean (*stop) (GstBaseVideoEncoder *coder);
gboolean (*set_format) (GstBaseVideoEncoder *coder,
- GstVideoState *state);
+ GstVideoInfo *info);
GstFlowReturn (*handle_frame) (GstBaseVideoEncoder *coder,
GstVideoFrameState *frame);