#include <gst/video/video.h>
#include "gstvideoencoder.h"
#include "gstvideoutils.h"
+#include "gstvideoutilsprivate.h"
#include <gst/video/gstvideometa.h>
#include <gst/video/gstvideopool.h>
GstAllocator *allocator;
GstAllocationParams params;
+ /* upstream stream tags (global tags are passed through as-is) */
+ GstTagList *upstream_tags;
+
+ /* subclass tags */
GstTagList *tags;
+ GstTagMergeMode tags_merge_mode;
+
gboolean tags_changed;
+
+ GstClockTime min_pts;
+  /* adjustment needed on pts, dts, segment start and stop to accommodate
+   * min_pts */
+ GstClockTime time_adjustment;
};
typedef struct _ForcedKeyUnitEvent ForcedKeyUnitEvent;
GstVideoEncoderClass * klass);
static void gst_video_encoder_finalize (GObject * object);
-
+static void
+gst_video_encoder_release_frame (GstVideoEncoder * enc,
+ GstVideoCodecFrame * frame);
static gboolean gst_video_encoder_setcaps (GstVideoEncoder * enc,
GstCaps * caps);
static GstCaps *gst_video_encoder_sink_getcaps (GstVideoEncoder * encoder,
static gboolean gst_video_encoder_src_query_default (GstVideoEncoder * encoder,
GstQuery * query);
+static gboolean gst_video_encoder_transform_meta_default (GstVideoEncoder *
+ encoder, GstVideoCodecFrame * frame, GstMeta * meta);
+
/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
* method to get to the padtemplates */
GType
klass->negotiate = gst_video_encoder_negotiate_default;
klass->sink_query = gst_video_encoder_sink_query_default;
klass->src_query = gst_video_encoder_src_query_default;
+ klass->transform_meta = gst_video_encoder_transform_meta_default;
}
static GList *
priv->drained = TRUE;
- g_list_foreach (priv->frames, (GFunc) gst_video_codec_frame_unref, NULL);
- g_list_free (priv->frames);
- priv->frames = NULL;
-
priv->bytes = 0;
priv->time = 0;
+ priv->time_adjustment = GST_CLOCK_TIME_NONE;
+
if (hard) {
gst_segment_init (&encoder->input_segment, GST_FORMAT_TIME);
gst_segment_init (&encoder->output_segment, GST_FORMAT_TIME);
gst_video_codec_state_unref (priv->output_state);
priv->output_state = NULL;
+ if (priv->upstream_tags) {
+ gst_tag_list_unref (priv->upstream_tags);
+ priv->upstream_tags = NULL;
+ }
if (priv->tags)
gst_tag_list_unref (priv->tags);
priv->tags = NULL;
+ priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
priv->tags_changed = FALSE;
- priv->min_latency = 0;
- priv->max_latency = 0;
-
g_list_foreach (priv->headers, (GFunc) gst_event_unref, NULL);
g_list_free (priv->headers);
priv->headers = NULL;
encoder->priv->current_frame_events);
}
+ g_list_foreach (priv->frames, (GFunc) gst_video_codec_frame_unref, NULL);
+ g_list_free (priv->frames);
+ priv->frames = NULL;
+
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
return ret;
priv->headers = NULL;
priv->new_headers = FALSE;
+ priv->min_latency = 0;
+ priv->max_latency = 0;
+ priv->min_pts = GST_CLOCK_TIME_NONE;
+ priv->time_adjustment = GST_CLOCK_TIME_NONE;
+
gst_video_encoder_reset (encoder, TRUE);
}
tgt->par_d = ref->par_d;
tgt->fps_n = ref->fps_n;
tgt->fps_d = ref->fps_d;
+
+ GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
+ GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
}
return state;
if (encoder_class->reset) {
GST_FIXME_OBJECT (encoder, "GstVideoEncoder::reset() is deprecated");
- ret = encoder_class->reset (encoder, TRUE);
+ encoder_class->reset (encoder, TRUE);
}
/* and subclass should be ready to configure format at any time around */
/**
* gst_video_encoder_proxy_getcaps:
* @enc: a #GstVideoEncoder
- * @caps: initial caps
- * @filter: filter caps
+ * @caps: (allow-none): initial caps
+ * @filter: (allow-none): filter caps
*
* Returns caps that express @caps (or sink template caps if @caps == NULL)
* restricted to resolution/format/... combinations supported by downstream
* elements (e.g. muxers).
*
- * Returns: a #GstCaps owned by caller
+ * Returns: (transfer full): a #GstCaps owned by caller
*/
GstCaps *
gst_video_encoder_proxy_getcaps (GstVideoEncoder * encoder, GstCaps * caps,
    GstCaps * filter)
{
-  GstCaps *templ_caps;
-  GstCaps *allowed;
-  GstCaps *fcaps, *filter_caps;
-  gint i, j;
-
-  /* Allow downstream to specify width/height/framerate/PAR constraints
-   * and forward them upstream for video converters to handle
-   */
-  templ_caps =
-      caps ? gst_caps_ref (caps) :
-      gst_pad_get_pad_template_caps (encoder->sinkpad);
-  allowed = gst_pad_get_allowed_caps (encoder->srcpad);
-
-  if (!allowed || gst_caps_is_empty (allowed) || gst_caps_is_any (allowed)) {
-    fcaps = templ_caps;
-    goto done;
-  }
-
-  GST_LOG_OBJECT (encoder, "template caps %" GST_PTR_FORMAT, templ_caps);
-  GST_LOG_OBJECT (encoder, "allowed caps %" GST_PTR_FORMAT, allowed);
-
-  filter_caps = gst_caps_new_empty ();
-
-  for (i = 0; i < gst_caps_get_size (templ_caps); i++) {
-    GQuark q_name =
-        gst_structure_get_name_id (gst_caps_get_structure (templ_caps, i));
-
-    for (j = 0; j < gst_caps_get_size (allowed); j++) {
-      const GstStructure *allowed_s = gst_caps_get_structure (allowed, j);
-      const GValue *val;
-      GstStructure *s;
-
-      s = gst_structure_new_id_empty (q_name);
-      if ((val = gst_structure_get_value (allowed_s, "width")))
-        gst_structure_set_value (s, "width", val);
-      if ((val = gst_structure_get_value (allowed_s, "height")))
-        gst_structure_set_value (s, "height", val);
-      if ((val = gst_structure_get_value (allowed_s, "framerate")))
-        gst_structure_set_value (s, "framerate", val);
-      if ((val = gst_structure_get_value (allowed_s, "pixel-aspect-ratio")))
-        gst_structure_set_value (s, "pixel-aspect-ratio", val);
-
-      filter_caps = gst_caps_merge_structure (filter_caps, s);
-    }
-  }
-
-  fcaps = gst_caps_intersect (filter_caps, templ_caps);
-  gst_caps_unref (filter_caps);
-  gst_caps_unref (templ_caps);
-
-  if (filter) {
-    GST_LOG_OBJECT (encoder, "intersecting with %" GST_PTR_FORMAT, filter);
-    filter_caps = gst_caps_intersect (fcaps, filter);
-    gst_caps_unref (fcaps);
-    fcaps = filter_caps;
-  }
-
-done:
-  gst_caps_replace (&allowed, NULL);
-
-  GST_LOG_OBJECT (encoder, "proxy caps %" GST_PTR_FORMAT, fcaps);
-
-  return fcaps;
+  /* delegate to the shared helper in gstvideoutilsprivate.h so encoder
+   * and decoder use a single proxy-caps implementation */
+  return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (encoder),
+      GST_VIDEO_ENCODER_SINK_PAD (encoder),
+      GST_VIDEO_ENCODER_SRC_PAD (encoder), caps, filter);
}
static GstCaps *
break;
}
+ if (encoder->priv->time_adjustment != GST_CLOCK_TIME_NONE) {
+ segment.start += encoder->priv->time_adjustment;
+ if (GST_CLOCK_TIME_IS_VALID (segment.stop)) {
+ segment.stop += encoder->priv->time_adjustment;
+ }
+ }
+
encoder->output_segment = segment;
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ gst_event_unref (event);
+ event = gst_event_new_segment (&encoder->output_segment);
+
break;
}
default:
return gst_pad_push_event (encoder->srcpad, event);
}
+/* Build a single TAG event by merging the upstream stream tags with the
+ * subclass-set tags, using the merge mode the subclass requested via
+ * gst_video_encoder_merge_tags().  Returns NULL when the merged list is
+ * NULL or empty, i.e. there is nothing to push downstream. */
+static GstEvent *
+gst_video_encoder_create_merged_tags_event (GstVideoEncoder * enc)
+{
+  GstTagList *merged_tags;
+
+  GST_LOG_OBJECT (enc, "upstream : %" GST_PTR_FORMAT, enc->priv->upstream_tags);
+  GST_LOG_OBJECT (enc, "encoder : %" GST_PTR_FORMAT, enc->priv->tags);
+  GST_LOG_OBJECT (enc, "mode : %d", enc->priv->tags_merge_mode);
+
+  merged_tags =
+      gst_tag_list_merge (enc->priv->upstream_tags, enc->priv->tags,
+      enc->priv->tags_merge_mode);
+
+  GST_DEBUG_OBJECT (enc, "merged : %" GST_PTR_FORMAT, merged_tags);
+
+  if (merged_tags == NULL)
+    return NULL;
+
+  if (gst_tag_list_is_empty (merged_tags)) {
+    /* event would be pointless; drop our ref on the empty list */
+    gst_tag_list_unref (merged_tags);
+    return NULL;
+  }
+
+  return gst_event_new_tag (merged_tags);
+}
+
static inline void
gst_video_encoder_check_and_push_tags (GstVideoEncoder * encoder)
{
-  if (encoder->priv->tags && encoder->priv->tags_changed) {
-    gst_video_encoder_push_event (encoder,
-        gst_event_new_tag (gst_tag_list_ref (encoder->priv->tags)));
+  /* If either the upstream or the subclass tag list changed, push one
+   * merged TAG event downstream; a NULL/empty merge result produces no
+   * event but still clears the pending flag. */
+  if (encoder->priv->tags_changed) {
+    GstEvent *tags_event;
+
+    tags_event = gst_video_encoder_create_merged_tags_event (encoder);
+
+    if (tags_event != NULL)
+      gst_video_encoder_push_event (encoder, tags_event);
+
    encoder->priv->tags_changed = FALSE;
  }
}
}
break;
}
+ case GST_EVENT_STREAM_START:
+ {
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ /* Flush upstream tags after a STREAM_START */
+ GST_DEBUG_OBJECT (encoder, "STREAM_START, clearing upstream tags");
+ if (encoder->priv->upstream_tags) {
+ gst_tag_list_unref (encoder->priv->upstream_tags);
+ encoder->priv->upstream_tags = NULL;
+ encoder->priv->tags_changed = TRUE;
+ }
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
case GST_EVENT_TAG:
{
GstTagList *tags;
gst_event_parse_tag (event, &tags);
if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
- tags = gst_tag_list_copy (tags);
-
- /* FIXME: make generic based on GST_TAG_FLAG_ENCODED */
- gst_tag_list_remove_tag (tags, GST_TAG_CODEC);
- gst_tag_list_remove_tag (tags, GST_TAG_AUDIO_CODEC);
- gst_tag_list_remove_tag (tags, GST_TAG_VIDEO_CODEC);
- gst_tag_list_remove_tag (tags, GST_TAG_SUBTITLE_CODEC);
- gst_tag_list_remove_tag (tags, GST_TAG_CONTAINER_FORMAT);
- gst_tag_list_remove_tag (tags, GST_TAG_BITRATE);
- gst_tag_list_remove_tag (tags, GST_TAG_NOMINAL_BITRATE);
- gst_tag_list_remove_tag (tags, GST_TAG_MAXIMUM_BITRATE);
- gst_tag_list_remove_tag (tags, GST_TAG_MINIMUM_BITRATE);
- gst_tag_list_remove_tag (tags, GST_TAG_ENCODER);
- gst_tag_list_remove_tag (tags, GST_TAG_ENCODER_VERSION);
-
- gst_video_encoder_merge_tags (encoder, tags, GST_TAG_MERGE_REPLACE);
- gst_tag_list_unref (tags);
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ if (encoder->priv->upstream_tags != tags) {
+ tags = gst_tag_list_copy (tags);
+
+ /* FIXME: make generic based on GST_TAG_FLAG_ENCODED */
+ gst_tag_list_remove_tag (tags, GST_TAG_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_AUDIO_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_VIDEO_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_SUBTITLE_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_CONTAINER_FORMAT);
+ gst_tag_list_remove_tag (tags, GST_TAG_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_NOMINAL_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_MAXIMUM_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_MINIMUM_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_ENCODER);
+ gst_tag_list_remove_tag (tags, GST_TAG_ENCODER_VERSION);
+
+ if (encoder->priv->upstream_tags)
+ gst_tag_list_unref (encoder->priv->upstream_tags);
+ encoder->priv->upstream_tags = tags;
+ GST_INFO_OBJECT (encoder, "upstream tags: %" GST_PTR_FORMAT, tags);
+ }
gst_event_unref (event);
- event = NULL;
- ret = TRUE;
+ event = gst_video_encoder_create_merged_tags_event (encoder);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ if (!event)
+ ret = TRUE;
}
break;
}
GST_OBJECT_LOCK (enc);
min_latency += priv->min_latency;
- if (enc->priv->max_latency == GST_CLOCK_TIME_NONE) {
+ if (max_latency == GST_CLOCK_TIME_NONE
+ || enc->priv->max_latency == GST_CLOCK_TIME_NONE)
max_latency = GST_CLOCK_TIME_NONE;
- } else if (max_latency != GST_CLOCK_TIME_NONE) {
+ else
max_latency += enc->priv->max_latency;
- }
GST_OBJECT_UNLOCK (enc);
gst_query_set_latency (query, live, min_latency, max_latency);
else
duration = GST_CLOCK_TIME_NONE;
+ if (priv->min_pts != GST_CLOCK_TIME_NONE
+ && priv->time_adjustment == GST_CLOCK_TIME_NONE) {
+ if (cstart < priv->min_pts) {
+ priv->time_adjustment = priv->min_pts - cstart;
+ }
+ }
+
+ if (priv->time_adjustment != GST_CLOCK_TIME_NONE) {
+ cstart += priv->time_adjustment;
+ }
+
/* incoming DTS is not really relevant and does not make sense anyway,
* so pass along _NONE and maybe come up with something better later on */
frame = gst_video_encoder_new_frame (encoder, buf, cstart,
ret = klass->handle_frame (encoder, frame);
+ if (ret == GST_VIDEO_ENCODER_FLOW_DROPPED || ret == GST_FLOW_ERROR) {
+ GST_INFO_OBJECT (encoder, "Dropping frame %p", frame);
+ gst_video_encoder_release_frame (encoder, frame);
+ }
+
done:
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
goto open_failed;
break;
case GST_STATE_CHANGE_READY_TO_PAUSED:
- /* Initialize device/library if needed */
- if (encoder_class->start && !encoder_class->start (encoder))
- goto start_failed;
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
gst_video_encoder_reset (encoder, TRUE);
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ /* Initialize device/library if needed */
+ if (encoder_class->start && !encoder_class->start (encoder))
+ goto start_failed;
break;
default:
break;
ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
switch (transition) {
- case GST_STATE_CHANGE_PAUSED_TO_READY:
+ case GST_STATE_CHANGE_PAUSED_TO_READY:{
+ gboolean stopped = TRUE;
+
+ if (encoder_class->stop)
+ stopped = encoder_class->stop (encoder);
+
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
gst_video_encoder_reset (encoder, TRUE);
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
- if (encoder_class->stop && !encoder_class->stop (encoder))
+
+ if (!stopped)
goto stop_failed;
break;
+ }
case GST_STATE_CHANGE_READY_TO_NULL:
/* close device/library if needed */
if (encoder_class->close && !encoder_class->close (encoder))
if (state->codec_data)
gst_caps_set_simple (state->caps, "codec_data", GST_TYPE_BUFFER,
state->codec_data, NULL);
+
+ if (GST_VIDEO_INFO_MULTIVIEW_MODE (info) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
+ const gchar *caps_mview_mode =
+ gst_video_multiview_mode_to_caps_string (GST_VIDEO_INFO_MULTIVIEW_MODE
+ (info));
+
+ gst_caps_set_simple (state->caps, "multiview-mode", G_TYPE_STRING,
+ caps_mview_mode, "multiview-flags", GST_TYPE_VIDEO_MULTIVIEW_FLAGSET,
+ GST_VIDEO_INFO_MULTIVIEW_FLAGS (info), GST_FLAG_SET_MASK_EXACT, NULL);
+ }
encoder->priv->output_state_changed = FALSE;
}
gst_video_codec_frame_unref (frame);
}
+/* Default GstVideoEncoderClass::transform_meta implementation: a meta is
+ * copied from the input buffer to the output buffer only when its API has
+ * no tags at all, or when its single tag is the generic "video" tag
+ * (GST_META_TAG_VIDEO_STR).  Any other tagged meta is assumed to be
+ * invalidated by encoding and is not copied. */
+static gboolean
+gst_video_encoder_transform_meta_default (GstVideoEncoder *
+    encoder, GstVideoCodecFrame * frame, GstMeta * meta)
+{
+  const GstMetaInfo *info = meta->info;
+  const gchar *const *tags;
+
+  tags = gst_meta_api_type_get_tags (info->api);
+
+  if (!tags || (g_strv_length ((gchar **) tags) == 1
+          && gst_meta_api_type_has_tag (info->api,
+              g_quark_from_string (GST_META_TAG_VIDEO_STR))))
+    return TRUE;
+
+  return FALSE;
+}
+
+/* Closure handed to foreach_metadata() while copying metas from the
+ * input buffer onto a frame's output buffer. */
+typedef struct
+{
+  GstVideoEncoder *encoder;     /* encoder whose transform_meta vfunc is consulted */
+  GstVideoCodecFrame *frame;    /* frame providing the destination output_buffer */
+} CopyMetaData;
+
+/* gst_buffer_foreach_meta() callback: asks the subclass' transform_meta
+ * vfunc whether each meta on the input buffer should be transferred to
+ * the frame's output buffer, and copies it when the answer is TRUE.
+ * Memory-specific metas are never offered to the subclass.  Always
+ * returns TRUE so iteration continues over all metas. */
+static gboolean
+foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
+{
+  CopyMetaData *data = user_data;
+  GstVideoEncoder *encoder = data->encoder;
+  GstVideoEncoderClass *klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+  GstVideoCodecFrame *frame = data->frame;
+  const GstMetaInfo *info = (*meta)->info;
+  gboolean do_copy = FALSE;
+
+  if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
+    /* never call the transform_meta with memory specific metadata */
+    GST_DEBUG_OBJECT (encoder, "not copying memory specific metadata %s",
+        g_type_name (info->api));
+    do_copy = FALSE;
+  } else if (klass->transform_meta) {
+    do_copy = klass->transform_meta (encoder, frame, *meta);
+    GST_DEBUG_OBJECT (encoder, "transformed metadata %s: copy: %d",
+        g_type_name (info->api), do_copy);
+  }
+
+  /* we only copy metadata when the subclass implemented a transform_meta
+   * function and when it returns %TRUE */
+  if (do_copy) {
+    GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
+    GST_DEBUG_OBJECT (encoder, "copy metadata %s", g_type_name (info->api));
+    /* simply copy then */
+    info->transform_func (frame->output_buffer, *meta, inbuf,
+        _gst_meta_transform_copy, &copy_data);
+  }
+  return TRUE;
+}
+
/**
* gst_video_encoder_finish_frame:
* @encoder: a #GstVideoEncoder
if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
priv->distance_from_sync = 0;
GST_BUFFER_FLAG_UNSET (frame->output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
- /* For keyframes, DTS = PTS, if decoder doesn't decide otherwise */
+ /* For keyframes, DTS = PTS, if encoder doesn't decide otherwise */
if (!GST_CLOCK_TIME_IS_VALID (frame->dts)) {
frame->dts = frame->pts;
}
if (encoder_class->pre_push)
ret = encoder_class->pre_push (encoder, frame);
+ if (encoder_class->transform_meta) {
+ if (G_LIKELY (frame->input_buffer)) {
+ CopyMetaData data;
+
+ data.encoder = encoder;
+ data.frame = frame;
+ gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
+ } else {
+ GST_WARNING_OBJECT (encoder,
+ "Can't copy metadata because input frame disappeared");
+ }
+ }
+
/* Get an additional ref to the buffer, which is going to be pushed
* downstream, the original ref is owned by the frame */
buffer = gst_buffer_ref (frame->output_buffer);
/**
* gst_video_encoder_merge_tags:
* @encoder: a #GstVideoEncoder
- * @tags: a #GstTagList to merge
- * @mode: the #GstTagMergeMode to use
+ * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
+ * previously-set tags
+ * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
*
- * Adds tags to so-called pending tags, which will be processed
- * before pushing out data downstream.
+ * Sets the video encoder tags and how they should be merged with any
+ * upstream stream tags. This will override any tags previously-set
+ * with gst_video_encoder_merge_tags().
*
* Note that this is provided for convenience, and the subclass is
* not required to use this and can still do tag handling on its own.
gst_video_encoder_merge_tags (GstVideoEncoder * encoder,
const GstTagList * tags, GstTagMergeMode mode)
{
- GstTagList *otags;
-
g_return_if_fail (GST_IS_VIDEO_ENCODER (encoder));
g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
+ g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
- if (tags)
- GST_DEBUG_OBJECT (encoder, "merging tags %" GST_PTR_FORMAT, tags);
- otags = encoder->priv->tags;
- encoder->priv->tags = gst_tag_list_merge (encoder->priv->tags, tags, mode);
- if (otags)
- gst_tag_list_unref (otags);
- encoder->priv->tags_changed = TRUE;
+ if (encoder->priv->tags != tags) {
+ if (encoder->priv->tags) {
+ gst_tag_list_unref (encoder->priv->tags);
+ encoder->priv->tags = NULL;
+ encoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
+ }
+ if (tags) {
+ encoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
+ encoder->priv->tags_merge_mode = mode;
+ }
+
+ GST_DEBUG_OBJECT (encoder, "setting encoder tags to %" GST_PTR_FORMAT,
+ tags);
+ encoder->priv->tags_changed = TRUE;
+ }
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
}
if (params)
*params = encoder->priv->params;
}
+
+/**
+ * gst_video_encoder_set_min_pts:
+ * @encoder: a #GstVideoEncoder
+ * @min_pts: minimal PTS that will be passed to handle_frame
+ *
+ * Request minimal value for PTS passed to handle_frame.
+ *
+ * For streams with reordered frames this can be used to ensure that there
+ * is enough time to accommodate first DTS, which may be less than first PTS
+ *
+ * Since: 1.6
+ */
+void
+gst_video_encoder_set_min_pts (GstVideoEncoder * encoder, GstClockTime min_pts)
+{
+  g_return_if_fail (GST_IS_VIDEO_ENCODER (encoder));
+  encoder->priv->min_pts = min_pts;
+  /* force the adjustment to be recomputed from the next incoming buffer */
+  encoder->priv->time_adjustment = GST_CLOCK_TIME_NONE;
+}