*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
*/
/* TODO
*
- * * Change _set_output_format() to steal the reference of the provided caps
* * Calculate actual latency based on input/output timestamp/frame_number
* and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
*/
-/* FIXME 0.11: suppress warnings for deprecated API such as GStaticRecMutex
- * with newer GLib versions (>= 2.31.0) */
-#define GLIB_DISABLE_DEPRECATION_WARNINGS
-
+#include <gst/video/video.h>
#include "gstvideoencoder.h"
#include "gstvideoutils.h"
+#include "gstvideoutilsprivate.h"
#include <gst/video/gstvideometa.h>
+#include <gst/video/gstvideopool.h>
#include <string.h>
/* FIXME : (and introduce a context ?) */
gboolean drained;
- gboolean at_eos;
gint64 min_latency;
gint64 max_latency;
GList *force_key_unit; /* List of pending forced keyunits */
- guint64 system_frame_number;
+ guint32 system_frame_number;
GList *frames; /* Protected with OBJECT_LOCK */
GstVideoCodecState *input_state;
GstAllocator *allocator;
GstAllocationParams params;
+
+ /* upstream stream tags (global tags are passed through as-is) */
+ GstTagList *upstream_tags;
+
+ /* subclass tags */
+ GstTagList *tags;
+ GstTagMergeMode tags_merge_mode;
+
+ gboolean tags_changed;
+
+ GstClockTime min_pts;
+ /* adjustment needed on pts, dts, segment start and stop to accomodate
+ * min_pts */
+ GstClockTime time_adjustment;
};
typedef struct _ForcedKeyUnitEvent ForcedKeyUnitEvent;
gboolean pending; /* TRUE if this was requested already */
gboolean all_headers;
guint count;
+ guint32 frame_id;
};
static void
GstVideoEncoderClass * klass);
static void gst_video_encoder_finalize (GObject * object);
-
+static void
+gst_video_encoder_release_frame (GstVideoEncoder * enc,
+ GstVideoCodecFrame * frame);
static gboolean gst_video_encoder_setcaps (GstVideoEncoder * enc,
GstCaps * caps);
static GstCaps *gst_video_encoder_sink_getcaps (GstVideoEncoder * encoder,
encoder, GstQuery * query);
static gboolean gst_video_encoder_propose_allocation_default (GstVideoEncoder *
encoder, GstQuery * query);
+static gboolean gst_video_encoder_negotiate_default (GstVideoEncoder * encoder);
+static gboolean gst_video_encoder_negotiate_unlocked (GstVideoEncoder *
+ encoder);
+
+static gboolean gst_video_encoder_sink_query_default (GstVideoEncoder * encoder,
+ GstQuery * query);
+static gboolean gst_video_encoder_src_query_default (GstVideoEncoder * encoder,
+ GstQuery * query);
+
+static gboolean gst_video_encoder_transform_meta_default (GstVideoEncoder *
+ encoder, GstVideoCodecFrame * frame, GstMeta * meta);
/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
* method to get to the padtemplates */
klass->src_event = gst_video_encoder_src_event_default;
klass->propose_allocation = gst_video_encoder_propose_allocation_default;
klass->decide_allocation = gst_video_encoder_decide_allocation_default;
+ klass->negotiate = gst_video_encoder_negotiate_default;
+ klass->sink_query = gst_video_encoder_sink_query_default;
+ klass->src_query = gst_video_encoder_src_query_default;
+ klass->transform_meta = gst_video_encoder_transform_meta_default;
}
-static void
-gst_video_encoder_reset (GstVideoEncoder * encoder)
+/* Flush a list of pending serialized events.
+ * Sticky events other than EOS and SEGMENT are re-stored on @pad so
+ * they survive the flush; every event in the list is then unreffed
+ * and the list itself freed.  Always returns NULL so callers can
+ * directly reassign their event-list pointer. */
+static GList *
+_flush_events (GstPad * pad, GList * events)
+{
+ GList *tmp;
+
+ for (tmp = events; tmp; tmp = tmp->next) {
+ if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
+ GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
+ GST_EVENT_IS_STICKY (tmp->data)) {
+ gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
+ }
+ gst_event_unref (tmp->data);
+ }
+ g_list_free (events);
+
+ return NULL;
+}
+
+/* Reset encoder state, taking the stream lock.
+ * hard=TRUE performs a full teardown: segments re-initialised, input/
+ * output codec state, upstream/subclass tags, header buffers, the
+ * cached allocator and all pending serialized events are dropped.
+ * hard=FALSE is a flush: pending frames' sticky events are re-stored
+ * on the source pad via _flush_events() before the frames are freed.
+ * Returns TRUE (ret is never cleared in this function). */
+static gboolean
+gst_video_encoder_reset (GstVideoEncoder * encoder, gboolean hard)
{
GstVideoEncoderPrivate *priv = encoder->priv;
- GList *g;
+ gboolean ret = TRUE;
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
priv->force_key_unit = NULL;
priv->drained = TRUE;
- priv->min_latency = 0;
- priv->max_latency = 0;
- g_list_foreach (priv->headers, (GFunc) gst_event_unref, NULL);
- g_list_free (priv->headers);
- priv->headers = NULL;
- priv->new_headers = FALSE;
+ priv->bytes = 0;
+ priv->time = 0;
- g_list_foreach (priv->current_frame_events, (GFunc) gst_event_unref, NULL);
- g_list_free (priv->current_frame_events);
- priv->current_frame_events = NULL;
+ priv->time_adjustment = GST_CLOCK_TIME_NONE;
+
+ if (hard) {
+ gst_segment_init (&encoder->input_segment, GST_FORMAT_TIME);
+ gst_segment_init (&encoder->output_segment, GST_FORMAT_TIME);
+
+ if (priv->input_state)
+ gst_video_codec_state_unref (priv->input_state);
+ priv->input_state = NULL;
+ if (priv->output_state)
+ gst_video_codec_state_unref (priv->output_state);
+ priv->output_state = NULL;
+
+ if (priv->upstream_tags) {
+ gst_tag_list_unref (priv->upstream_tags);
+ priv->upstream_tags = NULL;
+ }
+ if (priv->tags)
+ gst_tag_list_unref (priv->tags);
+ priv->tags = NULL;
+ priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
+ priv->tags_changed = FALSE;
+
+ g_list_foreach (priv->headers, (GFunc) gst_event_unref, NULL);
+ g_list_free (priv->headers);
+ priv->headers = NULL;
+ priv->new_headers = FALSE;
+
+ if (priv->allocator) {
+ gst_object_unref (priv->allocator);
+ priv->allocator = NULL;
+ }
+
+ g_list_foreach (priv->current_frame_events, (GFunc) gst_event_unref, NULL);
+ g_list_free (priv->current_frame_events);
+ priv->current_frame_events = NULL;
+
+ } else {
+ GList *l;
- for (g = priv->frames; g; g = g->next) {
- gst_video_codec_frame_unref ((GstVideoCodecFrame *) g->data);
+ /* soft reset: preserve sticky events of pending frames on srcpad */
+ for (l = priv->frames; l; l = l->next) {
+ GstVideoCodecFrame *frame = l->data;
+
+ frame->events = _flush_events (encoder->srcpad, frame->events);
+ }
+ priv->current_frame_events = _flush_events (encoder->srcpad,
+ encoder->priv->current_frame_events);
}
+
+ g_list_foreach (priv->frames, (GFunc) gst_video_codec_frame_unref, NULL);
g_list_free (priv->frames);
priv->frames = NULL;
- priv->bytes = 0;
- priv->time = 0;
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
- if (priv->input_state)
- gst_video_codec_state_unref (priv->input_state);
- priv->input_state = NULL;
- if (priv->output_state)
- gst_video_codec_state_unref (priv->output_state);
- priv->output_state = NULL;
+ return ret;
+}
+
+/* Always call reset() in one way or another after this */
+/* Invoke the subclass ->flush vfunc (if any) under the stream lock.
+ * Returns the vfunc's result, or TRUE when no vfunc is installed. */
+static gboolean
+gst_video_encoder_flush (GstVideoEncoder * encoder)
+{
+ GstVideoEncoderClass *klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+ gboolean ret = TRUE;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ if (klass->flush)
+ ret = klass->flush (encoder);
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ return ret;
}
static void
g_rec_mutex_init (&encoder->stream_lock);
- priv->at_eos = FALSE;
priv->headers = NULL;
priv->new_headers = FALSE;
- gst_video_encoder_reset (encoder);
+ priv->min_latency = 0;
+ priv->max_latency = 0;
+ priv->min_pts = GST_CLOCK_TIME_NONE;
+ priv->time_adjustment = GST_CLOCK_TIME_NONE;
+
+ gst_video_encoder_reset (encoder, TRUE);
}
static gboolean
GST_VIDEO_ENCODER_STREAM_UNLOCK (video_encoder);
}
-static gboolean
-gst_video_encoder_drain (GstVideoEncoder * enc)
-{
- GstVideoEncoderPrivate *priv;
- GstVideoEncoderClass *enc_class;
- gboolean ret = TRUE;
-
- enc_class = GST_VIDEO_ENCODER_GET_CLASS (enc);
- priv = enc->priv;
-
- GST_DEBUG_OBJECT (enc, "draining");
-
- if (priv->drained) {
- GST_DEBUG_OBJECT (enc, "already drained");
- return TRUE;
- }
-
- if (enc_class->reset) {
- GST_DEBUG_OBJECT (enc, "requesting subclass to finish");
- ret = enc_class->reset (enc, TRUE);
- }
- /* everything should be away now */
- if (priv->frames) {
- /* not fatal/impossible though if subclass/enc eats stuff */
- g_list_foreach (priv->frames, (GFunc) gst_video_codec_frame_unref, NULL);
- g_list_free (priv->frames);
- priv->frames = NULL;
- }
-
- return ret;
-}
-
static GstVideoCodecState *
_new_output_state (GstCaps * caps, GstVideoCodecState * reference)
{
tgt->par_d = ref->par_d;
tgt->fps_n = ref->fps_n;
tgt->fps_d = ref->fps_d;
+
+ GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
+ GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
}
return state;
GstVideoEncoderClass *encoder_class;
GstVideoCodecState *state;
gboolean ret;
- gboolean samecaps = FALSE;
encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
GST_DEBUG_OBJECT (encoder, "setcaps %" GST_PTR_FORMAT, caps);
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+
+ if (encoder->priv->input_state) {
+ GST_DEBUG_OBJECT (encoder,
+ "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
+ encoder->priv->input_state->caps, caps);
+ if (gst_caps_is_equal (encoder->priv->input_state->caps, caps))
+ goto caps_not_changed;
+ }
+
state = _new_input_state (caps);
if (G_UNLIKELY (!state))
goto parse_fail;
- GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ if (encoder->priv->input_state
+ && gst_video_info_is_equal (&state->info,
+ &encoder->priv->input_state->info)) {
+ gst_video_codec_state_unref (state);
+ goto caps_not_changed;
+ }
+
+ if (encoder_class->reset) {
+ GST_FIXME_OBJECT (encoder, "GstVideoEncoder::reset() is deprecated");
+ encoder_class->reset (encoder, TRUE);
+ }
- if (encoder->priv->input_state)
- samecaps =
- gst_video_info_is_equal (&state->info,
- &encoder->priv->input_state->info);
-
- if (!samecaps) {
- /* arrange draining pending frames */
- gst_video_encoder_drain (encoder);
-
- /* and subclass should be ready to configure format at any time around */
- ret = encoder_class->set_format (encoder, state);
- if (ret) {
- if (encoder->priv->input_state)
- gst_video_codec_state_unref (encoder->priv->input_state);
- encoder->priv->input_state = state;
- } else
- gst_video_codec_state_unref (state);
+ /* and subclass should be ready to configure format at any time around */
+ ret = encoder_class->set_format (encoder, state);
+ if (ret) {
+ if (encoder->priv->input_state)
+ gst_video_codec_state_unref (encoder->priv->input_state);
+ encoder->priv->input_state = state;
} else {
- /* no need to stir things up */
- GST_DEBUG_OBJECT (encoder,
- "new video format identical to configured format");
gst_video_codec_state_unref (state);
- ret = TRUE;
}
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
return ret;
+caps_not_changed:
+ {
+ GST_DEBUG_OBJECT (encoder, "Caps did not change - ignore");
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ return TRUE;
+ }
+
+ /* ERRORS */
parse_fail:
{
GST_WARNING_OBJECT (encoder, "Failed to parse caps");
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
return FALSE;
}
}
/**
* gst_video_encoder_proxy_getcaps:
* @enc: a #GstVideoEncoder
- * @caps: initial caps
+ * @caps: (allow-none): initial caps
+ * @filter: (allow-none): filter caps
*
* Returns caps that express @caps (or sink template caps if @caps == NULL)
* restricted to resolution/format/... combinations supported by downstream
* elements (e.g. muxers).
*
- * Returns: a #GstCaps owned by caller
+ * Returns: (transfer full): a #GstCaps owned by caller
*/
GstCaps *
gst_video_encoder_proxy_getcaps (GstVideoEncoder * encoder, GstCaps * caps,
GstCaps * filter)
{
- GstCaps *templ_caps;
- GstCaps *allowed;
- GstCaps *fcaps, *filter_caps;
- gint i, j;
-
- /* Allow downstream to specify width/height/framerate/PAR constraints
- * and forward them upstream for video converters to handle
- */
- templ_caps =
- caps ? gst_caps_ref (caps) :
- gst_pad_get_pad_template_caps (encoder->sinkpad);
- allowed = gst_pad_get_allowed_caps (encoder->srcpad);
-
- if (!allowed || gst_caps_is_empty (allowed) || gst_caps_is_any (allowed)) {
- fcaps = templ_caps;
- goto done;
- }
-
- GST_LOG_OBJECT (encoder, "template caps %" GST_PTR_FORMAT, templ_caps);
- GST_LOG_OBJECT (encoder, "allowed caps %" GST_PTR_FORMAT, allowed);
-
- filter_caps = gst_caps_new_empty ();
-
- for (i = 0; i < gst_caps_get_size (templ_caps); i++) {
- GQuark q_name =
- gst_structure_get_name_id (gst_caps_get_structure (templ_caps, i));
-
- for (j = 0; j < gst_caps_get_size (allowed); j++) {
- const GstStructure *allowed_s = gst_caps_get_structure (allowed, j);
- const GValue *val;
- GstStructure *s;
-
- s = gst_structure_new_id_empty (q_name);
- if ((val = gst_structure_get_value (allowed_s, "width")))
- gst_structure_set_value (s, "width", val);
- if ((val = gst_structure_get_value (allowed_s, "height")))
- gst_structure_set_value (s, "height", val);
- if ((val = gst_structure_get_value (allowed_s, "framerate")))
- gst_structure_set_value (s, "framerate", val);
- if ((val = gst_structure_get_value (allowed_s, "pixel-aspect-ratio")))
- gst_structure_set_value (s, "pixel-aspect-ratio", val);
-
- filter_caps = gst_caps_merge_structure (filter_caps, s);
- }
- }
-
- fcaps = gst_caps_intersect (filter_caps, templ_caps);
- gst_caps_unref (filter_caps);
- gst_caps_unref (templ_caps);
-
- if (filter) {
- GST_LOG_OBJECT (encoder, "intersecting with %" GST_PTR_FORMAT, filter);
- filter_caps = gst_caps_intersect (fcaps, filter);
- gst_caps_unref (fcaps);
- fcaps = filter_caps;
- }
-
-done:
- gst_caps_replace (&allowed, NULL);
-
- GST_LOG_OBJECT (encoder, "proxy caps %" GST_PTR_FORMAT, fcaps);
-
- return fcaps;
+ return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (encoder),
+ GST_VIDEO_ENCODER_SINK_PAD (encoder),
+ GST_VIDEO_ENCODER_SRC_PAD (encoder), caps, filter);
}
static GstCaps *
gst_video_encoder_propose_allocation_default (GstVideoEncoder * encoder,
GstQuery * query)
{
+ GstCaps *caps;
+ GstVideoInfo info;
+ GstBufferPool *pool;
+ guint size;
+
+ /* caps are required to size a default pool proposal */
+ gst_query_parse_allocation (query, &caps, NULL);
+
+ if (caps == NULL)
+ return FALSE;
+
+ if (!gst_video_info_from_caps (&info, caps))
+ return FALSE;
+
+ size = GST_VIDEO_INFO_SIZE (&info);
+
+ /* only propose a pool if nobody upstream of the query added one */
+ if (gst_query_get_n_allocation_pools (query) == 0) {
+ GstStructure *structure;
+ GstAllocator *allocator = NULL;
+ /* default params: 16-byte alignment (align = 15) */
+ GstAllocationParams params = { 0, 15, 0, 0 };
+
+ if (gst_query_get_n_allocation_params (query) > 0)
+ gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
+ else
+ gst_query_add_allocation_param (query, allocator, &params);
+
+ pool = gst_video_buffer_pool_new ();
+
+ structure = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_set_params (structure, caps, size, 0, 0);
+ gst_buffer_pool_config_set_allocator (structure, allocator, &params);
+
+ if (allocator)
+ gst_object_unref (allocator);
+
+ if (!gst_buffer_pool_set_config (pool, structure))
+ goto config_failed;
+
+ gst_query_add_allocation_pool (query, pool, size, 0, 0);
+ gst_object_unref (pool);
+ gst_query_add_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);
+ }
+
return TRUE;
+
+ /* ERRORS */
+config_failed:
+ {
+ GST_ERROR_OBJECT (encoder, "failed to set config");
+ gst_object_unref (pool);
+ return FALSE;
+ }
}
static gboolean
-gst_video_encoder_sink_query (GstPad * pad, GstObject * parent,
+gst_video_encoder_sink_query_default (GstVideoEncoder * encoder,
GstQuery * query)
{
- GstVideoEncoder *encoder;
+ GstPad *pad = GST_VIDEO_ENCODER_SINK_PAD (encoder);
gboolean res = FALSE;
- encoder = GST_VIDEO_ENCODER (parent);
-
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_CAPS:
{
break;
}
default:
- res = gst_pad_query_default (pad, parent, query);
+ res = gst_pad_query_default (pad, GST_OBJECT (encoder), query);
break;
}
return res;
}
+/* Sink-pad query function: logs the query and dispatches to the
+ * subclass-overridable ->sink_query vfunc.  Returns FALSE when no
+ * vfunc is installed. */
+static gboolean
+gst_video_encoder_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query)
+{
+ GstVideoEncoder *encoder;
+ GstVideoEncoderClass *encoder_class;
+ gboolean ret = FALSE;
+
+ encoder = GST_VIDEO_ENCODER (parent);
+ encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ GST_DEBUG_OBJECT (encoder, "received query %d, %s", GST_QUERY_TYPE (query),
+ GST_QUERY_TYPE_NAME (query));
+
+ if (encoder_class->sink_query)
+ ret = encoder_class->sink_query (encoder, query);
+
+ return ret;
+}
+
static void
gst_video_encoder_finalize (GObject * object)
{
GST_DEBUG_OBJECT (object, "finalize");
encoder = GST_VIDEO_ENCODER (object);
- if (encoder->priv->headers) {
- g_list_foreach (encoder->priv->headers, (GFunc) gst_buffer_unref, NULL);
- g_list_free (encoder->priv->headers);
- }
g_rec_mutex_clear (&encoder->stream_lock);
if (encoder->priv->allocator) {
break;
}
+ if (encoder->priv->time_adjustment != GST_CLOCK_TIME_NONE) {
+ segment.start += encoder->priv->time_adjustment;
+ if (GST_CLOCK_TIME_IS_VALID (segment.stop)) {
+ segment.stop += encoder->priv->time_adjustment;
+ }
+ }
+
encoder->output_segment = segment;
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ gst_event_unref (event);
+ event = gst_event_new_segment (&encoder->output_segment);
+
break;
}
default:
return gst_pad_push_event (encoder->srcpad, event);
}
+/* Merge upstream stream tags with subclass-set tags using the
+ * configured merge mode and wrap the result in a TAG event.
+ * Returns NULL when the merge produces no tags or an empty list
+ * (the empty list is unreffed); otherwise returns a new event
+ * owning the merged list. */
+static GstEvent *
+gst_video_encoder_create_merged_tags_event (GstVideoEncoder * enc)
+{
+ GstTagList *merged_tags;
+
+ GST_LOG_OBJECT (enc, "upstream : %" GST_PTR_FORMAT, enc->priv->upstream_tags);
+ GST_LOG_OBJECT (enc, "encoder : %" GST_PTR_FORMAT, enc->priv->tags);
+ GST_LOG_OBJECT (enc, "mode : %d", enc->priv->tags_merge_mode);
+
+ merged_tags =
+ gst_tag_list_merge (enc->priv->upstream_tags, enc->priv->tags,
+ enc->priv->tags_merge_mode);
+
+ GST_DEBUG_OBJECT (enc, "merged : %" GST_PTR_FORMAT, merged_tags);
+
+ if (merged_tags == NULL)
+ return NULL;
+
+ if (gst_tag_list_is_empty (merged_tags)) {
+ gst_tag_list_unref (merged_tags);
+ return NULL;
+ }
+
+ return gst_event_new_tag (merged_tags);
+}
+
+/* If tags changed since the last push, build a merged TAG event and
+ * push it downstream, then clear the tags_changed flag.  No-op when
+ * nothing changed or the merge yields no event. */
+static inline void
+gst_video_encoder_check_and_push_tags (GstVideoEncoder * encoder)
+{
+ if (encoder->priv->tags_changed) {
+ GstEvent *tags_event;
+
+ tags_event = gst_video_encoder_create_merged_tags_event (encoder);
+
+ if (tags_event != NULL)
+ gst_video_encoder_push_event (encoder, tags_event);
+
+ encoder->priv->tags_changed = FALSE;
+ }
+}
+
static gboolean
gst_video_encoder_sink_event_default (GstVideoEncoder * encoder,
GstEvent * event)
gst_event_parse_caps (event, &caps);
ret = gst_video_encoder_setcaps (encoder, caps);
+
gst_event_unref (event);
event = NULL;
break;
GstFlowReturn flow_ret;
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
- encoder->priv->at_eos = TRUE;
if (encoder_class->finish) {
flow_ret = encoder_class->finish (encoder);
flow_ret = GST_FLOW_OK;
}
+ if (encoder->priv->current_frame_events) {
+ GList *l;
+
+ for (l = g_list_last (encoder->priv->current_frame_events); l;
+ l = g_list_previous (l)) {
+ GstEvent *event = GST_EVENT (l->data);
+
+ gst_video_encoder_push_event (encoder, event);
+ }
+ }
+ g_list_free (encoder->priv->current_frame_events);
+ encoder->priv->current_frame_events = NULL;
+
+ gst_video_encoder_check_and_push_tags (encoder);
+
ret = (flow_ret == GST_FLOW_OK);
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
break;
break;
}
- encoder->priv->at_eos = FALSE;
-
encoder->input_segment = segment;
ret = TRUE;
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
}
break;
}
+ case GST_EVENT_STREAM_START:
+ {
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ /* Flush upstream tags after a STREAM_START */
+ GST_DEBUG_OBJECT (encoder, "STREAM_START, clearing upstream tags");
+ if (encoder->priv->upstream_tags) {
+ gst_tag_list_unref (encoder->priv->upstream_tags);
+ encoder->priv->upstream_tags = NULL;
+ encoder->priv->tags_changed = TRUE;
+ }
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
+ case GST_EVENT_TAG:
+ {
+ GstTagList *tags;
+
+ gst_event_parse_tag (event, &tags);
+
+ if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ if (encoder->priv->upstream_tags != tags) {
+ tags = gst_tag_list_copy (tags);
+
+ /* FIXME: make generic based on GST_TAG_FLAG_ENCODED */
+ gst_tag_list_remove_tag (tags, GST_TAG_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_AUDIO_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_VIDEO_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_SUBTITLE_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_CONTAINER_FORMAT);
+ gst_tag_list_remove_tag (tags, GST_TAG_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_NOMINAL_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_MAXIMUM_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_MINIMUM_BITRATE);
+ gst_tag_list_remove_tag (tags, GST_TAG_ENCODER);
+ gst_tag_list_remove_tag (tags, GST_TAG_ENCODER_VERSION);
+
+ if (encoder->priv->upstream_tags)
+ gst_tag_list_unref (encoder->priv->upstream_tags);
+ encoder->priv->upstream_tags = tags;
+ GST_INFO_OBJECT (encoder, "upstream tags: %" GST_PTR_FORMAT, tags);
+ }
+ gst_event_unref (event);
+ event = gst_video_encoder_create_merged_tags_event (encoder);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ if (!event)
+ ret = TRUE;
+ }
+ break;
+ }
+ case GST_EVENT_FLUSH_STOP:{
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ gst_video_encoder_flush (encoder);
+ gst_segment_init (&encoder->input_segment, GST_FORMAT_TIME);
+ gst_segment_init (&encoder->output_segment, GST_FORMAT_TIME);
+ gst_video_encoder_reset (encoder, FALSE);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
default:
break;
}
}
static gboolean
-gst_video_encoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
+gst_video_encoder_src_query_default (GstVideoEncoder * enc, GstQuery * query)
{
+ GstPad *pad = GST_VIDEO_ENCODER_SRC_PAD (enc);
GstVideoEncoderPrivate *priv;
- GstVideoEncoder *enc;
gboolean res;
- enc = GST_VIDEO_ENCODER (parent);
priv = enc->priv;
GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query);
GST_OBJECT_LOCK (enc);
min_latency += priv->min_latency;
- if (enc->priv->max_latency == GST_CLOCK_TIME_NONE) {
+ if (max_latency == GST_CLOCK_TIME_NONE
+ || enc->priv->max_latency == GST_CLOCK_TIME_NONE)
max_latency = GST_CLOCK_TIME_NONE;
- } else if (max_latency != GST_CLOCK_TIME_NONE) {
+ else
max_latency += enc->priv->max_latency;
- }
GST_OBJECT_UNLOCK (enc);
gst_query_set_latency (query, live, min_latency, max_latency);
}
break;
default:
- res = gst_pad_query_default (pad, parent, query);
+ res = gst_pad_query_default (pad, GST_OBJECT (enc), query);
}
return res;
return res;
}
+/* Source-pad query function: logs the query and dispatches to the
+ * subclass-overridable ->src_query vfunc.  Returns FALSE when no
+ * vfunc is installed. */
+static gboolean
+gst_video_encoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
+{
+ GstVideoEncoder *encoder;
+ GstVideoEncoderClass *encoder_class;
+ gboolean ret = FALSE;
+
+ encoder = GST_VIDEO_ENCODER (parent);
+ encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ GST_DEBUG_OBJECT (encoder, "received query %d, %s", GST_QUERY_TYPE (query),
+ GST_QUERY_TYPE_NAME (query));
+
+ if (encoder_class->src_query)
+ ret = encoder_class->src_query (encoder, query);
+
+ return ret;
+}
+
static GstVideoCodecFrame *
gst_video_encoder_new_frame (GstVideoEncoder * encoder, GstBuffer * buf,
GstClockTime pts, GstClockTime dts, GstClockTime duration)
frame->pts = pts;
frame->dts = dts;
frame->duration = duration;
+ frame->abidata.ABI.ts = pts;
return frame;
}
GstVideoEncoderPrivate *priv;
GstVideoEncoderClass *klass;
GstVideoCodecFrame *frame;
- GstClockTime pts, dts, duration;
+ GstClockTime pts, duration;
GstFlowReturn ret = GST_FLOW_OK;
guint64 start, stop, cstart, cstop;
g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
+ if (!encoder->priv->input_state)
+ goto not_negotiated;
+
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
pts = GST_BUFFER_PTS (buf);
- dts = GST_BUFFER_DTS (buf);
duration = GST_BUFFER_DURATION (buf);
GST_LOG_OBJECT (encoder,
"received buffer of size %" G_GSIZE_FORMAT " with PTS %" GST_TIME_FORMAT
- ", PTS %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT,
- gst_buffer_get_size (buf), GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
- GST_TIME_ARGS (duration));
-
- if (priv->at_eos) {
- ret = GST_FLOW_EOS;
- goto done;
- }
+ ", DTS %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT,
+ gst_buffer_get_size (buf), GST_TIME_ARGS (pts),
+ GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_TIME_ARGS (duration));
start = pts;
if (GST_CLOCK_TIME_IS_VALID (duration))
stop = GST_CLOCK_TIME_NONE;
/* Drop buffers outside of segment */
- if (!gst_segment_clip (&encoder->output_segment,
+ if (!gst_segment_clip (&encoder->input_segment,
GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
GST_DEBUG_OBJECT (encoder, "clipping to segment dropped frame");
gst_buffer_unref (buf);
goto done;
}
- frame =
- gst_video_encoder_new_frame (encoder, buf, cstart, dts, cstop - cstart);
+ if (GST_CLOCK_TIME_IS_VALID (cstop))
+ duration = cstop - cstart;
+ else
+ duration = GST_CLOCK_TIME_NONE;
+
+ if (priv->min_pts != GST_CLOCK_TIME_NONE
+ && priv->time_adjustment == GST_CLOCK_TIME_NONE) {
+ if (cstart < priv->min_pts) {
+ priv->time_adjustment = priv->min_pts - cstart;
+ }
+ }
+
+ if (priv->time_adjustment != GST_CLOCK_TIME_NONE) {
+ cstart += priv->time_adjustment;
+ }
+
+ /* incoming DTS is not really relevant and does not make sense anyway,
+ * so pass along _NONE and maybe come up with something better later on */
+ frame = gst_video_encoder_new_frame (encoder, buf, cstart,
+ GST_CLOCK_TIME_NONE, duration);
GST_OBJECT_LOCK (encoder);
if (priv->force_key_unit) {
}
if (fevt) {
+ fevt->frame_id = frame->system_frame_number;
GST_DEBUG_OBJECT (encoder,
"Forcing a key unit at running time %" GST_TIME_FORMAT,
GST_TIME_ARGS (running_time));
ret = klass->handle_frame (encoder, frame);
+ if (ret == GST_VIDEO_ENCODER_FLOW_DROPPED || ret == GST_FLOW_ERROR) {
+ GST_INFO_OBJECT (encoder, "Dropping frame %p", frame);
+ gst_video_encoder_release_frame (encoder, frame);
+ }
+
done:
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
return ret;
+
+ /* ERRORS */
+not_negotiated:
+ {
+ GST_ELEMENT_ERROR (encoder, CORE, NEGOTIATION, (NULL),
+ ("encoder not initialized"));
+ gst_buffer_unref (buf);
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
}
static GstStateChangeReturn
goto open_failed;
break;
case GST_STATE_CHANGE_READY_TO_PAUSED:
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ gst_video_encoder_reset (encoder, TRUE);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
/* Initialize device/library if needed */
if (encoder_class->start && !encoder_class->start (encoder))
goto start_failed;
ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
switch (transition) {
- case GST_STATE_CHANGE_PAUSED_TO_READY:
- gst_video_encoder_reset (encoder);
- if (encoder_class->stop && !encoder_class->stop (encoder))
+ case GST_STATE_CHANGE_PAUSED_TO_READY:{
+ gboolean stopped = TRUE;
+
+ if (encoder_class->stop)
+ stopped = encoder_class->stop (encoder);
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ gst_video_encoder_reset (encoder, TRUE);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ if (!stopped)
goto stop_failed;
break;
+ }
case GST_STATE_CHANGE_READY_TO_NULL:
/* close device/library if needed */
if (encoder_class->close && !encoder_class->close (encoder))
}
static gboolean
-gst_video_encoder_set_src_caps (GstVideoEncoder * encoder)
+gst_video_encoder_negotiate_default (GstVideoEncoder * encoder)
{
  GstVideoEncoderClass *klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
  GstAllocator *allocator;
  GstAllocationParams params;
-  gboolean ret;
+  gboolean ret = TRUE;
  GstVideoCodecState *state = encoder->priv->output_state;
  GstVideoInfo *info = &state->info;
  GstQuery *query = NULL;
+  GstVideoCodecFrame *frame;
+  GstCaps *prevcaps;
  g_return_val_if_fail (state->caps != NULL, FALSE);
  if (state->codec_data)
    gst_caps_set_simple (state->caps, "codec_data", GST_TYPE_BUFFER,
        state->codec_data, NULL);
+
+  /* Reflect multiview configuration in the output caps when set */
+  if (GST_VIDEO_INFO_MULTIVIEW_MODE (info) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
+    const gchar *caps_mview_mode =
+        gst_video_multiview_mode_to_caps_string (GST_VIDEO_INFO_MULTIVIEW_MODE
+        (info));
+
+    gst_caps_set_simple (state->caps, "multiview-mode", G_TYPE_STRING,
+        caps_mview_mode, "multiview-flags", GST_TYPE_VIDEO_MULTIVIEW_FLAGSET,
+        GST_VIDEO_INFO_MULTIVIEW_FLAGS (info), GST_FLAG_SET_MASK_EXACT, NULL);
+  }
  encoder->priv->output_state_changed = FALSE;
  }
-  ret = gst_pad_set_caps (encoder->srcpad, state->caps);
+  /* Push all pending pre-caps events of the oldest frame before
+   * setting caps */
+  frame = encoder->priv->frames ? encoder->priv->frames->data : NULL;
+  if (frame || encoder->priv->current_frame_events) {
+    GList **events, *l;
+
+    if (frame) {
+      events = &frame->events;
+    } else {
+      events = &encoder->priv->current_frame_events;
+    }
+
+    /* events are stored newest-first, so walk from the tail to push
+     * them downstream in their original order */
+    for (l = g_list_last (*events); l;) {
+      GstEvent *event = GST_EVENT (l->data);
+      GList *tmp;
+
+      if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
+        gst_video_encoder_push_event (encoder, event);
+        tmp = l;
+        l = l->prev;
+        *events = g_list_delete_link (*events, tmp);
+      } else {
+        l = l->prev;
+      }
+    }
+  }
+
+  /* Only (re)set caps when they actually changed, avoiding a spurious
+   * downstream CAPS event */
+  prevcaps = gst_pad_get_current_caps (encoder->srcpad);
+  if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps))
+    ret = gst_pad_set_caps (encoder->srcpad, state->caps);
+  else
+    ret = TRUE;
+  if (prevcaps)
+    gst_caps_unref (prevcaps);
+
  if (!ret)
    goto done;
  }
}
+/* Run the subclass negotiate vfunc, if any. Callers hold the stream lock. */
+static gboolean
+gst_video_encoder_negotiate_unlocked (GstVideoEncoder * encoder)
+{
+  GstVideoEncoderClass *klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+  /* No vfunc installed means nothing to do: report success */
+  if (klass->negotiate == NULL)
+    return TRUE;
+
+  return klass->negotiate (encoder);
+}
+
+/**
+ * gst_video_encoder_negotiate:
+ * @encoder: a #GstVideoEncoder
+ *
+ * Negotiate with downstream elements to currently configured #GstVideoCodecState.
+ * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again
+ * if negotiation fails.
+ *
+ * Returns: %TRUE if the negotiation succeeded, else %FALSE.
+ */
+gboolean
+gst_video_encoder_negotiate (GstVideoEncoder * encoder)
+{
+  GstVideoEncoderClass *klass;
+  gboolean ret = TRUE;
+
+  g_return_val_if_fail (GST_IS_VIDEO_ENCODER (encoder), FALSE);
+  g_return_val_if_fail (encoder->priv->output_state, FALSE);
+
+  klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+  /* clears the NEED_RECONFIGURE flag even when no negotiate vfunc runs */
+  gst_pad_check_reconfigure (encoder->srcpad);
+  if (klass->negotiate) {
+    ret = klass->negotiate (encoder);
+    if (!ret)
+      gst_pad_mark_reconfigure (encoder->srcpad);
+  }
+  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+  return ret;
+}
+
/**
 * gst_video_encoder_allocate_output_buffer:
 * @encoder: a #GstVideoEncoder
gst_video_encoder_allocate_output_buffer (GstVideoEncoder * encoder, gsize size)
{
  GstBuffer *buffer;
+  gboolean needs_reconfigure = FALSE;
  g_return_val_if_fail (size > 0, NULL);
  GST_DEBUG ("alloc src buffer");
  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+  needs_reconfigure = gst_pad_check_reconfigure (encoder->srcpad);
  if (G_UNLIKELY (encoder->priv->output_state_changed
-          || (encoder->priv->output_state
-              && gst_pad_check_reconfigure (encoder->srcpad))))
-    gst_video_encoder_set_src_caps (encoder);
+          || (encoder->priv->output_state && needs_reconfigure))) {
+    if (!gst_video_encoder_negotiate_unlocked (encoder)) {
+      GST_DEBUG_OBJECT (encoder, "Failed to negotiate, fallback allocation");
+      gst_pad_mark_reconfigure (encoder->srcpad);
+      goto fallback;
+    }
+  }
  buffer =
      gst_buffer_new_allocate (encoder->priv->allocator, size,
      &encoder->priv->params);
+  if (!buffer) {
+    GST_INFO_OBJECT (encoder, "couldn't allocate output buffer");
+    goto fallback;
+  }
+
+  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+  return buffer;
+
+fallback:
+  /* last resort: plain system-memory allocation with default params */
+  buffer = gst_buffer_new_allocate (NULL, size, NULL);
  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
gst_video_encoder_allocate_output_frame (GstVideoEncoder *
    encoder, GstVideoCodecFrame * frame, gsize size)
{
+  gboolean needs_reconfigure = FALSE;
+
  g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);
-  g_return_val_if_fail (size > 0, GST_FLOW_ERROR);
+  /* NOTE(review): the size > 0 precondition was dropped here — presumably
+   * to allow zero-sized output frames; confirm intent */
+  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+  needs_reconfigure = gst_pad_check_reconfigure (encoder->srcpad);
  if (G_UNLIKELY (encoder->priv->output_state_changed
-          || (encoder->priv->output_state
-              && gst_pad_check_reconfigure (encoder->srcpad))))
-    gst_video_encoder_set_src_caps (encoder);
+          || (encoder->priv->output_state && needs_reconfigure))) {
+    if (!gst_video_encoder_negotiate_unlocked (encoder)) {
+      GST_DEBUG_OBJECT (encoder, "Failed to negotiate, fallback allocation");
+      gst_pad_mark_reconfigure (encoder->srcpad);
+    }
+  }
-  GST_LOG_OBJECT (encoder, "alloc buffer size %d", size);
-  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+  GST_LOG_OBJECT (encoder, "alloc buffer size %" G_GSIZE_FORMAT, size);
  frame->output_buffer =
      gst_buffer_new_allocate (encoder->priv->allocator, size,
  return frame->output_buffer ? GST_FLOW_OK : GST_FLOW_ERROR;
}
+/* Remove @frame from the pending-frames list (dropping that list's ref,
+ * if present) and release the reference this function takes from the
+ * caller. */
+static void
+gst_video_encoder_release_frame (GstVideoEncoder * enc,
+    GstVideoCodecFrame * frame)
+{
+  GList *link = g_list_find (enc->priv->frames, frame);
+
+  /* Drop the list's reference first, if the frame is still tracked */
+  if (link != NULL) {
+    enc->priv->frames = g_list_delete_link (enc->priv->frames, link);
+    gst_video_codec_frame_unref (frame);
+  }
+
+  /* This function owns the caller's reference; drop it as well */
+  gst_video_codec_frame_unref (frame);
+}
+
+/* Default transform_meta implementation: copy metadata that carries no
+ * API tags at all, or whose only tag is the generic "video" tag; anything
+ * more specific is not passed through. */
+static gboolean
+gst_video_encoder_transform_meta_default (GstVideoEncoder *
+    encoder, GstVideoCodecFrame * frame, GstMeta * meta)
+{
+  const GstMetaInfo *info = meta->info;
+  const gchar *const *tags = gst_meta_api_type_get_tags (info->api);
+
+  if (tags == NULL)
+    return TRUE;
+
+  if (g_strv_length ((gchar **) tags) == 1
+      && gst_meta_api_type_has_tag (info->api,
+          g_quark_from_string (GST_META_TAG_VIDEO_STR)))
+    return TRUE;
+
+  return FALSE;
+}
+
+typedef struct
+{
+  GstVideoEncoder *encoder;
+  GstVideoCodecFrame *frame;
+} CopyMetaData;
+
+/* GstBufferForeachMetaFunc: copies metadata from the input buffer onto the
+ * frame's output buffer, gated by the subclass transform_meta vfunc.
+ * Memory-specific metadata is never copied. Always returns TRUE so the
+ * iteration continues over all metas. */
+static gboolean
+foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
+{
+  CopyMetaData *data = user_data;
+  GstVideoEncoder *encoder = data->encoder;
+  GstVideoEncoderClass *klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+  GstVideoCodecFrame *frame = data->frame;
+  const GstMetaInfo *info = (*meta)->info;
+  gboolean do_copy = FALSE;
+
+  if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
+    /* never call the transform_meta with memory specific metadata */
+    GST_DEBUG_OBJECT (encoder, "not copying memory specific metadata %s",
+        g_type_name (info->api));
+    do_copy = FALSE;
+  } else if (klass->transform_meta) {
+    do_copy = klass->transform_meta (encoder, frame, *meta);
+    GST_DEBUG_OBJECT (encoder, "transformed metadata %s: copy: %d",
+        g_type_name (info->api), do_copy);
+  }
+
+  /* we only copy metadata when the subclass implemented a transform_meta
+   * function and when it returns %TRUE */
+  if (do_copy) {
+    GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
+    GST_DEBUG_OBJECT (encoder, "copy metadata %s", g_type_name (info->api));
+    /* simply copy then */
+    /* fix: "&copy_data" had been mangled into the mojibake "©_data"
+     * (HTML-entity corruption of "&copy"), which does not compile */
+    info->transform_func (frame->output_buffer, *meta, inbuf,
+        _gst_meta_transform_copy, &copy_data);
+  }
+  return TRUE;
+}
+
/**
 * gst_video_encoder_finish_frame:
 * @encoder: a #GstVideoEncoder
  GList *l;
  gboolean send_headers = FALSE;
  gboolean discont = (frame->presentation_frame_number == 0);
+  GstBuffer *buffer;
+  gboolean needs_reconfigure = FALSE;
  encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
  GST_LOG_OBJECT (encoder,
      "finish frame fpn %d", frame->presentation_frame_number);
+  GST_LOG_OBJECT (encoder, "frame PTS %" GST_TIME_FORMAT
+      ", DTS %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts),
+      GST_TIME_ARGS (frame->dts));
+
  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
-  if (G_UNLIKELY (priv->output_state_changed))
-    gst_video_encoder_set_src_caps (encoder);
+  /* renegotiate when the output state changed or a reconfigure is pending */
+  needs_reconfigure = gst_pad_check_reconfigure (encoder->srcpad);
+  if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
+              && needs_reconfigure))) {
+    if (!gst_video_encoder_negotiate_unlocked (encoder)) {
+      gst_pad_mark_reconfigure (encoder->srcpad);
+      if (GST_PAD_IS_FLUSHING (encoder->srcpad))
+        ret = GST_FLOW_FLUSHING;
+      else
+        ret = GST_FLOW_NOT_NEGOTIATED;
+      goto done;
+    }
+  }
  if (G_UNLIKELY (priv->output_state == NULL))
    goto no_output_state;
      break;
  }
+  gst_video_encoder_check_and_push_tags (encoder);
+
  /* no buffer data means this frame is skipped/dropped */
  if (!frame->output_buffer) {
    GST_DEBUG_OBJECT (encoder, "skipping frame %" GST_TIME_FORMAT,
      if (!tmp->pending)
        continue;
+      /* Exact match using the frame id */
+      if (frame->system_frame_number == tmp->frame_id) {
+        fevt = tmp;
+        break;
+      }
+
      /* Simple case, keyunit ASAP */
      if (tmp->running_time == GST_CLOCK_TIME_NONE) {
        fevt = tmp;
  if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
    priv->distance_from_sync = 0;
    GST_BUFFER_FLAG_UNSET (frame->output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
-    /* For keyframes, DTS = PTS */
-    frame->dts = frame->pts;
+    /* For keyframes, DTS = PTS, if encoder doesn't decide otherwise */
+    if (!GST_CLOCK_TIME_IS_VALID (frame->dts)) {
+      frame->dts = frame->pts;
+    }
  } else {
    GST_BUFFER_FLAG_SET (frame->output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  }
+  /* DTS is expected monotone ascending,
+   * so a good guess is the lowest unsent PTS (all being OK) */
+  {
+    GstClockTime min_ts = GST_CLOCK_TIME_NONE;
+    GstVideoCodecFrame *oframe = NULL;
+    gboolean seen_none = FALSE;
+
+    /* some maintenance regardless: find the lowest pending PTS, and
+     * remember whether any pending frame has no valid PTS at all */
+    for (l = priv->frames; l; l = l->next) {
+      GstVideoCodecFrame *tmp = l->data;
+
+      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
+        seen_none = TRUE;
+        continue;
+      }
+
+      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
+        min_ts = tmp->abidata.ABI.ts;
+        oframe = tmp;
+      }
+    }
+    /* save a ts if needed */
+    if (oframe && oframe != frame) {
+      oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
+    }
+
+    /* and set if needed: a subclass-provided DTS wins, and an unknown PTS
+     * in the queue (seen_none) makes the guess unreliable */
+    if (!GST_CLOCK_TIME_IS_VALID (frame->dts) && !seen_none) {
+      frame->dts = min_ts;
+      /* fix: log the value actually assigned (min_ts), not frame->pts —
+       * the message claims the "oldest PTS" is being used */
+      GST_DEBUG_OBJECT (encoder,
+          "no valid DTS, using oldest PTS %" GST_TIME_FORMAT,
+          GST_TIME_ARGS (min_ts));
+    }
+  }
+
  frame->distance_from_sync = priv->distance_from_sync;
  priv->distance_from_sync++;
    for (tmp = priv->headers; tmp; tmp = tmp->next) {
      GstBuffer *tmpbuf = GST_BUFFER (tmp->data);
-      gst_buffer_ref (tmpbuf);
      priv->bytes += gst_buffer_get_size (tmpbuf);
      if (G_UNLIKELY (discont)) {
        GST_LOG_OBJECT (encoder, "marking discont");
        discont = FALSE;
      }
-      gst_pad_push (encoder->srcpad, tmpbuf);
+      /* push a ref; the headers list keeps its own reference */
+      gst_pad_push (encoder->srcpad, gst_buffer_ref (tmpbuf));
    }
    priv->new_headers = FALSE;
  }
  if (encoder_class->pre_push)
    ret = encoder_class->pre_push (encoder, frame);
-  /* A reference always needs to be owned by the frame on the buffer.
-   * For that reason, we use a complete sub-buffer (zero-cost) to push
-   * downstream.
-   * The original buffer will be free-ed only when downstream AND the
-   * current implementation are done with the frame. */
+  /* copy eligible input-buffer metadata onto the output buffer */
+  if (encoder_class->transform_meta) {
+    if (G_LIKELY (frame->input_buffer)) {
+      CopyMetaData data;
+
+      data.encoder = encoder;
+      data.frame = frame;
+      gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
+    } else {
+      GST_WARNING_OBJECT (encoder,
+          "Can't copy metadata because input frame disappeared");
+    }
+  }
+
+  /* Get an additional ref to the buffer, which is going to be pushed
+   * downstream, the original ref is owned by the frame */
+  buffer = gst_buffer_ref (frame->output_buffer);
+
+  /* Release frame so the buffer is writable when we push it downstream
+   * if possible, i.e. if the subclass does not hold additional references
+   * to the frame
+   */
+  gst_video_encoder_release_frame (encoder, frame);
+  frame = NULL;
+
  if (ret == GST_FLOW_OK)
-    ret = gst_pad_push (encoder->srcpad, gst_buffer_ref (frame->output_buffer));
+    ret = gst_pad_push (encoder->srcpad, buffer);
done:
  /* handed out */
-
-  /* unref once from the list */
-  l = g_list_find (priv->frames, frame);
-  if (l) {
-    gst_video_codec_frame_unref (frame);
-    priv->frames = g_list_delete_link (priv->frames, l);
-  }
-  /* unref because this function takes ownership */
-  gst_video_codec_frame_unref (frame);
+  /* frame is NULL here on the success path (already released above) */
+  if (frame)
+    gst_video_encoder_release_frame (encoder, frame);
  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
  /* ERRORS */
no_output_state:
  {
+    gst_video_encoder_release_frame (encoder, frame);
    GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
    GST_ERROR_OBJECT (encoder, "Output state was not configured");
    return GST_FLOW_ERROR;
*
* Creates a new #GstVideoCodecState with the specified caps as the output state
* for the encoder.
- * Any previously set output state on @decoder will be replaced by the newly
+ * Any previously set output state on @encoder will be replaced by the newly
* created one.
*
* The specified @caps should not contain any resolution, pixel-aspect-ratio,
return frame;
}
+
+/**
+ * gst_video_encoder_get_frames:
+ * @encoder: a #GstVideoEncoder
+ *
+ * Get all pending unfinished #GstVideoCodecFrame
+ *
+ * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
+ */
+GList *
+gst_video_encoder_get_frames (GstVideoEncoder * encoder)
+{
+  GList *result, *walk;
+
+  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+  /* Shallow-copy the pending list, then take a reference on each frame
+   * so the caller owns both the list and its elements */
+  result = g_list_copy (encoder->priv->frames);
+  for (walk = result; walk != NULL; walk = walk->next)
+    gst_video_codec_frame_ref (walk->data);
+  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+  return result;
+}
+
+/**
+ * gst_video_encoder_merge_tags:
+ * @encoder: a #GstVideoEncoder
+ * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
+ *     previously-set tags
+ * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
+ *
+ * Sets the video encoder tags and how they should be merged with any
+ * upstream stream tags. This will override any tags previously-set
+ * with gst_video_encoder_merge_tags().
+ *
+ * Note that this is provided for convenience, and the subclass is
+ * not required to use this and can still do tag handling on its own.
+ *
+ * MT safe.
+ */
+void
+gst_video_encoder_merge_tags (GstVideoEncoder * encoder,
+    const GstTagList * tags, GstTagMergeMode mode)
+{
+  g_return_if_fail (GST_IS_VIDEO_ENCODER (encoder));
+  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
+  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);
+
+  GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+
+  /* Same tag list as before: nothing to update */
+  if (encoder->priv->tags == tags) {
+    GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+    return;
+  }
+
+  /* Drop any previously-set subclass tags, reverting to the default
+   * merge mode */
+  if (encoder->priv->tags != NULL) {
+    gst_tag_list_unref (encoder->priv->tags);
+    encoder->priv->tags = NULL;
+    encoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
+  }
+
+  /* Store the new tags (if any) together with the requested merge mode */
+  if (tags != NULL) {
+    encoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
+    encoder->priv->tags_merge_mode = mode;
+  }
+
+  GST_DEBUG_OBJECT (encoder, "setting encoder tags to %" GST_PTR_FORMAT,
+      tags);
+  encoder->priv->tags_changed = TRUE;
+
+  GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+}
+
+/**
+ * gst_video_encoder_get_allocator:
+ * @encoder: a #GstVideoEncoder
+ * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
+ * used
+ * @params: (out) (allow-none) (transfer full): the
+ * #GstAllocationParams of @allocator
+ *
+ * Lets #GstVideoEncoder sub-classes know the memory @allocator
+ * used by the base class and its @params.
+ *
+ * Unref the @allocator after use.
+ */
+void
+gst_video_encoder_get_allocator (GstVideoEncoder * encoder,
+    GstAllocator ** allocator, GstAllocationParams * params)
+{
+  g_return_if_fail (GST_IS_VIDEO_ENCODER (encoder));
+
+  if (allocator)
+    *allocator = encoder->priv->allocator ?
+        gst_object_ref (encoder->priv->allocator) : NULL;
+
+  if (params)
+    *params = encoder->priv->params;
+}
+
+/**
+ * gst_video_encoder_set_min_pts:
+ * @encoder: a #GstVideoEncoder
+ * @min_pts: minimal PTS that will be passed to handle_frame
+ *
+ * Request minimal value for PTS passed to handle_frame.
+ *
+ * For streams with reordered frames this can be used to ensure that there
+ * is enough time to accommodate first DTS, which may be less than first PTS
+ *
+ * Since: 1.6
+ */
+void
+gst_video_encoder_set_min_pts (GstVideoEncoder * encoder, GstClockTime min_pts)
+{
+  g_return_if_fail (GST_IS_VIDEO_ENCODER (encoder));
+  encoder->priv->min_pts = min_pts;
+  /* force the pts/dts adjustment to be recomputed on the next frame */
+  encoder->priv->time_adjustment = GST_CLOCK_TIME_NONE;
+}