# include "config.h"
#endif
-#define GST_USE_UNSTABLE_API
#include "gstaudioencoder.h"
#include <gst/base/gstadapter.h>
#include <gst/audio/audio.h>
+#include <gst/pbutils/descriptions.h>
#include <stdlib.h>
#include <string.h>
GstAudioInfo info;
/* output */
- gint frame_samples;
+ gint frame_samples_min, frame_samples_max;
gint frame_max;
gint lookahead;
/* MT-protected (with LOCK) */
gboolean perfect_ts;
gboolean hard_resync;
gboolean granule;
+
+ /* pending tags */
+ GstTagList *tags;
+ /* pending serialized sink events, will be sent from finish_frame() */
+ GList *pending_events;
};
gboolean active);
static gboolean gst_audio_encoder_sink_event (GstPad * pad, GstEvent * event);
-static gboolean gst_audio_encoder_sink_setcaps (GstPad * pad, GstCaps * caps);
+static gboolean gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc,
+ GstCaps * caps);
static GstFlowReturn gst_audio_encoder_chain (GstPad * pad, GstBuffer * buffer);
static gboolean gst_audio_encoder_src_query (GstPad * pad, GstQuery * query);
static gboolean gst_audio_encoder_sink_query (GstPad * pad, GstQuery * query);
static const GstQueryType *gst_audio_encoder_get_query_types (GstPad * pad);
-static GstCaps *gst_audio_encoder_sink_getcaps (GstPad * pad);
-
+static GstCaps *gst_audio_encoder_sink_getcaps (GstPad * pad, GstCaps * filter);
static void
gst_audio_encoder_class_init (GstAudioEncoderClass * klass)
enc->sinkpad = gst_pad_new_from_template (pad_template, "sink");
gst_pad_set_event_function (enc->sinkpad,
GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_event));
- gst_pad_set_setcaps_function (enc->sinkpad,
- GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_setcaps));
gst_pad_set_getcaps_function (enc->sinkpad,
GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_getcaps));
gst_pad_set_query_function (enc->sinkpad,
enc->priv->adapter = gst_adapter_new ();
+ g_static_rec_mutex_init (&enc->stream_lock);
+
/* property default */
enc->priv->granule = DEFAULT_GRANULE;
enc->priv->perfect_ts = DEFAULT_PERFECT_TS;
static void
gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full)
{
- GST_OBJECT_LOCK (enc);
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
+
+ GST_LOG_OBJECT (enc, "reset full %d", full);
if (full) {
enc->priv->active = FALSE;
enc->priv->samples_in = 0;
enc->priv->bytes_out = 0;
- gst_audio_info_clear (&enc->priv->ctx.info);
+ gst_audio_info_init (&enc->priv->ctx.info);
memset (&enc->priv->ctx, 0, sizeof (enc->priv->ctx));
+
+ if (enc->priv->tags)
+ gst_tag_list_free (enc->priv->tags);
+ enc->priv->tags = NULL;
+
+ g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL);
+ g_list_free (enc->priv->pending_events);
+ enc->priv->pending_events = NULL;
}
gst_segment_init (&enc->segment, GST_FORMAT_TIME);
enc->priv->samples = 0;
enc->priv->discont = FALSE;
- GST_OBJECT_UNLOCK (enc);
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
}
static void
g_object_unref (enc->priv->adapter);
+ g_static_rec_mutex_free (&enc->stream_lock);
+
G_OBJECT_CLASS (parent_class)->finalize (object);
}
ctx = &enc->priv->ctx;
/* subclass should know what it is producing by now */
- g_return_val_if_fail (GST_PAD_CAPS (enc->srcpad) != NULL, GST_FLOW_ERROR);
+ g_return_val_if_fail (gst_pad_has_current_caps (enc->srcpad), GST_FLOW_ERROR);
/* subclass should not hand us no data */
- g_return_val_if_fail (buf == NULL || GST_BUFFER_SIZE (buf) > 0,
+ g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
GST_FLOW_ERROR);
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
+
+ if (G_UNLIKELY (enc->priv->tags)) {
+ GstTagList *tags;
+
+ /* add codec info to pending tags */
+ tags = enc->priv->tags;
+ /* no more pending */
+ enc->priv->tags = NULL;
+ gst_pb_utils_add_codec_description_to_tag_list (tags, GST_TAG_CODEC,
+ GST_PAD_CAPS (enc->srcpad));
+ gst_pb_utils_add_codec_description_to_tag_list (tags, GST_TAG_AUDIO_CODEC,
+ GST_PAD_CAPS (enc->srcpad));
+ GST_DEBUG_OBJECT (enc, "sending tags %" GST_PTR_FORMAT, tags);
+ gst_element_found_tags_for_pad (GST_ELEMENT (enc), enc->srcpad, tags);
+ }
+
GST_LOG_OBJECT (enc, "accepting %d bytes encoded data as %d samples",
- buf ? GST_BUFFER_SIZE (buf) : -1, samples);
+ buf ? gst_buffer_get_size (buf) : -1, samples);
/* mark subclass still alive and providing */
priv->got_data = TRUE;
+ if (priv->pending_events) {
+ GList *pending_events, *l;
+
+ pending_events = priv->pending_events;
+ priv->pending_events = NULL;
+
+ GST_DEBUG_OBJECT (enc, "Pushing pending events");
+ for (l = priv->pending_events; l; l = l->next)
+ gst_pad_push_event (enc->srcpad, l->data);
+ g_list_free (pending_events);
+ }
+
/* remove corresponding samples from input */
if (samples < 0)
samples = (enc->priv->offset / ctx->info.bpf);
/* collect output */
if (G_LIKELY (buf)) {
- GST_LOG_OBJECT (enc, "taking %d bytes for output", GST_BUFFER_SIZE (buf));
- buf = gst_buffer_make_metadata_writable (buf);
+ gsize size;
+
+ size = gst_buffer_get_size (buf);
+
+ GST_LOG_OBJECT (enc, "taking %d bytes for output", size);
+ buf = gst_buffer_make_writable (buf);
/* decorate */
- gst_buffer_set_caps (buf, GST_PAD_CAPS (enc->srcpad));
if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
/* FIXME ? lookahead could lead to weird ts and duration ?
* (particularly if not in perfect mode) */
ctx->info.rate);
} else {
GST_BUFFER_OFFSET (buf) = priv->bytes_out;
- GST_BUFFER_OFFSET_END (buf) = priv->bytes_out + GST_BUFFER_SIZE (buf);
+ GST_BUFFER_OFFSET_END (buf) = priv->bytes_out + size;
}
}
- priv->bytes_out += GST_BUFFER_SIZE (buf);
+ priv->bytes_out += size;
if (G_UNLIKELY (priv->discont)) {
GST_LOG_OBJECT (enc, "marking discont");
}
GST_LOG_OBJECT (enc, "pushing buffer of size %d with ts %" GST_TIME_FORMAT
- ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf),
+ ", duration %" GST_TIME_FORMAT, size,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
}
exit:
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
+
return ret;
/* ERRORS */
samples, priv->offset / ctx->info.bpf), (NULL));
if (buf)
gst_buffer_unref (buf);
- return GST_FLOW_ERROR;
+ ret = GST_FLOW_ERROR;
+ goto exit;
}
}
g_assert (priv->offset <= av);
av -= priv->offset;
- need = ctx->frame_samples > 0 ? ctx->frame_samples * ctx->info.bpf : av;
- GST_LOG_OBJECT (enc, "available: %d, needed: %d, force: %d",
- av, need, force);
+ need =
+ ctx->frame_samples_min >
+ 0 ? ctx->frame_samples_min * ctx->info.bpf : av;
+ GST_LOG_OBJECT (enc, "available: %d, needed: %d, force: %d", av, need,
+ force);
if ((need > av) || !av) {
if (G_UNLIKELY (force)) {
priv->force = FALSE;
}
- /* if we have some extra metadata,
- * provide for integer multiple of frames to allow for better granularity
- * of processing */
- if (ctx->frame_samples > 0 && need) {
- if (ctx->frame_max > 1)
- need = need * MIN ((av / need), ctx->frame_max);
- else if (ctx->frame_max == 0)
- need = need * (av / need);
+ if (ctx->frame_samples_max > 0)
+ need = MIN (av, ctx->frame_samples_max * ctx->info.bpf);
+
+ if (ctx->frame_samples_min == ctx->frame_samples_max) {
+ /* if we have some extra metadata,
+ * provide for integer multiple of frames to allow for better granularity
+ * of processing */
+ if (ctx->frame_samples_min > 0 && need) {
+ if (ctx->frame_max > 1)
+ need = need * MIN ((av / need), ctx->frame_max);
+ else if (ctx->frame_max == 0)
+ need = need * (av / need);
+ }
}
if (need) {
- buf = gst_buffer_new ();
- GST_BUFFER_DATA (buf) = (guint8 *)
- gst_adapter_peek (priv->adapter, priv->offset + need) + priv->offset;
- GST_BUFFER_SIZE (buf) = need;
+ const guint8 *data;
+
+ data = gst_adapter_map (priv->adapter, priv->offset + need);
+ buf =
+ gst_buffer_new_wrapped_full ((gpointer) data, NULL, priv->offset,
+ need);
}
GST_LOG_OBJECT (enc, "providing subclass with %d bytes at offset %d",
priv->got_data = FALSE;
ret = klass->handle_frame (enc, buf);
- if (G_LIKELY (buf))
+ if (G_LIKELY (buf)) {
gst_buffer_unref (buf);
+ gst_adapter_unmap (priv->adapter, 0);
+ }
/* no data to feed, no leftover provided, then bail out */
if (G_UNLIKELY (!buf && !priv->got_data)) {
GstAudioEncoderContext *ctx;
GstFlowReturn ret = GST_FLOW_OK;
gboolean discont;
+ gsize size;
enc = GST_AUDIO_ENCODER (GST_OBJECT_PARENT (pad));
priv = enc->priv;
ctx = &enc->priv->ctx;
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
+
/* should know what is coming by now */
if (!ctx->info.bpf)
goto not_negotiated;
+ size = gst_buffer_get_size (buffer);
+
GST_LOG_OBJECT (enc,
"received buffer of size %d with ts %" GST_TIME_FORMAT
- ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buffer),
+ ", duration %" GST_TIME_FORMAT, size,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
/* input shoud be whole number of sample frames */
- if (GST_BUFFER_SIZE (buffer) % ctx->info.bpf)
+ if (size % ctx->info.bpf)
goto wrong_buffer;
#ifndef GST_DISABLE_GST_DEBUG
GstClockTimeDiff diff;
/* verify buffer duration */
- duration = gst_util_uint64_scale (GST_BUFFER_SIZE (buffer), GST_SECOND,
+ duration = gst_util_uint64_scale (size, GST_SECOND,
ctx->info.rate * ctx->info.bpf);
diff = GST_CLOCK_DIFF (duration, GST_BUFFER_DURATION (buffer));
if (GST_BUFFER_DURATION (buffer) != GST_CLOCK_TIME_NONE &&
goto done;
}
+ size = gst_buffer_get_size (buffer);
+
GST_LOG_OBJECT (enc,
"buffer after segment clipping has size %d with ts %" GST_TIME_FORMAT
- ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buffer),
+ ", duration %" GST_TIME_FORMAT, size,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
diff_bytes =
GST_CLOCK_TIME_TO_FRAMES (-diff, ctx->info.rate) * ctx->info.bpf;
- if (diff_bytes >= GST_BUFFER_SIZE (buffer)) {
+ if (diff_bytes >= size) {
gst_buffer_unref (buffer);
goto done;
}
- buffer = gst_buffer_make_metadata_writable (buffer);
- GST_BUFFER_DATA (buffer) += diff_bytes;
- GST_BUFFER_SIZE (buffer) -= diff_bytes;
+ buffer = gst_buffer_make_writable (buffer);
+ gst_buffer_resize (buffer, diff_bytes, size - diff_bytes);
GST_BUFFER_TIMESTAMP (buffer) += diff;
/* care even less about duration after this */
done:
GST_LOG_OBJECT (enc, "chain leaving");
+
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
+
return ret;
/* ERRORS */
GST_ELEMENT_ERROR (enc, CORE, NEGOTIATION, (NULL),
("encoder not initialized"));
gst_buffer_unref (buffer);
- return GST_FLOW_NOT_NEGOTIATED;
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ goto done;
}
wrong_buffer:
{
GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
- ("buffer size %d not a multiple of %d", GST_BUFFER_SIZE (buffer),
+ ("buffer size %d not a multiple of %d", gst_buffer_get_size (buffer),
ctx->info.bpf));
gst_buffer_unref (buffer);
- return GST_FLOW_ERROR;
+ ret = GST_FLOW_ERROR;
+ goto done;
}
}
{
if (from == to)
return TRUE;
+ if (from->finfo == NULL || to->finfo == NULL)
+ return FALSE;
if (GST_AUDIO_INFO_FORMAT (from) != GST_AUDIO_INFO_FORMAT (to))
return FALSE;
if (GST_AUDIO_INFO_RATE (from) != GST_AUDIO_INFO_RATE (to))
}
static gboolean
-gst_audio_encoder_sink_setcaps (GstPad * pad, GstCaps * caps)
+gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc, GstCaps * caps)
{
- GstAudioEncoder *enc;
GstAudioEncoderClass *klass;
GstAudioEncoderContext *ctx;
- GstAudioInfo *state, *old_state;
+ GstAudioInfo state;
gboolean res = TRUE, changed = FALSE;
guint old_rate;
- enc = GST_AUDIO_ENCODER (GST_PAD_PARENT (pad));
klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
/* subclass must do something here ... */
g_return_val_if_fail (klass->set_format != NULL, FALSE);
ctx = &enc->priv->ctx;
- state = &ctx->info;
+
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
GST_DEBUG_OBJECT (enc, "caps: %" GST_PTR_FORMAT, caps);
goto refuse_caps;
/* adjust ts tracking to new sample rate */
- old_rate = GST_AUDIO_INFO_RATE (state);
+ old_rate = GST_AUDIO_INFO_RATE (&ctx->info);
if (GST_CLOCK_TIME_IS_VALID (enc->priv->base_ts) && old_rate) {
enc->priv->base_ts +=
GST_FRAMES_TO_CLOCK_TIME (enc->priv->samples, old_rate);
enc->priv->samples = 0;
}
- old_state = gst_audio_info_copy (state);
- if (!gst_audio_info_from_caps (state, caps))
+ if (!gst_audio_info_from_caps (&state, caps))
goto refuse_caps;
- changed = audio_info_is_equal (state, old_state);
- gst_audio_info_free (old_state);
+ changed = !audio_info_is_equal (&state, &ctx->info);
if (changed) {
GstClockTime old_min_latency;
gst_audio_encoder_drain (enc);
/* context defaults */
- enc->priv->ctx.frame_samples = 0;
+ enc->priv->ctx.frame_samples_min = 0;
+ enc->priv->ctx.frame_samples_max = 0;
enc->priv->ctx.frame_max = 0;
enc->priv->ctx.lookahead = 0;
GST_OBJECT_UNLOCK (enc);
if (klass->set_format)
- res = klass->set_format (enc, state);
+ res = klass->set_format (enc, &state);
/* notify if new latency */
GST_OBJECT_LOCK (enc);
GST_DEBUG_OBJECT (enc, "new audio format identical to configured format");
}
+exit:
+
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
+
return res;
/* ERRORS */
refuse_caps:
{
GST_WARNING_OBJECT (enc, "rejected caps %" GST_PTR_FORMAT, caps);
- return res;
+ goto exit;
}
}
gst_structure_set_value (s, "rate", val);
if ((val = gst_structure_get_value (allowed_s, "channels")))
gst_structure_set_value (s, "channels", val);
+ /* following might also make sense for some encoded formats,
+ * e.g. wavpack */
+ if ((val = gst_structure_get_value (allowed_s, "width")))
+ gst_structure_set_value (s, "width", val);
+ if ((val = gst_structure_get_value (allowed_s, "depth")))
+ gst_structure_set_value (s, "depth", val);
+ if ((val = gst_structure_get_value (allowed_s, "endianness")))
+ gst_structure_set_value (s, "endianness", val);
+ if ((val = gst_structure_get_value (allowed_s, "signed")))
+ gst_structure_set_value (s, "signed", val);
+ if ((val = gst_structure_get_value (allowed_s, "channel-positions")))
+ gst_structure_set_value (s, "channel-positions", val);
gst_caps_merge_structure (filter_caps, s);
}
}
static GstCaps *
-gst_audio_encoder_sink_getcaps (GstPad * pad)
+gst_audio_encoder_sink_getcaps (GstPad * pad, GstCaps * filter)
{
GstAudioEncoder *enc;
GstAudioEncoderClass *klass;
g_assert (pad == enc->sinkpad);
if (klass->getcaps)
- caps = klass->getcaps (enc);
+ caps = klass->getcaps (enc, filter);
else
caps = gst_audio_encoder_proxy_getcaps (enc, NULL);
gst_object_unref (enc);
klass = GST_AUDIO_ENCODER_GET_CLASS (enc);
switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- GstFormat format;
- gdouble rate, arate;
- gint64 start, stop, time;
- gboolean update;
-
- gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format,
- &start, &stop, &time);
-
- if (format == GST_FORMAT_TIME) {
- GST_DEBUG_OBJECT (enc, "received TIME NEW_SEGMENT %" GST_TIME_FORMAT
- " -- %" GST_TIME_FORMAT ", time %" GST_TIME_FORMAT
- ", rate %g, applied_rate %g",
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (time),
- rate, arate);
+ GstSegment seg;
+
+ gst_event_copy_segment (event, &seg);
+
+ if (seg.format == GST_FORMAT_TIME) {
+ GST_DEBUG_OBJECT (enc, "received TIME SEGMENT %" GST_PTR_FORMAT, &seg);
} else {
- GST_DEBUG_OBJECT (enc, "received NEW_SEGMENT %" G_GINT64_FORMAT
- " -- %" G_GINT64_FORMAT ", time %" G_GINT64_FORMAT
- ", rate %g, applied_rate %g", start, stop, time, rate, arate);
+ GST_DEBUG_OBJECT (enc, "received SEGMENT %" GST_PTR_FORMAT, &seg);
GST_DEBUG_OBJECT (enc, "unsupported format; ignoring");
break;
}
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
/* finish current segment */
gst_audio_encoder_drain (enc);
/* reset partially for new segment */
gst_audio_encoder_reset (enc, FALSE);
/* and follow along with segment */
- gst_segment_set_newsegment_full (&enc->segment, update, rate, arate,
- format, start, stop, time);
+ enc->segment = seg;
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
break;
}
break;
case GST_EVENT_FLUSH_STOP:
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
/* discard any pending stuff */
/* TODO route through drain ?? */
if (!enc->priv->drained && klass->flush)
klass->flush (enc);
/* and get (re)set for the sequel */
gst_audio_encoder_reset (enc, FALSE);
+
+ g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL);
+ g_list_free (enc->priv->pending_events);
+ enc->priv->pending_events = NULL;
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
+
break;
case GST_EVENT_EOS:
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
gst_audio_encoder_drain (enc);
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
+ break;
+
+ case GST_EVENT_TAG:
+ {
+ GstTagList *tags;
+
+ gst_event_parse_tag (event, &tags);
+ tags = gst_tag_list_copy (tags);
+ gst_event_unref (event);
+ gst_tag_list_remove_tag (tags, GST_TAG_CODEC);
+ gst_tag_list_remove_tag (tags, GST_TAG_AUDIO_CODEC);
+ event = gst_event_new_tag (tags);
+
+ GST_OBJECT_LOCK (enc);
+ enc->priv->pending_events =
+ g_list_append (enc->priv->pending_events, event);
+ GST_OBJECT_UNLOCK (enc);
+ handled = TRUE;
+ break;
+ }
+
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+ gst_audio_encoder_sink_setcaps (enc, caps);
+ gst_event_unref (event);
+ handled = TRUE;
break;
+ }
default:
break;
if (!handled)
handled = gst_audio_encoder_sink_eventfunc (enc, event);
- if (!handled)
- ret = gst_pad_event_default (pad, event);
+ if (!handled) {
+ /* Forward non-serialized events and EOS/FLUSH_STOP immediately.
+ * For EOS this is required because no buffer or serialized event
+ * will come after EOS and nothing could trigger another
+ * _finish_frame() call.
+ *
+ * For FLUSH_STOP this is required because it is expected
+ * to be forwarded immediately and no buffers are queued anyway.
+ */
+ if (!GST_EVENT_IS_SERIALIZED (event)
+ || GST_EVENT_TYPE (event) == GST_EVENT_EOS
+ || GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) {
+ ret = gst_pad_event_default (pad, event);
+ } else {
+ GST_AUDIO_ENCODER_STREAM_LOCK (enc);
+ enc->priv->pending_events =
+ g_list_append (enc->priv->pending_events, event);
+ GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
+ ret = TRUE;
+ }
+ }
GST_DEBUG_OBJECT (enc, "event handled");
gst_query_parse_position (query, &req_fmt, NULL);
fmt = GST_FORMAT_TIME;
- if (!(res = gst_pad_query_position (peerpad, &fmt, &pos)))
+ if (!(res = gst_pad_query_position (peerpad, fmt, &pos)))
break;
- if ((res = gst_pad_query_convert (peerpad, fmt, pos, &req_fmt, &val))) {
+ if ((res = gst_pad_query_convert (peerpad, fmt, pos, req_fmt, &val))) {
gst_query_set_position (query, req_fmt, val);
}
break;
gst_query_parse_duration (query, &req_fmt, NULL);
fmt = GST_FORMAT_TIME;
- if (!(res = gst_pad_query_duration (peerpad, &fmt, &dur)))
+ if (!(res = gst_pad_query_duration (peerpad, fmt, &dur)))
break;
- if ((res = gst_pad_query_convert (peerpad, fmt, dur, &req_fmt, &val))) {
+ if ((res = gst_pad_query_convert (peerpad, fmt, dur, req_fmt, &val))) {
gst_query_set_duration (query, req_fmt, val);
}
break;
GST_DEBUG_OBJECT (enc, "activate %d", active);
if (active) {
+
+ if (enc->priv->tags)
+ gst_tag_list_free (enc->priv->tags);
+ enc->priv->tags = gst_tag_list_new ();
+
if (!enc->priv->active && klass->start)
result = klass->start (enc);
} else {
}
/**
- * gst_audio_encoder_set_frame_samples:
+ * gst_audio_encoder_set_frame_samples_min:
* @enc: a #GstAudioEncoder
* @num: number of samples per frame
*
* Sets number of samples (per channel) subclass needs to be handed,
- * or will be handed all available if 0.
+ * at least or will be handed all available if 0.
+ *
+ * If an exact number of samples is required, gst_audio_encoder_set_frame_samples_max()
+ * must be called with the same number.
*
* Since: 0.10.36
*/
void
-gst_audio_encoder_set_frame_samples (GstAudioEncoder * enc, gint num)
+gst_audio_encoder_set_frame_samples_min (GstAudioEncoder * enc, gint num)
{
g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
- enc->priv->ctx.frame_samples = num;
+ enc->priv->ctx.frame_samples_min = num;
}
/**
- * gst_audio_encoder_get_frame_samples:
+ * gst_audio_encoder_get_frame_samples_min:
* @enc: a #GstAudioEncoder
*
- * Returns: currently requested samples per frame
+ * Returns: currently minimum requested samples per frame
*
* Since: 0.10.36
*/
gint
-gst_audio_encoder_get_frame_samples (GstAudioEncoder * enc)
+gst_audio_encoder_get_frame_samples_min (GstAudioEncoder * enc)
{
g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
- return enc->priv->ctx.frame_samples;
+ return enc->priv->ctx.frame_samples_min;
+}
+
+/**
+ * gst_audio_encoder_set_frame_samples_max:
+ * @enc: a #GstAudioEncoder
+ * @num: number of samples per frame
+ *
+ * Sets number of samples (per channel) subclass needs to be handed,
+ * at most or will be handed all available if 0.
+ *
+ * If an exact number of samples is required, gst_audio_encoder_set_frame_samples_min()
+ * must be called with the same number.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_audio_encoder_set_frame_samples_max (GstAudioEncoder * enc, gint num)
+{
+ g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
+
+ enc->priv->ctx.frame_samples_max = num;
+}
+
+/**
+ * gst_audio_encoder_get_frame_samples_min:
+ * @enc: a #GstAudioEncoder
+ *
+ * Returns: currently maximum requested samples per frame
+ *
+ * Since: 0.10.36
+ */
+gint
+gst_audio_encoder_get_frame_samples_max (GstAudioEncoder * enc)
+{
+ g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
+
+ return enc->priv->ctx.frame_samples_max;
}
/**
* @enc: a #GstAudioEncoder
* @num: number of frames
*
- * Sets max number of frames accepted at once (assumed minimally 1)
+ * Sets max number of frames accepted at once (assumed minimally 1).
+ * Requires @frame_samples_min and @frame_samples_max to be equal.
*
* Since: 0.10.36
*/
return result;
}
+
+/**
+ * gst_audio_encoder_merge_tags:
+ * @enc: a #GstAudioEncoder
+ * @tags: a #GstTagList to merge
+ * @mode: the #GstTagMergeMode to use
+ *
+ * Adds tags to so-called pending tags, which will be processed
+ * before pushing out data downstream.
+ *
+ * Note that this is provided for convenience, and the subclass is
+ * not required to use this and can still do tag handling on its own,
+ * although it should be aware that baseclass already takes care
+ * of the usual CODEC/AUDIO_CODEC tags.
+ *
+ * MT safe.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_audio_encoder_merge_tags (GstAudioEncoder * enc,
+ const GstTagList * tags, GstTagMergeMode mode)
+{
+ GstTagList *otags;
+
+ g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
+ g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
+
+ GST_OBJECT_LOCK (enc);
+ if (tags)
+ GST_DEBUG_OBJECT (enc, "merging tags %" GST_PTR_FORMAT, tags);
+ otags = enc->priv->tags;
+ enc->priv->tags = gst_tag_list_merge (enc->priv->tags, tags, mode);
+ if (otags)
+ gst_tag_list_free (otags);
+ GST_OBJECT_UNLOCK (enc);
+}