* The fragmented file features defined (only) in ISO Base Media are used by
* ISMV files making up (a.o.) Smooth Streaming (ismlmux).
*
- * A few properties (#GstQTMux:movie-timescale, #GstQTMux:trak-timescale) allow
- * adjusting some technical parameters, which might be useful in (rare) cases to
- * resolve compatibility issues in some situations.
+ * A few properties (#GstQTMux:movie-timescale, #GstQTMux:trak-timescale,
+ * #GstQTMuxPad:trak-timescale) allow adjusting some technical parameters,
+ * which might be useful in (rare) cases to resolve compatibility issues in
+ * some situations.
*
* Some other properties influence the result more fundamentally.
* A typical mov/mp4 file's metadata (aka moov) is located at the end of the
* #GstQTMux::reserved-duration-remaining property to see how close to full
* the reserved space is becoming.
*
 * Applications that wish to be able to use/edit a file while it is being
 * written to by live content can use the "Robust Prefill Muxing" mode. That
+ * mode is a variant of the "Robust Muxing" mode in that it will pre-allocate a
+ * completely valid header from the start for all tracks (i.e. it appears as
+ * though the file is "reserved-max-duration" long with all samples
+ * present). This mode can be enabled by setting the
+ * #GstQTMux::reserved-moov-update-period and #GstQTMux::reserved-prefill
+ * properties. Note that this mode is only possible with input streams that have
+ * a fixed sample size (such as raw audio and Prores Video) and that don't
+ * have reordered samples.
+ *
* <refsect2>
* <title>Example pipelines</title>
* |[
#include <gst/audio/audio.h>
#include <gst/video/video.h>
#include <gst/tag/tag.h>
+#include <gst/pbutils/pbutils.h>
#include <sys/types.h>
#ifdef G_OS_WIN32
GST_DEBUG_CATEGORY_STATIC (gst_qt_mux_debug);
#define GST_CAT_DEFAULT gst_qt_mux_debug
+#ifndef ABSDIFF
+#define ABSDIFF(a, b) ((a) > (b) ? (a) - (b) : (b) - (a))
+#endif
+
/* Hacker notes.
*
* The basic building blocks of MP4 files are:
(gst_qt_mux_dts_method_get_type ())
#endif
/* Per-pad property IDs for GstQTMuxPad */
enum
{
  PROP_PAD_0,
  PROP_PAD_TRAK_TIMESCALE,
};

/* 0 == let the muxer pick a suitable trak timescale automatically */
#define DEFAULT_PAD_TRAK_TIMESCALE 0

GType gst_qt_mux_pad_get_type (void);

/* Standard GObject cast/check boilerplate for the GstQTMuxPad pad subclass */
#define GST_TYPE_QT_MUX_PAD \
  (gst_qt_mux_pad_get_type())
#define GST_QT_MUX_PAD(obj) \
  (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_QT_MUX_PAD, GstQTMuxPad))
#define GST_QT_MUX_PAD_CLASS(klass) \
  (G_TYPE_CHECK_CLASS_CAST ((klass), GST_TYPE_QT_MUX_PAD, GstQTMuxPadClass))
#define GST_IS_QT_MUX_PAD(obj) \
  (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_QT_MUX_PAD))
#define GST_IS_QT_MUX_PAD_CLASS(klass) \
  (G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_QT_MUX_PAD))
#define GST_QT_MUX_PAD_CAST(obj) \
  ((GstQTMuxPad *)(obj))

typedef struct _GstQTMuxPad GstQTMuxPad;
typedef struct _GstQTMuxPadClass GstQTMuxPadClass;

/* Request sink pad subclass carrying per-track muxing properties */
struct _GstQTMuxPad
{
  GstPad parent;

  /* "trak-timescale" property; protected by the pad's object lock */
  guint32 trak_timescale;
};

struct _GstQTMuxPadClass
{
  GstPadClass parent;
};

G_DEFINE_TYPE (GstQTMuxPad, gst_qt_mux_pad, GST_TYPE_PAD);
+
+static void
+gst_qt_mux_pad_set_property (GObject * object,
+ guint prop_id, const GValue * value, GParamSpec * pspec)
+{
+ GstQTMuxPad *pad = GST_QT_MUX_PAD_CAST (object);
+
+ GST_OBJECT_LOCK (pad);
+ switch (prop_id) {
+ case PROP_PAD_TRAK_TIMESCALE:
+ pad->trak_timescale = g_value_get_uint (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+ GST_OBJECT_UNLOCK (pad);
+}
+
+static void
+gst_qt_mux_pad_get_property (GObject * object,
+ guint prop_id, GValue * value, GParamSpec * pspec)
+{
+ GstQTMuxPad *pad = GST_QT_MUX_PAD_CAST (object);
+
+ GST_OBJECT_LOCK (pad);
+ switch (prop_id) {
+ case PROP_PAD_TRAK_TIMESCALE:
+ g_value_set_uint (value, pad->trak_timescale);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+ GST_OBJECT_UNLOCK (pad);
+}
+
+static void
+gst_qt_mux_pad_class_init (GstQTMuxPadClass * klass)
+{
+ GObjectClass *gobject_class = (GObjectClass *) klass;
+
+ gobject_class->get_property = gst_qt_mux_pad_get_property;
+ gobject_class->set_property = gst_qt_mux_pad_set_property;
+
+ g_object_class_install_property (gobject_class, PROP_PAD_TRAK_TIMESCALE,
+ g_param_spec_uint ("trak-timescale", "Track timescale",
+ "Timescale to use for this pad's trak (units per second, 0 is automatic)",
+ 0, G_MAXUINT32, DEFAULT_PAD_TRAK_TIMESCALE,
+ G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
+}
+
static void
gst_qt_mux_pad_init (GstQTMuxPad * pad)
{
  /* 0 means the muxer chooses the trak timescale automatically */
  pad->trak_timescale = DEFAULT_PAD_TRAK_TIMESCALE;
}
+
+static guint32
+gst_qt_mux_pad_get_timescale (GstQTMuxPad * pad)
+{
+ guint32 timescale;
+
+ GST_OBJECT_LOCK (pad);
+ timescale = pad->trak_timescale;
+ GST_OBJECT_UNLOCK (pad);
+
+ return timescale;
+}
+
/* QTMux signals and args */
enum
{
PROP_RESERVED_DURATION_REMAINING,
PROP_RESERVED_MOOV_UPDATE_PERIOD,
PROP_RESERVED_BYTES_PER_SEC,
+ PROP_RESERVED_PREFILL,
#ifndef GST_REMOVE_DEPRECATED
PROP_DTS_METHOD,
#endif
PROP_DO_CTTS,
+ PROP_INTERLEAVE_BYTES,
+ PROP_INTERLEAVE_TIME,
+ PROP_MAX_RAW_AUDIO_DRIFT,
+ PROP_START_GAP_THRESHOLD,
#ifdef TIZEN_FEATURE_GST_MUX_ENHANCEMENT
PROP_EXPECTED_TRAILER_SIZE,
-#endif /* TIZEN_FEATURE_GST_MUX_ENHANCEMENT */
+#endif /* TIZEN_FEATURE_GST_MUX_ENHANCEMENT */
};
/* some spare for header size as well */
#define MDAT_LARGE_FILE_LIMIT ((guint64) 1024 * 1024 * 1024 * 2)
-#define DEFAULT_MOVIE_TIMESCALE 1800
+#define DEFAULT_MOVIE_TIMESCALE 0
#define DEFAULT_TRAK_TIMESCALE 0
#define DEFAULT_DO_CTTS TRUE
#define DEFAULT_FAST_START FALSE
#define DEFAULT_RESERVED_MAX_DURATION GST_CLOCK_TIME_NONE
#define DEFAULT_RESERVED_MOOV_UPDATE_PERIOD GST_CLOCK_TIME_NONE
#define DEFAULT_RESERVED_BYTES_PER_SEC_PER_TRAK 550
+#define DEFAULT_RESERVED_PREFILL FALSE
+#define DEFAULT_INTERLEAVE_BYTES 0
+#define DEFAULT_INTERLEAVE_TIME 250*GST_MSECOND
+#define DEFAULT_MAX_RAW_AUDIO_DRIFT 40 * GST_MSECOND
+#define DEFAULT_START_GAP_THRESHOLD 0
static void gst_qt_mux_finalize (GObject * object);
static gboolean gst_qt_mux_sink_event (GstCollectPads * pads,
GstCollectData * data, GstEvent * event, gpointer user_data);
-static GstFlowReturn gst_qt_mux_handle_buffer (GstCollectPads * pads,
- GstCollectData * cdata, GstBuffer * buf, gpointer user_data);
+static GstFlowReturn gst_qt_mux_collected (GstCollectPads * pads,
+ gpointer user_data);
static GstFlowReturn gst_qt_mux_add_buffer (GstQTMux * qtmux, GstQTPad * pad,
GstBuffer * buf);
static GstFlowReturn
gst_qt_mux_robust_recording_rewrite_moov (GstQTMux * qtmux);
+static void gst_qt_mux_update_global_statistics (GstQTMux * qtmux);
+static void gst_qt_mux_update_edit_lists (GstQTMux * qtmux);
+
static GstElementClass *parent_class = NULL;
#ifdef TIZEN_FEATURE_GST_MUX_ENHANCEMENT
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
GstQTMuxClass *klass = (GstQTMuxClass *) g_class;
GstQTMuxClassParams *params;
- GstPadTemplate *videosinktempl, *audiosinktempl, *subtitlesinktempl;
+ GstPadTemplate *videosinktempl, *audiosinktempl, *subtitlesinktempl,
+ *captionsinktempl;
GstPadTemplate *srctempl;
gchar *longname, *description;
gst_element_class_add_pad_template (element_class, srctempl);
if (params->audio_sink_caps) {
- audiosinktempl = gst_pad_template_new ("audio_%u",
- GST_PAD_SINK, GST_PAD_REQUEST, params->audio_sink_caps);
+ audiosinktempl = gst_pad_template_new_with_gtype ("audio_%u",
+ GST_PAD_SINK, GST_PAD_REQUEST, params->audio_sink_caps,
+ GST_TYPE_QT_MUX_PAD);
gst_element_class_add_pad_template (element_class, audiosinktempl);
}
if (params->video_sink_caps) {
- videosinktempl = gst_pad_template_new ("video_%u",
- GST_PAD_SINK, GST_PAD_REQUEST, params->video_sink_caps);
+ videosinktempl = gst_pad_template_new_with_gtype ("video_%u",
+ GST_PAD_SINK, GST_PAD_REQUEST, params->video_sink_caps,
+ GST_TYPE_QT_MUX_PAD);
gst_element_class_add_pad_template (element_class, videosinktempl);
}
if (params->subtitle_sink_caps) {
- subtitlesinktempl = gst_pad_template_new ("subtitle_%u",
- GST_PAD_SINK, GST_PAD_REQUEST, params->subtitle_sink_caps);
+ subtitlesinktempl = gst_pad_template_new_with_gtype ("subtitle_%u",
+ GST_PAD_SINK, GST_PAD_REQUEST, params->subtitle_sink_caps,
+ GST_TYPE_QT_MUX_PAD);
gst_element_class_add_pad_template (element_class, subtitlesinktempl);
}
+ if (params->caption_sink_caps) {
+ captionsinktempl = gst_pad_template_new_with_gtype ("caption_%u",
+ GST_PAD_SINK, GST_PAD_REQUEST, params->caption_sink_caps,
+ GST_TYPE_QT_MUX_PAD);
+ gst_element_class_add_pad_template (element_class, captionsinktempl);
+ }
+
klass->format = params->prop->format;
}
g_object_class_install_property (gobject_class, PROP_MOVIE_TIMESCALE,
g_param_spec_uint ("movie-timescale", "Movie timescale",
- "Timescale to use in the movie (units per second)",
- 1, G_MAXUINT32, DEFAULT_MOVIE_TIMESCALE,
+ "Timescale to use in the movie (units per second, 0 == default)",
+ 0, G_MAXUINT32, DEFAULT_MOVIE_TIMESCALE,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_TRAK_TIMESCALE,
g_param_spec_uint ("trak-timescale", "Track timescale",
"Multiplier for converting reserved-max-duration into bytes of header to reserve, per second, per track",
0, 10000, DEFAULT_RESERVED_BYTES_PER_SEC_PER_TRAK,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_RESERVED_PREFILL,
+ g_param_spec_boolean ("reserved-prefill",
+ "Reserved Prefill Samples Table",
+ "Prefill samples table of reserved duration",
+ DEFAULT_RESERVED_PREFILL,
+ G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_INTERLEAVE_BYTES,
+ g_param_spec_uint64 ("interleave-bytes", "Interleave (bytes)",
+ "Interleave between streams in bytes",
+ 0, G_MAXUINT64, DEFAULT_INTERLEAVE_BYTES,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_INTERLEAVE_TIME,
+ g_param_spec_uint64 ("interleave-time", "Interleave (time)",
+ "Interleave between streams in nanoseconds",
+ 0, G_MAXUINT64, DEFAULT_INTERLEAVE_TIME,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_MAX_RAW_AUDIO_DRIFT,
+ g_param_spec_uint64 ("max-raw-audio-drift", "Max Raw Audio Drift",
+ "Maximum allowed drift of raw audio samples vs. timestamps in nanoseconds",
+ 0, G_MAXUINT64, DEFAULT_MAX_RAW_AUDIO_DRIFT,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_START_GAP_THRESHOLD,
+ g_param_spec_uint64 ("start-gap-threshold", "Start Gap Threshold",
+ "Threshold for creating an edit list for gaps at the start in nanoseconds",
+ 0, G_MAXUINT64, DEFAULT_START_GAP_THRESHOLD,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
#ifdef TIZEN_FEATURE_GST_MUX_ENHANCEMENT
tspec = g_param_spec_uint("expected-trailer-size", "Expected Trailer Size",
qtpad->sample_size = 0;
qtpad->sync = FALSE;
qtpad->last_dts = 0;
+ qtpad->sample_offset = 0;
qtpad->dts_adjustment = GST_CLOCK_TIME_NONE;
qtpad->first_ts = GST_CLOCK_TIME_NONE;
qtpad->first_dts = GST_CLOCK_TIME_NONE;
qtpad->total_bytes = 0;
qtpad->sparse = FALSE;
- qtpad->buf_head = 0;
- qtpad->buf_tail = 0;
-
gst_buffer_replace (&qtpad->last_buf, NULL);
if (qtpad->tags) {
/* reference owned elsewhere */
qtpad->trak = NULL;
+ qtpad->tc_trak = NULL;
if (qtpad->traf) {
atom_traf_free (qtpad->traf);
qtpad->traf = NULL;
}
atom_array_clear (&qtpad->fragment_buffers);
+ if (qtpad->samples)
+ g_array_unref (qtpad->samples);
+ qtpad->samples = NULL;
/* reference owned elsewhere */
qtpad->tfra = NULL;
+
+ qtpad->first_pts = GST_CLOCK_TIME_NONE;
+ qtpad->tc_pos = -1;
+ if (qtpad->first_tc)
+ gst_video_time_code_free (qtpad->first_tc);
+ qtpad->first_tc = NULL;
+
+ if (qtpad->raw_audio_adapter)
+ gst_object_unref (qtpad->raw_audio_adapter);
+ qtpad->raw_audio_adapter = NULL;
}
/*
qtmux->moov_pos = 0;
qtmux->mdat_pos = 0;
qtmux->longest_chunk = GST_CLOCK_TIME_NONE;
- qtmux->video_pads = 0;
- qtmux->audio_pads = 0;
qtmux->fragment_sequence = 0;
if (qtmux->ftyp) {
if (alloc) {
qtmux->moov = atom_moov_new (qtmux->context);
+#ifndef TIZEN_FEATURE_GST_MUX_ENHANCEMENT
/* ensure all is as nice and fresh as request_new_pad would provide it */
for (walk = qtmux->sinkpads; walk; walk = g_slist_next (walk)) {
GstQTPad *qtpad = (GstQTPad *) walk->data;
qtpad->trak = atom_trak_new (qtmux->context);
atom_moov_add_trak (qtmux->moov, qtpad->trak);
}
+#endif
}
+ qtmux->current_pad = NULL;
+ qtmux->current_chunk_size = 0;
+ qtmux->current_chunk_duration = 0;
+ qtmux->current_chunk_offset = -1;
+
qtmux->reserved_moov_size = 0;
qtmux->last_moov_update = GST_CLOCK_TIME_NONE;
qtmux->muxed_since_last_update = 0;
qtmux->sinkpads = NULL;
qtmux->collect = gst_collect_pads_new ();
- gst_collect_pads_set_buffer_function (qtmux->collect,
- GST_DEBUG_FUNCPTR (gst_qt_mux_handle_buffer), qtmux);
gst_collect_pads_set_event_function (qtmux->collect,
GST_DEBUG_FUNCPTR (gst_qt_mux_sink_event), qtmux);
gst_collect_pads_set_clip_function (qtmux->collect,
GST_DEBUG_FUNCPTR (gst_collect_pads_clip_running_time), qtmux);
+ gst_collect_pads_set_function (qtmux->collect,
+ GST_DEBUG_FUNCPTR (gst_qt_mux_collected), qtmux);
/* properties set to default upon construction */
qtmux->reserved_moov_update_period = DEFAULT_RESERVED_MOOV_UPDATE_PERIOD;
qtmux->reserved_bytes_per_sec_per_trak =
DEFAULT_RESERVED_BYTES_PER_SEC_PER_TRAK;
+ qtmux->interleave_bytes = DEFAULT_INTERLEAVE_BYTES;
+ qtmux->interleave_time = DEFAULT_INTERLEAVE_TIME;
+ qtmux->max_raw_audio_drift = DEFAULT_MAX_RAW_AUDIO_DRIFT;
+ qtmux->start_gap_threshold = DEFAULT_START_GAP_THRESHOLD;
/* always need this */
qtmux->context =
return newbuf;
}
+static gsize
+extract_608_field_from_s334_1a (const guint8 * ccdata, gsize ccdata_size,
+ guint field, guint8 ** res)
+{
+ guint8 *storage;
+ gsize storage_size = 128;
+ gsize i, res_size = 0;
+
+ storage = g_malloc0 (storage_size);
+
+ /* Iterate over the ccdata and put the corresponding tuples for the given field
+ * in the storage */
+ for (i = 0; i < ccdata_size; i += 3) {
+ if ((field == 1 && (ccdata[i * 3] & 0x80)) ||
+ (field == 2 && !(ccdata[i * 3] & 0x80))) {
+ GST_DEBUG ("Storing matching cc for field %d : 0x%02x 0x%02x", field,
+ ccdata[i * 3 + 1], ccdata[i * 3 + 2]);
+ if (res_size >= storage_size) {
+ storage_size += 128;
+ storage = g_realloc (storage, storage_size);
+ }
+ storage[res_size] = ccdata[i * 3 + 1];
+ storage[res_size + 1] = ccdata[i * 3 + 2];
+ res_size += 2;
+ }
+ }
+
+ if (res_size == 0) {
+ g_free (storage);
+ *res = NULL;
+ return 0;
+ }
+
+ *res = storage;
+ return res_size;
+}
+
+
+static GstBuffer *
+gst_qt_mux_prepare_caption_buffer (GstQTPad * qtpad, GstBuffer * buf,
+ GstQTMux * qtmux)
+{
+ GstBuffer *newbuf = NULL;
+ GstMapInfo map, inmap;
+ gsize size;
+ gboolean in_prefill;
+
+ if (buf == NULL)
+ return NULL;
+
+ in_prefill = (qtmux->mux_mode == GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL);
+
+ size = gst_buffer_get_size (buf);
+ gst_buffer_map (buf, &inmap, GST_MAP_READ);
+
+ GST_LOG_OBJECT (qtmux,
+ "Preparing caption buffer %" GST_FOURCC_FORMAT " size:%" G_GSIZE_FORMAT,
+ GST_FOURCC_ARGS (qtpad->fourcc), size);
+
+ switch (qtpad->fourcc) {
+ case FOURCC_c608:
+ {
+ guint8 *cdat, *cdt2;
+ gsize cdat_size, cdt2_size, total_size = 0;
+ gsize write_offs = 0;
+
+ cdat_size =
+ extract_608_field_from_s334_1a (inmap.data, inmap.size, 1, &cdat);
+ cdt2_size =
+ extract_608_field_from_s334_1a (inmap.data, inmap.size, 2, &cdt2);
+
+ if (cdat_size)
+ total_size += cdat_size + 8;
+ if (cdt2_size)
+ total_size += cdt2_size + 8;
+ if (total_size == 0) {
+ GST_DEBUG_OBJECT (qtmux, "No 608 data ?");
+ /* FIXME : We might want to *always* store something, even if
+ * it's "empty" CC (i.e. 0x80 0x80) */
+ break;
+ }
+
+ newbuf = gst_buffer_new_and_alloc (in_prefill ? 20 : total_size);
+ /* Let's copy over all metadata and not the memory */
+ gst_buffer_copy_into (newbuf, buf, GST_BUFFER_COPY_METADATA, 0, size);
+
+ gst_buffer_map (newbuf, &map, GST_MAP_WRITE);
+ if (cdat_size || in_prefill) {
+ GST_WRITE_UINT32_BE (map.data, in_prefill ? 10 : cdat_size + 8);
+ GST_WRITE_UINT32_LE (map.data + 4, FOURCC_cdat);
+ if (cdat_size)
+ memcpy (map.data + 8, cdat, in_prefill ? 2 : cdat_size);
+ else {
+ /* Write 'empty' CC */
+ map.data[8] = 0x80;
+ map.data[9] = 0x80;
+ }
+ write_offs = in_prefill ? 10 : cdat_size + 8;
+ if (cdat_size)
+ g_free (cdat);
+ }
+
+ if (cdt2_size || in_prefill) {
+ GST_WRITE_UINT32_BE (map.data + write_offs,
+ in_prefill ? 10 : cdt2_size + 8);
+ GST_WRITE_UINT32_LE (map.data + write_offs + 4, FOURCC_cdt2);
+ if (cdt2_size)
+ memcpy (map.data + write_offs + 8, cdt2, in_prefill ? 2 : cdt2_size);
+ else {
+ /* Write 'empty' CC */
+ map.data[write_offs + 8] = 0x80;
+ map.data[write_offs + 9] = 0x80;
+ }
+ if (cdt2_size)
+ g_free (cdt2);
+ }
+ gst_buffer_unmap (newbuf, &map);
+ break;
+ }
+ break;
+ case FOURCC_c708:
+ {
+ /* Take the whole CDP */
+ if (in_prefill && size > 256) {
+ GST_ERROR_OBJECT (qtmux, "Input C708 CDP too big for prefill mode !");
+ break;
+ }
+ newbuf = gst_buffer_new_and_alloc (in_prefill ? 256 + 8 : size + 8);
+
+ /* Let's copy over all metadata and not the memory */
+ gst_buffer_copy_into (newbuf, buf, GST_BUFFER_COPY_METADATA, 0, size);
+
+ gst_buffer_map (newbuf, &map, GST_MAP_WRITE);
+
+ GST_WRITE_UINT32_BE (map.data, size + 8);
+ GST_WRITE_UINT32_LE (map.data + 4, FOURCC_ccdp);
+ memcpy (map.data + 8, inmap.data, inmap.size);
+
+ gst_buffer_unmap (newbuf, &map);
+ break;
+ }
+ default:
+ /* theoretically this should never happen, but let's keep this here in case */
+ GST_WARNING_OBJECT (qtmux, "Unknown caption format");
+ break;
+ }
+
+ gst_buffer_unmap (buf, &inmap);
+ gst_buffer_unref (buf);
+
+ return newbuf;
+}
+
static GstBuffer *
gst_qt_mux_prepare_tx3g_buffer (GstQTPad * qtpad, GstBuffer * buf,
GstQTMux * qtmux)
guint8 *data;
GstBuffer *buf;
GstFlowReturn ret = GST_FLOW_OK;
+ GSList *walk;
+ guint64 current_time = atoms_get_current_qt_time ();
+
+ /* update modification times */
+ qtmux->moov->mvhd.time_info.modification_time = current_time;
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qtpad = (GstQTPad *) cdata;
+
+ qtpad->trak->mdia.mdhd.time_info.modification_time = current_time;
+ qtpad->trak->tkhd.modification_time = current_time;
+ }
/* serialize moov */
offset = size = 0;
AtomFTYP *ftyp = NULL;
GstBuffer *prefix = NULL;
- GST_DEBUG_OBJECT (qtmux, "Openning moov recovery file: %s",
+ GST_DEBUG_OBJECT (qtmux, "Opening moov recovery file: %s",
qtmux->moov_recov_file_path);
qtmux->moov_recov_file = g_fopen (qtmux->moov_recov_file_path, "wb+");
}
}
+ return;
+
fail:
/* cleanup */
fclose (qtmux->moov_recov_file);
qtmux->moov_recov_file = NULL;
- GST_WARNING_OBJECT (qtmux, "An error was detected while writing to "
- "recover file, moov recovery won't work");
}
-static GstFlowReturn
-gst_qt_mux_start_file (GstQTMux * qtmux)
+static guint64
+prefill_get_block_index (GstQTMux * qtmux, GstQTPad * qpad)
{
- GstQTMuxClass *qtmux_klass = (GstQTMuxClass *) (G_OBJECT_GET_CLASS (qtmux));
- GstFlowReturn ret = GST_FLOW_OK;
- GstCaps *caps;
- GstSegment segment;
- gchar s_id[32];
- GstClockTime reserved_max_duration;
- guint reserved_bytes_per_sec_per_trak;
-
- GST_DEBUG_OBJECT (qtmux, "starting file");
-
- GST_OBJECT_LOCK (qtmux);
- reserved_max_duration = qtmux->reserved_max_duration;
- reserved_bytes_per_sec_per_trak = qtmux->reserved_bytes_per_sec_per_trak;
- GST_OBJECT_UNLOCK (qtmux);
-
- /* stream-start (FIXME: create id based on input ids) */
- g_snprintf (s_id, sizeof (s_id), "qtmux-%08x", g_random_int ());
- gst_pad_push_event (qtmux->srcpad, gst_event_new_stream_start (s_id));
-
- caps = gst_caps_copy (gst_pad_get_pad_template_caps (qtmux->srcpad));
- /* qtmux has structure with and without variant, remove all but the first */
- while (gst_caps_get_size (caps) > 1)
- gst_caps_remove_structure (caps, 1);
- gst_pad_set_caps (qtmux->srcpad, caps);
- gst_caps_unref (caps);
-
- /* Default is 'normal' mode */
- qtmux->mux_mode = GST_QT_MUX_MODE_MOOV_AT_END;
-
- /* Require a sensible fragment duration when muxing
- * using the ISML muxer */
- if (qtmux_klass->format == GST_QT_MUX_FORMAT_ISML &&
- qtmux->fragment_duration == 0)
- goto invalid_isml;
-
- if (qtmux->fragment_duration > 0) {
- if (qtmux->streamable)
- qtmux->mux_mode = GST_QT_MUX_MODE_FRAGMENTED_STREAMABLE;
- else
- qtmux->mux_mode = GST_QT_MUX_MODE_FRAGMENTED;
- } else if (qtmux->fast_start) {
- qtmux->mux_mode = GST_QT_MUX_MODE_FAST_START;
- } else if (reserved_max_duration != GST_CLOCK_TIME_NONE) {
- qtmux->mux_mode = GST_QT_MUX_MODE_ROBUST_RECORDING;
+ switch (qpad->fourcc) {
+ case FOURCC_apch:
+ case FOURCC_apcn:
+ case FOURCC_apcs:
+ case FOURCC_apco:
+ case FOURCC_ap4h:
+ case FOURCC_ap4x:
+ case FOURCC_c608:
+ case FOURCC_c708:
+ return qpad->sample_offset;
+ case FOURCC_sowt:
+ case FOURCC_twos:
+ return gst_util_uint64_scale_ceil (qpad->sample_offset,
+ qpad->expected_sample_duration_n,
+ qpad->expected_sample_duration_d *
+ atom_trak_get_timescale (qpad->trak));
+ default:
+ return -1;
}
+}
- switch (qtmux->mux_mode) {
- case GST_QT_MUX_MODE_MOOV_AT_END:
- case GST_QT_MUX_MODE_ROBUST_RECORDING:
- /* We have to be able to seek to rewrite the mdat header, or any
- * moov atom we write will not be visible in the file, because an
- * MDAT with 0 as the size covers the rest of the file. A file
- * with no moov is not playable, so error out now. */
- if (!gst_qt_mux_downstream_is_seekable (qtmux)) {
- GST_ELEMENT_ERROR (qtmux, STREAM, MUX,
- ("Downstream is not seekable - will not be able to create a playable file"),
- (NULL));
- return GST_FLOW_ERROR;
+static guint
+prefill_get_sample_size (GstQTMux * qtmux, GstQTPad * qpad)
+{
+ switch (qpad->fourcc) {
+ case FOURCC_apch:
+ if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 480) {
+ return 300000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 576) {
+ return 350000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 720) {
+ return 525000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 1080) {
+ return 1050000;
+ } else {
+ return 4150000;
}
break;
- case GST_QT_MUX_MODE_FAST_START:
- case GST_QT_MUX_MODE_FRAGMENTED_STREAMABLE:
- break; /* Don't need seekability, ignore */
- case GST_QT_MUX_MODE_FRAGMENTED:
- if (!gst_qt_mux_downstream_is_seekable (qtmux)) {
- GST_WARNING_OBJECT (qtmux, "downstream is not seekable, but "
- "streamable=false. Will ignore that and create streamable output "
- "instead");
- qtmux->streamable = TRUE;
- g_object_notify (G_OBJECT (qtmux), "streamable");
+ case FOURCC_apcn:
+ if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 480) {
+ return 200000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 576) {
+ return 250000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 720) {
+ return 350000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 1080) {
+ return 700000;
+ } else {
+ return 2800000;
+ }
+ break;
+ case FOURCC_apcs:
+ if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 480) {
+ return 150000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 576) {
+ return 200000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 720) {
+ return 250000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 1080) {
+ return 500000;
+ } else {
+ return 2800000;
}
break;
+ case FOURCC_apco:
+ if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 480) {
+ return 80000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 576) {
+ return 100000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 720) {
+ return 150000;
+ } else if (((SampleTableEntryMP4V *) qpad->trak_ste)->height <= 1080) {
+ return 250000;
+ } else {
+ return 900000;
+ }
+ break;
+ case FOURCC_c608:
+ /* We always write both cdat and cdt2 atom in prefill mode */
+ return 20;
+ case FOURCC_c708:
+ /* We're cheating a bit by always allocating 256 bytes plus 8 bytes for the atom header
+ * even if we use less */
+ return 256 + 8;
+ case FOURCC_sowt:
+ case FOURCC_twos:{
+ guint64 block_idx;
+ guint64 next_sample_offset;
+
+ block_idx = prefill_get_block_index (qtmux, qpad);
+ next_sample_offset =
+ gst_util_uint64_scale (block_idx + 1,
+ qpad->expected_sample_duration_d *
+ atom_trak_get_timescale (qpad->trak),
+ qpad->expected_sample_duration_n);
+
+ return (next_sample_offset - qpad->sample_offset) * qpad->sample_size;
+ }
+ case FOURCC_ap4h:
+ case FOURCC_ap4x:
+ default:
+ GST_ERROR_OBJECT (qtmux, "unsupported codec for pre-filling");
+ return -1;
}
- /* let downstream know we think in BYTES and expect to do seeking later on */
- gst_segment_init (&segment, GST_FORMAT_BYTES);
- gst_pad_push_event (qtmux->srcpad, gst_event_new_segment (&segment));
+ return -1;
+}
- /* initialize our moov recovery file */
- GST_OBJECT_LOCK (qtmux);
- if (qtmux->moov_recov_file_path) {
- gst_qt_mux_prepare_moov_recovery (qtmux);
+static GstClockTime
+prefill_get_next_timestamp (GstQTMux * qtmux, GstQTPad * qpad)
+{
+ switch (qpad->fourcc) {
+ case FOURCC_apch:
+ case FOURCC_apcn:
+ case FOURCC_apcs:
+ case FOURCC_apco:
+ case FOURCC_ap4h:
+ case FOURCC_ap4x:
+ case FOURCC_c608:
+ case FOURCC_c708:
+ return gst_util_uint64_scale (qpad->sample_offset + 1,
+ qpad->expected_sample_duration_d * GST_SECOND,
+ qpad->expected_sample_duration_n);
+ case FOURCC_sowt:
+ case FOURCC_twos:{
+ guint64 block_idx;
+ guint64 next_sample_offset;
+
+ block_idx = prefill_get_block_index (qtmux, qpad);
+ next_sample_offset =
+ gst_util_uint64_scale (block_idx + 1,
+ qpad->expected_sample_duration_d *
+ atom_trak_get_timescale (qpad->trak),
+ qpad->expected_sample_duration_n);
+
+ return gst_util_uint64_scale (next_sample_offset, GST_SECOND,
+ atom_trak_get_timescale (qpad->trak));
+ }
+ default:
+ GST_ERROR_OBJECT (qtmux, "unsupported codec for pre-filling");
+ return -1;
}
- /* Make sure the first time we update the moov, we'll
- * include any tagsetter tags */
- qtmux->tags_changed = TRUE;
+ return -1;
+}
- GST_OBJECT_UNLOCK (qtmux);
+static GstBuffer *
+prefill_raw_audio_prepare_buf_func (GstQTPad * qtpad, GstBuffer * buf,
+ GstQTMux * qtmux)
+{
+ guint64 block_idx;
+ guint64 nsamples;
+ GstClockTime input_timestamp;
+ guint64 input_timestamp_distance;
- /*
- * send mdat header if already needed, and mark position for later update.
- * We don't send ftyp now if we are on fast start mode, because we can
- * better fine tune using the information we gather to create the whole moov
- * atom.
- */
- switch (qtmux->mux_mode) {
- case GST_QT_MUX_MODE_MOOV_AT_END:
- ret = gst_qt_mux_prepare_and_send_ftyp (qtmux);
- if (ret != GST_FLOW_OK)
- break;
+ if (buf)
+ gst_adapter_push (qtpad->raw_audio_adapter, buf);
+
+ block_idx = gst_util_uint64_scale_ceil (qtpad->raw_audio_adapter_offset,
+ qtpad->expected_sample_duration_n,
+ qtpad->expected_sample_duration_d *
+ atom_trak_get_timescale (qtpad->trak));
+ nsamples =
+ gst_util_uint64_scale (block_idx + 1,
+ qtpad->expected_sample_duration_d * atom_trak_get_timescale (qtpad->trak),
+ qtpad->expected_sample_duration_n) - qtpad->raw_audio_adapter_offset;
+
+ if ((!GST_COLLECT_PADS_STATE_IS_SET (&qtpad->collect,
+ GST_COLLECT_PADS_STATE_EOS)
+ && gst_adapter_available (qtpad->raw_audio_adapter) <
+ nsamples * qtpad->sample_size)
+ || gst_adapter_available (qtpad->raw_audio_adapter) == 0) {
+ return NULL;
+ }
- /* Store this as the mdat offset for later updating
- * when we write the moov */
+ input_timestamp =
+ gst_adapter_prev_pts (qtpad->raw_audio_adapter,
+ &input_timestamp_distance);
+ if (input_timestamp != GST_CLOCK_TIME_NONE)
+ input_timestamp +=
+ gst_util_uint64_scale (input_timestamp_distance, GST_SECOND,
+ qtpad->sample_size * atom_trak_get_timescale (qtpad->trak));
+
+ buf =
+ gst_adapter_take_buffer (qtpad->raw_audio_adapter,
+ !GST_COLLECT_PADS_STATE_IS_SET (&qtpad->collect,
+ GST_COLLECT_PADS_STATE_EOS) ? nsamples *
+ qtpad->sample_size : gst_adapter_available (qtpad->raw_audio_adapter));
+ GST_BUFFER_PTS (buf) = input_timestamp;
+ GST_BUFFER_DTS (buf) = GST_CLOCK_TIME_NONE;
+ GST_BUFFER_DURATION (buf) = GST_CLOCK_TIME_NONE;
+
+ qtpad->raw_audio_adapter_offset += nsamples;
+
+ /* Check if we have yet another block of raw audio in the adapter */
+ nsamples =
+ gst_util_uint64_scale (block_idx + 2,
+ qtpad->expected_sample_duration_d * atom_trak_get_timescale (qtpad->trak),
+ qtpad->expected_sample_duration_n) - qtpad->raw_audio_adapter_offset;
+ if (gst_adapter_available (qtpad->raw_audio_adapter) >=
+ nsamples * qtpad->sample_size) {
+ input_timestamp =
+ gst_adapter_prev_pts (qtpad->raw_audio_adapter,
+ &input_timestamp_distance);
+ if (input_timestamp != GST_CLOCK_TIME_NONE)
+ input_timestamp +=
+ gst_util_uint64_scale (input_timestamp_distance, GST_SECOND,
+ qtpad->sample_size * atom_trak_get_timescale (qtpad->trak));
+ qtpad->raw_audio_adapter_pts = input_timestamp;
+ } else {
+ qtpad->raw_audio_adapter_pts = GST_CLOCK_TIME_NONE;
+ }
+
+ return buf;
+}
+
+static void
+find_video_sample_duration (GstQTMux * qtmux, guint * dur_n, guint * dur_d)
+{
+ GSList *walk;
+
+ /* Find the (first) video track and assume that we have to output
+ * in that size */
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *tmp_qpad = (GstQTPad *) cdata;
+
+ if (tmp_qpad->trak->is_video) {
+ *dur_n = tmp_qpad->expected_sample_duration_n;
+ *dur_d = tmp_qpad->expected_sample_duration_d;
+ break;
+ }
+ }
+
+ if (walk == NULL) {
+ GST_INFO_OBJECT (qtmux,
+ "Found no video framerate, using 40ms audio buffers");
+ *dur_n = 25;
+ *dur_d = 1;
+ }
+}
+
+/* Called when all pads are prerolled to adjust and */
+static gboolean
+prefill_update_sample_size (GstQTMux * qtmux, GstQTPad * qpad)
+{
+ switch (qpad->fourcc) {
+ case FOURCC_apch:
+ case FOURCC_apcn:
+ case FOURCC_apcs:
+ case FOURCC_apco:
+ case FOURCC_ap4h:
+ case FOURCC_ap4x:
+ {
+ guint sample_size = prefill_get_sample_size (qtmux, qpad);
+ atom_trak_set_constant_size_samples (qpad->trak, sample_size);
+ return TRUE;
+ }
+ case FOURCC_c608:
+ case FOURCC_c708:
+ {
+ guint sample_size = prefill_get_sample_size (qtmux, qpad);
+ /* We need a "valid" duration */
+ find_video_sample_duration (qtmux, &qpad->expected_sample_duration_n,
+ &qpad->expected_sample_duration_d);
+ atom_trak_set_constant_size_samples (qpad->trak, sample_size);
+ return TRUE;
+ }
+ case FOURCC_sowt:
+ case FOURCC_twos:{
+ find_video_sample_duration (qtmux, &qpad->expected_sample_duration_n,
+ &qpad->expected_sample_duration_d);
+ /* Set a prepare_buf_func that ensures this */
+ qpad->prepare_buf_func = prefill_raw_audio_prepare_buf_func;
+ qpad->raw_audio_adapter = gst_adapter_new ();
+ qpad->raw_audio_adapter_offset = 0;
+ qpad->raw_audio_adapter_pts = GST_CLOCK_TIME_NONE;
+
+ return TRUE;
+ }
+ default:
+ return TRUE;
+ }
+}
+
/* Only called at startup when doing the "fake" iteration of all tracks in order
 * to prefill the sample tables in the header.
 *
 * Returns the pad whose samples should be laid out next, or NULL once every
 * pad has accumulated reserved-max-duration worth of data. */
static GstQTPad *
find_best_pad_prefill_start (GstQTMux * qtmux)
{
  GSList *walk;
  GstQTPad *best_pad = NULL;

  /* If interleave limits have been specified and the current pad is within
   * those interleave limits, pick that one, otherwise let's try to figure out
   * the next best one. */
  if (qtmux->current_pad &&
      (qtmux->interleave_bytes != 0 || qtmux->interleave_time != 0) &&
      (qtmux->interleave_bytes == 0
          || qtmux->current_chunk_size <= qtmux->interleave_bytes)
      && (qtmux->interleave_time == 0
          || qtmux->current_chunk_duration <= qtmux->interleave_time)
      && qtmux->mux_mode != GST_QT_MUX_MODE_FRAGMENTED
      && qtmux->mux_mode != GST_QT_MUX_MODE_FRAGMENTED_STREAMABLE) {

    /* Stick with the current pad only while it still has reserved
     * duration left to fill */
    if (qtmux->current_pad->total_duration < qtmux->reserved_max_duration) {
      best_pad = qtmux->current_pad;
    }
  } else if (qtmux->collect->data->next) {
    /* Attempt to try another pad if we have one. Otherwise use the only pad
     * present.
     * NOTE(review): assumes at least one sink pad exists (collect->data is
     * dereferenced unchecked) — confirm callers guarantee this. */
    best_pad = qtmux->current_pad = NULL;
  }

  /* The next best pad is the one which has the lowest timestamp and hasn't
   * exceeded the reserved max duration */
  if (!best_pad) {
    GstClockTime best_time = GST_CLOCK_TIME_NONE;

    for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
      GstCollectData *cdata = (GstCollectData *) walk->data;
      GstQTPad *qtpad = (GstQTPad *) cdata;
      GstClockTime timestamp;

      /* Pads that already reached the reserved duration are done */
      if (qtpad->total_duration >= qtmux->reserved_max_duration)
        continue;

      /* During prefill a pad's "timestamp" is simply how much stream time
       * it has accumulated so far */
      timestamp = qtpad->total_duration;

      if (best_pad == NULL ||
          !GST_CLOCK_TIME_IS_VALID (best_time) || timestamp < best_time) {
        best_pad = qtpad;
        best_time = timestamp;
      }
    }
  }

  return best_pad;
}
+
+/* Called when starting the file in prefill_mode to figure out all the entries
+ * of the header based on the input stream and reserved maximum duration.
+ *
+ * The _actual_ header (i.e. with the proper duration and trimmed sample tables)
+ * will be updated and written on EOS.
+ *
+ * Returns FALSE if a pad's codec/configuration cannot be prefilled (the caller
+ * reports this as an "unsupported codecs or configuration" error). */
+static gboolean
+gst_qt_mux_prefill_samples (GstQTMux * qtmux)
+{
+ GstQTPad *qpad;
+ GSList *walk;
+ GstQTMuxClass *qtmux_klass = (GstQTMuxClass *) (G_OBJECT_GET_CLASS (qtmux));
+
+ /* Update expected sample sizes/durations as needed, this is for raw
+ * audio where samples are actual audio samples. */
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qpad = (GstQTPad *) cdata;
+
+ if (!prefill_update_sample_size (qtmux, qpad))
+ return FALSE;
+ }
+
+ if (qtmux_klass->format == GST_QT_MUX_FORMAT_QT) {
+ /* For the first sample check/update timecode as needed. We do that before
+ * all actual samples as the code in gst_qt_mux_add_buffer() does it with
+ * initial buffer directly, not with last_buf */
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qpad = (GstQTPad *) cdata;
+ GstBuffer *buffer =
+ gst_collect_pads_peek (qtmux->collect, (GstCollectData *) qpad);
+ GstVideoTimeCodeMeta *tc_meta;
+
+ if (buffer && (tc_meta = gst_buffer_get_video_time_code_meta (buffer))
+ && qpad->trak->is_video) {
+ GstVideoTimeCode *tc = &tc_meta->tc;
+
+ /* Create the timecode trak up front and reference it from the
+ * video trak via a 'tmcd' tref */
+ qpad->tc_trak = atom_trak_new (qtmux->context);
+ atom_moov_add_trak (qtmux->moov, qpad->tc_trak);
+
+ qpad->trak->tref = atom_tref_new (FOURCC_tmcd);
+ atom_tref_add_entry (qpad->trak->tref, qpad->tc_trak->tkhd.track_ID);
+
+ atom_trak_set_timecode_type (qpad->tc_trak, qtmux->context,
+ qpad->trak->mdia.mdhd.time_info.timescale, tc);
+
+ /* The timecode trak has a single 4-byte sample at the current mdat
+ * position */
+ atom_trak_add_samples (qpad->tc_trak, 1, 1, 4,
+ qtmux->mdat_size, FALSE, 0);
+
+ /* Remember where the timecode sample lives so it can be rewritten
+ * with the final value later (see gst_qt_mux_update_timecode) */
+ qpad->tc_pos = qtmux->mdat_size;
+ qpad->first_tc = gst_video_time_code_copy (tc);
+ qpad->first_pts = GST_BUFFER_PTS (buffer);
+
+ qtmux->current_chunk_offset = -1;
+ qtmux->current_chunk_size = 0;
+ qtmux->current_chunk_duration = 0;
+ qtmux->mdat_size += 4;
+ }
+ if (buffer)
+ gst_buffer_unref (buffer);
+ }
+ }
+
+ /* Fake-iterate over all pads, appending one sample-table entry per block,
+ * until every pad covers reserved_max_duration */
+ while ((qpad = find_best_pad_prefill_start (qtmux))) {
+ GstClockTime timestamp, next_timestamp, duration;
+ guint nsamples, sample_size;
+ guint64 chunk_offset;
+ gint64 scaled_duration;
+ gint64 pts_offset = 0;
+ gboolean sync = FALSE;
+ TrakBufferEntryInfo sample_entry;
+
+ sample_size = prefill_get_sample_size (qtmux, qpad);
+
+ /* presumably -1 signals an unsupported codec/configuration here —
+ * confirm against prefill_get_sample_size() */
+ if (sample_size == -1) {
+ return FALSE;
+ }
+
+ if (!qpad->samples)
+ qpad->samples = g_array_new (FALSE, FALSE, sizeof (TrakBufferEntryInfo));
+
+ timestamp = qpad->total_duration;
+ next_timestamp = prefill_get_next_timestamp (qtmux, qpad);
+ duration = next_timestamp - timestamp;
+
+ if (qpad->first_ts == GST_CLOCK_TIME_NONE)
+ qpad->first_ts = timestamp;
+ if (qpad->first_dts == GST_CLOCK_TIME_NONE)
+ qpad->first_dts = timestamp;
+
+ /* Start a new chunk whenever the pad changes (or for the very first
+ * sample), mirroring the chunking done during real muxing */
+ if (qtmux->current_pad != qpad || qtmux->current_chunk_offset == -1) {
+ qtmux->current_pad = qpad;
+ if (qtmux->current_chunk_offset == -1)
+ qtmux->current_chunk_offset = qtmux->mdat_size;
+ else
+ qtmux->current_chunk_offset += qtmux->current_chunk_size;
+ qtmux->current_chunk_size = 0;
+ qtmux->current_chunk_duration = 0;
+ }
+ if (qpad->sample_size)
+ nsamples = sample_size / qpad->sample_size;
+ else
+ nsamples = 1;
+ qpad->last_dts = timestamp;
+ /* Scale the end and start times separately and subtract, so rounding
+ * errors don't accumulate across samples */
+ scaled_duration = gst_util_uint64_scale_round (timestamp + duration,
+ atom_trak_get_timescale (qpad->trak),
+ GST_SECOND) - gst_util_uint64_scale_round (timestamp,
+ atom_trak_get_timescale (qpad->trak), GST_SECOND);
+
+ qtmux->current_chunk_size += sample_size;
+ qtmux->current_chunk_duration += duration;
+ qpad->total_bytes += sample_size;
+
+ chunk_offset = qtmux->current_chunk_offset;
+
+ /* I-frame only, no frame reordering */
+ sync = FALSE;
+ pts_offset = 0;
+
+ if (qtmux->current_chunk_duration > qtmux->longest_chunk
+ || !GST_CLOCK_TIME_IS_VALID (qtmux->longest_chunk)) {
+ qtmux->longest_chunk = qtmux->current_chunk_duration;
+ }
+
+ /* Record the expected entry so real buffers can be validated against it
+ * later, then add it to the trak's sample tables */
+ sample_entry.track_id = qpad->trak->tkhd.track_ID;
+ sample_entry.nsamples = nsamples;
+ sample_entry.delta = scaled_duration / nsamples;
+ sample_entry.size = sample_size / nsamples;
+ sample_entry.chunk_offset = chunk_offset;
+ sample_entry.pts_offset = pts_offset;
+ sample_entry.sync = sync;
+ sample_entry.do_pts = TRUE;
+ g_array_append_val (qpad->samples, sample_entry);
+ atom_trak_add_samples (qpad->trak, nsamples, scaled_duration / nsamples,
+ sample_size / nsamples, chunk_offset, sync, pts_offset);
+
+ qpad->total_duration = next_timestamp;
+ qtmux->mdat_size += sample_size;
+ qpad->sample_offset += nsamples;
+ }
+
+ return TRUE;
+}
+
+static GstFlowReturn
+gst_qt_mux_start_file (GstQTMux * qtmux)
+{
+ GstQTMuxClass *qtmux_klass = (GstQTMuxClass *) (G_OBJECT_GET_CLASS (qtmux));
+ GstFlowReturn ret = GST_FLOW_OK;
+ GstCaps *caps;
+ GstSegment segment;
+ gchar s_id[32];
+ GstClockTime reserved_max_duration;
+ guint reserved_bytes_per_sec_per_trak;
+ GSList *walk;
+
+ GST_DEBUG_OBJECT (qtmux, "starting file");
+
+ GST_OBJECT_LOCK (qtmux);
+ reserved_max_duration = qtmux->reserved_max_duration;
+ reserved_bytes_per_sec_per_trak = qtmux->reserved_bytes_per_sec_per_trak;
+ GST_OBJECT_UNLOCK (qtmux);
+
+ /* stream-start (FIXME: create id based on input ids) */
+ g_snprintf (s_id, sizeof (s_id), "qtmux-%08x", g_random_int ());
+ gst_pad_push_event (qtmux->srcpad, gst_event_new_stream_start (s_id));
+
+ caps = gst_caps_copy (gst_pad_get_pad_template_caps (qtmux->srcpad));
+ /* qtmux has structure with and without variant, remove all but the first */
+ while (gst_caps_get_size (caps) > 1)
+ gst_caps_remove_structure (caps, 1);
+ gst_pad_set_caps (qtmux->srcpad, caps);
+ gst_caps_unref (caps);
+
+ /* Default is 'normal' mode */
+ qtmux->mux_mode = GST_QT_MUX_MODE_MOOV_AT_END;
+
+ /* Require a sensible fragment duration when muxing
+ * using the ISML muxer */
+ if (qtmux_klass->format == GST_QT_MUX_FORMAT_ISML &&
+ qtmux->fragment_duration == 0)
+ goto invalid_isml;
+
+ if (qtmux->fragment_duration > 0) {
+ if (qtmux->streamable)
+ qtmux->mux_mode = GST_QT_MUX_MODE_FRAGMENTED_STREAMABLE;
+ else
+ qtmux->mux_mode = GST_QT_MUX_MODE_FRAGMENTED;
+ } else if (qtmux->fast_start) {
+ qtmux->mux_mode = GST_QT_MUX_MODE_FAST_START;
+ } else if (reserved_max_duration != GST_CLOCK_TIME_NONE) {
+ if (qtmux->reserved_prefill)
+ qtmux->mux_mode = GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL;
+ else
+ qtmux->mux_mode = GST_QT_MUX_MODE_ROBUST_RECORDING;
+ }
+
+ switch (qtmux->mux_mode) {
+ case GST_QT_MUX_MODE_MOOV_AT_END:
+ case GST_QT_MUX_MODE_ROBUST_RECORDING:
+ /* We have to be able to seek to rewrite the mdat header, or any
+ * moov atom we write will not be visible in the file, because an
+ * MDAT with 0 as the size covers the rest of the file. A file
+ * with no moov is not playable, so error out now. */
+ if (!gst_qt_mux_downstream_is_seekable (qtmux)) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX,
+ ("Downstream is not seekable - will not be able to create a playable file"),
+ (NULL));
+ return GST_FLOW_ERROR;
+ }
+ if (qtmux->reserved_moov_update_period == GST_CLOCK_TIME_NONE) {
+ GST_WARNING_OBJECT (qtmux,
+ "Robust muxing requires reserved-moov-update-period to be set");
+ }
+ break;
+ case GST_QT_MUX_MODE_FAST_START:
+ case GST_QT_MUX_MODE_FRAGMENTED_STREAMABLE:
+ break; /* Don't need seekability, ignore */
+ case GST_QT_MUX_MODE_FRAGMENTED:
+ if (!gst_qt_mux_downstream_is_seekable (qtmux)) {
+ GST_WARNING_OBJECT (qtmux, "downstream is not seekable, but "
+ "streamable=false. Will ignore that and create streamable output "
+ "instead");
+ qtmux->streamable = TRUE;
+ g_object_notify (G_OBJECT (qtmux), "streamable");
+ }
+ break;
+ case GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL:
+ if (!gst_qt_mux_downstream_is_seekable (qtmux)) {
+ GST_WARNING_OBJECT (qtmux,
+ "downstream is not seekable, will not be able "
+ "to trim samples table at the end if less than reserved-duration is "
+ "recorded");
+ }
+ break;
+ }
+
+ /* let downstream know we think in BYTES and expect to do seeking later on */
+ gst_segment_init (&segment, GST_FORMAT_BYTES);
+ gst_pad_push_event (qtmux->srcpad, gst_event_new_segment (&segment));
+
+ GST_OBJECT_LOCK (qtmux);
+
+ if (qtmux->timescale == 0) {
+ guint32 suggested_timescale = 0;
+ GSList *walk;
+
+ /* Calculate a reasonable timescale for the moov:
+ * If there is video, it is the biggest video track timescale or an even
+ * multiple of it if it's smaller than 1800.
+ * Otherwise it is 1800 */
+ for (walk = qtmux->sinkpads; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qpad = (GstQTPad *) cdata;
+
+ if (!qpad->trak)
+ continue;
+
+ /* not video */
+ if (!qpad->trak->mdia.minf.vmhd)
+ continue;
+
+ suggested_timescale =
+ MAX (qpad->trak->mdia.mdhd.time_info.timescale, suggested_timescale);
+ }
+
+ if (suggested_timescale == 0)
+ suggested_timescale = 1800;
+
+ while (suggested_timescale < 1800)
+ suggested_timescale *= 2;
+
+ qtmux->timescale = suggested_timescale;
+ }
+
+ /* Set width/height/timescale of any closed caption tracks to that of the
+ * first video track */
+ {
+ guint video_width = 0, video_height = 0;
+ guint32 video_timescale = 0;
+ GSList *walk;
+
+ for (walk = qtmux->sinkpads; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qpad = (GstQTPad *) cdata;
+
+ if (!qpad->trak)
+ continue;
+
+ /* Not closed caption */
+ if (qpad->trak->mdia.hdlr.handler_type != FOURCC_clcp)
+ continue;
+
+ if (video_width == 0 || video_height == 0 || video_timescale == 0) {
+ GSList *walk2;
+
+ for (walk2 = qtmux->sinkpads; walk2; walk2 = g_slist_next (walk2)) {
+ GstCollectData *cdata2 = (GstCollectData *) walk2->data;
+ GstQTPad *qpad2 = (GstQTPad *) cdata2;
+
+ if (!qpad2->trak)
+ continue;
+
+ /* not video */
+ if (!qpad2->trak->mdia.minf.vmhd)
+ continue;
+
+ video_width = qpad2->trak->tkhd.width;
+ video_height = qpad2->trak->tkhd.height;
+ video_timescale = qpad2->trak->mdia.mdhd.time_info.timescale;
+ }
+ }
+
+ qpad->trak->tkhd.width = video_width << 16;
+ qpad->trak->tkhd.height = video_height << 16;
+ qpad->trak->mdia.mdhd.time_info.timescale = video_timescale;
+ }
+ }
+
+ /* initialize our moov recovery file */
+ if (qtmux->moov_recov_file_path) {
+ gst_qt_mux_prepare_moov_recovery (qtmux);
+ }
+
+ /* Make sure the first time we update the moov, we'll
+ * include any tagsetter tags */
+ qtmux->tags_changed = TRUE;
+
+ GST_OBJECT_UNLOCK (qtmux);
+
+ /*
+ * send mdat header if already needed, and mark position for later update.
+ * We don't send ftyp now if we are on fast start mode, because we can
+ * better fine tune using the information we gather to create the whole moov
+ * atom.
+ */
+ switch (qtmux->mux_mode) {
+ case GST_QT_MUX_MODE_MOOV_AT_END:
+ ret = gst_qt_mux_prepare_and_send_ftyp (qtmux);
+ if (ret != GST_FLOW_OK)
+ break;
+
+ /* Store this as the mdat offset for later updating
+ * when we write the moov */
qtmux->mdat_pos = qtmux->header_size;
/* extended atom in case we go over 4GB while writing and need
* the full 64-bit atom */
FALSE);
break;
case GST_QT_MUX_MODE_ROBUST_RECORDING:
-
ret = gst_qt_mux_prepare_and_send_ftyp (qtmux);
if (ret != GST_FLOW_OK)
break;
gst_qt_mux_send_mdat_header (qtmux, &qtmux->header_size, 0, TRUE,
FALSE);
break;
+ case GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL:
+ ret = gst_qt_mux_prepare_and_send_ftyp (qtmux);
+ if (ret != GST_FLOW_OK)
+ break;
+
+ /* Store this as the moov offset for later updating.
+ * We record mdat position below */
+ qtmux->moov_pos = qtmux->header_size;
+
+ if (!gst_qt_mux_prefill_samples (qtmux)) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX,
+ ("Unsupported codecs or configuration for prefill mode"), (NULL));
+
+ return GST_FLOW_ERROR;
+ }
+
+ gst_qt_mux_update_global_statistics (qtmux);
+ gst_qt_mux_configure_moov (qtmux);
+ gst_qt_mux_update_edit_lists (qtmux);
+ gst_qt_mux_setup_metadata (qtmux);
+
+ /* Moov header with pre-filled samples */
+ ret = gst_qt_mux_send_moov (qtmux, &qtmux->header_size, 0, FALSE, FALSE);
+ if (ret != GST_FLOW_OK)
+ return ret;
+
+ /* last_moov_size now contains the full size of the moov, moov_pos the
+ * position. This allows us to rewrite it in the very end as needed */
+ qtmux->reserved_moov_size =
+ qtmux->last_moov_size + 12 * g_slist_length (qtmux->sinkpads) + 8;
+
+ /* Send an additional free atom at the end so we definitely have space
+ * to rewrite the moov header at the end and remove the samples that
+ * were not actually written */
+ ret =
+ gst_qt_mux_send_free_atom (qtmux, &qtmux->header_size,
+ 12 * g_slist_length (qtmux->sinkpads) + 8, FALSE);
+ if (ret != GST_FLOW_OK)
+ return ret;
+
+ /* extra atoms go after the free/moov(s), before the mdat */
+ ret =
+ gst_qt_mux_send_extra_atoms (qtmux, TRUE, &qtmux->header_size, FALSE);
+ if (ret != GST_FLOW_OK)
+ return ret;
+
+ qtmux->mdat_pos = qtmux->header_size;
+
+ /* And now send the mdat header */
+ ret =
+ gst_qt_mux_send_mdat_header (qtmux, &qtmux->header_size,
+ qtmux->mdat_size, TRUE, FALSE);
+
+ /* chunks position is set relative to the first byte of the
+ * MDAT atom payload. Set the overall offset into the file */
+ atom_moov_chunks_set_offset (qtmux->moov, qtmux->header_size);
+
+ {
+ GstSegment segment;
+
+ gst_segment_init (&segment, GST_FORMAT_BYTES);
+ segment.start = qtmux->moov_pos;
+ gst_pad_push_event (qtmux->srcpad, gst_event_new_segment (&segment));
+
+ ret = gst_qt_mux_send_moov (qtmux, NULL, 0, FALSE, FALSE);
+ if (ret != GST_FLOW_OK)
+ return ret;
+
+ segment.start = qtmux->header_size;
+ gst_pad_push_event (qtmux->srcpad, gst_event_new_segment (&segment));
+ }
+
+ qtmux->current_chunk_size = 0;
+ qtmux->current_chunk_duration = 0;
+ qtmux->current_chunk_offset = -1;
+ qtmux->mdat_size = 0;
+ qtmux->current_pad = NULL;
+ qtmux->longest_chunk = GST_CLOCK_TIME_NONE;
+
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qtpad = (GstQTPad *) cdata;
+
+ qtpad->total_bytes = 0;
+ qtpad->total_duration = 0;
+ qtpad->first_dts = qtpad->first_ts = GST_CLOCK_TIME_NONE;
+ qtpad->last_dts = GST_CLOCK_TIME_NONE;
+ qtpad->sample_offset = 0;
+ }
+
+ break;
case GST_QT_MUX_MODE_FAST_START:
GST_OBJECT_LOCK (qtmux);
qtmux->fast_start_file = g_fopen (qtmux->fast_start_file_path, "wb+");
/* having flushed above, can check for buffers now */
if (GST_CLOCK_TIME_IS_VALID (qtpad->first_ts)) {
+ GstClockTime first_pts_in = qtpad->first_ts;
+ /* it should be, since we got first_ts by adding adjustment
+ * to a positive incoming PTS */
+ if (qtpad->dts_adjustment <= first_pts_in)
+ first_pts_in -= qtpad->dts_adjustment;
/* determine max stream duration */
if (!GST_CLOCK_TIME_IS_VALID (qtmux->last_dts)
|| qtpad->last_dts > qtmux->last_dts) {
qtmux->last_dts = qtpad->last_dts;
}
if (!GST_CLOCK_TIME_IS_VALID (qtmux->first_ts)
- || qtpad->first_ts < qtmux->first_ts) {
- qtmux->first_ts = qtpad->first_ts;
+ || first_pts_in < qtmux->first_ts) {
+ /* we need the original incoming PTS here, as this first_ts
+ * is used in update_edit_lists to construct the edit list that arrange
+ * for sync'ed streams. The first_ts is most likely obtained from
+ * some (audio) stream with 0 dts_adjustment and initial 0 PTS,
+ * so it makes no difference, though it matters in other cases */
+ qtmux->first_ts = first_pts_in;
}
}
GstCollectData *cdata = (GstCollectData *) walk->data;
GstQTPad *qtpad = (GstQTPad *) cdata;
+ atom_trak_edts_clear (qtpad->trak);
+
if (GST_CLOCK_TIME_IS_VALID (qtpad->first_ts)) {
guint32 lateness = 0;
guint32 duration = qtpad->trak->tkhd.duration;
gboolean has_gap;
- gboolean has_shift;
has_gap = (qtpad->first_ts > (qtmux->first_ts + qtpad->dts_adjustment));
- has_shift = (qtpad->dts_adjustment > 0);
if (has_gap) {
- GstClockTime diff;
+ GstClockTime diff, trak_lateness;
diff = qtpad->first_ts - (qtmux->first_ts + qtpad->dts_adjustment);
lateness = gst_util_uint64_scale_round (diff,
qtmux->timescale, GST_SECOND);
- GST_DEBUG_OBJECT (qtmux, "Pad %s is a late stream by %" GST_TIME_FORMAT,
- GST_PAD_NAME (qtpad->collect.pad), GST_TIME_ARGS (lateness));
+ /* Allow up to 1 trak timescale unit of lateness. Such a small
+ * timestamp/duration can't be represented by the trak-specific parts
+ * of the headers anyway, so it's irrelevantly small */
+ trak_lateness = gst_util_uint64_scale (diff,
+ atom_trak_get_timescale (qtpad->trak), GST_SECOND);
+
+ if (trak_lateness > 0 && diff > qtmux->start_gap_threshold) {
+ GST_DEBUG_OBJECT (qtmux,
+ "Pad %s is a late stream by %" GST_TIME_FORMAT,
+ GST_PAD_NAME (qtpad->collect.pad), GST_TIME_ARGS (diff));
- atom_trak_set_elst_entry (qtpad->trak, 0, lateness, (guint32) - 1,
- (guint32) (1 * 65536.0));
+ atom_trak_set_elst_entry (qtpad->trak, 0, lateness, (guint32) - 1,
+ (guint32) (1 * 65536.0));
+ }
}
- if (has_gap || has_shift) {
- GstClockTime ctts;
+ /* Always write an edit list for the whole track. In general this is not
+ * necessary except for the case of having a gap or DTS adjustment, but
+ * it allows giving the whole track's duration in the usually more
+ * accurate media timescale
+ */
+ {
+ GstClockTime ctts = 0;
guint32 media_start;
- ctts = qtpad->first_ts - qtpad->first_dts;
+ if (qtpad->first_ts > qtpad->first_dts)
+ ctts = qtpad->first_ts - qtpad->first_dts;
+
media_start = gst_util_uint64_scale_round (ctts,
atom_trak_get_timescale (qtpad->trak), GST_SECOND);
+ /* atom_trak_set_elst_entry() has a quirk - if the edit list
+ * is empty because there's no gap added above, this call
+ * will not replace index 1, it will create the entry at index 0.
+ * Luckily, that's exactly what we want here */
atom_trak_set_elst_entry (qtpad->trak, 1, duration, media_start,
(guint32) (1 * 65536.0));
}
/* need to add the empty time to the trak duration */
duration += lateness;
-
qtpad->trak->tkhd.duration = duration;
+ if (qtpad->tc_trak) {
+ qtpad->tc_trak->tkhd.duration = duration;
+ qtpad->tc_trak->mdia.mdhd.time_info.duration = duration;
+ }
/* And possibly grow the moov duration */
if (duration > qtmux->moov->mvhd.time_info.duration) {
}
+/* Seeks back to the placeholder 'tmcd' sample at qtpad->tc_pos and rewrites
+ * its 4-byte payload with the frame count since the daily jam of the first
+ * timecode seen on this pad. Only meaningful for the QuickTime variant. */
static GstFlowReturn
+gst_qt_mux_update_timecode (GstQTMux * qtmux, GstQTPad * qtpad)
+{
+ GstSegment segment;
+ GstBuffer *buf;
+ GstMapInfo map;
+ guint64 offset = qtpad->tc_pos;
+ GstQTMuxClass *qtmux_klass = (GstQTMuxClass *) (G_OBJECT_GET_CLASS (qtmux));
+
+ /* Timecode traks are only written for the QuickTime format */
+ if (qtmux_klass->format != GST_QT_MUX_FORMAT_QT)
+ return GST_FLOW_OK;
+
+ g_assert (qtpad->tc_pos != -1);
+
+ /* Reposition downstream to the placeholder sample inside the mdat */
+ gst_segment_init (&segment, GST_FORMAT_BYTES);
+ segment.start = offset;
+ gst_pad_push_event (qtmux->srcpad, gst_event_new_segment (&segment));
+
+ buf = gst_buffer_new_and_alloc (4);
+ gst_buffer_map (buf, &map, GST_MAP_WRITE);
+
+ /* Payload is the big-endian frames-since-daily-jam counter */
+ GST_WRITE_UINT32_BE (map.data,
+ gst_video_time_code_frames_since_daily_jam (qtpad->first_tc));
+ gst_buffer_unmap (buf, &map);
+
+ /* Reset this value, so the timecode won't be re-rewritten */
+ qtpad->tc_pos = -1;
+
+ return gst_qt_mux_send_buffer (qtmux, buf, &offset, FALSE);
+}
+
+static GstFlowReturn
gst_qt_mux_stop_file (GstQTMux * qtmux)
{
gboolean ret = GST_FLOW_OK;
guint64 offset = 0, size = 0;
gboolean large_file;
+ GSList *walk;
GST_DEBUG_OBJECT (qtmux, "Updating remaining values and sending last data");
}
gst_qt_mux_update_global_statistics (qtmux);
+ for (walk = qtmux->collect->data; walk; walk = walk->next) {
+ GstQTPad *qtpad = (GstQTPad *) walk->data;
+
+ if (qtpad->tc_pos != -1) {
+ /* File is being stopped and timecode hasn't been updated. Update it now
+ * with whatever we have */
+ ret = gst_qt_mux_update_timecode (qtmux, qtpad);
+ if (ret != GST_FLOW_OK)
+ return ret;
+ }
+ }
switch (qtmux->mux_mode) {
case GST_QT_MUX_MODE_FRAGMENTED:{
* mvhd should be consistent with empty moov
* (but TODO maybe some clients do not handle that well ?) */
qtmux->moov->mvex.mehd.fragment_duration =
- gst_util_uint64_scale (qtmux->last_dts, qtmux->timescale, GST_SECOND);
- GST_DEBUG_OBJECT (qtmux, "rewriting moov with mvex duration %"
- GST_TIME_FORMAT, GST_TIME_ARGS (qtmux->last_dts));
+ gst_util_uint64_scale_round (qtmux->last_dts, qtmux->timescale,
+ GST_SECOND);
+ GST_DEBUG_OBJECT (qtmux,
+ "rewriting moov with mvex duration %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (qtmux->last_dts));
/* seek and rewrite the header */
gst_segment_init (&segment, GST_FORMAT_BYTES);
segment.start = qtmux->moov_pos;
return gst_qt_mux_update_mdat_size (qtmux, qtmux->mdat_pos,
qtmux->mdat_size, NULL, TRUE);
}
+ case GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL:{
+ GSList *walk;
+ guint32 next_track_id = qtmux->moov->mvhd.next_track_id;
+
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qpad = (GstQTPad *) cdata;
+ guint64 block_idx;
+ AtomSTBL *stbl = &qpad->trak->mdia.minf.stbl;
+
+ /* Get the block index of the last sample we wrote, not of the next
+ * sample we would write */
+ block_idx = prefill_get_block_index (qtmux, qpad);
+
+ /* stts */
+ if (block_idx > 0) {
+ STTSEntry *entry;
+ guint64 nsamples = 0;
+ gint i, n;
+
+ n = atom_array_get_len (&stbl->stts.entries);
+ for (i = 0; i < n; i++) {
+ entry = &atom_array_index (&stbl->stts.entries, i);
+ if (nsamples + entry->sample_count >= qpad->sample_offset) {
+ entry->sample_count = qpad->sample_offset - nsamples;
+ stbl->stts.entries.len = i + 1;
+ break;
+ }
+ nsamples += entry->sample_count;
+ }
+ g_assert (i < n);
+ } else {
+ stbl->stts.entries.len = 0;
+ }
+
+ /* stsz */
+ {
+ g_assert (stbl->stsz.entries.len == 0);
+ stbl->stsz.table_size = qpad->sample_offset;
+ }
+
+ /* stco/stsc */
+ {
+ gint i, n;
+ guint64 nsamples = 0;
+ gint chunk_index = 0;
+ const TrakBufferEntryInfo *sample_entry;
+
+ if (block_idx > 0) {
+ sample_entry =
+ &g_array_index (qpad->samples, TrakBufferEntryInfo,
+ block_idx - 1);
+
+ n = stbl->stco64.entries.len;
+ for (i = 0; i < n; i++) {
+ guint64 *entry = &atom_array_index (&stbl->stco64.entries, i);
+
+ if (*entry == sample_entry->chunk_offset) {
+ stbl->stco64.entries.len = i + 1;
+ chunk_index = i + 1;
+ break;
+ }
+ }
+ g_assert (i < n);
+ g_assert (chunk_index > 0);
+
+ n = stbl->stsc.entries.len;
+ for (i = 0; i < n; i++) {
+ STSCEntry *entry = &atom_array_index (&stbl->stsc.entries, i);
+
+ if (entry->first_chunk >= chunk_index)
+ break;
+
+ if (i > 0) {
+ nsamples +=
+ (entry->first_chunk - atom_array_index (&stbl->stsc.entries,
+ i -
+ 1).first_chunk) * atom_array_index (&stbl->stsc.entries,
+ i - 1).samples_per_chunk;
+ }
+ }
+ g_assert (i <= n);
+
+ if (i > 0) {
+ STSCEntry *prev_entry =
+ &atom_array_index (&stbl->stsc.entries, i - 1);
+ nsamples +=
+ (chunk_index -
+ prev_entry->first_chunk) * prev_entry->samples_per_chunk;
+ if (qpad->sample_offset - nsamples > 0) {
+ stbl->stsc.entries.len = i;
+ atom_stsc_add_new_entry (&stbl->stsc, chunk_index,
+ qpad->sample_offset - nsamples);
+ } else {
+ stbl->stsc.entries.len = i;
+ stbl->stco64.entries.len--;
+ }
+ } else {
+ /* Everything in a single chunk */
+ stbl->stsc.entries.len = 0;
+ atom_stsc_add_new_entry (&stbl->stsc, chunk_index,
+ qpad->sample_offset);
+ }
+ } else {
+ stbl->stco64.entries.len = 0;
+ stbl->stsc.entries.len = 0;
+ }
+ }
+
+ {
+ GList *walk2;
+
+ for (walk2 = qtmux->moov->mvex.trexs; walk2; walk2 = walk2->next) {
+ AtomTREX *trex = walk2->data;
+
+ if (trex->track_ID == qpad->trak->tkhd.track_ID) {
+ trex->track_ID = next_track_id;
+ break;
+ }
+ }
+
+ qpad->trak->tkhd.track_ID = next_track_id++;
+ }
+ }
+ qtmux->moov->mvhd.next_track_id = next_track_id;
+
+ gst_qt_mux_update_global_statistics (qtmux);
+ gst_qt_mux_configure_moov (qtmux);
+
+ gst_qt_mux_update_edit_lists (qtmux);
+
+ /* Check if any gap edit lists were added. We don't have any space
+ * reserved for this in the moov and the pre-finalized moov would have
+ * broken A/V synchronization. Error out here now
+ */
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qpad = (GstQTPad *) cdata;
+
+ if (qpad->trak->edts
+ && g_slist_length (qpad->trak->edts->elst.entries) > 1) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL),
+ ("Can't support gaps in prefill mode"));
+
+ return GST_FLOW_ERROR;
+ }
+ }
+
+ gst_qt_mux_setup_metadata (qtmux);
+ atom_moov_chunks_set_offset (qtmux->moov, qtmux->header_size);
+
+ {
+ GstSegment segment;
+
+ gst_segment_init (&segment, GST_FORMAT_BYTES);
+ segment.start = qtmux->moov_pos;
+ gst_pad_push_event (qtmux->srcpad, gst_event_new_segment (&segment));
+
+ ret =
+ gst_qt_mux_send_moov (qtmux, NULL, qtmux->reserved_moov_size, FALSE,
+ FALSE);
+ if (ret != GST_FLOW_OK)
+ return ret;
+
+ if (qtmux->reserved_moov_size > qtmux->last_moov_size) {
+ ret =
+ gst_qt_mux_send_free_atom (qtmux, NULL,
+ qtmux->reserved_moov_size - qtmux->last_moov_size, TRUE);
+ }
+
+ if (ret != GST_FLOW_OK)
+ return ret;
+ }
+
+ ret = gst_qt_mux_update_mdat_size (qtmux, qtmux->mdat_pos,
+ qtmux->mdat_size, NULL, FALSE);
+ return ret;
+ }
default:
break;
}
pad->tfra = atom_tfra_new (qtmux->context, atom_trak_get_id (pad->trak));
atom_mfra_add_tfra (qtmux->mfra, pad->tfra);
}
+ atom_traf_set_base_decode_time (pad->traf, dts);
}
/* add buffer and metadata */
guint64 mdat_offset = qtmux->mdat_pos + 16 + qtmux->mdat_size;
GST_OBJECT_LOCK (qtmux);
+
+ /* Update the offset of how much we've muxed, so the
+ * report of remaining space keeps counting down */
+ if (position > qtmux->last_moov_update &&
+ position - qtmux->last_moov_update > qtmux->muxed_since_last_update) {
+ GST_LOG_OBJECT (qtmux,
+ "Muxed time %" G_GUINT64_FORMAT " since last moov update",
+ qtmux->muxed_since_last_update);
+ qtmux->muxed_since_last_update = position - qtmux->last_moov_update;
+ }
+
+ /* Next, check if we're supposed to send periodic moov updates downstream */
if (qtmux->reserved_moov_update_period == GST_CLOCK_TIME_NONE) {
GST_OBJECT_UNLOCK (qtmux);
return GST_FLOW_OK;
(position <= qtmux->last_moov_update ||
(position - qtmux->last_moov_update) <
qtmux->reserved_moov_update_period)) {
- /* Update the offset of how much we've muxed, so the
- * report of remaining space keeps counting down */
- if (position > qtmux->last_moov_update &&
- position - qtmux->last_moov_update > qtmux->muxed_since_last_update) {
- GST_LOG_OBJECT (qtmux,
- "Muxed time %" G_GUINT64_FORMAT " since last moov update",
- qtmux->muxed_since_last_update);
- qtmux->muxed_since_last_update = position - qtmux->last_moov_update;
- }
GST_OBJECT_UNLOCK (qtmux);
return GST_FLOW_OK; /* No update needed yet */
}
}
switch (qtmux->mux_mode) {
+ case GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL:{
+ const TrakBufferEntryInfo *sample_entry;
+ guint64 block_idx = prefill_get_block_index (qtmux, pad);
+
+ if (block_idx >= pad->samples->len) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL),
+ ("Unexpected sample %" G_GUINT64_FORMAT ", expected up to %u",
+ block_idx, pad->samples->len));
+ gst_buffer_unref (buffer);
+ return GST_FLOW_ERROR;
+ }
+
+ /* Check if all values are as expected */
+ sample_entry =
+ &g_array_index (pad->samples, TrakBufferEntryInfo, block_idx);
+
+ /* Allow +/- 1 difference for the scaled_duration to allow
+ * for some rounding errors
+ */
+ if (sample_entry->nsamples != nsamples
+ || ABSDIFF (sample_entry->delta, scaled_duration) > 1
+ || sample_entry->size != sample_size
+ || sample_entry->chunk_offset != chunk_offset
+ || sample_entry->pts_offset != pts_offset
+ || sample_entry->sync != sync) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL),
+ ("Unexpected values in sample %" G_GUINT64_FORMAT,
+ pad->sample_offset + 1));
+ GST_ERROR_OBJECT (qtmux, "Expected: samples %u, delta %u, size %u, "
+ "chunk offset %" G_GUINT64_FORMAT ", "
+ "pts offset %" G_GUINT64_FORMAT ", sync %d",
+ sample_entry->nsamples,
+ sample_entry->delta,
+ sample_entry->size,
+ sample_entry->chunk_offset,
+ sample_entry->pts_offset, sample_entry->sync);
+ GST_ERROR_OBJECT (qtmux, "Got: samples %u, delta %u, size %u, "
+ "chunk offset %" G_GUINT64_FORMAT ", "
+ "pts offset %" G_GUINT64_FORMAT ", sync %d",
+ nsamples,
+ (guint) scaled_duration,
+ sample_size, chunk_offset, pts_offset, sync);
+
+ gst_buffer_unref (buffer);
+ return GST_FLOW_ERROR;
+ }
+
+ ret = gst_qt_mux_send_buffer (qtmux, buffer, &qtmux->mdat_size, TRUE);
+ break;
+ }
case GST_QT_MUX_MODE_MOOV_AT_END:
case GST_QT_MUX_MODE_FAST_START:
case GST_QT_MUX_MODE_ROBUST_RECORDING:
return ret;
}
+/* Accounts a just-written buffer of @buffer_size bytes and @duration against
+ * both the pad's running totals and the current chunk's bookkeeping. */
+static void
+gst_qt_mux_register_buffer_in_chunk (GstQTMux * qtmux, GstQTPad * pad,
+ guint buffer_size, GstClockTime duration)
+{
+ /* not that much happens here,
+ * but updating any of this very likely needs to happen all in sync,
+ * unless there is a very good reason not to */
+
+ /* for computing the avg bitrate */
+ pad->total_bytes += buffer_size;
+ pad->total_duration += duration;
+ /* for keeping track of where we are in chunk;
+ * ensures that data really is located as recorded in atoms */
+ qtmux->current_chunk_size += buffer_size;
+ qtmux->current_chunk_duration += duration;
+}
+
+/* Creates and maintains the QuickTime timecode ('tmcd') trak for a video pad
+ * whose buffers carry GstVideoTimeCodeMeta. Returns @ret unchanged unless a
+ * timecode sample had to be written downstream. */
+static GstFlowReturn
+gst_qt_mux_check_and_update_timecode (GstQTMux * qtmux, GstQTPad * pad,
+ GstBuffer * buf, GstFlowReturn ret)
+{
+ GstVideoTimeCodeMeta *tc_meta;
+ GstVideoTimeCode *tc;
+ GstBuffer *tc_buf;
+ gsize szret;
+ guint32 frames_since_daily_jam;
+ GstQTMuxClass *qtmux_klass = (GstQTMuxClass *) (G_OBJECT_GET_CLASS (qtmux));
+
+ /* Timecode traks only apply to video pads in the QuickTime variant */
+ if (!pad->trak->is_video)
+ return ret;
+
+ if (qtmux_klass->format != GST_QT_MUX_FORMAT_QT)
+ return ret;
+
+ /* Nothing to do without a buffer, or once the timecode was finalized
+ * (tc_trak exists and the rewrite position was already cleared) */
+ if (buf == NULL || (pad->tc_trak != NULL && pad->tc_pos == -1))
+ return ret;
+
+ tc_meta = gst_buffer_get_video_time_code_meta (buf);
+ if (!tc_meta)
+ return ret;
+
+ tc = &tc_meta->tc;
+
+ /* This means we never got a timecode before */
+ if (pad->first_tc == NULL) {
+#ifndef GST_DISABLE_GST_DEBUG
+ gchar *tc_str = gst_video_time_code_to_string (tc);
+ GST_DEBUG_OBJECT (qtmux, "Found first timecode %s", tc_str);
+ g_free (tc_str);
+#endif
+ g_assert (pad->tc_trak == NULL);
+ pad->first_tc = gst_video_time_code_copy (tc);
+ /* If frames are out of order, the frame we're currently getting might
+ * not be the first one. Just write a 0 timecode for now and wait
+ * until we receive a timecode that's lower than the current one */
+ if (pad->is_out_of_order) {
+ pad->first_pts = GST_BUFFER_PTS (buf);
+ frames_since_daily_jam = 0;
+ /* Position to rewrite */
+ pad->tc_pos = qtmux->mdat_size;
+ } else {
+ frames_since_daily_jam =
+ gst_video_time_code_frames_since_daily_jam (pad->first_tc);
+ frames_since_daily_jam = GUINT32_TO_BE (frames_since_daily_jam);
+ }
+ /* Write the timecode trak now */
+ pad->tc_trak = atom_trak_new (qtmux->context);
+ atom_moov_add_trak (qtmux->moov, pad->tc_trak);
+
+ pad->trak->tref = atom_tref_new (FOURCC_tmcd);
+ atom_tref_add_entry (pad->trak->tref, pad->tc_trak->tkhd.track_ID);
+
+ atom_trak_set_timecode_type (pad->tc_trak, qtmux->context,
+ pad->trak->mdia.mdhd.time_info.timescale, pad->first_tc);
+
+ /* The single 'tmcd' sample is a 4-byte big-endian frame counter */
+ tc_buf = gst_buffer_new_allocate (NULL, 4, NULL);
+ szret = gst_buffer_fill (tc_buf, 0, &frames_since_daily_jam, 4);
+ g_assert (szret == 4);
+
+ atom_trak_add_samples (pad->tc_trak, 1, 1, 4, qtmux->mdat_size, FALSE, 0);
+ ret = gst_qt_mux_send_buffer (qtmux, tc_buf, &qtmux->mdat_size, TRUE);
+
+ /* Need to reset the current chunk (of the previous pad) here because
+ * some other data was written now above, and the pad has to start a
+ * new chunk now */
+ qtmux->current_chunk_offset = -1;
+ qtmux->current_chunk_size = 0;
+ qtmux->current_chunk_duration = 0;
+ } else if (qtmux->mux_mode == GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL) {
+ /* In prefill mode the trak was already created during prefilling; only
+ * the actual sample payload still has to be written here */
+ frames_since_daily_jam =
+ gst_video_time_code_frames_since_daily_jam (pad->first_tc);
+ frames_since_daily_jam = GUINT32_TO_BE (frames_since_daily_jam);
+
+ tc_buf = gst_buffer_new_allocate (NULL, 4, NULL);
+ szret = gst_buffer_fill (tc_buf, 0, &frames_since_daily_jam, 4);
+ g_assert (szret == 4);
+
+ ret = gst_qt_mux_send_buffer (qtmux, tc_buf, &qtmux->mdat_size, TRUE);
+ pad->tc_pos = -1;
+
+ qtmux->current_chunk_offset = -1;
+ qtmux->current_chunk_size = 0;
+ qtmux->current_chunk_duration = 0;
+ } else if (pad->is_out_of_order) {
+ /* Check for a lower timecode than the one stored */
+ g_assert (pad->tc_trak != NULL);
+ if (GST_BUFFER_DTS (buf) <= pad->first_pts) {
+ if (gst_video_time_code_compare (tc, pad->first_tc) == -1) {
+ gst_video_time_code_free (pad->first_tc);
+ pad->first_tc = gst_video_time_code_copy (tc);
+ }
+ } else {
+ guint64 bk_size = qtmux->mdat_size;
+ GstSegment segment;
+ /* If this frame's DTS is after the first PTS received, it means
+ * we've already received the first frame to be presented. Otherwise
+ * the decoder would need to go back in time */
+ gst_qt_mux_update_timecode (qtmux, pad);
+
+ /* Reset writing position */
+ gst_segment_init (&segment, GST_FORMAT_BYTES);
+ segment.start = bk_size;
+ gst_pad_push_event (qtmux->srcpad, gst_event_new_segment (&segment));
+ }
+ }
+
+ return ret;
+}
+
/*
* Here we push the buffer and update the tables in the track atoms
*/
gint64 pts_offset = 0;
gboolean sync = FALSE;
GstFlowReturn ret = GST_FLOW_OK;
+ guint buffer_size;
if (!pad->fourcc)
goto not_negotiated;
/* if this pad has a prepare function, call it */
if (pad->prepare_buf_func != NULL) {
- buf = pad->prepare_buf_func (pad, buf, qtmux);
- }
+ GstBuffer *new_buf;
- last_buf = pad->last_buf;
-
- /* DTS delta is used to calculate sample duration.
- * If buffer has missing DTS, we take either segment start or
- * previous buffer end time, whichever is later.
- * This must only be done for non sparse streams, sparse streams
- * can have gaps between buffers (which is handled later by adding
- * extra empty buffer with duration that fills the gap). */
- if (!pad->sparse && buf && !GST_BUFFER_DTS_IS_VALID (buf)) {
- GstClockTime last_buf_duration = last_buf
- && GST_BUFFER_DURATION_IS_VALID (last_buf) ?
- GST_BUFFER_DURATION (last_buf) : 0;
-
- buf = gst_buffer_make_writable (buf);
- GST_BUFFER_DTS (buf) = 0; /* running-time 0 */
-
- if (last_buf
- && (GST_BUFFER_DTS (last_buf) + last_buf_duration) >
- GST_BUFFER_DTS (buf)) {
- GST_BUFFER_DTS (buf) = GST_BUFFER_DTS (last_buf) + last_buf_duration;
- }
+ new_buf = pad->prepare_buf_func (pad, buf, qtmux);
+ if (buf && !new_buf)
+ return GST_FLOW_OK;
+ buf = new_buf;
}
- if (last_buf && !buf && !GST_BUFFER_DURATION_IS_VALID (last_buf)) {
- /* this is last buffer; there is no next buffer so we need valid number as duration */
- last_buf = gst_buffer_make_writable (last_buf);
- GST_BUFFER_DURATION (last_buf) = 0;
+ ret = gst_qt_mux_check_and_update_timecode (qtmux, pad, buf, ret);
+ if (ret != GST_FLOW_OK) {
+ if (buf)
+ gst_buffer_unref (buf);
+ return ret;
}
+ last_buf = pad->last_buf;
+ pad->last_buf = buf;
+
if (last_buf == NULL) {
#ifndef GST_DISABLE_GST_DEBUG
if (buf == NULL) {
GST_PAD_NAME (pad->collect.pad));
}
#endif
- pad->last_buf = buf;
goto exit;
- } else
- gst_buffer_ref (last_buf);
+ }
+
+ if (!GST_BUFFER_PTS_IS_VALID (last_buf))
+ goto no_pts;
/* if this is the first buffer, store the timestamp */
- if (G_UNLIKELY (pad->first_ts == GST_CLOCK_TIME_NONE) && last_buf) {
+ if (G_UNLIKELY (pad->first_ts == GST_CLOCK_TIME_NONE)) {
if (GST_BUFFER_PTS_IS_VALID (last_buf)) {
pad->first_ts = GST_BUFFER_PTS (last_buf);
} else if (GST_BUFFER_DTS_IS_VALID (last_buf)) {
GST_TIME_ARGS (pad->first_ts));
}
- if (last_buf && buf && GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DTS (buf)) &&
+ if (buf && GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DTS (buf)) &&
GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DTS (last_buf)) &&
GST_BUFFER_DTS (buf) < GST_BUFFER_DTS (last_buf)) {
GST_ERROR ("decreasing DTS value %" GST_TIME_FORMAT " < %" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
GST_TIME_ARGS (GST_BUFFER_DTS (last_buf)));
+ pad->last_buf = buf = gst_buffer_make_writable (buf);
GST_BUFFER_DTS (buf) = GST_BUFFER_DTS (last_buf);
}
+ buffer_size = gst_buffer_get_size (last_buf);
+
+ if (qtmux->mux_mode == GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL) {
+ guint required_buffer_size = prefill_get_sample_size (qtmux, pad);
+ guint fill_size = required_buffer_size - buffer_size;
+ GstMemory *mem;
+ GstMapInfo map;
+
+ if (required_buffer_size < buffer_size) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL),
+ ("Sample size %u bigger than expected maximum %u", buffer_size,
+ required_buffer_size));
+ goto bail;
+ }
+
+ if (fill_size > 0) {
+ GST_DEBUG_OBJECT (qtmux,
+ "Padding buffer by %u bytes to reach required %u bytes", fill_size,
+ required_buffer_size);
+ mem = gst_allocator_alloc (NULL, fill_size, NULL);
+ gst_memory_map (mem, &map, GST_MAP_WRITE);
+ memset (map.data, 0, map.size);
+ gst_memory_unmap (mem, &map);
+ last_buf = gst_buffer_make_writable (last_buf);
+ gst_buffer_append_memory (last_buf, mem);
+ buffer_size = required_buffer_size;
+ }
+ }
+
/* duration actually means time delta between samples, so we calculate
* the duration based on the difference in DTS or PTS, falling back
* to DURATION if the other two don't exist, such as with the last
- * sample before EOS. */
- duration = GST_BUFFER_DURATION (last_buf);
+ * sample before EOS. Or use 0 if nothing else is available */
+ if (GST_BUFFER_DURATION_IS_VALID (last_buf))
+ duration = GST_BUFFER_DURATION (last_buf);
+ else
+ duration = 0;
if (!pad->sparse) {
- if (last_buf && buf && GST_BUFFER_DTS_IS_VALID (buf)
+ if (buf && GST_BUFFER_DTS_IS_VALID (buf)
&& GST_BUFFER_DTS_IS_VALID (last_buf))
duration = GST_BUFFER_DTS (buf) - GST_BUFFER_DTS (last_buf);
- else if (last_buf && buf && GST_BUFFER_PTS_IS_VALID (buf)
+ else if (buf && GST_BUFFER_PTS_IS_VALID (buf)
&& GST_BUFFER_PTS_IS_VALID (last_buf))
duration = GST_BUFFER_PTS (buf) - GST_BUFFER_PTS (last_buf);
}
- gst_buffer_replace (&pad->last_buf, buf);
-
- /* for computing the avg bitrate */
- if (G_LIKELY (last_buf)) {
- pad->total_bytes += gst_buffer_get_size (last_buf);
- pad->total_duration += duration;
+ if (qtmux->current_pad != pad || qtmux->current_chunk_offset == -1) {
+ GST_DEBUG_OBJECT (qtmux,
+ "Switching to next chunk for pad %s:%s: offset %" G_GUINT64_FORMAT
+ ", size %" G_GUINT64_FORMAT ", duration %" GST_TIME_FORMAT,
+ GST_DEBUG_PAD_NAME (pad->collect.pad), qtmux->current_chunk_offset,
+ qtmux->current_chunk_size,
+ GST_TIME_ARGS (qtmux->current_chunk_duration));
+ qtmux->current_pad = pad;
+ if (qtmux->current_chunk_offset == -1)
+ qtmux->current_chunk_offset = qtmux->mdat_size;
+ else
+ qtmux->current_chunk_offset += qtmux->current_chunk_size;
+ qtmux->current_chunk_size = 0;
+ qtmux->current_chunk_duration = 0;
}
last_dts = gst_util_uint64_scale_round (pad->last_dts,
/* fragments only deal with 1 buffer == 1 chunk (== 1 sample) */
if (pad->sample_size && !qtmux->fragment_sequence) {
+ GstClockTime expected_timestamp;
+
/* Constant size packets: usually raw audio (with many samples per
buffer (= chunk)), but can also be fixed-packet-size codecs like ADPCM
*/
sample_size = pad->sample_size;
- if (gst_buffer_get_size (last_buf) % sample_size != 0)
+ if (buffer_size % sample_size != 0)
goto fragmented_sample;
+
/* note: qt raw audio storage warps it implicitly into a timewise
- * perfect stream, discarding buffer times */
+ * perfect stream, discarding buffer times.
+ * If the difference between the current PTS and the expected one
+ * becomes too big, we error out: there was a gap and we have no way to
+ * represent that, causing A/V sync to be off */
+ expected_timestamp =
+ gst_util_uint64_scale (pad->sample_offset, GST_SECOND,
+ atom_trak_get_timescale (pad->trak)) + pad->first_ts;
+ if (ABSDIFF (GST_BUFFER_DTS_OR_PTS (last_buf),
+ expected_timestamp) > qtmux->max_raw_audio_drift)
+ goto raw_audio_timestamp_drift;
+
if (GST_BUFFER_DURATION (last_buf) != GST_CLOCK_TIME_NONE) {
nsamples = gst_util_uint64_scale_round (GST_BUFFER_DURATION (last_buf),
atom_trak_get_timescale (pad->trak), GST_SECOND);
+ duration = GST_BUFFER_DURATION (last_buf);
} else {
- nsamples = gst_buffer_get_size (last_buf) / sample_size;
+ nsamples = buffer_size / sample_size;
+ duration =
+ gst_util_uint64_scale_round (nsamples, GST_SECOND,
+ atom_trak_get_timescale (pad->trak));
}
- if (nsamples > 0)
- duration = GST_BUFFER_DURATION (last_buf) / nsamples;
- else
- duration = 0;
/* timescale = samplerate */
scaled_duration = 1;
- pad->last_dts += duration * nsamples;
+ pad->last_dts =
+ pad->first_dts + gst_util_uint64_scale_round (pad->sample_offset +
+ nsamples, GST_SECOND, atom_trak_get_timescale (pad->trak));
} else {
nsamples = 1;
- sample_size = gst_buffer_get_size (last_buf);
- if ((pad->last_buf && GST_BUFFER_DTS_IS_VALID (pad->last_buf))
- || GST_BUFFER_DTS_IS_VALID (last_buf)) {
+ sample_size = buffer_size;
+ if (!pad->sparse && ((buf && GST_BUFFER_DTS_IS_VALID (buf))
+ || GST_BUFFER_DTS_IS_VALID (last_buf))) {
gint64 scaled_dts;
- if (pad->last_buf && GST_BUFFER_DTS_IS_VALID (pad->last_buf)) {
- pad->last_dts = GST_BUFFER_DTS (pad->last_buf);
+ if (buf && GST_BUFFER_DTS_IS_VALID (buf)) {
+ pad->last_dts = GST_BUFFER_DTS (buf);
} else {
- pad->last_dts = GST_BUFFER_DTS (last_buf) +
- GST_BUFFER_DURATION (last_buf);
+ pad->last_dts = GST_BUFFER_DTS (last_buf) + duration;
}
if ((gint64) (pad->last_dts) < 0) {
scaled_dts = -gst_util_uint64_scale_round (-pad->last_dts,
pad->last_dts += duration;
}
}
- chunk_offset = qtmux->mdat_size;
+
+ gst_qt_mux_register_buffer_in_chunk (qtmux, pad, buffer_size, duration);
+
+ chunk_offset = qtmux->current_chunk_offset;
GST_LOG_OBJECT (qtmux,
"Pad (%s) dts updated to %" GST_TIME_FORMAT,
sync = TRUE;
}
- if (GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DTS (last_buf))) {
+ if (GST_BUFFER_DTS_IS_VALID (last_buf)) {
last_dts = gst_util_uint64_scale_round (GST_BUFFER_DTS (last_buf),
atom_trak_get_timescale (pad->trak), GST_SECOND);
pts_offset =
(gint64) (gst_util_uint64_scale_round (GST_BUFFER_PTS (last_buf),
atom_trak_get_timescale (pad->trak), GST_SECOND) - last_dts);
-
} else {
pts_offset = 0;
last_dts = gst_util_uint64_scale_round (GST_BUFFER_PTS (last_buf),
GST_TIME_ARGS (GST_BUFFER_PTS (last_buf)),
(int) (last_dts), (int) (pts_offset));
- /*
- * Each buffer starts a new chunk, so we can assume the buffer
- * duration is the chunk duration
- */
- if (GST_CLOCK_TIME_IS_VALID (duration) && (duration > qtmux->longest_chunk ||
- !GST_CLOCK_TIME_IS_VALID (qtmux->longest_chunk))) {
- GST_DEBUG_OBJECT (qtmux, "New longest chunk found: %" GST_TIME_FORMAT
- ", pad %s", GST_TIME_ARGS (duration), GST_PAD_NAME (pad->collect.pad));
- qtmux->longest_chunk = duration;
+ if (GST_CLOCK_TIME_IS_VALID (duration)
+ && (qtmux->current_chunk_duration > qtmux->longest_chunk
+ || !GST_CLOCK_TIME_IS_VALID (qtmux->longest_chunk))) {
+ GST_DEBUG_OBJECT (qtmux,
+ "New longest chunk found: %" GST_TIME_FORMAT ", pad %s",
+ GST_TIME_ARGS (qtmux->current_chunk_duration),
+ GST_PAD_NAME (pad->collect.pad));
+ qtmux->longest_chunk = qtmux->current_chunk_duration;
+ }
+
+ if (qtmux->mux_mode == GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL) {
+ const TrakBufferEntryInfo *sample_entry;
+ guint64 block_idx = prefill_get_block_index (qtmux, pad);
+
+ if (block_idx >= pad->samples->len) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL),
+ ("Unexpected sample %" G_GUINT64_FORMAT ", expected up to %u",
+ block_idx, pad->samples->len));
+ goto bail;
+ }
+
+ /* Check if all values are as expected */
+ sample_entry =
+ &g_array_index (pad->samples, TrakBufferEntryInfo, block_idx);
+
+ if (chunk_offset < sample_entry->chunk_offset) {
+ guint fill_size = sample_entry->chunk_offset - chunk_offset;
+ GstBuffer *fill_buf;
+
+ fill_buf = gst_buffer_new_allocate (NULL, fill_size, NULL);
+ gst_buffer_memset (fill_buf, 0, 0, fill_size);
+
+ ret = gst_qt_mux_send_buffer (qtmux, fill_buf, &qtmux->mdat_size, TRUE);
+ if (ret != GST_FLOW_OK)
+ goto bail;
+ qtmux->current_chunk_offset = chunk_offset = sample_entry->chunk_offset;
+ qtmux->current_chunk_size = buffer_size;
+ qtmux->current_chunk_duration = duration;
+ } else if (chunk_offset != sample_entry->chunk_offset) {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL),
+ ("Unexpected chunk offset %" G_GUINT64_FORMAT ", expected up to %"
+ G_GUINT64_FORMAT, chunk_offset, sample_entry->chunk_offset));
+ goto bail;
+ }
}
/* now we go and register this buffer/sample all over */
ret = gst_qt_mux_register_and_push_sample (qtmux, pad, last_buf,
buf == NULL, nsamples, last_dts, scaled_duration, sample_size,
chunk_offset, sync, TRUE, pts_offset);
+ pad->sample_offset += nsamples;
/* if this is sparse and we have a next buffer, check if there is any gap
* between them to insert an empty sample */
if (pad->create_empty_buffer) {
GstBuffer *empty_buf;
gint64 empty_duration =
- GST_BUFFER_TIMESTAMP (buf) - (GST_BUFFER_TIMESTAMP (last_buf) +
- duration);
+ GST_BUFFER_PTS (buf) - (GST_BUFFER_PTS (last_buf) + duration);
gint64 empty_duration_scaled;
+ guint empty_size;
empty_buf = pad->create_empty_buffer (pad, empty_duration);
- empty_duration_scaled = gst_util_uint64_scale_round (empty_duration,
- atom_trak_get_timescale (pad->trak), GST_SECOND);
+ pad->last_dts = GST_BUFFER_PTS (buf);
+ empty_duration_scaled = gst_util_uint64_scale_round (pad->last_dts,
+ atom_trak_get_timescale (pad->trak), GST_SECOND)
+ - (last_dts + scaled_duration);
+ empty_size = gst_buffer_get_size (empty_buf);
- pad->total_bytes += gst_buffer_get_size (empty_buf);
- pad->total_duration += duration;
+ gst_qt_mux_register_buffer_in_chunk (qtmux, pad, empty_size,
+ empty_duration);
ret =
gst_qt_mux_register_and_push_sample (qtmux, pad, empty_buf, FALSE, 1,
last_dts + scaled_duration, empty_duration_scaled,
- gst_buffer_get_size (empty_buf), qtmux->mdat_size, sync, TRUE, 0);
- } else {
- /* our only case currently is tx3g subtitles, so there is no reason to fill this yet */
+ empty_size, chunk_offset, sync, TRUE, 0);
+ } else if (pad->fourcc != FOURCC_c608 && pad->fourcc != FOURCC_c708) {
+ /* This assert is kept here to make sure implementors of new
+ * sparse input format decide whether there needs to be special
+ * gap handling or not */
g_assert_not_reached ();
GST_WARNING_OBJECT (qtmux,
"no empty buffer creation function found for pad %s",
gst_qt_mux_update_expected_trailer_size(qtmux, pad);
#endif /* TIZEN_FEATURE_GST_MUX_ENHANCEMENT */
- if (buf)
- gst_buffer_unref (buf);
-
exit:
return ret;
/* ERRORS */
bail:
{
- if (buf)
- gst_buffer_unref (buf);
gst_buffer_unref (last_buf);
return GST_FLOW_ERROR;
}
("Audio buffer contains fragmented sample."));
goto bail;
}
+raw_audio_timestamp_drift:
+ {
+ /* TODO: Could in theory be implemented with edit lists */
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL),
+ ("Audio stream timestamps are drifting (got %" GST_TIME_FORMAT
+ ", expected %" GST_TIME_FORMAT "). This is not supported yet!",
+ GST_TIME_ARGS (GST_BUFFER_DTS_OR_PTS (last_buf)),
+ GST_TIME_ARGS (gst_util_uint64_scale (pad->sample_offset,
+ GST_SECOND,
+ atom_trak_get_timescale (pad->trak)) + pad->first_ts)));
+ goto bail;
+ }
+no_pts:
+ {
+ GST_ELEMENT_ERROR (qtmux, STREAM, MUX, (NULL), ("Buffer has no PTS."));
+ goto bail;
+ }
not_negotiated:
{
GST_ELEMENT_ERROR (qtmux, CORE, NEGOTIATION, (NULL),
pts = dts;
}
- GST_BUFFER_PTS (*buf) = pts;
- GST_BUFFER_DTS (*buf) = dts;
+ GST_BUFFER_PTS (*buf) = pts;
+ GST_BUFFER_DTS (*buf) = dts;
+
+ GST_LOG_OBJECT (qtmux, "time adjusted to PTS %" GST_TIME_FORMAT
+ " and DTS %" GST_TIME_FORMAT, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts));
+ }
+}
+
+static GstQTPad *
+find_best_pad (GstQTMux * qtmux, GstCollectPads * pads)
+{
+ GSList *walk;
+ GstQTPad *best_pad = NULL;
+
+ if (qtmux->mux_mode == GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL) {
+ guint64 smallest_offset = G_MAXUINT64;
+ guint64 chunk_offset = 0;
+
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qtpad = (GstQTPad *) cdata;
+ const TrakBufferEntryInfo *sample_entry;
+ guint64 block_idx, current_block_idx;
+ guint64 chunk_offset_offset = 0;
+ GstBuffer *tmp_buf =
+ gst_collect_pads_peek (pads, (GstCollectData *) qtpad);
+
+ /* Check for EOS pads and just skip them */
+ if (!tmp_buf && !qtpad->last_buf && (!qtpad->raw_audio_adapter
+ || gst_adapter_available (qtpad->raw_audio_adapter) == 0))
+ continue;
+ if (tmp_buf)
+ gst_buffer_unref (tmp_buf);
+
+ /* Find the exact offset where the next sample of this track is supposed
+ * to be written at */
+ block_idx = current_block_idx = prefill_get_block_index (qtmux, qtpad);
+ sample_entry =
+ &g_array_index (qtpad->samples, TrakBufferEntryInfo, block_idx);
+ while (block_idx > 0) {
+ const TrakBufferEntryInfo *tmp =
+ &g_array_index (qtpad->samples, TrakBufferEntryInfo, block_idx - 1);
+
+ if (tmp->chunk_offset != sample_entry->chunk_offset)
+ break;
+ chunk_offset_offset += tmp->size * tmp->nsamples;
+ block_idx--;
+ }
+
+ /* Except for the previously selected pad being EOS we always have
+ * qtmux->current_chunk_offset + qtmux->current_chunk_size
+ * ==
+ * sample_entry->chunk_offset + chunk_offset_offset
+ * for the best pad. Instead of checking that, we just return the
+ * pad that has the smallest offset for the next to-be-written sample.
+ */
+ if (sample_entry->chunk_offset + chunk_offset_offset < smallest_offset) {
+ smallest_offset = sample_entry->chunk_offset + chunk_offset_offset;
+ best_pad = qtpad;
+ chunk_offset = sample_entry->chunk_offset;
+ }
+ }
+
+ if (chunk_offset != qtmux->current_chunk_offset) {
+ qtmux->current_pad = NULL;
+ }
+
+ return best_pad;
+ }
+
+ if (qtmux->current_pad && (qtmux->interleave_bytes != 0
+ || qtmux->interleave_time != 0) && (qtmux->interleave_bytes == 0
+ || qtmux->current_chunk_size <= qtmux->interleave_bytes)
+ && (qtmux->interleave_time == 0
+ || qtmux->current_chunk_duration <= qtmux->interleave_time)
+ && qtmux->mux_mode != GST_QT_MUX_MODE_FRAGMENTED
+ && qtmux->mux_mode != GST_QT_MUX_MODE_FRAGMENTED_STREAMABLE) {
+ GstBuffer *tmp_buf =
+ gst_collect_pads_peek (pads, (GstCollectData *) qtmux->current_pad);
+
+ if (tmp_buf || qtmux->current_pad->last_buf) {
+ best_pad = qtmux->current_pad;
+ if (tmp_buf)
+ gst_buffer_unref (tmp_buf);
+ GST_DEBUG_OBJECT (qtmux, "Reusing pad %s:%s",
+ GST_DEBUG_PAD_NAME (best_pad->collect.pad));
+ }
+ } else if (qtmux->collect->data->next) {
+ /* Only switch pads if we have more than one, otherwise
+ * we can just put everything into a single chunk and save
+ * a few bytes of offsets
+ */
+ if (qtmux->current_pad)
+ GST_DEBUG_OBJECT (qtmux, "Switching from pad %s:%s",
+ GST_DEBUG_PAD_NAME (qtmux->current_pad->collect.pad));
+ best_pad = qtmux->current_pad = NULL;
+ }
+
+ if (!best_pad) {
+ GstClockTime best_time = GST_CLOCK_TIME_NONE;
+
+ for (walk = qtmux->collect->data; walk; walk = g_slist_next (walk)) {
+ GstCollectData *cdata = (GstCollectData *) walk->data;
+ GstQTPad *qtpad = (GstQTPad *) cdata;
+ GstBuffer *tmp_buf;
+ GstClockTime timestamp;
+
+ tmp_buf = gst_collect_pads_peek (pads, cdata);
+ if (!tmp_buf) {
+ /* This one is newly EOS now, finish it for real */
+ if (qtpad->last_buf) {
+ timestamp = GST_BUFFER_DTS_OR_PTS (qtpad->last_buf);
+ } else {
+ continue;
+ }
+ } else {
+ if (qtpad->last_buf)
+ timestamp = GST_BUFFER_DTS_OR_PTS (qtpad->last_buf);
+ else
+ timestamp = GST_BUFFER_DTS_OR_PTS (tmp_buf);
+ }
+
+ if (best_pad == NULL ||
+ !GST_CLOCK_TIME_IS_VALID (best_time) || timestamp < best_time) {
+ best_pad = qtpad;
+ best_time = timestamp;
+ }
+
+ if (tmp_buf)
+ gst_buffer_unref (tmp_buf);
+ }
- GST_LOG_OBJECT (qtmux, "time adjusted to PTS %" GST_TIME_FORMAT
- " and DTS %" GST_TIME_FORMAT, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts));
+ if (best_pad) {
+ GST_DEBUG_OBJECT (qtmux, "Choosing pad %s:%s",
+ GST_DEBUG_PAD_NAME (best_pad->collect.pad));
+ } else {
+ GST_DEBUG_OBJECT (qtmux, "No best pad: EOS");
+ }
}
+
+ return best_pad;
}
static GstFlowReturn
-gst_qt_mux_handle_buffer (GstCollectPads * pads, GstCollectData * cdata,
- GstBuffer * buf, gpointer user_data)
+gst_qt_mux_collected (GstCollectPads * pads, gpointer user_data)
{
GstFlowReturn ret = GST_FLOW_OK;
GstQTMux *qtmux = GST_QT_MUX_CAST (user_data);
if (G_UNLIKELY (qtmux->state == GST_QT_MUX_STATE_EOS))
return GST_FLOW_EOS;
- best_pad = (GstQTPad *) cdata;
+ best_pad = find_best_pad (qtmux, pads);
/* clipping already converted to running time */
if (best_pad != NULL) {
- g_assert (buf);
- gst_qt_pad_adjust_buffer_dts (qtmux, best_pad, cdata, &buf);
+ GstBuffer *buf = NULL;
+
+ if (qtmux->mux_mode != GST_QT_MUX_MODE_ROBUST_RECORDING_PREFILL ||
+ best_pad->raw_audio_adapter == NULL ||
+ best_pad->raw_audio_adapter_pts == GST_CLOCK_TIME_NONE)
+ buf = gst_collect_pads_pop (pads, (GstCollectData *) best_pad);
+
+ g_assert (buf || best_pad->last_buf || (best_pad->raw_audio_adapter
+ && gst_adapter_available (best_pad->raw_audio_adapter) > 0));
+
+ if (buf)
+ gst_qt_pad_adjust_buffer_dts (qtmux, best_pad,
+ (GstCollectData *) best_pad, &buf);
+
ret = gst_qt_mux_add_buffer (qtmux, best_pad, buf);
} else {
qtmux->state = GST_QT_MUX_STATE_EOS;
return gst_structure_foreach (sub_s, check_field, sup_s);
}
+/* Decide whether a pad that already has a configured fourcc may
+ * renegotiate to @caps.  Renegotiation is only accepted when the pad's
+ * current caps are a subset of the new ones, i.e. upstream merely added
+ * fields to otherwise 'fixed' caps.
+ *
+ * Returns TRUE when the new caps are accepted, FALSE when refused.
+ * NOTE: will unref @qtmux on BOTH paths - callers pass in their ref.
+ * NOTE(review): assumes the pad already carries caps (callers only
+ * invoke this when qtpad->fourcc is set) - confirm current_caps is
+ * never NULL here. */
+static gboolean
+gst_qt_mux_can_renegotiate (GstQTMux * qtmux, GstPad * pad, GstCaps * caps)
+{
+  GstCaps *current_caps;
+
+  /* does not go well to renegotiate stream mid-way, unless
+   * the old caps are a subset of the new one (this means upstream
+   * added more info to the caps, as both should be 'fixed' caps) */
+  current_caps = gst_pad_get_current_caps (pad);
+  g_assert (caps != NULL);
+
+  if (!gst_qtmux_caps_is_subset_full (qtmux, current_caps, caps)) {
+    gst_caps_unref (current_caps);
+    GST_WARNING_OBJECT (qtmux,
+        "pad %s refused renegotiation to %" GST_PTR_FORMAT,
+        GST_PAD_NAME (pad), caps);
+    gst_object_unref (qtmux);
+    return FALSE;
+  }
+
+  GST_DEBUG_OBJECT (qtmux,
+      "pad %s accepted renegotiation to %" GST_PTR_FORMAT " from %"
+      GST_PTR_FORMAT, GST_PAD_NAME (pad), caps, current_caps);
+  gst_object_unref (qtmux);
+  gst_caps_unref (current_caps);
+
+  return TRUE;
+}
+
static gboolean
gst_qt_mux_audio_sink_set_caps (GstQTPad * qtpad, GstCaps * caps)
{
AtomInfo *ext_atom = NULL;
gint constant_size = 0;
const gchar *stream_format;
+ guint32 timescale;
- qtpad->prepare_buf_func = NULL;
-
- /* does not go well to renegotiate stream mid-way, unless
- * the old caps are a subset of the new one (this means upstream
- * added more info to the caps, as both should be 'fixed' caps) */
- if (qtpad->fourcc) {
- GstCaps *current_caps;
-
- current_caps = gst_pad_get_current_caps (pad);
- g_assert (caps != NULL);
-
- if (!gst_qtmux_caps_is_subset_full (qtmux, current_caps, caps)) {
- gst_caps_unref (current_caps);
- goto refuse_renegotiation;
- }
- GST_DEBUG_OBJECT (qtmux,
- "pad %s accepted renegotiation to %" GST_PTR_FORMAT " from %"
- GST_PTR_FORMAT, GST_PAD_NAME (pad), caps, current_caps);
- gst_caps_unref (current_caps);
- }
+ if (qtpad->fourcc)
+ return gst_qt_mux_can_renegotiate (qtmux, pad, caps);
GST_DEBUG_OBJECT (qtmux, "%s:%s, caps=%" GST_PTR_FORMAT,
GST_DEBUG_PAD_NAME (pad), caps);
+ qtpad->prepare_buf_func = NULL;
+
format = qtmux_klass->format;
structure = gst_caps_get_structure (caps, 0);
mimetype = gst_structure_get_name (structure);
/* now map onto a fourcc, and some extra properties */
if (strcmp (mimetype, "audio/mpeg") == 0) {
- gint mpegversion = 0;
+ gint mpegversion = 0, mpegaudioversion = 0;
gint layer = -1;
gst_structure_get_int (structure, "mpegversion", &mpegversion);
switch (mpegversion) {
case 1:
gst_structure_get_int (structure, "layer", &layer);
- switch (layer) {
- case 3:
- /* mp3 */
- /* note: QuickTime player does not like mp3 either way in iso/mp4 */
- if (format == GST_QT_MUX_FORMAT_QT)
- entry.fourcc = FOURCC__mp3;
- else {
- entry.fourcc = FOURCC_mp4a;
- ext_atom =
- build_esds_extension (qtpad->trak, ESDS_OBJECT_TYPE_MPEG1_P3,
- ESDS_STREAM_TYPE_AUDIO, codec_data, qtpad->avg_bitrate,
- qtpad->max_bitrate);
- }
- entry.samples_per_packet = 1152;
- entry.bytes_per_sample = 2;
- break;
+ gst_structure_get_int (structure, "mpegaudioversion",
+ &mpegaudioversion);
+
+ /* mp1/2/3 */
+ /* note: QuickTime player does not like mp3 either way in iso/mp4 */
+ if (format == GST_QT_MUX_FORMAT_QT)
+ entry.fourcc = FOURCC__mp3;
+ else {
+ entry.fourcc = FOURCC_mp4a;
+ ext_atom =
+ build_esds_extension (qtpad->trak, ESDS_OBJECT_TYPE_MPEG1_P3,
+ ESDS_STREAM_TYPE_AUDIO, codec_data, qtpad->avg_bitrate,
+ qtpad->max_bitrate);
+ }
+ if (layer == 1) {
+ g_warn_if_fail (format == GST_QT_MUX_FORMAT_MP4
+ || format == GST_QT_MUX_FORMAT_QT);
+ entry.samples_per_packet = 384;
+ } else if (layer == 2) {
+ g_warn_if_fail (format == GST_QT_MUX_FORMAT_MP4
+ || format == GST_QT_MUX_FORMAT_QT);
+ entry.samples_per_packet = 1152;
+ } else {
+ g_warn_if_fail (layer == 3);
+ entry.samples_per_packet = (mpegaudioversion <= 1) ? 1152 : 576;
}
+ entry.bytes_per_sample = 2;
break;
case 4:
"assuming 'raw'");
}
- if (!codec_data || gst_buffer_get_size ((GstBuffer *) codec_data) < 2)
+ if (!codec_data || gst_buffer_get_size ((GstBuffer *) codec_data) < 2) {
GST_WARNING_OBJECT (qtmux, "no (valid) codec_data for AAC audio");
- else {
+ goto refuse_caps;
+ } else {
guint8 profile;
gst_buffer_extract ((GstBuffer *) codec_data, 0, &profile, 1);
GST_WARNING_OBJECT (qtmux, "unexpected codec-data size, possibly broken");
}
if (format == GST_QT_MUX_FORMAT_QT)
- ext_atom = build_mov_alac_extension (qtpad->trak, codec_config);
+ ext_atom = build_mov_alac_extension (codec_config);
else
ext_atom = build_codec_data_extension (FOURCC_alac, codec_config);
/* set some more info */
* the stream itself. Abuse the prepare_buf_func so we parse a frame
* and get the needed data */
qtpad->prepare_buf_func = gst_qt_mux_prepare_parse_ac3_frame;
+ } else if (strcmp (mimetype, "audio/x-opus") == 0) {
+ /* Based on the specification defined in:
+ * https://www.opus-codec.org/docs/opus_in_isobmff.html */
+ guint8 channels, mapping_family, stream_count, coupled_count;
+ guint16 pre_skip;
+ gint16 output_gain;
+ guint32 rate;
+ guint8 channel_mapping[256];
+ const GValue *streamheader;
+ const GValue *first_element;
+ GstBuffer *header;
+
+ entry.fourcc = FOURCC_opus;
+ entry.sample_size = 16;
+
+ streamheader = gst_structure_get_value (structure, "streamheader");
+ if (streamheader && GST_VALUE_HOLDS_ARRAY (streamheader) &&
+ gst_value_array_get_size (streamheader) != 0) {
+ first_element = gst_value_array_get_value (streamheader, 0);
+ header = gst_value_get_buffer (first_element);
+ if (!gst_codec_utils_opus_parse_header (header, &rate, &channels,
+ &mapping_family, &stream_count, &coupled_count, channel_mapping,
+ &pre_skip, &output_gain)) {
+ GST_ERROR_OBJECT (qtmux, "Incomplete OpusHead");
+ goto refuse_caps;
+ }
+ } else {
+ GST_WARNING_OBJECT (qtmux,
+ "no streamheader field in caps %" GST_PTR_FORMAT, caps);
+
+ if (!gst_codec_utils_opus_parse_caps (caps, &rate, &channels,
+ &mapping_family, &stream_count, &coupled_count,
+ channel_mapping)) {
+ GST_ERROR_OBJECT (qtmux, "Incomplete Opus caps");
+ goto refuse_caps;
+ }
+ pre_skip = 0;
+ output_gain = 0;
+ }
+
+ entry.channels = channels;
+ ext_atom = build_opus_extension (rate, channels, mapping_family,
+ stream_count, coupled_count, channel_mapping, pre_skip, output_gain);
}
if (!entry.fourcc)
goto refuse_caps;
+ timescale = gst_qt_mux_pad_get_timescale (GST_QT_MUX_PAD_CAST (pad));
+ if (!timescale && qtmux->trak_timescale)
+ timescale = qtmux->trak_timescale;
+ else if (!timescale)
+ timescale = entry.sample_rate;
+
/* ok, set the pad info accordingly */
qtpad->fourcc = entry.fourcc;
qtpad->sample_size = constant_size;
qtpad->trak_ste =
(SampleTableEntry *) atom_trak_set_audio_type (qtpad->trak,
- qtmux->context, &entry,
- qtmux->trak_timescale ? qtmux->trak_timescale : entry.sample_rate,
- ext_atom, constant_size);
+ qtmux->context, &entry, timescale, ext_atom, constant_size);
gst_object_unref (qtmux);
return TRUE;
gst_object_unref (qtmux);
return FALSE;
}
-refuse_renegotiation:
- {
- GST_WARNING_OBJECT (qtmux,
- "pad %s refused renegotiation to %" GST_PTR_FORMAT,
- GST_PAD_NAME (pad), caps);
- gst_object_unref (qtmux);
- return FALSE;
- }
-}
-
-/* scale rate up or down by factor of 10 to fit into [1000,10000] interval */
-static guint32
-adjust_rate (guint64 rate)
-{
- if (rate == 0)
- return 10000;
-
- while (rate >= 10000)
- rate /= 10;
-
- while (rate < 1000)
- rate *= 10;
-
- return (guint32) rate;
}
static gboolean
GList *ext_atom_list = NULL;
gboolean sync = FALSE;
int par_num, par_den;
+ const gchar *multiview_mode;
- qtpad->prepare_buf_func = NULL;
-
- /* does not go well to renegotiate stream mid-way, unless
- * the old caps are a subset of the new one (this means upstream
- * added more info to the caps, as both should be 'fixed' caps) */
- if (qtpad->fourcc) {
- GstCaps *current_caps;
-
- current_caps = gst_pad_get_current_caps (pad);
- g_assert (caps != NULL);
-
- if (!gst_qtmux_caps_is_subset_full (qtmux, current_caps, caps)) {
- gst_caps_unref (current_caps);
- goto refuse_renegotiation;
- }
- GST_DEBUG_OBJECT (qtmux,
- "pad %s accepted renegotiation to %" GST_PTR_FORMAT " from %"
- GST_PTR_FORMAT, GST_PAD_NAME (pad), caps, current_caps);
- gst_caps_unref (current_caps);
- }
+ if (qtpad->fourcc)
+ return gst_qt_mux_can_renegotiate (qtmux, pad, caps);
GST_DEBUG_OBJECT (qtmux, "%s:%s, caps=%" GST_PTR_FORMAT,
GST_DEBUG_PAD_NAME (pad), caps);
+ qtpad->prepare_buf_func = NULL;
+
format = qtmux_klass->format;
structure = gst_caps_get_structure (caps, 0);
mimetype = gst_structure_get_name (structure);
/* bring frame numerator into a range that ensures both reasonable resolution
* as well as a fair duration */
- rate = qtmux->trak_timescale ?
- qtmux->trak_timescale : adjust_rate (framerate_num);
+ qtpad->expected_sample_duration_n = framerate_num;
+ qtpad->expected_sample_duration_d = framerate_den;
+
+ rate = gst_qt_mux_pad_get_timescale (GST_QT_MUX_PAD_CAST (pad));
+ if (!rate && qtmux->trak_timescale)
+ rate = qtmux->trak_timescale;
+ else if (!rate)
+ rate = atom_framerate_to_timescale (framerate_num, framerate_den);
+
GST_DEBUG_OBJECT (qtmux, "Rate of video track selected: %" G_GUINT32_FORMAT,
rate);
+ multiview_mode = gst_structure_get_string (structure, "multiview-mode");
+ if (multiview_mode && !qtpad->trak->mdia.minf.stbl.svmi) {
+ GstVideoMultiviewMode mode;
+ GstVideoMultiviewFlags flags = 0;
+
+ mode = gst_video_multiview_mode_from_caps_string (multiview_mode);
+ gst_structure_get_flagset (structure, "multiview-flags", &flags, NULL);
+ switch (mode) {
+ case GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE:
+ qtpad->trak->mdia.minf.stbl.svmi =
+ atom_svmi_new (0,
+ flags & GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST);
+ break;
+ case GST_VIDEO_MULTIVIEW_MODE_ROW_INTERLEAVED:
+ qtpad->trak->mdia.minf.stbl.svmi =
+ atom_svmi_new (1,
+ flags & GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST);
+ break;
+ case GST_VIDEO_MULTIVIEW_MODE_FRAME_BY_FRAME:
+ qtpad->trak->mdia.minf.stbl.svmi =
+ atom_svmi_new (2,
+ flags & GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST);
+ break;
+ default:
+ GST_DEBUG_OBJECT (qtmux, "Unsupported multiview-mode %s",
+ multiview_mode);
+ break;
+ }
+ }
+
/* set common properties */
entry.width = width;
entry.height = height;
"output might not play in Apple QuickTime (try global-headers?)");
}
} else if (strcmp (mimetype, "video/x-h264") == 0) {
- /* check if we accept these caps */
- if (gst_structure_has_field (structure, "stream-format")) {
- const gchar *format;
- const gchar *alignment;
-
- format = gst_structure_get_string (structure, "stream-format");
- alignment = gst_structure_get_string (structure, "alignment");
-
- if (strcmp (format, "avc") != 0 || alignment == NULL ||
- strcmp (alignment, "au") != 0) {
- GST_WARNING_OBJECT (qtmux, "Rejecting h264 caps, qtmux only accepts "
- "avc format with AU aligned samples");
- goto refuse_caps;
- }
- } else {
- GST_WARNING_OBJECT (qtmux, "no stream-format field in h264 caps");
- goto refuse_caps;
- }
-
if (!codec_data) {
GST_WARNING_OBJECT (qtmux, "no codec_data in h264 caps");
goto refuse_caps;
}
entry.fourcc = FOURCC_avc1;
- if (qtpad->avg_bitrate == 0) {
- gint avg_bitrate = 0;
- gst_structure_get_int (structure, "bitrate", &avg_bitrate);
- qtpad->avg_bitrate = avg_bitrate;
- }
+
ext_atom = build_btrt_extension (0, qtpad->avg_bitrate, qtpad->max_bitrate);
if (ext_atom != NULL)
ext_atom_list = g_list_prepend (ext_atom_list, ext_atom);
ext_atom = build_codec_data_extension (FOURCC_avcC, codec_data);
if (ext_atom != NULL)
ext_atom_list = g_list_prepend (ext_atom_list, ext_atom);
+ } else if (strcmp (mimetype, "video/x-h265") == 0) {
+ const gchar *format;
+
+ if (!codec_data) {
+ GST_WARNING_OBJECT (qtmux, "no codec_data in h265 caps");
+ goto refuse_caps;
+ }
+
+ format = gst_structure_get_string (structure, "stream-format");
+ if (strcmp (format, "hvc1") == 0)
+ entry.fourcc = FOURCC_hvc1;
+ else if (strcmp (format, "hev1") == 0)
+ entry.fourcc = FOURCC_hev1;
+
+ ext_atom = build_btrt_extension (0, qtpad->avg_bitrate, qtpad->max_bitrate);
+ if (ext_atom != NULL)
+ ext_atom_list = g_list_prepend (ext_atom_list, ext_atom);
+
+ ext_atom = build_codec_data_extension (FOURCC_hvcC, codec_data);
+ if (ext_atom != NULL)
+ ext_atom_list = g_list_prepend (ext_atom_list, ext_atom);
+
} else if (strcmp (mimetype, "video/x-svq") == 0) {
gint version = 0;
const GstBuffer *seqh = NULL;
switch (version) {
case 25:
if (pal)
- entry.fourcc = GST_MAKE_FOURCC ('d', 'v', 'c', 'p');
+ entry.fourcc = FOURCC_dvcp;
else
- entry.fourcc = GST_MAKE_FOURCC ('d', 'v', 'c', ' ');
+ entry.fourcc = FOURCC_dvc_;
break;
case 50:
if (pal)
- entry.fourcc = GST_MAKE_FOURCC ('d', 'v', '5', 'p');
+ entry.fourcc = FOURCC_dv5p;
else
- entry.fourcc = GST_MAKE_FOURCC ('d', 'v', '5', 'n');
+ entry.fourcc = FOURCC_dv5n;
break;
default:
GST_WARNING_OBJECT (qtmux, "unrecognized dv version");
} else if (strcmp (mimetype, "image/jpeg") == 0) {
entry.fourcc = FOURCC_jpeg;
sync = FALSE;
+ } else if (strcmp (mimetype, "image/png") == 0) {
+ entry.fourcc = FOURCC_png;
+ sync = FALSE;
} else if (strcmp (mimetype, "image/x-j2c") == 0 ||
strcmp (mimetype, "image/x-jpc") == 0) {
const gchar *colorspace;
const GValue *cmap_array;
const GValue *cdef_array;
gint ncomp = 0;
- gint fields = 1;
if (strcmp (mimetype, "image/x-jpc") == 0) {
qtpad->prepare_buf_func = gst_qt_mux_prepare_jpc_buffer;
}
gst_structure_get_int (structure, "num-components", &ncomp);
- gst_structure_get_int (structure, "fields", &fields);
cmap_array = gst_structure_get_value (structure, "component-map");
cdef_array = gst_structure_get_value (structure, "channel-definitions");
colorspace = gst_structure_get_string (structure, "colorspace");
if (colorspace &&
(ext_atom =
- build_jp2h_extension (qtpad->trak, width, height, colorspace, ncomp,
- cmap_array, cdef_array)) != NULL) {
+ build_jp2h_extension (width, height, colorspace, ncomp, cmap_array,
+ cdef_array)) != NULL) {
ext_atom_list = g_list_append (ext_atom_list, ext_atom);
- ext_atom = build_fiel_extension (fields);
- if (ext_atom)
- ext_atom_list = g_list_append (ext_atom_list, ext_atom);
-
ext_atom = build_jp2x_extension (codec_data);
if (ext_atom)
ext_atom_list = g_list_append (ext_atom_list, ext_atom);
goto refuse_caps;
}
} else if (strcmp (mimetype, "video/x-vp8") == 0) {
- entry.fourcc = FOURCC_VP80;
- sync = FALSE;
+ entry.fourcc = FOURCC_vp08;
+ } else if (strcmp (mimetype, "video/x-vp9") == 0) {
+ entry.fourcc = FOURCC_vp09;
} else if (strcmp (mimetype, "video/x-dirac") == 0) {
entry.fourcc = FOURCC_drac;
} else if (strcmp (mimetype, "video/x-qt-part") == 0) {
- guint32 fourcc;
+ guint32 fourcc = 0;
gst_structure_get_uint (structure, "format", &fourcc);
entry.fourcc = fourcc;
} else if (strcmp (mimetype, "video/x-mp4-part") == 0) {
- guint32 fourcc;
+ guint32 fourcc = 0;
gst_structure_get_uint (structure, "format", &fourcc);
entry.fourcc = fourcc;
} else if (strcmp (mimetype, "video/x-prores") == 0) {
const gchar *variant;
- variant = gst_structure_get_string (structure, "format");
+ variant = gst_structure_get_string (structure, "variant");
if (!variant || !g_strcmp0 (variant, "standard"))
- entry.fourcc = GST_MAKE_FOURCC ('a', 'p', 'c', 'n');
+ entry.fourcc = FOURCC_apcn;
else if (!g_strcmp0 (variant, "lt"))
- entry.fourcc = GST_MAKE_FOURCC ('a', 'p', 'c', 's');
+ entry.fourcc = FOURCC_apcs;
else if (!g_strcmp0 (variant, "hq"))
- entry.fourcc = GST_MAKE_FOURCC ('a', 'p', 'c', 'h');
+ entry.fourcc = FOURCC_apch;
else if (!g_strcmp0 (variant, "proxy"))
- entry.fourcc = GST_MAKE_FOURCC ('a', 'p', '4', 'h');
+ entry.fourcc = FOURCC_apco;
+ else if (!g_strcmp0 (variant, "4444"))
+ entry.fourcc = FOURCC_ap4h;
+ else if (!g_strcmp0 (variant, "4444xq"))
+ entry.fourcc = FOURCC_ap4x;
+
+ sync = FALSE;
+
+ if (!qtmux->interleave_time_set)
+ qtmux->interleave_time = 500 * GST_MSECOND;
+ if (!qtmux->interleave_bytes_set)
+ qtmux->interleave_bytes = width > 720 ? 4 * 1024 * 1024 : 2 * 1024 * 1024;
+ } else if (strcmp (mimetype, "video/x-cineform") == 0) {
+ entry.fourcc = FOURCC_cfhd;
+ sync = FALSE;
+ } else if (strcmp (mimetype, "video/x-av1") == 0) {
+ gint presentation_delay;
+ guint8 presentation_delay_byte = 0;
+ GstBuffer *av1_codec_data;
+
+ if (gst_structure_get_int (structure, "presentation-delay",
+ &presentation_delay)) {
+ presentation_delay_byte = 1 << 5;
+      presentation_delay_byte |= MIN (0xF, presentation_delay & 0xF);
+ }
+
+
+ av1_codec_data = gst_buffer_new_allocate (NULL, 5, NULL);
+ /* Fill version and 3 bytes of flags to 0 */
+ gst_buffer_memset (av1_codec_data, 0, 0, 4);
+ gst_buffer_fill (av1_codec_data, 4, &presentation_delay_byte, 1);
+ if (codec_data)
+ av1_codec_data = gst_buffer_append (av1_codec_data,
+ gst_buffer_ref ((GstBuffer *) codec_data));
+
+ entry.fourcc = FOURCC_av01;
+
+ ext_atom = build_btrt_extension (0, qtpad->avg_bitrate, qtpad->max_bitrate);
+ if (ext_atom != NULL)
+ ext_atom_list = g_list_prepend (ext_atom_list, ext_atom);
+ ext_atom = build_codec_data_extension (FOURCC_av1C, av1_codec_data);
+ if (ext_atom != NULL)
+ ext_atom_list = g_list_prepend (ext_atom_list, ext_atom);
+ gst_buffer_unref (av1_codec_data);
}
if (!entry.fourcc)
goto refuse_caps;
+ if (qtmux_klass->format == GST_QT_MUX_FORMAT_QT ||
+ qtmux_klass->format == GST_QT_MUX_FORMAT_MP4) {
+ const gchar *s;
+ GstVideoColorimetry colorimetry;
+
+ s = gst_structure_get_string (structure, "colorimetry");
+ if (s && gst_video_colorimetry_from_string (&colorimetry, s)) {
+ ext_atom =
+ build_colr_extension (&colorimetry,
+ qtmux_klass->format == GST_QT_MUX_FORMAT_MP4);
+ if (ext_atom)
+ ext_atom_list = g_list_append (ext_atom_list, ext_atom);
+ }
+ }
+
+ if (qtmux_klass->format == GST_QT_MUX_FORMAT_QT
+ || strcmp (mimetype, "image/x-j2c") == 0
+ || strcmp (mimetype, "image/x-jpc") == 0) {
+ const gchar *s;
+ GstVideoInterlaceMode interlace_mode;
+ GstVideoFieldOrder field_order;
+ gint fields = -1;
+
+ if (strcmp (mimetype, "image/x-j2c") == 0 ||
+ strcmp (mimetype, "image/x-jpc") == 0) {
+
+ fields = 1;
+ gst_structure_get_int (structure, "fields", &fields);
+ }
+
+ s = gst_structure_get_string (structure, "interlace-mode");
+ if (s)
+ interlace_mode = gst_video_interlace_mode_from_string (s);
+ else
+ interlace_mode =
+ (fields <=
+ 1) ? GST_VIDEO_INTERLACE_MODE_PROGRESSIVE :
+ GST_VIDEO_INTERLACE_MODE_MIXED;
+
+ field_order = GST_VIDEO_FIELD_ORDER_UNKNOWN;
+ if (interlace_mode == GST_VIDEO_INTERLACE_MODE_INTERLEAVED) {
+ s = gst_structure_get_string (structure, "field-order");
+ if (s)
+ field_order = gst_video_field_order_from_string (s);
+ }
+
+ ext_atom = build_fiel_extension (interlace_mode, field_order);
+ if (ext_atom)
+ ext_atom_list = g_list_append (ext_atom_list, ext_atom);
+ }
+
+
+ if (qtmux_klass->format == GST_QT_MUX_FORMAT_QT &&
+ width > 640 && width <= 1052 && height >= 480 && height <= 576) {
+ /* The 'clap' extension is also defined for MP4 but inventing values in
+ * general seems a bit tricky for this one. We only write it for
+ * SD resolution in MOV, where it is a requirement.
+ * The same goes for the 'tapt' extension, just that it is not defined for
+ * MP4 and only for MOV
+ */
+ gint dar_num, dar_den;
+ gint clef_width, clef_height, prof_width;
+ gint clap_width_n, clap_width_d, clap_height;
+ gint cdiv;
+ double approx_dar;
+
+ /* First, guess display aspect ratio based on pixel aspect ratio,
+ * width and height. We assume that display aspect ratio is either
+ * 4:3 or 16:9
+ */
+ approx_dar = (gdouble) (width * par_num) / (height * par_den);
+ if (approx_dar > 11.0 / 9 && approx_dar < 14.0 / 9) {
+ dar_num = 4;
+ dar_den = 3;
+ } else if (approx_dar > 15.0 / 9 && approx_dar < 18.0 / 9) {
+ dar_num = 16;
+ dar_den = 9;
+ } else {
+ dar_num = width * par_num;
+ dar_den = height * par_den;
+ cdiv = gst_util_greatest_common_divisor (dar_num, dar_den);
+ dar_num /= cdiv;
+ dar_den /= cdiv;
+ }
+
+ /* Then, calculate clean-aperture values (clap and clef)
+ * using the guessed DAR.
+ */
+ clef_height = clap_height = (height == 486 ? 480 : height);
+ clef_width = gst_util_uint64_scale (clef_height,
+ dar_num * G_GUINT64_CONSTANT (65536), dar_den);
+ prof_width = gst_util_uint64_scale (width,
+ par_num * G_GUINT64_CONSTANT (65536), par_den);
+ clap_width_n = clap_height * dar_num * par_den;
+ clap_width_d = dar_den * par_num;
+ cdiv = gst_util_greatest_common_divisor (clap_width_n, clap_width_d);
+ clap_width_n /= cdiv;
+ clap_width_d /= cdiv;
+
+ ext_atom = build_tapt_extension (clef_width, clef_height << 16, prof_width,
+ height << 16, width << 16, height << 16);
+ qtpad->trak->tapt = ext_atom;
+
+ ext_atom = build_clap_extension (clap_width_n, clap_width_d,
+ clap_height, 1, 0, 1, 0, 1);
+ if (ext_atom)
+ ext_atom_list = g_list_append (ext_atom_list, ext_atom);
+ }
+
/* ok, set the pad info accordingly */
qtpad->fourcc = entry.fourcc;
qtpad->sync = sync;
qtpad->trak_ste =
(SampleTableEntry *) atom_trak_set_video_type (qtpad->trak,
qtmux->context, &entry, rate, ext_atom_list);
+ if (strcmp (mimetype, "video/x-prores") == 0) {
+ SampleTableEntryMP4V *mp4v = (SampleTableEntryMP4V *) qtpad->trak_ste;
+ const gchar *compressor = NULL;
+ mp4v->spatial_quality = 0x3FF;
+ mp4v->temporal_quality = 0;
+ mp4v->vendor = FOURCC_appl;
+ mp4v->horizontal_resolution = 72 << 16;
+ mp4v->vertical_resolution = 72 << 16;
+ mp4v->depth = (entry.fourcc == FOURCC_ap4h
+ || entry.fourcc == FOURCC_ap4x) ? 32 : 24;
+
+ /* Set compressor name, required by some software */
+ switch (entry.fourcc) {
+ case FOURCC_apcn:
+ compressor = "Apple ProRes 422";
+ break;
+ case FOURCC_apcs:
+ compressor = "Apple ProRes 422 LT";
+ break;
+ case FOURCC_apch:
+ compressor = "Apple ProRes 422 HQ";
+ break;
+ case FOURCC_apco:
+ compressor = "Apple ProRes 422 Proxy";
+ break;
+ case FOURCC_ap4h:
+ compressor = "Apple ProRes 4444";
+ break;
+ case FOURCC_ap4x:
+ compressor = "Apple ProRes 4444 XQ";
+ break;
+ }
+ if (compressor) {
+ strcpy ((gchar *) mp4v->compressor + 1, compressor);
+ mp4v->compressor[0] = strlen (compressor);
+ }
+ }
gst_object_unref (qtmux);
return TRUE;
gst_object_unref (qtmux);
return FALSE;
}
-refuse_renegotiation:
- {
- GST_WARNING_OBJECT (qtmux,
- "pad %s refused renegotiation to %" GST_PTR_FORMAT, GST_PAD_NAME (pad),
- caps);
- gst_object_unref (qtmux);
- return FALSE;
- }
}
static gboolean
GstStructure *structure;
SubtitleSampleEntry entry = { 0, };
- /* does not go well to renegotiate stream mid-way, unless
- * the old caps are a subset of the new one (this means upstream
- * added more info to the caps, as both should be 'fixed' caps) */
- if (qtpad->fourcc) {
- GstCaps *current_caps;
-
- current_caps = gst_pad_get_current_caps (pad);
- g_assert (caps != NULL);
-
- if (!gst_qtmux_caps_is_subset_full (qtmux, current_caps, caps)) {
- gst_caps_unref (current_caps);
- goto refuse_renegotiation;
- }
- GST_DEBUG_OBJECT (qtmux,
- "pad %s accepted renegotiation to %" GST_PTR_FORMAT " from %"
- GST_PTR_FORMAT, GST_PAD_NAME (pad), caps, current_caps);
- gst_caps_unref (current_caps);
- }
+ if (qtpad->fourcc)
+ return gst_qt_mux_can_renegotiate (qtmux, pad, caps);
GST_DEBUG_OBJECT (qtmux, "%s:%s, caps=%" GST_PTR_FORMAT,
GST_DEBUG_PAD_NAME (pad), caps);
gst_object_unref (qtmux);
return FALSE;
}
-refuse_renegotiation:
+}
+
+static gboolean
+gst_qt_mux_caption_sink_set_caps (GstQTPad * qtpad, GstCaps * caps)
+{
+ GstPad *pad = qtpad->collect.pad;
+ GstQTMux *qtmux = GST_QT_MUX_CAST (gst_pad_get_parent (pad));
+ GstStructure *structure;
+ guint32 fourcc_entry;
+ guint32 timescale;
+
+ if (qtpad->fourcc)
+ return gst_qt_mux_can_renegotiate (qtmux, pad, caps);
+
+ GST_DEBUG_OBJECT (qtmux, "%s:%s, caps=%" GST_PTR_FORMAT,
+ GST_DEBUG_PAD_NAME (pad), caps);
+
+ /* captions default */
+ qtpad->is_out_of_order = FALSE;
+ qtpad->sync = FALSE;
+ qtpad->sparse = TRUE;
+ /* Closed caption data are within atoms */
+ qtpad->prepare_buf_func = gst_qt_mux_prepare_caption_buffer;
+
+ structure = gst_caps_get_structure (caps, 0);
+
+ /* We know we only handle 608,format=s334-1a and 708,format=cdp */
+ if (gst_structure_has_name (structure, "closedcaption/x-cea-608")) {
+ fourcc_entry = FOURCC_c608;
+ } else if (gst_structure_has_name (structure, "closedcaption/x-cea-708")) {
+ fourcc_entry = FOURCC_c708;
+ } else
+ goto refuse_caps;
+
+ /* We set the real timescale later to the one from the video track when
+ * writing the headers */
+ timescale = gst_qt_mux_pad_get_timescale (GST_QT_MUX_PAD_CAST (pad));
+ if (!timescale && qtmux->trak_timescale)
+ timescale = qtmux->trak_timescale;
+ else if (!timescale)
+ timescale = 30000;
+
+ qtpad->fourcc = fourcc_entry;
+ qtpad->trak_ste =
+ (SampleTableEntry *) atom_trak_set_caption_type (qtpad->trak,
+ qtmux->context, timescale, fourcc_entry);
+
+ /* Initialize caption track language code to 0 unless something else is
+ * specified. Without this, Final Cut considers it "non-standard"
+ */
+ qtpad->trak->mdia.mdhd.language_code = 0;
+
+ gst_object_unref (qtmux);
+ return TRUE;
+
+ /* ERRORS */
+refuse_caps:
{
- GST_WARNING_OBJECT (qtmux,
- "pad %s refused renegotiation to %" GST_PTR_FORMAT, GST_PAD_NAME (pad),
- caps);
+ GST_WARNING_OBJECT (qtmux, "pad %s refused caps %" GST_PTR_FORMAT,
+ GST_PAD_NAME (pad), caps);
gst_object_unref (qtmux);
return FALSE;
}
g_assert (qtpad);
if (qtpad->trak) {
/* https://developer.apple.com/library/mac/#documentation/QuickTime/QTFF/QTFFChap4/qtff4.html */
- qtpad->trak->mdia.mdhd.language_code =
- (iso_code[0] - 0x60) * 0x400 + (iso_code[1] - 0x60) * 0x20 +
- (iso_code[2] - 0x60);
+ qtpad->trak->mdia.mdhd.language_code = language_code (iso_code);
}
}
g_free (code);
}
}
+ if (mux->current_pad && mux->current_pad->collect.pad == pad) {
+ mux->current_pad = NULL;
+ mux->current_chunk_size = 0;
+ mux->current_chunk_duration = 0;
+ }
+
gst_collect_pads_remove_pad (mux->collect, pad);
+
+ if (mux->sinkpads == NULL) {
+ /* No more outstanding request pads, reset our counters */
+ mux->video_pads = 0;
+ mux->audio_pads = 0;
+ mux->subtitle_pads = 0;
+ }
}
static GstPad *
name = g_strdup_printf ("subtitle_%u", qtmux->subtitle_pads++);
}
lock = FALSE;
+ } else if (templ == gst_element_class_get_pad_template (klass, "caption_%u")) {
+ setcaps_func = gst_qt_mux_caption_sink_set_caps;
+ if (req_name != NULL && sscanf (req_name, "caption_%u", &pad_id) == 1) {
+ name = g_strdup (req_name);
+ } else {
+ name = g_strdup_printf ("caption_%u", qtmux->caption_pads++);
+ }
+ lock = FALSE;
} else
goto wrong_template;
GST_DEBUG_OBJECT (qtmux, "Requested pad: %s", name);
/* create pad and add to collections */
- newpad = gst_pad_new_from_template (templ, name);
+ newpad =
+ g_object_new (GST_TYPE_QT_MUX_PAD, "name", name, "direction",
+ templ->direction, "template", templ, NULL);
g_free (name);
collect_pad = (GstQTPad *)
gst_collect_pads_add_pad (qtmux->collect, newpad, sizeof (GstQTPad),
case PROP_RESERVED_BYTES_PER_SEC:
g_value_set_uint (value, qtmux->reserved_bytes_per_sec_per_trak);
break;
+ case PROP_RESERVED_PREFILL:
+ g_value_set_boolean (value, qtmux->reserved_prefill);
+ break;
+ case PROP_INTERLEAVE_BYTES:
+ g_value_set_uint64 (value, qtmux->interleave_bytes);
+ break;
+ case PROP_INTERLEAVE_TIME:
+ g_value_set_uint64 (value, qtmux->interleave_time);
+ break;
+ case PROP_MAX_RAW_AUDIO_DRIFT:
+ g_value_set_uint64 (value, qtmux->max_raw_audio_drift);
+ break;
+ case PROP_START_GAP_THRESHOLD:
+ g_value_set_uint64 (value, qtmux->start_gap_threshold);
+ break;
#ifdef TIZEN_FEATURE_GST_MUX_ENHANCEMENT
case PROP_EXPECTED_TRAILER_SIZE:
g_value_set_uint(value, qtmux->expected_trailer_size);
break;
-#endif /* TIZEN_FEATURE_GST_MUX_ENHANCEMENT */
+#endif /* TIZEN_FEATURE_GST_MUX_ENHANCEMENT */
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
case PROP_RESERVED_BYTES_PER_SEC:
qtmux->reserved_bytes_per_sec_per_trak = g_value_get_uint (value);
break;
+ case PROP_RESERVED_PREFILL:
+ qtmux->reserved_prefill = g_value_get_boolean (value);
+ break;
+ case PROP_INTERLEAVE_BYTES:
+ qtmux->interleave_bytes = g_value_get_uint64 (value);
+ qtmux->interleave_bytes_set = TRUE;
+ break;
+ case PROP_INTERLEAVE_TIME:
+ qtmux->interleave_time = g_value_get_uint64 (value);
+ qtmux->interleave_time_set = TRUE;
+ break;
+ case PROP_MAX_RAW_AUDIO_DRIFT:
+ qtmux->max_raw_audio_drift = g_value_get_uint64 (value);
+ break;
+ case PROP_START_GAP_THRESHOLD:
+ qtmux->start_gap_threshold = g_value_get_uint64 (value);
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
static const GInterfaceInfo tag_xmp_writer_info = {
NULL, NULL, NULL
};
+ static const GInterfaceInfo preset_info = {
+ NULL, NULL, NULL
+ };
GType type;
GstQTMuxFormat format;
GstQTMuxClassParams *params;
while (TRUE) {
GstQTMuxFormatProp *prop;
- GstCaps *subtitle_caps;
+ GstCaps *subtitle_caps, *caption_caps;
prop = &gst_qt_mux_format_list[i];
format = prop->format;
} else {
gst_caps_unref (subtitle_caps);
}
+ caption_caps = gst_static_caps_get (&prop->caption_sink_caps);
+ if (!gst_caps_is_equal (caption_caps, GST_CAPS_NONE)) {
+ params->caption_sink_caps = caption_caps;
+ } else {
+ gst_caps_unref (caption_caps);
+ }
/* create the type now */
type = g_type_register_static (GST_TYPE_ELEMENT, prop->type_name, &typeinfo,
g_type_add_interface_static (type, GST_TYPE_TAG_SETTER, &tag_setter_info);
g_type_add_interface_static (type, GST_TYPE_TAG_XMP_WRITER,
&tag_xmp_writer_info);
+ g_type_add_interface_static (type, GST_TYPE_PRESET, &preset_info);
if (!gst_element_register (plugin, prop->name, prop->rank, type))
return FALSE;