#include <gst/math-compat.h>
#ifdef HAVE_ZLIB
-# include <zlib.h>
+#include <zlib.h>
#endif
/* max. size considered 'sane' for non-mdat atoms */
#define ABSDIFF(x, y) ( (x) > (y) ? ((x) - (y)) : ((y) - (x)) )
-#define QTDEMUX_FIRST_STREAM(demux) ((QtDemuxStream *)(demux)->active_streams \
- ? (QtDemuxStream *)(demux)->active_streams->data : NULL)
#define QTDEMUX_STREAM(s) ((QtDemuxStream *)(s))
+#define QTDEMUX_N_STREAMS(demux) ((demux)->active_streams->len)
+#define QTDEMUX_NTH_STREAM(demux,idx) \
+ QTDEMUX_STREAM(g_ptr_array_index((demux)->active_streams,idx))
+#define QTDEMUX_NTH_OLD_STREAM(demux,idx) \
+ QTDEMUX_STREAM(g_ptr_array_index((demux)->old_streams,idx))
GST_DEBUG_CATEGORY (qtdemux_debug);
#define GST_CAT_DEFAULT qtdemux_debug
gboolean keyframe; /* TRUE when this packet is a keyframe */
};
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+typedef struct _QtDemuxSphericalMetadata QtDemuxSphericalMetadata;
+
+/* Spherical (360-degree) video metadata collected from the GSpherical
+ * XML carried in the spherical uuid box, plus ambisonic audio info from
+ * the SA3D box.  String members are allocated with strdup()/strndup()
+ * and released with free() in dispose; they stay NULL until parsed. */
+struct _QtDemuxSphericalMetadata
+{
+  gboolean is_spherical;        /* <GSpherical:Spherical> */
+  gboolean is_stitched;         /* <GSpherical:Stitched> */
+  char *stitching_software;     /* owned; NULL until parsed */
+  char *projection_type;        /* owned; NULL until parsed */
+  char *stereo_mode;            /* owned; NULL until parsed */
+  int source_count;
+  int init_view_heading;        /* degrees */
+  int init_view_pitch;          /* degrees */
+  int init_view_roll;           /* degrees */
+  int timestamp;                /* epoch of first recorded frame */
+  int full_pano_width_pixels;
+  int full_pano_height_pixels;
+  int cropped_area_image_width;
+  int cropped_area_image_height;
+  int cropped_area_left;
+  int cropped_area_top;
+  QTDEMUX_AMBISONIC_TYPE ambisonic_type;        /* from SA3D box */
+  QTDEMUX_AMBISONIC_FORMAT ambisonic_format;    /* from SA3D box */
+  QTDEMUX_AMBISONIC_ORDER ambisonic_order;      /* from SA3D box */
+};
+
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
/* Macros for converting to/from timescale */
#define QTSTREAMTIME_TO_GSTTIME(stream, value) (gst_util_uint64_scale((value), GST_SECOND, (stream)->timescale))
#define GSTTIME_TO_QTSTREAMTIME(stream, value) (gst_util_uint64_scale((value), (stream)->timescale, GST_SECOND))
/* track id */
guint track_id;
+#ifdef TIZEN_FEATURE_QTDEMUX_DURATION
+ guint64 tkhd_duration;
+#endif
+
/* duration/scale */
guint64 duration; /* in timescale units */
guint32 timescale;
/* buffer needs some custom processing, e.g. subtitles */
gboolean need_process;
+ /* buffer needs potentially be split, e.g. CEA608 subtitles */
+ gboolean need_split;
/* current position */
guint32 segment_index;
guint32 protection_scheme_version;
gpointer protection_scheme_info; /* specific to the protection scheme */
GQueue protection_scheme_event_queue;
+
+ gint ref_count; /* atomic */
};
/* Contains properties and cryptographic info for a set of samples from a
GstBuffer * inbuf);
static gboolean gst_qtdemux_handle_sink_event (GstPad * pad, GstObject * parent,
GstEvent * event);
+static gboolean gst_qtdemux_handle_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query);
static gboolean gst_qtdemux_setcaps (GstQTDemux * qtdemux, GstCaps * caps);
static gboolean gst_qtdemux_configure_stream (GstQTDemux * qtdemux,
QtDemuxStream * stream);
static GstFlowReturn gst_qtdemux_process_adapter (GstQTDemux * demux,
gboolean force);
+static void gst_qtdemux_check_seekability (GstQTDemux * demux);
+
static gboolean qtdemux_parse_moov (GstQTDemux * qtdemux,
const guint8 * buffer, guint length);
static gboolean qtdemux_parse_node (GstQTDemux * qtdemux, GNode * node,
static gboolean qtdemux_parse_samples (GstQTDemux * qtdemux,
QtDemuxStream * stream, guint32 n);
static GstFlowReturn qtdemux_expose_streams (GstQTDemux * qtdemux);
-static void gst_qtdemux_stream_free (QtDemuxStream * stream);
+static QtDemuxStream *gst_qtdemux_stream_ref (QtDemuxStream * stream);
+static void gst_qtdemux_stream_unref (QtDemuxStream * stream);
static void gst_qtdemux_stream_clear (QtDemuxStream * stream);
-static void gst_qtdemux_remove_stream (GstQTDemux * qtdemux,
- QtDemuxStream * stream);
static GstFlowReturn qtdemux_prepare_streams (GstQTDemux * qtdemux);
-static void qtdemux_do_allocation (GstQTDemux * qtdemux,
- QtDemuxStream * stream);
+static void qtdemux_do_allocation (QtDemuxStream * stream,
+ GstQTDemux * qtdemux);
static gboolean gst_qtdemux_activate_segment (GstQTDemux * qtdemux,
QtDemuxStream * stream, guint32 seg_idx, GstClockTime offset);
static gboolean gst_qtdemux_stream_update_segment (GstQTDemux * qtdemux,
static void qtdemux_gst_structure_free (GstStructure * gststructure);
static void gst_qtdemux_reset (GstQTDemux * qtdemux, gboolean hard);
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+static void gst_tag_register_spherical_tags (void);
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
static void
gst_qtdemux_class_init (GstQTDemuxClass * klass)
{
gst_tag_register_musicbrainz_tags ();
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ gst_tag_register_spherical_tags ();
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
gst_element_class_add_static_pad_template (gstelement_class,
&gst_qtdemux_sink_template);
gst_element_class_add_static_pad_template (gstelement_class,
qtdemux_sink_activate_mode);
gst_pad_set_chain_function (qtdemux->sinkpad, gst_qtdemux_chain);
gst_pad_set_event_function (qtdemux->sinkpad, gst_qtdemux_handle_sink_event);
+ gst_pad_set_query_function (qtdemux->sinkpad, gst_qtdemux_handle_sink_query);
gst_element_add_pad (GST_ELEMENT_CAST (qtdemux), qtdemux->sinkpad);
qtdemux->adapter = gst_adapter_new ();
qtdemux->flowcombiner = gst_flow_combiner_new ();
g_mutex_init (&qtdemux->expose_lock);
+ qtdemux->active_streams = g_ptr_array_new_with_free_func
+ ((GDestroyNotify) gst_qtdemux_stream_unref);
+ qtdemux->old_streams = g_ptr_array_new_with_free_func
+ ((GDestroyNotify) gst_qtdemux_stream_unref);
+
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ qtdemux->spherical_metadata = (QtDemuxSphericalMetadata *)
+ malloc (sizeof (QtDemuxSphericalMetadata));
+
+ if (qtdemux->spherical_metadata) {
+ qtdemux->spherical_metadata->is_spherical = FALSE;
+ qtdemux->spherical_metadata->is_stitched = FALSE;
+ qtdemux->spherical_metadata->stitching_software = NULL;
+ qtdemux->spherical_metadata->projection_type = NULL;
+ qtdemux->spherical_metadata->stereo_mode = NULL;
+ qtdemux->spherical_metadata->source_count = 0;
+ qtdemux->spherical_metadata->init_view_heading = 0;
+ qtdemux->spherical_metadata->init_view_pitch = 0;
+ qtdemux->spherical_metadata->init_view_roll = 0;
+ qtdemux->spherical_metadata->timestamp = 0;
+ qtdemux->spherical_metadata->full_pano_width_pixels = 0;
+ qtdemux->spherical_metadata->full_pano_height_pixels = 0;
+ qtdemux->spherical_metadata->cropped_area_image_width = 0;
+ qtdemux->spherical_metadata->cropped_area_image_height = 0;
+ qtdemux->spherical_metadata->cropped_area_left = 0;
+ qtdemux->spherical_metadata->cropped_area_top = 0;
+ qtdemux->spherical_metadata->ambisonic_type = QTDEMUX_AMBISONIC_TYPE_UNKNOWN;
+ qtdemux->spherical_metadata->ambisonic_format = QTDEMUX_AMBISONIC_FORMAT_UNKNOWN;
+ qtdemux->spherical_metadata->ambisonic_order = QTDEMUX_AMBISONIC_ORDER_UNKNOWN;
+ }
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
GST_OBJECT_FLAG_SET (qtdemux, GST_ELEMENT_FLAG_INDEXABLE);
gst_qtdemux_reset (qtdemux, TRUE);
{
GstQTDemux *qtdemux = GST_QTDEMUX (object);
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ if (qtdemux->spherical_metadata) {
+ if (qtdemux->spherical_metadata->stitching_software)
+ free(qtdemux->spherical_metadata->stitching_software);
+ if (qtdemux->spherical_metadata->projection_type)
+ free(qtdemux->spherical_metadata->projection_type);
+ if (qtdemux->spherical_metadata->stereo_mode)
+ free(qtdemux->spherical_metadata->stereo_mode);
+
+ free(qtdemux->spherical_metadata);
+ qtdemux->spherical_metadata = NULL;
+ }
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
if (qtdemux->adapter) {
g_object_unref (G_OBJECT (qtdemux->adapter));
qtdemux->adapter = NULL;
qtdemux->cenc_aux_info_sizes = NULL;
g_mutex_clear (&qtdemux->expose_lock);
+ g_ptr_array_free (qtdemux->active_streams, TRUE);
+ g_ptr_array_free (qtdemux->old_streams, TRUE);
+
G_OBJECT_CLASS (parent_class)->dispose (object);
}
stream->stream_tags);
gst_pad_push_event (stream->pad,
gst_event_new_tag (gst_tag_list_ref (stream->stream_tags)));
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+  /* also post the tags as a bus message so the application receives them early */
+ gst_element_post_message (GST_ELEMENT_CAST (qtdemux),
+ gst_message_new_tag (GST_OBJECT_CAST (qtdemux),
+ gst_tag_list_copy (stream->stream_tags)));
+#endif
}
if (G_UNLIKELY (stream->send_global_tags)) {
{
gboolean has_valid_stream = FALSE;
GstEventType etype = GST_EVENT_TYPE (event);
- GList *iter;
+ guint i;
GST_DEBUG_OBJECT (qtdemux, "pushing %s event on all source pads",
GST_EVENT_TYPE_NAME (event));
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
GstPad *pad;
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
GST_DEBUG_OBJECT (qtdemux, "pushing on track-id %u", stream->track_id);
if ((pad = stream->pad)) {
{
guint64 min_offset;
gint64 min_byte_offset = -1;
- GList *iter;
+ guint i;
min_offset = desired_time;
/* for each stream, find the index of the sample in the segment
* and move back to the previous keyframe. */
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
QtDemuxStream *str;
guint32 index, kindex;
guint32 seg_idx;
QtDemuxSegment *seg;
gboolean empty_segment = FALSE;
- str = QTDEMUX_STREAM (iter->data);
+ str = QTDEMUX_NTH_STREAM (qtdemux, i);
if (CUR_STREAM (str)->sparse && !use_sparse)
continue;
guint32 seqnum, GstSeekFlags flags)
{
gint64 desired_offset;
- GList *iter;
+ guint i;
desired_offset = segment->position;
/* and set all streams to the final position */
gst_flow_combiner_reset (qtdemux->flowcombiner);
qtdemux->segment_seqnum = seqnum;
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
stream->time_position = desired_offset;
stream->accumulated_base = 0;
static gboolean
qtdemux_ensure_index (GstQTDemux * qtdemux)
{
- GList *iter;
+ guint i;
GST_DEBUG_OBJECT (qtdemux, "collecting all metadata for all streams");
/* Build complete index */
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
if (!qtdemux_parse_samples (qtdemux, stream, stream->n_samples - 1)) {
GST_LOG_OBJECT (qtdemux,
} else if (gst_pad_push_event (qtdemux->sinkpad, gst_event_ref (event))) {
GST_DEBUG_OBJECT (qtdemux, "Upstream successfully seeked");
res = TRUE;
- } else if (qtdemux->state == QTDEMUX_STATE_MOVIE && qtdemux->n_streams
+ } else if (qtdemux->state == QTDEMUX_STATE_MOVIE
+ && QTDEMUX_N_STREAMS (qtdemux)
&& !qtdemux->fragmented) {
res = gst_qtdemux_do_push_seek (qtdemux, pad, event);
} else {
gint i, index;
gint64 time, min_time;
QtDemuxStream *stream;
- GList *iter;
+ gint iter;
min_time = -1;
stream = NULL;
index = -1;
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
+ for (iter = 0; iter < QTDEMUX_N_STREAMS (qtdemux); iter++) {
QtDemuxStream *str;
gint inc;
gboolean set_sample;
- str = QTDEMUX_STREAM (iter->data);
+ str = QTDEMUX_NTH_STREAM (qtdemux, iter);
set_sample = !set;
if (fw) {
stream->stream_tags = gst_tag_list_new_empty ();
gst_tag_list_set_scope (stream->stream_tags, GST_TAG_SCOPE_STREAM);
g_queue_init (&stream->protection_scheme_event_queue);
+ stream->ref_count = 1;
+ /* consistent default for push based mode */
+ gst_segment_init (&stream->segment, GST_FORMAT_TIME);
return stream;
}
demux->fragmented = TRUE;
demux->mss_mode = TRUE;
- if (demux->n_streams > 1) {
+ if (QTDEMUX_N_STREAMS (demux) > 1) {
/* can't do this, we can only renegotiate for another mss format */
return FALSE;
}
/* TODO update when stream changes during playback */
- if (demux->n_streams == 0) {
+ if (QTDEMUX_N_STREAMS (demux) == 0) {
stream = _create_stream (demux, 1);
- demux->active_streams = g_list_append (demux->active_streams, stream);
- demux->n_streams = 1;
+ g_ptr_array_add (demux->active_streams, stream);
/* mss has no stsd/stsd entry, use id 0 as default */
stream->stsd_entries_length = 1;
stream->stsd_sample_description_id = stream->cur_stsd_entry_index = 0;
stream->stsd_entries = g_new0 (QtDemuxStreamStsdEntry, 1);
} else {
- stream = QTDEMUX_FIRST_STREAM (demux);
+ stream = QTDEMUX_NTH_STREAM (demux, 0);
}
timescale_v = gst_structure_get_value (structure, "timescale");
static void
gst_qtdemux_reset (GstQTDemux * qtdemux, gboolean hard)
{
- GList *iter;
+ gint i;
GST_DEBUG_OBJECT (qtdemux, "Resetting demux");
gst_pad_stop_task (qtdemux->sinkpad);
if (hard) {
qtdemux->segment_seqnum = GST_SEQNUM_INVALID;
- g_list_free_full (qtdemux->active_streams,
- (GDestroyNotify) gst_qtdemux_stream_free);
- g_list_free_full (qtdemux->old_streams,
- (GDestroyNotify) gst_qtdemux_stream_free);
- qtdemux->active_streams = NULL;
- qtdemux->old_streams = NULL;
- qtdemux->n_streams = 0;
+ g_ptr_array_set_size (qtdemux->active_streams, 0);
+ g_ptr_array_set_size (qtdemux->old_streams, 0);
qtdemux->n_video_streams = 0;
qtdemux->n_audio_streams = 0;
qtdemux->n_sub_streams = 0;
}
} else if (qtdemux->mss_mode) {
gst_flow_combiner_reset (qtdemux->flowcombiner);
- g_list_foreach (qtdemux->active_streams,
+ g_ptr_array_foreach (qtdemux->active_streams,
(GFunc) gst_qtdemux_stream_clear, NULL);
} else {
gst_flow_combiner_reset (qtdemux->flowcombiner);
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
stream->sent_eos = FALSE;
stream->time_position = 0;
stream->accumulated_base = 0;
static void
gst_qtdemux_map_and_push_segments (GstQTDemux * qtdemux, GstSegment * segment)
{
- gint i;
- GList *iter;
+ gint i, iter;
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (iter = 0; iter < QTDEMUX_N_STREAMS (qtdemux); iter++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, iter);
stream->time_position = segment->start;
}
}
+/* Append every stream of @src to @dest, taking an extra reference for
+ * @dest; the reference held by @src is dropped when @src is truncated
+ * (its free func is gst_qtdemux_stream_unref).  @src ends up empty. */
+static void
+gst_qtdemux_stream_concat (GstQTDemux * qtdemux, GPtrArray * dest,
+    GPtrArray * src)
+{
+  guint i;
+  guint len;
+
+  len = src->len;
+
+  if (len == 0)
+    return;
+
+  for (i = 0; i < len; i++) {
+    QtDemuxStream *stream = g_ptr_array_index (src, i);
+
+#ifndef GST_DISABLE_GST_DEBUG
+    GST_DEBUG_OBJECT (qtdemux, "Move stream %p (stream-id %s) to %p",
+        stream, GST_STR_NULL (stream->stream_id), dest);
+#endif
+    g_ptr_array_add (dest, gst_qtdemux_stream_ref (stream));
+  }
+
+  /* unrefs each stream once via the array's free function */
+  g_ptr_array_set_size (src, 0);
+}
+
static gboolean
gst_qtdemux_handle_sink_event (GstPad * sinkpad, GstObject * parent,
GstEvent * event)
"not in time format");
/* chain will send initial newsegment after pads have been added */
- if (demux->state != QTDEMUX_STATE_MOVIE || !demux->n_streams) {
+ if (demux->state != QTDEMUX_STATE_MOVIE || !QTDEMUX_N_STREAMS (demux)) {
GST_DEBUG_OBJECT (demux, "still starting, eating event");
goto exit;
}
GST_DEBUG_OBJECT (demux, "Pushing newseg %" GST_SEGMENT_FORMAT, &segment);
/* map segment to internal qt segments and push on each stream */
- if (demux->n_streams) {
+ if (QTDEMUX_N_STREAMS (demux)) {
demux->need_segment = TRUE;
gst_qtdemux_check_send_pending_segment (demux);
}
/* If we are in push mode, and get an EOS before we've seen any streams,
* then error out - we have nowhere to send the EOS */
if (!demux->pullbased) {
- GList *iter;
+ gint i;
gboolean has_valid_stream = FALSE;
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
- if (QTDEMUX_STREAM (iter->data)->pad != NULL) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
+ if (QTDEMUX_NTH_STREAM (demux, i)->pad != NULL) {
has_valid_stream = TRUE;
break;
}
gst_qtdemux_process_adapter (demux, TRUE);
gst_qtdemux_reset (demux, FALSE);
/* We expect new moov box after new stream-start event */
- demux->old_streams =
- g_list_concat (demux->old_streams, demux->active_streams);
- demux->active_streams = NULL;
+ if (demux->exposed) {
+ gst_qtdemux_stream_concat (demux,
+ demux->old_streams, demux->active_streams);
+ }
goto drop;
}
return res;
}
+/* Sink-pad query handler.  Only GST_QUERY_BITRATE is answered locally:
+ * the average bitrate is derived from the upstream byte size and the
+ * stream duration.  Everything else is forwarded to the default
+ * handler.  Returns TRUE if the query was answered. */
+static gboolean
+gst_qtdemux_handle_sink_query (GstPad * pad, GstObject * parent,
+    GstQuery * query)
+{
+  GstQTDemux *demux = GST_QTDEMUX (parent);
+  gboolean res = FALSE;
+
+  switch (GST_QUERY_TYPE (query)) {
+    case GST_QUERY_BITRATE:
+    {
+      GstClockTime duration;
+
+      /* populate demux->upstream_size if not done yet */
+      gst_qtdemux_check_seekability (demux);
+
+      if (demux->upstream_size != -1
+          && gst_qtdemux_get_duration (demux, &duration)) {
+        /* bits = 8 * bytes; scale by GST_SECOND / duration */
+        guint bitrate =
+            gst_util_uint64_scale (8 * demux->upstream_size, GST_SECOND,
+            duration);
+
+        GST_LOG_OBJECT (demux, "bitrate query byte length: %" G_GUINT64_FORMAT
+            " duration %" GST_TIME_FORMAT " resulting a bitrate of %u",
+            demux->upstream_size, GST_TIME_ARGS (duration), bitrate);
+
+        /* TODO: better results based on ranges/index tables */
+        gst_query_set_bitrate (query, bitrate);
+        res = TRUE;
+      }
+      break;
+    }
+    default:
+      res = gst_pad_query_default (pad, (GstObject *) demux, query);
+      break;
+  }
+
+  return res;
+}
+
+
#if 0
static void
gst_qtdemux_set_index (GstElement * element, GstIndex * index)
stream->stsd_entries_length = 0;
}
-
-static void
-gst_qtdemux_stream_free (QtDemuxStream * stream)
+/* Atomically take an extra reference on @stream and return it, for
+ * chaining.  Release with gst_qtdemux_stream_unref(). */
+static QtDemuxStream *
+gst_qtdemux_stream_ref (QtDemuxStream * stream)
{
-  gst_qtdemux_stream_reset (stream);
-  gst_tag_list_unref (stream->stream_tags);
-  if (stream->pad) {
-    GstQTDemux *demux = stream->demux;
-    gst_element_remove_pad (GST_ELEMENT_CAST (demux), stream->pad);
-    gst_flow_combiner_remove_pad (demux->flowcombiner, stream->pad);
-  }
-  g_free (stream->stream_id);
-  g_free (stream);
+  g_atomic_int_add (&stream->ref_count, 1);
+
+  return stream;
}
+/* Drop one reference to @stream.  On the last unref the stream is
+ * reset, its tag list released, its pad removed from the element and
+ * the flow combiner, and the stream struct itself freed. */
static void
-gst_qtdemux_remove_stream (GstQTDemux * qtdemux, QtDemuxStream * stream)
+gst_qtdemux_stream_unref (QtDemuxStream * stream)
{
-  qtdemux->active_streams = g_list_remove (qtdemux->active_streams, stream);
-  gst_qtdemux_stream_free (stream);
-  qtdemux->n_streams--;
+  if (g_atomic_int_dec_and_test (&stream->ref_count)) {
+    gst_qtdemux_stream_reset (stream);
+    gst_tag_list_unref (stream->stream_tags);
+    if (stream->pad) {
+      GstQTDemux *demux = stream->demux;
+      gst_element_remove_pad (GST_ELEMENT_CAST (demux), stream->pad);
+      gst_flow_combiner_remove_pad (demux->flowcombiner, stream->pad);
+    }
+    g_free (stream->stream_id);
+    g_free (stream);
+  }
}
static GstStateChangeReturn
}
}
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+/* Extract the integer text content of @param_name (an opening XML tag,
+ * e.g. "<GSpherical:SourceCount>") from @xml_str into @value.
+ * Leading/trailing blanks are trimmed; an optional sign plus digits is
+ * required.  @value is left untouched on any parse error. */
+static void
+_get_int_value_from_xml_string (GstQTDemux * qtdemux,
+    const char *xml_str, const char *param_name, int *value)
+{
+  char *value_start, *value_end, *endptr;
+  const short value_length_max = 12;
+  char init_view_ret[12];
+  int value_length = 0;
+  int i = 0;
+  long parsed;
+
+  value_start = (xml_str && param_name) ? strstr (xml_str, param_name) : NULL;
+
+  if (!value_start) {
+    GST_WARNING_OBJECT (qtdemux, "error: parameter does not exist: %s\n",
+        param_name);
+    return;
+  }
+
+  /* skip past the tag and any leading whitespace */
+  value_start += strlen (param_name);
+  while ((value_start[0] == ' ') || (value_start[0] == '\t'))
+    value_start++;
+
+  /* the value ends at the next '<' (closing tag) */
+  value_end = strchr (value_start, '<');
+  if (!value_end) {
+    GST_ERROR_OBJECT (qtdemux, "error: incorrect XML\n");
+    return;
+  }
+
+  /* trim trailing whitespace */
+  value_length = value_end - value_start;
+  while ((value_length >= 1) && ((value_start[value_length - 1] == ' ')
+          || (value_start[value_length - 1] == '\t')))
+    value_length--;
+
+  /* accept an optional sign followed by digits only */
+  if (value_start[i] == '+' || value_start[i] == '-')
+    i++;
+  while (i < value_length) {
+    if (value_start[i] < '0' || value_start[i] > '9') {
+      GST_ERROR_OBJECT (qtdemux,
+          "error: incorrect value, integer was expected\n");
+      return;
+    }
+    i++;
+  }
+
+  if (value_length >= value_length_max || value_length < 1) {
+    GST_ERROR_OBJECT (qtdemux, "error: empty XML value or incorrect range\n");
+    return;
+  }
+
+  /* copy exactly the validated span; the previous strncpy() call copied
+   * value_length_max bytes regardless of the actual value length */
+  memcpy (init_view_ret, value_start, value_length);
+  init_view_ret[value_length] = '\0';
+
+  parsed = strtol (init_view_ret, &endptr, 10);
+  if (endptr == init_view_ret) {
+    GST_ERROR_OBJECT (qtdemux, "error: no digits were found\n");
+    return;
+  }
+
+  /* up to 11 digits fit the buffer but may exceed the int range on
+   * LP64: clamp instead of silently truncating */
+  if (parsed > G_MAXINT)
+    parsed = G_MAXINT;
+  else if (parsed < G_MININT)
+    parsed = G_MININT;
+
+  *value = (int) parsed;
+
+  return;
+}
+
+/* Extract the string content of @param_name (an opening XML tag) from
+ * @xml_str into @value as a freshly strndup'd, whitespace-trimmed copy
+ * (released with free()).  @value is left untouched on error. */
+static void
+_get_string_value_from_xml_string (GstQTDemux * qtdemux,
+    const char *xml_str, const char *param_name, char **value)
+{
+  const short max_len = 256;
+  char *start, *end;
+  int len;
+
+  start = (xml_str && param_name) ? strstr (xml_str, param_name) : NULL;
+  if (start == NULL) {
+    GST_WARNING_OBJECT (qtdemux, "error: parameter does not exist: %s\n",
+        param_name);
+    return;
+  }
+
+  /* step past the tag, then over any leading blanks */
+  start += strlen (param_name);
+  for (; *start == ' ' || *start == '\t'; start++);
+
+  /* the value ends at the closing tag */
+  end = strchr (start, '<');
+  if (end == NULL) {
+    GST_ERROR_OBJECT (qtdemux, "error: incorrect XML\n");
+    return;
+  }
+
+  /* trim trailing blanks */
+  len = end - start;
+  for (; len >= 1 && (start[len - 1] == ' ' || start[len - 1] == '\t'); len--);
+
+  if (len >= max_len || len < 1) {
+    GST_ERROR_OBJECT (qtdemux, "error: empty XML value or incorrect range\n");
+    return;
+  }
+
+  *value = strndup (start, len);
+
+  return;
+}
+
+/* Extract a boolean from the text content of @param_name in @xml_str:
+ * TRUE iff the trimmed value contains "true".  @value is left
+ * untouched on error. */
+static void
+_get_bool_value_from_xml_string (GstQTDemux * qtdemux,
+    const char *xml_str, const char *param_name, gboolean * value)
+{
+  char *start, *end;
+  int len;
+
+  start = (xml_str && param_name) ? strstr (xml_str, param_name) : NULL;
+  if (start == NULL) {
+    GST_WARNING_OBJECT (qtdemux, "error: parameter does not exist: %s\n",
+        param_name);
+    return;
+  }
+
+  /* step past the tag, then over any leading blanks */
+  start += strlen (param_name);
+  while (*start == ' ' || *start == '\t')
+    start++;
+
+  /* the value ends at the closing tag */
+  end = strchr (start, '<');
+  if (end == NULL) {
+    GST_ERROR_OBJECT (qtdemux, "error: incorrect XML\n");
+    return;
+  }
+
+  /* trim trailing blanks */
+  len = end - start;
+  while (len >= 1 && (start[len - 1] == ' ' || start[len - 1] == '\t'))
+    len--;
+
+  if (len < 1) {
+    GST_ERROR_OBJECT (qtdemux, "error: empty XML value or incorrect range\n");
+    return;
+  }
+
+  *value = g_strstr_len (start, len, "true") ? TRUE : FALSE;
+
+  return;
+}
+
+/* Parse the GSpherical XML payload of the spherical uuid box into
+ * qtdemux->spherical_metadata.  The per-view and panorama fields are
+ * only read when the video reports itself both spherical and stitched;
+ * each helper leaves its output untouched when the tag is absent. */
+static void
+_parse_spatial_video_metadata_from_xml_string (GstQTDemux * qtdemux, const char *xmlStr)
+{
+  const char is_spherical_str[] = "<GSpherical:Spherical>";
+  const char is_stitched_str[] = "<GSpherical:Stitched>";
+  const char stitching_software_str[] = "<GSpherical:StitchingSoftware>";
+  const char projection_type_str[] = "<GSpherical:ProjectionType>";
+  const char stereo_mode_str[] = "<GSpherical:StereoMode>";
+  const char source_count_str[] = "<GSpherical:SourceCount>";
+  const char init_view_heading_str[] = "<GSpherical:InitialViewHeadingDegrees>";
+  const char init_view_pitch_str[] = "<GSpherical:InitialViewPitchDegrees>";
+  const char init_view_roll_str[] = "<GSpherical:InitialViewRollDegrees>";
+  const char timestamp_str[] = "<GSpherical:Timestamp>";
+  const char full_pano_width_str[] = "<GSpherical:FullPanoWidthPixels>";
+  const char full_pano_height_str[] = "<GSpherical:FullPanoHeightPixels>";
+  const char cropped_area_image_width_str[] =
+      "<GSpherical:CroppedAreaImageWidthPixels>";
+  const char cropped_area_image_height_str[] =
+      "<GSpherical:CroppedAreaImageHeightPixels>";
+  const char cropped_area_left_str[] = "<GSpherical:CroppedAreaLeftPixels>";
+  const char cropped_area_top_str[] = "<GSpherical:CroppedAreaTopPixels>";
+
+  QtDemuxSphericalMetadata * spherical_metadata = qtdemux->spherical_metadata;
+
+  /* the (gboolean *) casts are no-ops: both fields are already gboolean */
+  _get_bool_value_from_xml_string (qtdemux, xmlStr, is_spherical_str,
+      (gboolean *) & spherical_metadata->is_spherical);
+  _get_bool_value_from_xml_string (qtdemux, xmlStr, is_stitched_str,
+      (gboolean *) & spherical_metadata->is_stitched);
+
+  if (spherical_metadata->is_spherical && spherical_metadata->is_stitched) {
+    _get_string_value_from_xml_string (qtdemux, xmlStr,
+        stitching_software_str, &spherical_metadata->stitching_software);
+    _get_string_value_from_xml_string (qtdemux, xmlStr,
+        projection_type_str, &spherical_metadata->projection_type);
+    _get_string_value_from_xml_string (qtdemux, xmlStr, stereo_mode_str,
+        &spherical_metadata->stereo_mode);
+    _get_int_value_from_xml_string (qtdemux, xmlStr, source_count_str,
+        &spherical_metadata->source_count);
+    _get_int_value_from_xml_string (qtdemux, xmlStr,
+        init_view_heading_str, &spherical_metadata->init_view_heading);
+    _get_int_value_from_xml_string (qtdemux, xmlStr, init_view_pitch_str,
+        &spherical_metadata->init_view_pitch);
+    _get_int_value_from_xml_string (qtdemux, xmlStr, init_view_roll_str,
+        &spherical_metadata->init_view_roll);
+    _get_int_value_from_xml_string (qtdemux, xmlStr, timestamp_str,
+        &spherical_metadata->timestamp);
+    _get_int_value_from_xml_string (qtdemux, xmlStr, full_pano_width_str,
+        &spherical_metadata->full_pano_width_pixels);
+    _get_int_value_from_xml_string (qtdemux, xmlStr,
+        full_pano_height_str, &spherical_metadata->full_pano_height_pixels);
+    _get_int_value_from_xml_string (qtdemux, xmlStr,
+        cropped_area_image_width_str,
+        &spherical_metadata->cropped_area_image_width);
+    _get_int_value_from_xml_string (qtdemux, xmlStr,
+        cropped_area_image_height_str,
+        &spherical_metadata->cropped_area_image_height);
+    _get_int_value_from_xml_string (qtdemux, xmlStr, cropped_area_left_str,
+        &spherical_metadata->cropped_area_left);
+    _get_int_value_from_xml_string (qtdemux, xmlStr, cropped_area_top_str,
+        &spherical_metadata->cropped_area_top);
+  }
+
+  return;
+}
+
+/* Register the spherical-video and ambisonic-audio tags so they can be
+ * posted on the bus.  Table-driven; registration order matches the
+ * previous one-call-per-tag form exactly. */
+static void
+gst_tag_register_spherical_tags (void)
+{
+  static const struct
+  {
+    const char *name;
+    GType type;
+    const char *nick;
+    const char *blurb;
+  } tags[] = {
+    {"is_spherical", G_TYPE_INT, "tag-spherical",
+        "Flag indicating if the video is a spherical video"},
+    {"is_stitched", G_TYPE_INT, "tag-stitched",
+        "Flag indicating if the video is stitched"},
+    {"stitching_software", G_TYPE_STRING, "tag-stitching-software",
+        "Software used to stitch the spherical video"},
+    {"projection_type", G_TYPE_STRING, "tag-projection-type",
+        "Projection type used in the video frames"},
+    {"stereo_mode", G_TYPE_STRING, "tag-stereo-mode",
+        "Description of stereoscopic 3D layout"},
+    {"source_count", G_TYPE_INT, "tag-source-count",
+        "Number of cameras used to create the spherical video"},
+    {"init_view_heading", G_TYPE_INT, "tag-init-view-heading",
+        "The heading angle of the initial view in degrees"},
+    {"init_view_pitch", G_TYPE_INT, "tag-init-view-pitch",
+        "The pitch angle of the initial view in degrees"},
+    {"init_view_roll", G_TYPE_INT, "tag-init-view-roll",
+        "The roll angle of the initial view in degrees"},
+    {"timestamp", G_TYPE_INT, "tag-timestamp",
+        "Epoch timestamp of when the first frame in the video was recorded"},
+    {"full_pano_width_pixels", G_TYPE_INT, "tag-full-pano-width",
+        "Width of the encoded video frame in pixels"},
+    {"full_pano_height_pixels", G_TYPE_INT, "tag-full-pano-height",
+        "Height of the encoded video frame in pixels"},
+    {"cropped_area_image_width", G_TYPE_INT, "tag-cropped-area-image-width",
+        "Width of the video frame to display (e.g. cropping)"},
+    {"cropped_area_image_height", G_TYPE_INT, "tag-cropped-area-image-height",
+        "Height of the video frame to display (e.g. cropping)"},
+    {"cropped_area_left", G_TYPE_INT, "tag-cropped-area-left",
+        "Column where the left edge of the image was cropped from the"
+          " full sized panorama"},
+    {"cropped_area_top", G_TYPE_INT, "tag-cropped-area-top",
+        "Row where the top edge of the image was cropped from the"
+          " full sized panorama"},
+    {"ambisonic_type", G_TYPE_INT, "tag-ambisonic-type",
+        "Specifies the type of ambisonic audio represented"},
+    {"ambisonic_format", G_TYPE_INT, "tag-ambisonic-format",
+        "Specifies the ambisonic audio format"},
+    {"ambisonic_order", G_TYPE_INT, "tag-ambisonic-order",
+        "Specifies the ambisonic audio channel order"},
+  };
+  guint i;
+
+  for (i = 0; i < G_N_ELEMENTS (tags); i++)
+    gst_tag_register (tags[i].name, GST_TAG_FLAG_META, tags[i].type,
+        _(tags[i].nick), _(tags[i].blurb), NULL);
+
+  return;
+}
+
+/* Post every collected spherical/ambisonic field on the bus as a tag
+ * message (tags registered by gst_tag_register_spherical_tags()).
+ * String-valued tags are only added when non-NULL; the tag list copy
+ * is owned by the message and the local list is unreffed here. */
+static void
+_send_spherical_metadata_msg_to_bus (GstQTDemux * qtdemux)
+{
+  GstTagList *taglist;
+  QtDemuxSphericalMetadata *spherical_metadata = qtdemux->spherical_metadata;
+
+  GST_DEBUG_OBJECT (qtdemux, "is_spherical = %d",
+      spherical_metadata->is_spherical);
+  GST_DEBUG_OBJECT (qtdemux, "is_stitched = %d",
+      spherical_metadata->is_stitched);
+  GST_DEBUG_OBJECT (qtdemux, "stitching_software = %s",
+      spherical_metadata->stitching_software);
+  GST_DEBUG_OBJECT (qtdemux, "projection_type = %s",
+      spherical_metadata->projection_type);
+  GST_DEBUG_OBJECT (qtdemux, "stereo_mode = %s",
+      spherical_metadata->stereo_mode);
+  GST_DEBUG_OBJECT (qtdemux, "source_count %d",
+      spherical_metadata->source_count);
+  GST_DEBUG_OBJECT (qtdemux, "init_view_heading = %d",
+      spherical_metadata->init_view_heading);
+  GST_DEBUG_OBJECT (qtdemux, "init_view_pitch = %d",
+      spherical_metadata->init_view_pitch);
+  GST_DEBUG_OBJECT (qtdemux, "init_view_roll = %d",
+      spherical_metadata->init_view_roll);
+  GST_DEBUG_OBJECT (qtdemux, "timestamp = %d", spherical_metadata->timestamp);
+  GST_DEBUG_OBJECT (qtdemux, "full_pano_width_pixels = %d",
+      spherical_metadata->full_pano_width_pixels);
+  GST_DEBUG_OBJECT (qtdemux, "full_pano_height_pixels = %d",
+      spherical_metadata->full_pano_height_pixels);
+  GST_DEBUG_OBJECT (qtdemux, "cropped_area_image_width = %d",
+      spherical_metadata->cropped_area_image_width);
+  GST_DEBUG_OBJECT (qtdemux, "cropped_area_image_height = %d",
+      spherical_metadata->cropped_area_image_height);
+  GST_DEBUG_OBJECT (qtdemux, "cropped_area_left = %d",
+      spherical_metadata->cropped_area_left);
+  GST_DEBUG_OBJECT (qtdemux, "cropped_area_top = %d",
+      spherical_metadata->cropped_area_top);
+  GST_DEBUG_OBJECT (qtdemux, "ambisonic_type = %d",
+      spherical_metadata->ambisonic_type);
+  GST_DEBUG_OBJECT (qtdemux, "ambisonic_order = %d",
+      spherical_metadata->ambisonic_order);
+  GST_DEBUG_OBJECT (qtdemux, "ambisonic_format = %d",
+      spherical_metadata->ambisonic_format);
+
+  /* integer-valued tags are always present (default 0/UNKNOWN) */
+  taglist = gst_tag_list_new_empty ();
+  gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE,
+      "is_spherical", spherical_metadata->is_spherical,
+      "is_stitched", spherical_metadata->is_stitched,
+      "source_count", spherical_metadata->source_count,
+      "init_view_heading", spherical_metadata->init_view_heading,
+      "init_view_pitch", spherical_metadata->init_view_pitch,
+      "init_view_roll", spherical_metadata->init_view_roll,
+      "timestamp", spherical_metadata->timestamp,
+      "full_pano_width_pixels", spherical_metadata->full_pano_width_pixels,
+      "full_pano_height_pixels", spherical_metadata->full_pano_height_pixels,
+      "cropped_area_image_width", spherical_metadata->cropped_area_image_width,
+      "cropped_area_image_height", spherical_metadata->cropped_area_image_height,
+      "cropped_area_left", spherical_metadata->cropped_area_left,
+      "cropped_area_top", spherical_metadata->cropped_area_top,
+      "ambisonic_type", spherical_metadata->ambisonic_type,
+      "ambisonic_format", spherical_metadata->ambisonic_format,
+      "ambisonic_order", spherical_metadata->ambisonic_order,
+      NULL);
+
+  /* string tags may not have been parsed; add only when set */
+  if (spherical_metadata->stitching_software)
+    gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE,
+        "stitching_software", spherical_metadata->stitching_software,
+        NULL);
+  if (spherical_metadata->projection_type)
+    gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE,
+        "projection_type", spherical_metadata->projection_type,
+        NULL);
+  if (spherical_metadata->stereo_mode)
+    gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE,
+        "stereo_mode", spherical_metadata->stereo_mode,
+        NULL);
+
+  gst_element_post_message (GST_ELEMENT_CAST (qtdemux),
+      gst_message_new_tag (GST_OBJECT_CAST (qtdemux),
+          gst_tag_list_copy (taglist)));
+
+  gst_tag_list_unref(taglist);
+
+  return;
+}
+
+/* Parse the Google Spatial Audio (SA3D) box and record the ambisonic
+ * type/format/order in qtdemux->spherical_metadata.  @buffer points at
+ * the start of the atom, @length is the whole atom size in bytes. */
+static void
+qtdemux_parse_SA3D (GstQTDemux * qtdemux, const guint8 * buffer, gint length)
+{
+  guint offset = 0;
+
+  guint8 version = 0;
+  guint8 ambisonic_type = 0;
+  guint32 ambisonic_order = 0;
+  guint8 ambisonic_channel_ordering = 0;
+  guint8 ambisonic_normalization = 0;
+  guint32 num_channels = 0;
+  guint32 channel_map[49] = { 0 };      /* Up to 6th order */
+
+  guint i;
+
+  GST_DEBUG_OBJECT (qtdemux, "qtdemux_parse_SA3D");
+
+  qtdemux->header_size += length;
+  /* a zero 32-bit size field means the atom uses a 64-bit size header */
+  offset = (QT_UINT32 (buffer) == 0) ? 16 : 8;
+
+  if (length <= offset + 16) {
+    GST_DEBUG_OBJECT (qtdemux, "SA3D atom is too short, skipping");
+    return;
+  }
+
+  version = QT_UINT8 (buffer + offset);
+  ambisonic_type = QT_UINT8 (buffer + offset + 1);
+  ambisonic_order = QT_UINT32 (buffer + offset + 2);
+  ambisonic_channel_ordering = QT_UINT8 (buffer + offset + 6);
+  ambisonic_normalization = QT_UINT8 (buffer + offset + 7);
+  num_channels = QT_UINT32 (buffer + offset + 8);
+
+  /* num_channels is untrusted file data: bound it by the channel_map
+   * capacity and by the atom payload, otherwise the loop below would
+   * overflow the stack array / read past the end of the atom */
+  if (num_channels > G_N_ELEMENTS (channel_map)
+      || (guint) length < offset + 12 + num_channels * 4) {
+    GST_WARNING_OBJECT (qtdemux,
+        "SA3D atom has bogus num_channels %u, skipping", num_channels);
+    return;
+  }
+
+  for (i = 0; i < num_channels; ++i)
+    channel_map[i] = QT_UINT32 (buffer + offset + 12 + i * 4);
+
+  GST_DEBUG_OBJECT (qtdemux, "version: %d", version);
+  GST_DEBUG_OBJECT (qtdemux, "ambisonic_type: %d", ambisonic_type);
+  GST_DEBUG_OBJECT (qtdemux, "ambisonic_order: %d", ambisonic_order);
+  GST_DEBUG_OBJECT (qtdemux, "ambisonic_channel_ordering: %d",
+      ambisonic_channel_ordering);
+  GST_DEBUG_OBJECT (qtdemux, "ambisonic_normalization: %d",
+      ambisonic_normalization);
+  GST_DEBUG_OBJECT (qtdemux, "num_channels: %d", num_channels);
+  for (i = 0; i < num_channels; ++i)
+    GST_DEBUG_OBJECT (qtdemux, "channel_map: %d", channel_map[i]);
+
+  /* spherical_metadata may be NULL if its allocation failed in init */
+  if (qtdemux->spherical_metadata
+      && version == RFC_AMBISONIC_SA3DBOX_VERSION_SUPPORTED) {
+    if (ambisonic_type == RFC_AMBISONIC_TYPE_PERIPHONIC)
+      qtdemux->spherical_metadata->ambisonic_type = QTDEMUX_AMBISONIC_TYPE_PERIPHONIC;
+
+    if (ambisonic_order == RFC_AMBISONIC_ORDER_FOA) {
+      if (num_channels == 4) {
+        qtdemux->spherical_metadata->ambisonic_order = QTDEMUX_AMBISONIC_ORDER_FOA;
+
+        /* ACN ordering + SN3D normalization + identity map = AmbiX */
+        if ((ambisonic_channel_ordering == RFC_AMBISONIC_CHANNEL_ORDERING_ACN)
+            && (ambisonic_normalization == RFC_AMBISONIC_NORMALIZATION_SN3D)
+            && (channel_map[0] == 0) && (channel_map[1] == 1)
+            && (channel_map[2] == 2) && (channel_map[3] == 3))
+          qtdemux->spherical_metadata->ambisonic_format = QTDEMUX_AMBISONIC_FORMAT_AMBIX;
+
+        /* FuMa ordering + FuMa normalization + WXYZ map = .amb (B-format) */
+        if ((ambisonic_channel_ordering == RFC_AMBISONIC_CHANNEL_ORDERING_FUMA)
+            && (ambisonic_normalization == RFC_AMBISONIC_NORMALIZATION_FUMA)
+            && (channel_map[0] == 0) && (channel_map[1] == 3)
+            && (channel_map[2] == 1) && (channel_map[3] == 2))
+          qtdemux->spherical_metadata->ambisonic_format = QTDEMUX_AMBISONIC_FORMAT_AMB;
+      }
+    }
+  }
+
+  return;
+}
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
static void
qtdemux_update_default_sample_encryption_settings (GstQTDemux * qtdemux,
QtDemuxCencSampleSetInfo * info, guint32 is_encrypted, guint8 iv_size,
gboolean uses_sub_sample_encryption = FALSE;
guint32 sample_count;
- stream = QTDEMUX_FIRST_STREAM (qtdemux);
- if (!stream)
+ if (QTDEMUX_N_STREAMS (qtdemux) == 0)
return;
+ stream = QTDEMUX_NTH_STREAM (qtdemux, 0);
+
structure = gst_caps_get_structure (CUR_STREAM (stream)->caps, 0);
if (!gst_structure_has_name (structure, "application/x-cenc")) {
GST_WARNING_OBJECT (qtdemux,
0xa2, 0x44, 0x6c, 0x42, 0x7c, 0x64, 0x8d, 0xf4
};
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ static const guint8 spherical_uuid[] = {
+ 0xff, 0xcc, 0x82, 0x63, 0xf8, 0x55, 0x4a, 0x93,
+ 0x88, 0x14, 0x58, 0x7a, 0x02, 0x52, 0x1f, 0xdd
+ };
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
guint offset;
/* counts as header data */
return;
}
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ if (memcmp (buffer + offset, spherical_uuid, 16) == 0) {
+ const char *contents;
+
+ GST_DEBUG_OBJECT (qtdemux, "spherical uuid was found");
+ contents = (char *) (buffer + offset + 16);
+ GST_DEBUG_OBJECT (qtdemux, "contents: %s\n", contents);
+
+ if (qtdemux->spherical_metadata)
+ _parse_spatial_video_metadata_from_xml_string (qtdemux, contents);
+
+ return;
+ }
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
+
if (memcmp (buffer + offset, xmp_uuid, 16) == 0) {
GstBuffer *buf;
GstTagList *taglist;
guint i;
guint64 movdur;
GstClockTime prevdur;
- GList *iter;
movdur = GSTTIME_TO_QTTIME (qtdemux, duration);
qtdemux->segment.stop = fixeddur;
}
}
- for (iter = qtdemux->active_streams, i = 0; iter;
- iter = g_list_next (iter), i++) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
movdur = GSTTIME_TO_QTSTREAMTIME (stream, duration);
if (movdur > stream->duration) {
qtdemux_find_stream (GstQTDemux * qtdemux, guint32 id)
{
QtDemuxStream *stream;
- GList *iter;
+ gint i;
/* check */
if (G_UNLIKELY (!id)) {
return NULL;
}
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ stream = QTDEMUX_NTH_STREAM (qtdemux, i);
if (stream->track_id == id)
return stream;
}
if (qtdemux->mss_mode) {
/* mss should have only 1 stream anyway */
- return QTDEMUX_FIRST_STREAM (qtdemux);
+ return QTDEMUX_NTH_STREAM (qtdemux, 0);
}
return NULL;
GstBuffer *pssh = NULL;
GstEvent *event = NULL;
guint32 parent_box_type;
- GList *iter;
+ gint i;
if (G_UNLIKELY (pssh_size < 32U)) {
GST_ERROR_OBJECT (qtdemux, "invalid box size");
/* Push an event containing the pssh box onto the queues of all streams. */
event = gst_event_new_protection (sysid_string, pssh,
(parent_box_type == FOURCC_moov) ? "isobmff/moov" : "isobmff/moof");
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
GST_TRACE_OBJECT (qtdemux,
"adding protection event for stream %s and system %s",
stream->stream_id, sysid_string);
* This way if the user opens a file where the first tfdt is 1 hour
* into the presentation, they will not have to wait 1 hour for run
* time to catch up and actual playback to start. */
- GList *iter;
+ gint i;
GST_DEBUG_OBJECT (qtdemux, "First fragment has a non-zero tfdt, "
"performing an internal seek to %" GST_TIME_FORMAT,
qtdemux->segment.start = min_dts;
qtdemux->segment.time = qtdemux->segment.position = min_dts;
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
stream->time_position = min_dts;
}
beach:
if (ret == GST_FLOW_EOS && (qtdemux->got_moov || qtdemux->media_caps)) {
/* digested all data, show what we have */
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ if (qtdemux->spherical_metadata)
+ _send_spherical_metadata_msg_to_bus (qtdemux);
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
qtdemux_prepare_streams (qtdemux);
QTDEMUX_EXPOSE_LOCK (qtdemux);
ret = qtdemux_expose_streams (qtdemux);
QtDemuxStream *ref_str = NULL;
guint64 seg_media_start_mov; /* segment media start time in mov format */
guint64 target_ts;
- GList *iter;
+ gint i;
/* Now we choose an arbitrary stream, get the previous keyframe timestamp
* and finally align all the other streams on that timestamp with their
* respective keyframes */
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *str = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *str = QTDEMUX_NTH_STREAM (qtdemux, i);
/* No candidate yet, take the first stream */
if (!ref_str) {
seg_media_start_mov = seg->trak_media_start;
GST_LOG_OBJECT (qtdemux, "keyframe index %u ts %" G_GUINT64_FORMAT
- " seg start %" G_GUINT64_FORMAT " %" GST_TIME_FORMAT "\n",
+ " seg start %" G_GUINT64_FORMAT " %" GST_TIME_FORMAT,
k_index, target_ts, seg_media_start_mov,
GST_TIME_ARGS (seg->media_start));
ref_k_index = k_index;
/* Align them all on this */
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
guint32 index = 0;
GstClockTime seg_time = 0;
- QtDemuxStream *str = QTDEMUX_STREAM (iter->data);
+ QtDemuxStream *str = QTDEMUX_NTH_STREAM (qtdemux, i);
/* aligning reference stream again might lead to backing up to yet another
* keyframe (due to timestamp rounding issues),
if (G_UNLIKELY (stream->segment_index != seg_idx))
gst_qtdemux_activate_segment (qtdemux, stream, seg_idx, time_position);
- if (G_UNLIKELY (QTSEGMENT_IS_EMPTY (&stream->segments[stream->
- segment_index]))) {
+ if (G_UNLIKELY (QTSEGMENT_IS_EMPTY (&stream->
+ segments[stream->segment_index]))) {
QtDemuxSegment *seg = &stream->segments[stream->segment_index];
GST_LOG_OBJECT (qtdemux, "Empty segment activated,"
static void
gst_qtdemux_sync_streams (GstQTDemux * demux)
{
- GList *iter;
+ gint i;
- if (demux->n_streams <= 1)
+ if (QTDEMUX_N_STREAMS (demux) <= 1)
return;
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
QtDemuxStream *stream;
GstClockTime end_time;
- stream = QTDEMUX_STREAM (iter->data);
+ stream = QTDEMUX_NTH_STREAM (demux, i);
if (!stream->pad)
continue;
}
static guint8 *
-convert_to_ccdata (const guint8 * ccpair, guint8 ccpair_size, guint field,
+convert_to_s334_1a (const guint8 * ccpair, guint8 ccpair_size, guint field,
gsize * res)
{
guint8 *storage;
*res = ccpair_size / 2 * 3;
storage = g_malloc (*res);
for (i = 0; i * 2 < ccpair_size; i += 1) {
+ /* FIXME: Use line offset 0 as we simply can't know here */
if (field == 1)
- storage[i * 3] = 0xfc;
+ storage[i * 3] = 0x80 | 0x00;
else
- storage[i * 3] = 0xfd;
+ storage[i * 3] = 0x00 | 0x00;
storage[i * 3 + 1] = ccpair[i * 2];
storage[i * 3 + 2] = ccpair[i * 2 + 1];
}
goto invalid_cdat;
}
- /* Convert to cc_data triplet */
+ /* Convert to S334-1 Annex A byte triplet */
if (fourcc == FOURCC_cdat)
- cdat = convert_to_ccdata (data + 8, atom_length - 8, 1, &cdat_size);
+ cdat = convert_to_s334_1a (data + 8, atom_length - 8, 1, &cdat_size);
else
- cdt2 = convert_to_ccdata (data + 8, atom_length - 8, 2, &cdt2_size);
+ cdt2 = convert_to_s334_1a (data + 8, atom_length - 8, 2, &cdt2_size);
GST_DEBUG_OBJECT (stream->pad, "size:%" G_GSIZE_FORMAT " atom_length:%u",
size, atom_length);
if (fourcc == FOURCC_cdat) {
if (cdat == NULL)
cdat =
- convert_to_ccdata (data + atom_length + 8,
+ convert_to_s334_1a (data + atom_length + 8,
new_atom_length - 8, 1, &cdat_size);
else
GST_WARNING_OBJECT (stream->pad,
} else {
if (cdt2 == NULL)
cdt2 =
- convert_to_ccdata (data + atom_length + 8,
+ convert_to_s334_1a (data + atom_length + 8,
new_atom_length - 8, 2, &cdt2_size);
else
GST_WARNING_OBJECT (stream->pad,
return buf;
}
- gst_buffer_map (buf, &map, GST_MAP_READ);
+ gst_buffer_map (buf, &map, GST_MAP_READ);
+
+ /* empty buffer is sent to terminate previous subtitle */
+ if (map.size <= 2) {
+ gst_buffer_unmap (buf, &map);
+ gst_buffer_unref (buf);
+ return NULL;
+ }
+ if (stream->subtype == FOURCC_subp) {
+ /* That's all the processing needed for subpictures */
+ gst_buffer_unmap (buf, &map);
+ return buf;
+ }
+
+ if (stream->subtype == FOURCC_clcp) {
+ guint8 *cc;
+ gsize cclen = 0;
+ /* For closed caption, we need to extract the information from the
+ * [cdat],[cdt2] or [ccdp] atom */
+ cc = extract_cc_from_data (stream, map.data, map.size, &cclen);
+ gst_buffer_unmap (buf, &map);
+ gst_buffer_unref (buf);
+ if (cc) {
+ buf = _gst_buffer_new_wrapped (cc, cclen, g_free);
+ } else {
+ /* Conversion failed or there's nothing */
+ buf = NULL;
+ }
+ return buf;
+ }
+
+ nsize = GST_READ_UINT16_BE (map.data);
+ nsize = MIN (nsize, map.size - 2);
+
+ GST_LOG_OBJECT (qtdemux, "3GPP timed text subtitle: %d/%" G_GSIZE_FORMAT "",
+ nsize, map.size);
+
+ /* takes care of UTF-8 validation or UTF-16 recognition,
+ * no other encoding expected */
+ str = gst_tag_freeform_string_to_utf8 ((gchar *) map.data + 2, nsize, NULL);
+ gst_buffer_unmap (buf, &map);
+ if (str) {
+ gst_buffer_unref (buf);
+ buf = _gst_buffer_new_wrapped (str, strlen (str), g_free);
+ } else {
+ /* this should not really happen unless the subtitle is corrupted */
+ gst_buffer_unref (buf);
+ buf = NULL;
+ }
+
+ /* FIXME ? convert optional subsequent style info to markup */
+
+ return buf;
+}
+
+/* Push a single buffer downstream on @stream's pad.
+ *
+ * Takes ownership of @buf.  The buffer is first clipped against the
+ * current segment (when stream->need_clip), gets its DISCONT flag
+ * set/unset from stream->discont, has any queued CENC protection
+ * events pushed ahead of it and per-sample crypto metadata attached,
+ * and is re-aligned via gst_qtdemux_align_buffer() when
+ * stream->alignment > 1.  After the push, pts + duration is recorded
+ * in stream->segment.position (used later to decide when to send GAP
+ * events).  Returns the result of gst_pad_push(), or GST_FLOW_OK when
+ * clipping consumed the buffer entirely. */
+static GstFlowReturn
+gst_qtdemux_push_buffer (GstQTDemux * qtdemux, QtDemuxStream * stream,
+    GstBuffer * buf)
+{
+ GstFlowReturn ret = GST_FLOW_OK;
+ GstClockTime pts, duration;
+
+ if (stream->need_clip)
+ buf = gst_qtdemux_clip_buffer (qtdemux, stream, buf);
+
+ /* clipping may drop the buffer entirely; nothing left to push */
+ if (G_UNLIKELY (buf == NULL))
+ goto exit;
+
+ if (G_UNLIKELY (stream->discont)) {
+ GST_LOG_OBJECT (qtdemux, "marking discont buffer");
+ GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
+ stream->discont = FALSE;
+ } else {
+ GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
+ }
+
+ GST_LOG_OBJECT (qtdemux,
+ "Pushing buffer with dts %" GST_TIME_FORMAT ", pts %" GST_TIME_FORMAT
+ ", duration %" GST_TIME_FORMAT " on pad %s",
+ GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
+ GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
+ GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), GST_PAD_NAME (stream->pad));
+
+ if (stream->protected && stream->protection_scheme_type == FOURCC_cenc) {
+ GstStructure *crypto_info;
+ QtDemuxCencSampleSetInfo *info =
+ (QtDemuxCencSampleSetInfo *) stream->protection_scheme_info;
+ gint index;
+ GstEvent *event;
+
+ /* flush any protection events queued for this stream before the
+ * encrypted buffer they apply to */
+ while ((event = g_queue_pop_head (&stream->protection_scheme_event_queue))) {
+ GST_TRACE_OBJECT (stream->pad, "pushing protection event: %"
+ GST_PTR_FORMAT, event);
+ gst_pad_push_event (stream->pad, event);
+ }
+
+ if (info->crypto_info == NULL) {
+ GST_DEBUG_OBJECT (qtdemux,
+ "cenc metadata hasn't been parsed yet, pushing buffer as if it wasn't encrypted");
+ } else {
+ /* The end of the crypto_info array matches our n_samples position,
+ * so count backward from there */
+ index = stream->sample_index - stream->n_samples + info->crypto_info->len;
+ if (G_LIKELY (index >= 0 && index < info->crypto_info->len)) {
+ /* steal structure from array */
+ crypto_info = g_ptr_array_index (info->crypto_info, index);
+ g_ptr_array_index (info->crypto_info, index) = NULL;
+ GST_LOG_OBJECT (qtdemux, "attaching cenc metadata [%u/%u]", index,
+ info->crypto_info->len);
+ if (!crypto_info || !gst_buffer_add_protection_meta (buf, crypto_info))
+ GST_ERROR_OBJECT (qtdemux,
+ "failed to attach cenc metadata to buffer");
+ } else {
+ GST_INFO_OBJECT (qtdemux, "No crypto info with index %d and sample %d",
+ index, stream->sample_index);
+ }
+ }
+ }
+
+ if (stream->alignment > 1)
+ buf = gst_qtdemux_align_buffer (qtdemux, buf, stream->alignment);
+
+ /* save timestamps before gst_pad_push() takes ownership of buf */
+ pts = GST_BUFFER_PTS (buf);
+ duration = GST_BUFFER_DURATION (buf);
+
+ ret = gst_pad_push (stream->pad, buf);
+
+ if (GST_CLOCK_TIME_IS_VALID (pts) && GST_CLOCK_TIME_IS_VALID (duration)) {
+ /* mark position in stream, we'll need this to know when to send GAP event */
+ stream->segment.position = pts + duration;
+ }
+
+exit:
+
+ return ret;
+}
+
+/* Push @buf on @stream, splitting CEA-608 (c608 in clcp) closed-caption
+ * buffers into one outgoing buffer per video frame when required.
+ *
+ * A c608 buffer carries 3-byte triplets; the 0x80 bit of a triplet's
+ * first byte selects field 1 (set) vs field 2 (clear).  When more than
+ * one byte pair per field is present and stream->need_split is set, the
+ * payload is redistributed over duration * fps_n / fps_d output buffers,
+ * substituting "empty" triplets for fields that run out of data.
+ * Anything else is forwarded unchanged via gst_qtdemux_push_buffer().
+ * Takes ownership of @buf; returns the (last) flow result. */
+static GstFlowReturn
+gst_qtdemux_split_and_push_buffer (GstQTDemux * qtdemux, QtDemuxStream * stream,
+    GstBuffer * buf)
+{
+ GstFlowReturn ret = GST_FLOW_OK;
+
+ if (stream->subtype == FOURCC_clcp
+ && CUR_STREAM (stream)->fourcc == FOURCC_c608 && stream->need_split) {
+ GstMapInfo map;
+ guint n_output_buffers, n_field1 = 0, n_field2 = 0;
+ guint n_triplets, i;
+ guint field1_off = 0, field2_off = 0;
+
+ /* We have to split CEA608 buffers so that each outgoing buffer contains
+ * one byte pair per field according to the framerate of the video track.
+ *
+ * If there is only a single byte pair per field we don't have to do
+ * anything
+ */
+
+ gst_buffer_map (buf, &map, GST_MAP_READ);
+
+ /* count triplets per field: top bit of the first triplet byte set
+ * means field 1, clear means field 2 */
+ n_triplets = map.size / 3;
+ for (i = 0; i < n_triplets; i++) {
+ if (map.data[3 * i] & 0x80)
+ n_field1++;
+ else
+ n_field2++;
+ }
+
+ g_assert (n_field1 || n_field2);
+
+ /* If there's more than 1 frame we have to split, otherwise we can just
+ * pass through */
+ if (n_field1 > 1 || n_field2 > 1) {
+ n_output_buffers =
+ gst_util_uint64_scale (GST_BUFFER_DURATION (buf),
+ CUR_STREAM (stream)->fps_n, GST_SECOND * CUR_STREAM (stream)->fps_d);
+
+ for (i = 0; i < n_output_buffers; i++) {
+ GstBuffer *outbuf =
+ gst_buffer_new_and_alloc ((n_field1 ? 3 : 0) + (n_field2 ? 3 : 0));
+ GstMapInfo outmap;
+ guint8 *outptr;
+
+ gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE);
+ outptr = outmap.data;
+
+ if (n_field1) {
+ gboolean found = FALSE;
+
+ /* scan forward for the next field-1 triplet */
+ while (map.data + field1_off < map.data + map.size) {
+ if (map.data[field1_off] & 0x80) {
+ memcpy (outptr, &map.data[field1_off], 3);
+ field1_off += 3;
+ found = TRUE;
+ break;
+ }
+ field1_off += 3;
+ }
+
+ if (!found) {
+ const guint8 empty[] = { 0x80, 0x80, 0x80 };
+
+ memcpy (outptr, empty, 3);
+ }
+
+ outptr += 3;
+ }
+
+ if (n_field2) {
+ gboolean found = FALSE;
+
+ /* scan forward for the next field-2 triplet */
+ while (map.data + field2_off < map.data + map.size) {
+ if ((map.data[field2_off] & 0x80) == 0) {
+ memcpy (outptr, &map.data[field2_off], 3);
+ field2_off += 3;
+ found = TRUE;
+ break;
+ }
+ field2_off += 3;
+ }
+
+ if (!found) {
+ const guint8 empty[] = { 0x00, 0x80, 0x80 };
- /* empty buffer is sent to terminate previous subtitle */
- if (map.size <= 2) {
- gst_buffer_unmap (buf, &map);
- gst_buffer_unref (buf);
- return NULL;
- }
- if (stream->subtype == FOURCC_subp) {
- /* That's all the processing needed for subpictures */
- gst_buffer_unmap (buf, &map);
- return buf;
- }
+ memcpy (outptr, empty, 3);
+ }
- if (stream->subtype == FOURCC_clcp) {
- guint8 *cc;
- gsize cclen = 0;
- /* For closed caption, we need to extract the information from the
- * [cdat],[cdt2] or [ccdp] atom */
- cc = extract_cc_from_data (stream, map.data, map.size, &cclen);
- gst_buffer_unmap (buf, &map);
- gst_buffer_unref (buf);
- if (cc) {
- buf = _gst_buffer_new_wrapped (cc, cclen, g_free);
- } else {
- /* Conversion failed or there's nothing */
- buf = NULL;
- }
- return buf;
- }
+ outptr += 3;
+ }
- nsize = GST_READ_UINT16_BE (map.data);
- nsize = MIN (nsize, map.size - 2);
+ gst_buffer_unmap (outbuf, &outmap);
- GST_LOG_OBJECT (qtdemux, "3GPP timed text subtitle: %d/%" G_GSIZE_FORMAT "",
- nsize, map.size);
+ /* timestamp each output buffer one frame apart */
+ GST_BUFFER_PTS (outbuf) =
+ GST_BUFFER_PTS (buf) + gst_util_uint64_scale (i,
+ GST_SECOND * CUR_STREAM (stream)->fps_d,
+ CUR_STREAM (stream)->fps_n);
+ GST_BUFFER_DURATION (outbuf) =
+ gst_util_uint64_scale (GST_SECOND, CUR_STREAM (stream)->fps_d,
+ CUR_STREAM (stream)->fps_n);
+ GST_BUFFER_OFFSET (outbuf) = -1;
+ GST_BUFFER_OFFSET_END (outbuf) = -1;
- /* takes care of UTF-8 validation or UTF-16 recognition,
- * no other encoding expected */
- str = gst_tag_freeform_string_to_utf8 ((gchar *) map.data + 2, nsize, NULL);
- gst_buffer_unmap (buf, &map);
- if (str) {
- gst_buffer_unref (buf);
- buf = _gst_buffer_new_wrapped (str, strlen (str), g_free);
+ ret = gst_qtdemux_push_buffer (qtdemux, stream, outbuf);
+
+ /* NOT_LINKED is tolerated so other streams keep flowing */
+ if (ret != GST_FLOW_OK && ret != GST_FLOW_NOT_LINKED)
+ break;
+ }
+ gst_buffer_unmap (buf, &map);
+ gst_buffer_unref (buf);
+ } else {
+ gst_buffer_unmap (buf, &map);
+ ret = gst_qtdemux_push_buffer (qtdemux, stream, buf);
+ }
} else {
- /* this should not really happen unless the subtitle is corrupted */
- gst_buffer_unref (buf);
- buf = NULL;
+ ret = gst_qtdemux_push_buffer (qtdemux, stream, buf);
}
- /* FIXME ? convert optional subsequent style info to markup */
-
- return buf;
+ return ret;
}
/* Sets a buffer's attributes properly and pushes it downstream.
GST_BUFFER_OFFSET (buf) = -1;
GST_BUFFER_OFFSET_END (buf) = -1;
+ if (!keyframe) {
+ GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
+ stream->on_keyframe = FALSE;
+ } else {
+ stream->on_keyframe = TRUE;
+ }
+
if (G_UNLIKELY (CUR_STREAM (stream)->rgb8_palette))
gst_buffer_append_memory (buf,
gst_memory_ref (CUR_STREAM (stream)->rgb8_palette));
}
#endif
- if (stream->need_clip)
- buf = gst_qtdemux_clip_buffer (qtdemux, stream, buf);
-
- if (G_UNLIKELY (buf == NULL))
- goto exit;
-
- if (G_UNLIKELY (stream->discont)) {
- GST_LOG_OBJECT (qtdemux, "marking discont buffer");
- GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
- stream->discont = FALSE;
- } else {
- GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
- }
-
- if (!keyframe) {
- GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
- stream->on_keyframe = FALSE;
- } else {
- stream->on_keyframe = TRUE;
- }
-
-
- GST_LOG_OBJECT (qtdemux,
- "Pushing buffer with dts %" GST_TIME_FORMAT ", pts %" GST_TIME_FORMAT
- ", duration %" GST_TIME_FORMAT " on pad %s", GST_TIME_ARGS (dts),
- GST_TIME_ARGS (pts), GST_TIME_ARGS (duration),
- GST_PAD_NAME (stream->pad));
-
- if (stream->protected && stream->protection_scheme_type == FOURCC_cenc) {
- GstStructure *crypto_info;
- QtDemuxCencSampleSetInfo *info =
- (QtDemuxCencSampleSetInfo *) stream->protection_scheme_info;
- gint index;
- GstEvent *event;
-
- while ((event = g_queue_pop_head (&stream->protection_scheme_event_queue))) {
- GST_TRACE_OBJECT (stream->pad, "pushing protection event: %"
- GST_PTR_FORMAT, event);
- gst_pad_push_event (stream->pad, event);
- }
-
- if (info->crypto_info == NULL) {
- GST_DEBUG_OBJECT (qtdemux,
- "cenc metadata hasn't been parsed yet, pushing buffer as if it wasn't encrypted");
- } else {
- /* The end of the crypto_info array matches our n_samples position,
- * so count backward from there */
- index = stream->sample_index - stream->n_samples + info->crypto_info->len;
- if (G_LIKELY (index >= 0 && index < info->crypto_info->len)) {
- /* steal structure from array */
- crypto_info = g_ptr_array_index (info->crypto_info, index);
- g_ptr_array_index (info->crypto_info, index) = NULL;
- GST_LOG_OBJECT (qtdemux, "attaching cenc metadata [%u/%u]", index,
- info->crypto_info->len);
- if (!crypto_info || !gst_buffer_add_protection_meta (buf, crypto_info))
- GST_ERROR_OBJECT (qtdemux,
- "failed to attach cenc metadata to buffer");
- } else {
- GST_INFO_OBJECT (qtdemux, "No crypto info with index %d and sample %d",
- index, stream->sample_index);
- }
- }
- }
-
- if (stream->alignment > 1)
- buf = gst_qtdemux_align_buffer (qtdemux, buf, stream->alignment);
-
- ret = gst_pad_push (stream->pad, buf);
-
- if (GST_CLOCK_TIME_IS_VALID (pts) && GST_CLOCK_TIME_IS_VALID (duration)) {
- /* mark position in stream, we'll need this to know when to send GAP event */
- stream->segment.position = pts + duration;
- }
+ ret = gst_qtdemux_split_and_push_buffer (qtdemux, stream, buf);
exit:
return ret;
gst_qtdemux_do_fragmented_seek (GstQTDemux * qtdemux)
{
const QtDemuxRandomAccessEntry *best_entry = NULL;
- GList *iter;
+ gint i;
GST_OBJECT_LOCK (qtdemux);
- g_assert (qtdemux->n_streams > 0);
+ g_assert (QTDEMUX_N_STREAMS (qtdemux) > 0);
/* first see if we can determine where to go to using mfra,
* before we start clearing things */
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
const QtDemuxRandomAccessEntry *entry;
QtDemuxStream *stream;
gboolean is_audio_or_video;
- stream = QTDEMUX_STREAM (iter->data);
+ stream = QTDEMUX_NTH_STREAM (qtdemux, i);
if (stream->ra_entries == NULL)
continue;
}
/* ok, now we can prepare for processing as of located moof */
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
QtDemuxStream *stream;
- stream = QTDEMUX_STREAM (iter->data);
+ stream = QTDEMUX_NTH_STREAM (qtdemux, i);
g_free (stream->samples);
stream->samples = NULL;
GST_INFO_OBJECT (qtdemux, "seek to %" GST_TIME_FORMAT ", best fragment "
"moof offset: %" G_GUINT64_FORMAT ", ts %" GST_TIME_FORMAT,
- GST_TIME_ARGS (QTDEMUX_FIRST_STREAM (qtdemux)->time_position),
+ GST_TIME_ARGS (QTDEMUX_NTH_STREAM (qtdemux, 0)->time_position),
best_entry->moof_offset, GST_TIME_ARGS (best_entry->ts));
qtdemux->moof_offset = best_entry->moof_offset;
guint sample_size = 0;
gboolean empty = 0;
guint size;
- GList *iter;
+ gint i;
if (qtdemux->fragmented_seek_pending) {
GST_INFO_OBJECT (qtdemux, "pending fragmented seek");
/* Figure out the next stream sample to output, min_time is expressed in
* global time and runs over the edit list segments. */
min_time = G_MAXUINT64;
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
GstClockTime position;
- stream = QTDEMUX_STREAM (iter->data);
+ stream = QTDEMUX_NTH_STREAM (qtdemux, i);
position = stream->time_position;
/* position of -1 is EOS */
}
/* gap events for subtitle streams */
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ stream = QTDEMUX_NTH_STREAM (qtdemux, i);
if (stream->pad && (stream->subtype == FOURCC_subp
|| stream->subtype == FOURCC_text
|| stream->subtype == FOURCC_sbtl)) {
gst_qtdemux_stream_check_and_change_stsd_index (qtdemux, stream);
if (stream->new_caps) {
gst_qtdemux_configure_stream (qtdemux, stream);
- qtdemux_do_allocation (qtdemux, stream);
+ qtdemux_do_allocation (stream, qtdemux);
}
/* If we're doing a keyframe-only trickmode, only push keyframes on video streams */
- if (G_UNLIKELY (qtdemux->
- segment.flags & GST_SEGMENT_FLAG_TRICKMODE_KEY_UNITS)) {
+ if (G_UNLIKELY (qtdemux->segment.
+ flags & GST_SEGMENT_FLAG_TRICKMODE_KEY_UNITS)) {
if (stream->subtype == FOURCC_vide && !keyframe) {
GST_LOG_OBJECT (qtdemux, "Skipping non-keyframe on track-id %u",
stream->track_id);
/* fatal errors need special actions */
/* check EOS */
if (ret == GST_FLOW_EOS) {
- if (qtdemux->n_streams == 0) {
+ if (QTDEMUX_N_STREAMS (qtdemux) == 0) {
/* we have no streams, post an error */
gst_qtdemux_post_no_playable_stream_error (qtdemux);
}
has_next_entry (GstQTDemux * demux)
{
QtDemuxStream *stream;
- GList *iter;
+ gint i;
GST_DEBUG_OBJECT (demux, "Checking if there are samples not played yet");
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
- stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
+ stream = QTDEMUX_NTH_STREAM (demux, i);
if (stream->sample_index == -1) {
stream->sample_index = 0;
QtDemuxStream *stream, *target_stream = NULL;
guint64 smalloffs = (guint64) - 1;
QtDemuxSample *sample;
- GList *iter;
+ gint i;
GST_LOG_OBJECT (demux, "Finding entry at offset %" G_GUINT64_FORMAT,
demux->offset);
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
- stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
+ stream = QTDEMUX_NTH_STREAM (demux, i);
if (stream->sample_index == -1) {
stream->sample_index = 0;
{
if (G_UNLIKELY (demux->need_segment)) {
gint i;
- GList *iter;
if (!demux->upstream_format_is_time) {
gst_qtdemux_map_and_push_segments (demux, &demux->segment);
demux->need_segment = FALSE;
/* clear to send tags on all streams */
- for (iter = demux->active_streams, i = 0; iter;
- iter = g_list_next (iter), i++) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (demux, i);
gst_qtdemux_push_tags (demux, stream);
if (CUR_STREAM (stream)->sparse) {
GST_INFO_OBJECT (demux, "Sending gap event on stream %d", i);
if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DISCONT)) {
gboolean is_gap_input = FALSE;
- GList *iter;
+ gint i;
GST_DEBUG_OBJECT (demux, "Got DISCONT, marking all streams as DISCONT");
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
- QTDEMUX_STREAM (iter->data)->discont = TRUE;
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
+ QTDEMUX_NTH_STREAM (demux, i)->discont = TRUE;
}
/* Check if we can land back on our feet in the case where upstream is
* in the case of trick-mode DASH for example) */
if (demux->upstream_format_is_time
&& GST_BUFFER_OFFSET (inbuf) != GST_BUFFER_OFFSET_NONE) {
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
guint32 res;
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (demux, i);
GST_LOG_OBJECT (demux,
"track-id #%u , checking if offset %" G_GUINT64_FORMAT
" is a sample start", stream->track_id, GST_BUFFER_OFFSET (inbuf));
* previously received one. */
if (!is_gap_input && demux->fragmented && demux->segment.rate < 0) {
gst_qtdemux_process_adapter (demux, TRUE);
- g_list_foreach (demux->active_streams,
+ g_ptr_array_foreach (demux->active_streams,
(GFunc) gst_qtdemux_stream_flush_samples_data, NULL);
}
}
}
if (fourcc == FOURCC_mdat) {
gint next_entry = next_entry_size (demux);
- if (demux->n_streams > 0 && (next_entry != -1 || !demux->fragmented)) {
+ if (QTDEMUX_N_STREAMS (demux) > 0 && (next_entry != -1
+ || !demux->fragmented)) {
/* we have the headers, start playback */
demux->state = QTDEMUX_STATE_MOVIE;
demux->neededbytes = next_entry;
if (fourcc == FOURCC_moov) {
/* in usual fragmented setup we could try to scan for more
* and end up at the the moov (after mdat) again */
- if (demux->got_moov && demux->n_streams > 0 &&
+ if (demux->got_moov && QTDEMUX_N_STREAMS (demux) > 0 &&
(!demux->fragmented
|| demux->last_moov_offset == demux->offset)) {
GST_DEBUG_OBJECT (demux,
demux->last_moov_offset = demux->offset;
/* Update streams with new moov */
- demux->old_streams =
- g_list_concat (demux->old_streams, demux->active_streams);
- demux->active_streams = NULL;
+ gst_qtdemux_stream_concat (demux,
+ demux->old_streams, demux->active_streams);
qtdemux_parse_moov (demux, data, demux->neededbytes);
qtdemux_node_dump (demux, demux->moov_node);
gst_adapter_unmap (demux->adapter);
data = NULL;
- if (demux->mdatbuffer && demux->n_streams) {
+ if (demux->mdatbuffer && QTDEMUX_N_STREAMS (demux)) {
gsize remaining_data_size = 0;
/* the mdat was before the header */
GST_DEBUG_OBJECT (demux, "We have n_streams:%d and mdatbuffer:%p",
- demux->n_streams, demux->mdatbuffer);
+ QTDEMUX_N_STREAMS (demux), demux->mdatbuffer);
/* restore our adapter/offset view of things with upstream;
* put preceding buffered data ahead of current moov data.
* This should also handle evil mdat, moov, mdat cases and alike */
QtDemuxSample *sample;
GstClockTime dts, pts, duration;
gboolean keyframe;
- GList *iter;
+ gint i;
GST_DEBUG_OBJECT (demux,
"BEGIN // in MOVIE for offset %" G_GUINT64_FORMAT, demux->offset);
data = gst_adapter_map (demux->adapter, demux->todrop);
gst_byte_reader_init (&br, data + 8, demux->todrop);
if (!qtdemux_parse_cenc_aux_info (demux,
- QTDEMUX_FIRST_STREAM (demux), &br,
+ QTDEMUX_NTH_STREAM (demux, 0), &br,
demux->cenc_aux_info_sizes, demux->cenc_aux_sample_count)) {
GST_ERROR_OBJECT (demux, "failed to parse cenc auxiliary info");
ret = GST_FLOW_ERROR;
gst_qtdemux_check_send_pending_segment (demux);
/* Figure out which stream this packet belongs to */
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
- stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
+ stream = QTDEMUX_NTH_STREAM (demux, i);
if (stream->sample_index >= stream->n_samples) {
/* reset to be checked below G_UNLIKELY (stream == NULL) */
stream = NULL;
/* check if all streams are eos */
ret = GST_FLOW_EOS;
- for (iter = demux->active_streams; iter; iter = g_list_next (iter)) {
- if (!STREAM_IS_EOS (QTDEMUX_STREAM (iter->data))) {
+ for (i = 0; i < QTDEMUX_N_STREAMS (demux); i++) {
+ if (!STREAM_IS_EOS (QTDEMUX_NTH_STREAM (demux, i))) {
ret = GST_FLOW_OK;
break;
}
qtdemux_parse_container (qtdemux, node, buffer + 36, end);
break;
}
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ case FOURCC_SA3D:
+ {
+ qtdemux_parse_SA3D (qtdemux, buffer, end - buffer);
+ break;
+ }
+#endif /* TIZEN_FEATURE_QTDEMUX_MODIFICATION */
default:
if (!strcmp (type->name, "unknown"))
GST_MEMDUMP ("Unknown tag", buffer + 4, end - buffer - 4);
}
static void
-qtdemux_do_allocation (GstQTDemux * qtdemux, QtDemuxStream * stream)
+qtdemux_do_allocation (QtDemuxStream * stream, GstQTDemux * qtdemux)
{
/* FIXME: This can only reliably work if demuxers have a
* separate streaming thread per srcpad. This should be
}
static gboolean
-gst_qtdemux_configure_stream (GstQTDemux * qtdemux, QtDemuxStream * stream)
+gst_qtdemux_guess_framerate (GstQTDemux * qtdemux, QtDemuxStream * stream)
{
- if (stream->subtype == FOURCC_vide) {
- /* fps is calculated base on the duration of the average framerate since
- * qt does not have a fixed framerate. */
- gboolean fps_available = TRUE;
- guint32 first_duration = 0;
-
- if (stream->n_samples > 0)
- first_duration = stream->samples[0].duration;
-
- if ((stream->n_samples == 1 && first_duration == 0)
- || (qtdemux->fragmented && stream->n_samples_moof == 1)) {
- /* still frame */
- CUR_STREAM (stream)->fps_n = 0;
+ /* Estimate CUR_STREAM(stream)->fps_n/fps_d from the average sample
+ * duration, since qt files do not store a fixed framerate.
+ * Returns TRUE when the derived framerate is considered reliable
+ * enough to be advertised in the caps. */
+ gboolean fps_available = TRUE;
+ guint32 first_duration = 0;
+
+ if (stream->n_samples > 0)
+ first_duration = stream->samples[0].duration;
+
+ if ((stream->n_samples == 1 && first_duration == 0)
+ || (qtdemux->fragmented && stream->n_samples_moof == 1)) {
+ /* still frame */
+ CUR_STREAM (stream)->fps_n = 0;
+ CUR_STREAM (stream)->fps_d = 1;
+ } else {
+ if (stream->duration == 0 || stream->n_samples < 2) {
+ CUR_STREAM (stream)->fps_n = stream->timescale;
CUR_STREAM (stream)->fps_d = 1;
+ fps_available = FALSE;
} else {
- if (stream->duration == 0 || stream->n_samples < 2) {
- CUR_STREAM (stream)->fps_n = stream->timescale;
- CUR_STREAM (stream)->fps_d = 1;
- fps_available = FALSE;
+ GstClockTime avg_duration;
+ guint64 duration;
+ guint32 n_samples;
+
+ /* duration and n_samples can be updated for fragmented format
+ * so, framerate of fragmented format is calculated using data in a moof */
+ if (qtdemux->fragmented && stream->n_samples_moof > 0
+ && stream->duration_moof > 0) {
+ n_samples = stream->n_samples_moof;
+ duration = stream->duration_moof;
} else {
- GstClockTime avg_duration;
- guint64 duration;
- guint32 n_samples;
-
- /* duration and n_samples can be updated for fragmented format
- * so, framerate of fragmented format is calculated using data in a moof */
- if (qtdemux->fragmented && stream->n_samples_moof > 0
- && stream->duration_moof > 0) {
- n_samples = stream->n_samples_moof;
- duration = stream->duration_moof;
- } else {
- n_samples = stream->n_samples;
- duration = stream->duration;
- }
-
- /* Calculate a framerate, ignoring the first sample which is sometimes truncated */
- /* stream->duration is guint64, timescale, n_samples are guint32 */
- avg_duration =
- gst_util_uint64_scale_round (duration -
- first_duration, GST_SECOND,
- (guint64) (stream->timescale) * (n_samples - 1));
+ n_samples = stream->n_samples;
+ duration = stream->duration;
+ }
- GST_LOG_OBJECT (qtdemux,
- "Calculating avg sample duration based on stream (or moof) duration %"
- G_GUINT64_FORMAT
- " minus first sample %u, leaving %d samples gives %"
- GST_TIME_FORMAT, duration, first_duration,
- n_samples - 1, GST_TIME_ARGS (avg_duration));
+ /* Calculate a framerate, ignoring the first sample which is sometimes truncated */
+ /* stream->duration is guint64, timescale, n_samples are guint32 */
+ avg_duration =
+ gst_util_uint64_scale_round (duration -
+ first_duration, GST_SECOND,
+ (guint64) (stream->timescale) * (n_samples - 1));
- gst_video_guess_framerate (avg_duration, &CUR_STREAM (stream)->fps_n,
- &CUR_STREAM (stream)->fps_d);
+ GST_LOG_OBJECT (qtdemux,
+ "Calculating avg sample duration based on stream (or moof) duration %"
+ G_GUINT64_FORMAT
+ " minus first sample %u, leaving %d samples gives %"
+ GST_TIME_FORMAT, duration, first_duration,
+ n_samples - 1, GST_TIME_ARGS (avg_duration));
+
+#ifdef TIZEN_FEATURE_QTDEMUX_MODIFICATION
+ /* Tizen builds ignore gst_video_guess_framerate()'s return value and
+ * instead treat a zero denominator as "no reliable framerate". */
+ gst_video_guess_framerate (avg_duration,
+ &CUR_STREAM (stream)->fps_n, &CUR_STREAM (stream)->fps_d);
+ if (CUR_STREAM (stream)->fps_d == 0)
+ fps_available = FALSE;
+#else
+ fps_available =
+ gst_video_guess_framerate (avg_duration,
+ &CUR_STREAM (stream)->fps_n, &CUR_STREAM (stream)->fps_d);
+#endif
- GST_DEBUG_OBJECT (qtdemux,
- "Calculating framerate, timescale %u gave fps_n %d fps_d %d",
- stream->timescale, CUR_STREAM (stream)->fps_n,
- CUR_STREAM (stream)->fps_d);
- }
+ GST_DEBUG_OBJECT (qtdemux,
+ "Calculating framerate, timescale %u gave fps_n %d fps_d %d",
+ stream->timescale, CUR_STREAM (stream)->fps_n,
+ CUR_STREAM (stream)->fps_d);
}
+ }
+
+ return fps_available;
+}
+
+static gboolean
+gst_qtdemux_configure_stream (GstQTDemux * qtdemux, QtDemuxStream * stream)
+{
+ if (stream->subtype == FOURCC_vide) {
+ gboolean fps_available = gst_qtdemux_guess_framerate (qtdemux, stream);
if (CUR_STREAM (stream)->caps) {
CUR_STREAM (stream)->caps =
}
}
+ else if (stream->subtype == FOURCC_clcp && CUR_STREAM (stream)->caps) {
+ const GstStructure *s;
+ QtDemuxStream *fps_stream = NULL;
+ gboolean fps_available = FALSE;
+
+ /* CEA608 closed caption tracks are a bit special in that each sample
+ * can contain CCs for multiple frames, and CCs can be omitted and have to
+ * be inferred from the duration of the sample then.
+ *
+ * As such we take the framerate from the (first) video track here for
+ * CEA608 as there must be one CC byte pair for every video frame
+ * according to the spec.
+ *
+ * For CEA708 all is fine and there is one sample per frame.
+ */
+
+ s = gst_caps_get_structure (CUR_STREAM (stream)->caps, 0);
+ if (gst_structure_has_name (s, "closedcaption/x-cea-608")) {
+ gint i;
+
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *tmp = QTDEMUX_NTH_STREAM (qtdemux, i);
+
+ if (tmp->subtype == FOURCC_vide) {
+ fps_stream = tmp;
+ break;
+ }
+ }
+
+ if (fps_stream) {
+ fps_available = gst_qtdemux_guess_framerate (qtdemux, fps_stream);
+ CUR_STREAM (stream)->fps_n = CUR_STREAM (fps_stream)->fps_n;
+ CUR_STREAM (stream)->fps_d = CUR_STREAM (fps_stream)->fps_d;
+ }
+ } else {
+ fps_available = gst_qtdemux_guess_framerate (qtdemux, stream);
+ fps_stream = stream;
+ }
+
+ CUR_STREAM (stream)->caps =
+ gst_caps_make_writable (CUR_STREAM (stream)->caps);
+
+ /* set framerate if calculated framerate is reliable */
+ if (fps_available) {
+ gst_caps_set_simple (CUR_STREAM (stream)->caps,
+ "framerate", GST_TYPE_FRACTION, CUR_STREAM (stream)->fps_n,
+ CUR_STREAM (stream)->fps_d, NULL);
+ }
+ }
+
if (stream->pad) {
GstCaps *prev_caps = NULL;
QtDemuxStream * stream, GstTagList * list)
{
gboolean ret = TRUE;
- /* consistent default for push based mode */
- gst_segment_init (&stream->segment, GST_FORMAT_TIME);
if (stream->subtype == FOURCC_vide) {
gchar *name = g_strdup_printf ("video_%u", qtdemux->n_video_streams);
gst_byte_reader_skip_unchecked (&stream->ctts, 4);
offset = gst_byte_reader_get_int32_be_unchecked (&stream->ctts);
+ /* HACK: if sample_offset is larger than 2 * duration, ignore the box.
+ * slightly inaccurate PTS could be more usable than corrupted one */
+ if (G_UNLIKELY ((ABS (offset) / 2) > stream->duration)) {
+ GST_WARNING_OBJECT (qtdemux,
+ "Ignore corrupted ctts, sample_offset %" G_GINT32_FORMAT
+ " larger than duration %" G_GUINT64_FORMAT,
+ offset, stream->duration);
+
+ stream->cslg_shift = 0;
+ stream->ctts_present = FALSE;
+ return TRUE;
+ }
if (offset < cslg_least)
cslg_least = offset;
}
static gint
-qtdemux_track_id_compare_func (QtDemuxStream * stream1, QtDemuxStream * stream2)
+qtdemux_track_id_compare_func (QtDemuxStream ** stream1,
+ QtDemuxStream ** stream2)
{
- return (gint) stream1->track_id - (gint) stream2->track_id;
+ /* GCompareFunc used with g_ptr_array_sort(): the arguments are pointers
+ * to the array elements, hence the extra dereference. Compare without
+ * subtraction so track ids above G_MAXINT (track_id is guint) cannot
+ * overflow the signed result and mis-sort the streams. */
+ return ((*stream1)->track_id < (*stream2)->track_id) ? -1 :
+ (((*stream1)->track_id == (*stream2)->track_id) ? 0 : 1);
}
/* parse the traks.
stream = _create_stream (qtdemux, track_id);
stream->stream_tags = gst_tag_list_make_writable (stream->stream_tags);
+#ifdef TIZEN_FEATURE_QTDEMUX_DURATION
+ if (!gst_byte_reader_skip (&tkhd, 4))
+ goto corrupt_file;
+
+ if (tkhd_version == 1) {
+ if (!gst_byte_reader_get_uint64_be (&tkhd, &stream->tkhd_duration))
+ goto corrupt_file;
+ } else {
+ guint32 dur = 0;
+ if (!gst_byte_reader_get_uint32_be (&tkhd, &dur))
+ goto corrupt_file;
+ stream->tkhd_duration = dur;
+ }
+ GST_INFO_OBJECT (qtdemux, "tkhd duration: %" G_GUINT64_FORMAT,
+ stream->tkhd_duration);
+#endif
/* need defaults for fragments */
qtdemux_parse_trex (qtdemux, stream, &dummy, &dummy, &dummy);
version = QT_UINT32 ((guint8 *) mdhd->data + 8);
GST_LOG_OBJECT (qtdemux, "track version/flags: %08x", version);
if (version == 0x01000000) {
- if (len < 38)
+ if (len < 42)
goto corrupt_file;
stream->timescale = QT_UINT32 ((guint8 *) mdhd->data + 28);
stream->duration = QT_UINT64 ((guint8 *) mdhd->data + 32);
- lang_code = QT_UINT16 ((guint8 *) mdhd->data + 36);
+ lang_code = QT_UINT16 ((guint8 *) mdhd->data + 40);
} else {
if (len < 30)
goto corrupt_file;
"found, assuming preview image or something; skipping track",
stream->duration, stream->timescale, qtdemux->duration,
qtdemux->timescale);
- gst_qtdemux_stream_free (stream);
+ gst_qtdemux_stream_unref (stream);
return TRUE;
}
}
guint32 matrix[9];
/* version 1 uses some 64-bit ints */
+#ifdef TIZEN_FEATURE_QTDEMUX_DURATION
+ if (!gst_byte_reader_skip (&tkhd, 16))
+#else
if (!gst_byte_reader_skip (&tkhd, 20 + value_size))
+#endif
goto corrupt_file;
if (!qtdemux_parse_transformation_matrix (qtdemux, &tkhd, matrix, "tkhd"))
if (stsd_len < 24) {
/* .. but skip stream with empty stsd produced by some Vivotek cameras */
if (stream->subtype == FOURCC_vivo) {
- gst_qtdemux_stream_free (stream);
+ gst_qtdemux_stream_unref (stream);
return TRUE;
} else {
goto corrupt_file;
fiel = NULL;
/* pick 'the' stsd child */
mp4v = qtdemux_tree_get_child_by_index (stsd, stsd_index);
- if (!stream->protected) {
- if (QTDEMUX_TREE_NODE_FOURCC (mp4v) != fourcc) {
+ // We should skip parsing the stsd for non-protected streams if
+ // the entry doesn't match the fourcc, since they don't change
+ // format. However, for protected streams we can have partial
+ // encryption, where parts of the stream are encrypted and parts
+ // not. For both parts of such streams, we should ensure the
+ // esds overrides are parsed for both from the stsd.
+ if (QTDEMUX_TREE_NODE_FOURCC (mp4v) != fourcc) {
+ if (stream->protected && QTDEMUX_TREE_NODE_FOURCC (mp4v) != FOURCC_encv)
mp4v = NULL;
- }
- } else {
- if (QTDEMUX_TREE_NODE_FOURCC (mp4v) != FOURCC_encv) {
+ else if (!stream->protected)
mp4v = NULL;
- }
}
if (mp4v) {
}
mp4a = qtdemux_tree_get_child_by_index (stsd, stsd_index);
- if (!stream->protected) {
- } else {
- if (QTDEMUX_TREE_NODE_FOURCC (mp4v) != FOURCC_encv) {
- mp4v = NULL;
- }
- }
- if (stream->protected && fourcc == FOURCC_mp4a) {
- if (QTDEMUX_TREE_NODE_FOURCC (mp4a) != FOURCC_enca) {
+ if (QTDEMUX_TREE_NODE_FOURCC (mp4a) != fourcc) {
+ if (stream->protected && QTDEMUX_TREE_NODE_FOURCC (mp4a) != FOURCC_enca)
mp4a = NULL;
- }
- } else {
- if (QTDEMUX_TREE_NODE_FOURCC (mp4a) != FOURCC_mp4a) {
+ else if (!stream->protected)
mp4a = NULL;
- }
}
wave = NULL;
/* Insert and sort new stream in track-id order.
* This will help in comparing old/new streams during stream update check */
- qtdemux->active_streams =
- g_list_insert_sorted (qtdemux->active_streams, stream,
+ g_ptr_array_add (qtdemux->active_streams, stream);
+ g_ptr_array_sort (qtdemux->active_streams,
(GCompareFunc) qtdemux_track_id_compare_func);
- qtdemux->n_streams++;
- GST_DEBUG_OBJECT (qtdemux, "n_streams is now %d", qtdemux->n_streams);
+ GST_DEBUG_OBJECT (qtdemux, "n_streams is now %d",
+ QTDEMUX_N_STREAMS (qtdemux));
return TRUE;
GST_ELEMENT_ERROR (qtdemux, STREAM, DEMUX,
(_("This file is corrupt and cannot be played.")), (NULL));
if (stream)
- gst_qtdemux_stream_free (stream);
+ gst_qtdemux_stream_unref (stream);
return FALSE;
}
error_encrypted:
{
GST_ELEMENT_ERROR (qtdemux, STREAM, DECRYPT, (NULL), (NULL));
- gst_qtdemux_stream_free (stream);
+ gst_qtdemux_stream_unref (stream);
return FALSE;
}
samples_failed:
/* we posted an error already */
/* free stbl sub-atoms */
gst_qtdemux_stbl_free (stream);
- gst_qtdemux_stream_free (stream);
+ gst_qtdemux_stream_unref (stream);
return FALSE;
}
existing_stream:
{
GST_INFO_OBJECT (qtdemux, "unknown subtype %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (stream->subtype));
- gst_qtdemux_stream_free (stream);
+ gst_qtdemux_stream_unref (stream);
return TRUE;
}
}
gint64 size, sys_bitrate, sum_bitrate = 0;
GstClockTime duration;
guint bitrate;
- GList *iter;
+ gint i;
if (qtdemux->fragmented)
return;
return;
}
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *str = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *str = QTDEMUX_NTH_STREAM (qtdemux, i);
switch (str->subtype) {
case FOURCC_soun:
case FOURCC_vide:
qtdemux_prepare_streams (GstQTDemux * qtdemux)
{
GstFlowReturn ret = GST_FLOW_OK;
- GList *iter, *next;
+#ifdef TIZEN_FEATURE_QTDEMUX_DURATION
+ guint64 tkhd_max_duration = 0;
+#endif
+ gint i;
GST_DEBUG_OBJECT (qtdemux, "prepare streams");
- for (iter = qtdemux->active_streams; ret == GST_FLOW_OK && iter; iter = next) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
guint32 sample_num = 0;
- next = iter->next;
-
GST_DEBUG_OBJECT (qtdemux, "track-id %u, fourcc %" GST_FOURCC_FORMAT,
stream->track_id, GST_FOURCC_ARGS (CUR_STREAM (stream)->fourcc));
} else {
/* discard any stray moof */
qtdemux->moof_offset = 0;
+#ifdef TIZEN_FEATURE_QTDEMUX_DURATION
+ if (tkhd_max_duration < stream->tkhd_duration)
+ tkhd_max_duration = stream->tkhd_duration;
+#endif
}
/* prepare braking */
* in push mode, we'll just have to deal with it */
if (G_UNLIKELY (qtdemux->pullbased && !stream->n_samples)) {
GST_DEBUG_OBJECT (qtdemux, "no samples for stream; discarding");
- gst_qtdemux_remove_stream (qtdemux, stream);
+ g_ptr_array_remove_index (qtdemux->active_streams, i);
+ i--;
continue;
} else if (stream->track_id == qtdemux->chapters_track_id &&
(stream->subtype == FOURCC_text || stream->subtype == FOURCC_sbtl)) {
/* TODO - parse chapters track and expose it as GstToc; For now just ignore it
so that it doesn't look like a subtitle track */
- gst_qtdemux_remove_stream (qtdemux, stream);
+ g_ptr_array_remove_index (qtdemux->active_streams, i);
+ i--;
continue;
}
}
}
+#ifdef TIZEN_FEATURE_QTDEMUX_DURATION
+ if (!qtdemux->fragmented && (qtdemux->duration > tkhd_max_duration)) {
+ GST_INFO_OBJECT (qtdemux,
+ "Update duration: %" G_GUINT64_FORMAT " -> %" G_GUINT64_FORMAT,
+ qtdemux->duration, tkhd_max_duration);
+ qtdemux->duration = tkhd_max_duration;
+ }
+#endif
+
return ret;
}
-static GList *
-_stream_in_list (GList * list, QtDemuxStream * stream)
+static gboolean
+_stream_equal_func (const QtDemuxStream * stream, const gchar * stream_id)
{
+ /* GEqualFunc for g_ptr_array_find_with_equal_func(): @stream is the
+ * array element being inspected, @stream_id is the needle passed by the
+ * caller. g_strcmp0() tolerates NULL on either side. */
- GList *iter;
-
- for (iter = list; iter; iter = g_list_next (iter)) {
- QtDemuxStream *tmp = QTDEMUX_STREAM (iter->data);
- if (!g_strcmp0 (tmp->stream_id, stream->stream_id))
- return iter;
- }
-
- return NULL;
+ return g_strcmp0 (stream->stream_id, stream_id) == 0;
}
static gboolean
qtdemux_is_streams_update (GstQTDemux * qtdemux)
{
+ /* Returns TRUE when the stream set declared by the new moov differs
+ * (by count or by per-index stream-id) from the previously exposed
+ * streams, i.e. the pads cannot simply be reused as-is. */
- GList *new, *old;
+ gint i;
- if (!qtdemux->active_streams)
- return FALSE;
+ /* Different length, updated */
+ if (QTDEMUX_N_STREAMS (qtdemux) != qtdemux->old_streams->len)
+ return TRUE;
/* streams in list are sorted in track-id order */
- for (new = qtdemux->active_streams, old = qtdemux->old_streams; new && old;
- new = g_list_next (new), old = g_list_next (old)) {
-
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
/* Different stream-id, updated */
- if (g_strcmp0 (QTDEMUX_STREAM (new->data)->stream_id,
- QTDEMUX_STREAM (old->data)->stream_id))
+ if (g_strcmp0 (QTDEMUX_NTH_STREAM (qtdemux, i)->stream_id,
+ QTDEMUX_NTH_OLD_STREAM (qtdemux, i)->stream_id))
return TRUE;
}
- /* Different length, updated */
- if (new != NULL || old != NULL)
- return TRUE;
-
return FALSE;
}
return gst_qtdemux_configure_stream (qtdemux, newstream);
}
+/* g_ptr_array_find_with_equal_func is available since 2.54,
+ * replacement until we can depend unconditionally on the real one in GLib */
+#if !GLIB_CHECK_VERSION(2,54,0)
+#define g_ptr_array_find_with_equal_func qtdemux_ptr_array_find_with_equal_func
+static gboolean
+qtdemux_ptr_array_find_with_equal_func (GPtrArray * haystack,
+ gconstpointer needle, GEqualFunc equal_func, guint * index_)
+{
+ /* Linear scan mirroring GLib's semantics: a NULL @equal_func falls back
+ * to pointer equality, and @index_ may be NULL when the caller only
+ * needs to know whether @needle is present. Returns TRUE on a match,
+ * storing the index of the first matching element in @index_. */
+ guint i;
+
+ g_return_val_if_fail (haystack != NULL, FALSE);
+
+ if (equal_func == NULL)
+ equal_func = g_direct_equal;
+
+ for (i = 0; i < haystack->len; i++) {
+ if (equal_func (g_ptr_array_index (haystack, i), needle)) {
+ if (index_ != NULL)
+ *index_ = i;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+#endif
+
static gboolean
qtdemux_update_streams (GstQTDemux * qtdemux)
{
- GList *iter, *next;
+ gint i;
g_assert (qtdemux->streams_aware);
/* At below, figure out which stream in active_streams has identical stream-id
* old_streams : existing streams (belong to previous moov)
*/
- /* Count n_streams again */
- qtdemux->n_streams = 0;
-
- for (iter = qtdemux->active_streams; iter; iter = next) {
- GList *tmp;
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
-
- next = iter->next;
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
+ QtDemuxStream *oldstream = NULL;
+ guint target;
GST_DEBUG_OBJECT (qtdemux, "track-id %u, fourcc %" GST_FOURCC_FORMAT,
stream->track_id, GST_FOURCC_ARGS (CUR_STREAM (stream)->fourcc));
- qtdemux->n_streams++;
+ if (g_ptr_array_find_with_equal_func (qtdemux->old_streams,
+ stream->stream_id, (GEqualFunc) _stream_equal_func, &target)) {
+ oldstream = QTDEMUX_NTH_OLD_STREAM (qtdemux, target);
- if ((tmp = _stream_in_list (qtdemux->old_streams, stream)) != NULL
- && QTDEMUX_STREAM (tmp->data)->pad) {
- QtDemuxStream *oldstream = QTDEMUX_STREAM (tmp->data);
+ /* null pad stream cannot be reused */
+ if (oldstream->pad == NULL)
+ oldstream = NULL;
+ }
+ if (oldstream) {
GST_DEBUG_OBJECT (qtdemux, "Reuse track-id %d", oldstream->track_id);
if (!qtdemux_reuse_and_configure_stream (qtdemux, oldstream, stream))
return FALSE;
- qtdemux->old_streams = g_list_remove (qtdemux->old_streams, oldstream);
- gst_qtdemux_stream_free (oldstream);
+ /* we don't need to preserve order of old streams */
+ g_ptr_array_remove_fast (qtdemux->old_streams, oldstream);
} else {
GstTagList *list;
static GstFlowReturn
qtdemux_expose_streams (GstQTDemux * qtdemux)
{
- GList *iter, *next;
+ gint i;
GST_DEBUG_OBJECT (qtdemux, "exposing streams");
if (!qtdemux_is_streams_update (qtdemux)) {
- GList *new, *old;
-
GST_DEBUG_OBJECT (qtdemux, "Reuse all streams");
- for (new = qtdemux->active_streams, old = qtdemux->old_streams; new && old;
- new = g_list_next (new), old = g_list_next (old)) {
- if (!qtdemux_reuse_and_configure_stream (qtdemux,
- QTDEMUX_STREAM (old->data), QTDEMUX_STREAM (new->data)))
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *new_stream = QTDEMUX_NTH_STREAM (qtdemux, i);
+ QtDemuxStream *old_stream = QTDEMUX_NTH_OLD_STREAM (qtdemux, i);
+ if (!qtdemux_reuse_and_configure_stream (qtdemux, old_stream, new_stream))
return GST_FLOW_ERROR;
}
- g_list_free_full (qtdemux->old_streams,
- (GDestroyNotify) gst_qtdemux_stream_free);
- qtdemux->old_streams = NULL;
+ g_ptr_array_set_size (qtdemux->old_streams, 0);
+ qtdemux->need_segment = TRUE;
return GST_FLOW_OK;
}
if (!qtdemux_update_streams (qtdemux))
return GST_FLOW_ERROR;
} else {
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
+ for (i = 0; i < QTDEMUX_N_STREAMS (qtdemux); i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_STREAM (qtdemux, i);
GstTagList *list;
/* now we have all info and can expose */
gst_element_no_more_pads (GST_ELEMENT_CAST (qtdemux));
/* If we have still old_streams, it's no more used stream */
- for (iter = qtdemux->old_streams; iter; iter = next) {
- QtDemuxStream *stream = QTDEMUX_STREAM (iter->data);
- next = g_list_next (iter);
+ for (i = 0; i < qtdemux->old_streams->len; i++) {
+ QtDemuxStream *stream = QTDEMUX_NTH_OLD_STREAM (qtdemux, i);
if (stream->pad) {
GstEvent *event;
gst_pad_push_event (stream->pad, event);
}
-
- qtdemux->old_streams = g_list_remove (qtdemux->old_streams, stream);
- gst_qtdemux_stream_free (stream);
}
+ g_ptr_array_set_size (qtdemux->old_streams, 0);
+
/* check if we should post a redirect in case there is a single trak
* and it is a redirecting trak */
- if (qtdemux->n_streams == 1 &&
- QTDEMUX_FIRST_STREAM (qtdemux)->redirect_uri != NULL) {
+ if (QTDEMUX_N_STREAMS (qtdemux) == 1 &&
+ QTDEMUX_NTH_STREAM (qtdemux, 0)->redirect_uri != NULL) {
GstMessage *m;
GST_INFO_OBJECT (qtdemux, "Issuing a redirect due to a single track with "
m = gst_message_new_element (GST_OBJECT_CAST (qtdemux),
gst_structure_new ("redirect",
"new-location", G_TYPE_STRING,
- QTDEMUX_FIRST_STREAM (qtdemux)->redirect_uri, NULL));
+ QTDEMUX_NTH_STREAM (qtdemux, 0)->redirect_uri, NULL));
gst_element_post_message (GST_ELEMENT_CAST (qtdemux), m);
qtdemux->posted_redirect = TRUE;
}
- for (iter = qtdemux->active_streams; iter; iter = g_list_next (iter)) {
- qtdemux_do_allocation (qtdemux, QTDEMUX_STREAM (iter->data));
- }
+ g_ptr_array_foreach (qtdemux->active_streams,
+ (GFunc) qtdemux_do_allocation, qtdemux);
qtdemux->need_segment = TRUE;
_codec ("CEA 608 Closed Caption");
caps =
gst_caps_new_simple ("closedcaption/x-cea-608", "format",
- G_TYPE_STRING, "cc_data", NULL);
+ G_TYPE_STRING, "s334-1a", NULL);
stream->need_process = TRUE;
+ stream->need_split = TRUE;
break;
case FOURCC_c708:
_codec ("CEA 708 Closed Caption");