"mpegts", G_TYPE_UINT64, mpegts, NULL));
gst_pad_push_event (self->srcpad, event);
}
+
+/* Push a custom downstream "fragment_timestamp" event on @self's source pad.
+ *
+ * NOTE(review): from the surrounding hunks this appears to carry the PTS of
+ * the first (discont) buffer of an HLS WebVTT fragment downstream so cue
+ * times can be aligned to the fragment start — confirm against the caller.
+ * No-op when @timestamp is GST_CLOCK_TIME_NONE.
+ *
+ * @self: the subtitle parser owning the src pad
+ * @timestamp: fragment start time, or GST_CLOCK_TIME_NONE to skip sending
+ */
+static void
+send_fragment_timestamp_event (GstSubParse * self, GstClockTime timestamp)
+{
+ GstEvent *event = NULL;
+
+ /* Nothing useful to announce without a valid timestamp */
+ if (!GST_CLOCK_TIME_IS_VALID(timestamp))
+ return;
+
+ GST_LOG ("send fragment_timestamp %" GST_TIME_FORMAT ,
+ GST_TIME_ARGS (timestamp));
+
+ /* Timestamp travels as a G_TYPE_UINT64 field of a custom structure;
+ * gst_pad_push_event () takes ownership of @event, so no unref here. */
+ event = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
+ gst_structure_new ("fragment_timestamp",
+ "timestamp", G_TYPE_UINT64, timestamp, NULL));
+
+ gst_pad_push_event (self->srcpad, event);
+}
#endif
static gchar *
GstMessage *m = NULL;
#endif
#ifdef TIZEN_FEATURE_HLS_WEBVTT
- GstClockTime reference_time = GST_CLOCK_TIME_NONE;
+ GstClockTime fragment_timestamp = GST_CLOCK_TIME_NONE;
#endif
if (self->first_buffer) {
#ifdef TIZEN_FEATURE_HLS_WEBVTT
if (GST_BUFFER_IS_DISCONT (buf) && GST_BUFFER_PTS_IS_VALID (buf))
- reference_time = GST_BUFFER_PTS (buf);
+ fragment_timestamp = GST_BUFFER_PTS (buf);
#endif
feed_textbuf (self, buf);
gst_pad_push_event (self->srcpad, gst_event_new_tag (tags));
}
}
+#ifdef TIZEN_FEATURE_HLS_WEBVTT
+ if (self->parser_type == GST_SUB_PARSE_FORMAT_VTT)
+ send_fragment_timestamp_event (self, fragment_timestamp);
+#endif
while (!self->flushing && (line = get_next_line (self))) {
guint offset = 0;
GST_BUFFER_TIMESTAMP (buf) = self->state.start_time;
GST_BUFFER_DURATION (buf) = self->state.duration;
-#ifdef TIZEN_FEATURE_HLS_WEBVTT
- if ((self->parser_type == GST_SUB_PARSE_FORMAT_VTT)
- && GST_CLOCK_TIME_IS_VALID (reference_time)) {
- GstCaps *sink_caps = gst_pad_get_current_caps (self->sinkpad);
- if (sink_caps) {
- gst_buffer_add_reference_timestamp_meta (buf, sink_caps,
- reference_time, GST_CLOCK_TIME_NONE);
- gst_caps_unref (sink_caps);
- }
- }
-#endif
/* in some cases (e.g. tmplayer) we can only determine the duration
* of a text chunk from the timestamp of the next text chunk; in those
* cases, we probably want to limit the duration to something