3 * Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
27 #include <gst/base/base.h>
28 #include <gst/video/video.h>
31 #include "gstcccombiner.h"
/* Debug category used by the GST_LOG_OBJECT/GST_DEBUG_OBJECT/GST_ERROR_OBJECT
 * calls throughout this element. */
33 GST_DEBUG_CATEGORY_STATIC (gst_cc_combiner_debug);
34 #define GST_CAT_DEFAULT gst_cc_combiner_debug

/* Always-present video sink pad and source pad templates.
 * NOTE(review): the caps/presence arguments are not visible in this
 * excerpt -- confirm against the full file (presumably ANY caps). */
36 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink",
41 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src",

/* Request-pad template for the closed-caption input: accepts CEA-608
 * (raw or s334-1a framed) and CEA-708 (cc_data or CDP framed) streams. */
46 static GstStaticPadTemplate captiontemplate =
47 GST_STATIC_PAD_TEMPLATE ("caption",
51 ("closedcaption/x-cea-608,format={ (string) raw, (string) s334-1a}; "
52 "closedcaption/x-cea-708,format={ (string) cc_data, (string) cdp }"));

/* GstCCCombiner is a GstAggregator subclass (one video sink pad plus an
 * optional "caption" request pad). */
54 G_DEFINE_TYPE (GstCCCombiner, gst_cc_combiner, GST_TYPE_AGGREGATOR);
55 #define parent_class gst_cc_combiner_parent_class

/* Per-queued-caption-buffer entry stored in self->current_frame_captions:
 * the caption type captured from the caption pad's caps at queue time.
 * NOTE(review): the rest of the CaptionData struct (the GstBuffer *buffer
 * member used below) is not visible in this excerpt. */
59   GstVideoCaptionType caption_type;
/* GArray clear function for current_frame_captions (installed via
 * g_array_set_clear_func in init): releases the reference held on the
 * queued caption buffer when an entry is removed or the array is freed. */
64 caption_data_clear (CaptionData * data)
66 gst_buffer_unref (data->buffer);
/* GObject finalize: releases the caption array and the cached video caps,
 * then chains up to the parent (GstAggregator) finalize. */
70 gst_cc_combiner_finalize (GObject * object)
72 GstCCCombiner *self = GST_CCCOMBINER (object);
/* Unreffing the array runs its clear func (caption_data_clear), which
 * drops any caption buffers still queued. */
74 g_array_unref (self->current_frame_captions);
75 self->current_frame_captions = NULL;
/* gst_caps_replace with NULL unrefs and clears self->video_caps. */
76 gst_caps_replace (&self->video_caps, NULL);
78 G_OBJECT_CLASS (parent_class)->finalize (object);
/* Internal-only flow return meaning "cannot decide yet, wait for more
 * caption data"; mapped back to GST_FLOW_OK before leaving aggregate(). */
81 #define GST_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS
/* Collect every caption buffer whose running time falls inside the queued
 * video buffer's [current_video_running_time, current_video_running_time_end)
 * interval, attach them to the video buffer as GstVideoCaptionMeta, and push
 * the video buffer downstream via gst_aggregator_finish_buffer().
 *
 * @timeout: TRUE when called from an aggregate() triggered by the latency
 *           timeout; in that case missing caption data is not waited for.
 *
 * Returns: GST_FLOW_NEED_DATA if more caption input is required (only when
 *          !timeout), GST_FLOW_ERROR for caption buffers without PTS,
 *          otherwise the result of gst_aggregator_finish_buffer().
 *
 * NOTE(review): several structural lines (braces, some conditionals, breaks
 * and early returns) are missing from this excerpt; the comments below only
 * describe the statements that are visible. */
84 gst_cc_combiner_collect_captions (GstCCCombiner * self, gboolean timeout)
86 GstAggregatorPad *src_pad =
87 GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (self));
88 GstAggregatorPad *caption_pad;
/* A video buffer must already be queued by aggregate() before we get here. */
91 g_assert (self->current_video_buffer != NULL);
/* Look up the (optional) caption request pad by name. */
94 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
97 /* No caption pad, forward buffer directly */
99 GST_LOG_OBJECT (self, "No caption pad, passing through video");
/* Ownership of the queued buffer moves to video_buf; clear our field. */
100 video_buf = self->current_video_buffer;
101 self->current_video_buffer = NULL;
105 GST_LOG_OBJECT (self, "Trying to collect captions for queued video buffer");
107 GstBuffer *caption_buf;
108 GstClockTime caption_time;
109 CaptionData caption_data;
/* Peek (does not dequeue) the next caption buffer; returns a new ref. */
111 caption_buf = gst_aggregator_pad_peek_buffer (caption_pad);
113 if (gst_aggregator_pad_is_eos (caption_pad)) {
114 GST_DEBUG_OBJECT (self, "Caption pad is EOS, we're done");
116 } else if (!timeout) {
/* Not EOS and not a timeout: ask the aggregator to call us again once
 * more caption data arrived. */
117 GST_DEBUG_OBJECT (self, "Need more caption data");
118 gst_object_unref (caption_pad);
119 return GST_FLOW_NEED_DATA;
121 GST_DEBUG_OBJECT (self, "No caption data on timeout");
/* Caption buffers must be timestamped to be placed against video. */
126 caption_time = GST_BUFFER_PTS (caption_buf);
127 if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
128 GST_ERROR_OBJECT (self, "Caption buffer without PTS");
130 gst_buffer_unref (caption_buf);
131 gst_object_unref (caption_pad);
133 return GST_FLOW_ERROR;
/* Convert the caption PTS to running time for comparison with the video
 * buffer's running-time interval. */
137 gst_segment_to_running_time (&caption_pad->segment, GST_FORMAT_TIME,
140 if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
141 GST_DEBUG_OBJECT (self, "Caption buffer outside segment, dropping");
/* drop_buffer dequeues the pad's ref; unref drops the peek ref. */
143 gst_aggregator_pad_drop_buffer (caption_pad);
144 gst_buffer_unref (caption_buf);
149 /* Collected all caption buffers for this video buffer */
150 if (caption_time >= self->current_video_running_time_end) {
151 gst_buffer_unref (caption_buf);
153 } else if (caption_time < self->current_video_running_time) {
/* Too late for this caption buffer: it belongs to an earlier frame. */
154 GST_DEBUG_OBJECT (self,
155 "Caption buffer before current video frame, dropping");
157 gst_aggregator_pad_drop_buffer (caption_pad);
158 gst_buffer_unref (caption_buf);
162 /* This caption buffer has to be collected */
163 GST_LOG_OBJECT (self,
164 "Collecting caption buffer %p %" GST_TIME_FORMAT " for video buffer %p",
165 caption_buf, GST_TIME_ARGS (caption_time), self->current_video_buffer);
/* The array entry takes over the peek ref on caption_buf; it is released
 * later by caption_data_clear via g_array_set_size(.., 0). */
166 caption_data.caption_type = self->current_caption_type;
167 caption_data.buffer = caption_buf;
168 g_array_append_val (self->current_frame_captions, caption_data);
169 gst_aggregator_pad_drop_buffer (caption_pad);
172 if (self->current_frame_captions->len > 0) {
175 GST_LOG_OBJECT (self, "Attaching %u captions to buffer %p",
176 self->current_frame_captions->len, self->current_video_buffer);
/* make_writable may copy; either way we own video_buf afterwards. */
177 video_buf = gst_buffer_make_writable (self->current_video_buffer);
178 self->current_video_buffer = NULL;
180 for (i = 0; i < self->current_frame_captions->len; i++) {
181 CaptionData *caption_data =
182 &g_array_index (self->current_frame_captions, CaptionData, i);
/* Copy each collected caption payload onto the video buffer as
 * GstVideoCaptionMeta. */
185 gst_buffer_map (caption_data->buffer, &map, GST_MAP_READ);
186 gst_buffer_add_video_caption_meta (video_buf, caption_data->caption_type,
188 gst_buffer_unmap (caption_data->buffer, &map);
/* Shrinking to 0 runs caption_data_clear on every entry (unrefs buffers). */
191 g_array_set_size (self->current_frame_captions, 0);
193 GST_LOG_OBJECT (self, "No captions for buffer %p",
194 self->current_video_buffer);
195 video_buf = self->current_video_buffer;
196 self->current_video_buffer = NULL;
199 gst_object_unref (caption_pad);
/* Advance the source pad position so the aggregator's timing (and
 * gst_aggregator_simple_get_next_time) stays correct.
 * NOTE(review): this adds PTS + DURATION without a visible validity check
 * on DURATION -- confirm the full file guards GST_CLOCK_TIME_NONE here. */
202 src_pad->segment.position =
203 GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);
/* finish_buffer takes ownership of video_buf and pushes it downstream. */
205 return gst_aggregator_finish_buffer (GST_AGGREGATOR_CAST (self), video_buf);
/* GstAggregator::aggregate vfunc. Drives the element:
 *  1. Queue a video buffer (and compute its end running time from its
 *     duration, the configured framerate, or the *next* video buffer).
 *  2. Hand off to gst_cc_combiner_collect_captions() to gather overlapping
 *     caption buffers and push the decorated video buffer.
 *
 * @timeout: TRUE when invoked because the aggregator's latency deadline
 *           expired rather than because all pads have data.
 *
 * Returns: GST_FLOW_OK/GST_FLOW_EOS/GST_FLOW_ERROR (GST_FLOW_NEED_DATA is
 *          translated back to GST_FLOW_OK before returning).
 *
 * NOTE(review): braces, some else-branches and early returns are missing
 * from this excerpt; comments describe only the visible statements. */
209 gst_cc_combiner_aggregate (GstAggregator * aggregator, gboolean timeout)
211 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
212 GstFlowReturn flow_ret = GST_FLOW_OK;
214 /* If we have no current video buffer, queue one. If we have one but
215 * its end running time is not known yet, try to determine it from the
216 * next video buffer */
217 if (!self->current_video_buffer
218 || !GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end)) {
219 GstAggregatorPad *video_pad;
220 GstClockTime video_start;
221 GstBuffer *video_buf;
/* Look up the always-present video sink pad by name. */
224 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
225 (aggregator), "sink"));
226 video_buf = gst_aggregator_pad_peek_buffer (video_pad);
228 if (gst_aggregator_pad_is_eos (video_pad)) {
229 GST_DEBUG_OBJECT (aggregator, "Video pad is EOS, we're done");
231 /* Assume that this buffer ends where it started +50ms (25fps) and handle it */
232 if (self->current_video_buffer) {
233 self->current_video_running_time_end =
234 self->current_video_running_time + 50 * GST_MSECOND;
235 flow_ret = gst_cc_combiner_collect_captions (self, timeout);
238 /* If we collected all captions for the remaining video frame we're
239 * done, otherwise get called another time and go directly into the
240 * outer branch for finishing the current video frame */
241 if (flow_ret == GST_FLOW_NEED_DATA)
242 flow_ret = GST_FLOW_OK;
244 flow_ret = GST_FLOW_EOS;
246 flow_ret = GST_FLOW_OK;
249 gst_object_unref (video_pad);
/* Video buffers must carry a PTS for interval computation. */
253 video_start = GST_BUFFER_PTS (video_buf);
254 if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
255 gst_buffer_unref (video_buf);
256 gst_object_unref (video_pad);
258 GST_ERROR_OBJECT (aggregator, "Video buffer without PTS");
260 return GST_FLOW_ERROR;
/* Convert the PTS to running time; invalid means outside the segment. */
264 gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
266 if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
267 GST_DEBUG_OBJECT (aggregator, "Buffer outside segment, dropping");
268 gst_aggregator_pad_drop_buffer (video_pad);
269 gst_buffer_unref (video_buf);
270 gst_object_unref (video_pad);
274 if (self->current_video_buffer) {
275 /* If we already have a video buffer just update the current end running
276 * time accordingly. That's what was missing and why we got here */
277 self->current_video_running_time_end = video_start;
278 gst_buffer_unref (video_buf);
279 GST_LOG_OBJECT (self,
280 "Determined end timestamp for video buffer: %p %" GST_TIME_FORMAT
281 " - %" GST_TIME_FORMAT, self->current_video_buffer,
282 GST_TIME_ARGS (self->current_video_running_time),
283 GST_TIME_ARGS (self->current_video_running_time_end));
285 /* Otherwise we had no buffer queued currently. Let's do that now
286 * so that we can collect captions for it */
/* gst_buffer_replace takes its own ref; drop_buffer releases the pad's
 * queued ref and unref releases the peek ref. */
287 gst_buffer_replace (&self->current_video_buffer, video_buf);
288 self->current_video_running_time = video_start;
289 gst_aggregator_pad_drop_buffer (video_pad);
290 gst_buffer_unref (video_buf);
/* Preferred source for the end time: the buffer's own duration,
 * clamped to the segment stop. */
292 if (GST_BUFFER_DURATION_IS_VALID (video_buf)) {
293 GstClockTime end_time =
294 GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);
295 if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
296 end_time = video_pad->segment.stop;
297 self->current_video_running_time_end =
298 gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
/* Fallback: derive one frame's duration from the caps framerate. */
300 } else if (self->video_fps_n != 0 && self->video_fps_d != 0) {
301 GstClockTime end_time =
302 GST_BUFFER_PTS (video_buf) + gst_util_uint64_scale_int (GST_SECOND,
303 self->video_fps_d, self->video_fps_n);
304 if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
305 end_time = video_pad->segment.stop;
306 self->current_video_running_time_end =
307 gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
/* Last resort: leave the end unknown; the next video buffer's start
 * will fill it in on a later aggregate() call (outer condition above). */
310 self->current_video_running_time_end = GST_CLOCK_TIME_NONE;
313 GST_LOG_OBJECT (self,
314 "Queued new video buffer: %p %" GST_TIME_FORMAT " - %"
315 GST_TIME_FORMAT, self->current_video_buffer,
316 GST_TIME_ARGS (self->current_video_running_time),
317 GST_TIME_ARGS (self->current_video_running_time_end));
320 gst_object_unref (video_pad);
323 /* At this point we have a video buffer queued and can start collecting
324 * caption buffers for it */
325 g_assert (self->current_video_buffer != NULL);
326 g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time));
327 g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end));
329 flow_ret = gst_cc_combiner_collect_captions (self, timeout);
331 /* Only if we collected all captions we replace the current video buffer
332 * with NULL and continue with the next one on the next call */
333 if (flow_ret == GST_FLOW_NEED_DATA) {
334 flow_ret = GST_FLOW_OK;
336 gst_buffer_replace (&self->current_video_buffer, NULL);
337 self->current_video_running_time = self->current_video_running_time_end =
/* GstAggregator::sink_event vfunc. Intercepts CAPS events:
 *  - on the "caption" pad: records the caption type (608/708, framing)
 *    so collected buffers get the right GstVideoCaptionMeta type;
 *  - on the video pad: updates the framerate-derived latency and caches
 *    the caps for update_src_caps().
 * All events are then chained up to the parent class handler. */
345 gst_cc_combiner_sink_event (GstAggregator * aggregator,
346 GstAggregatorPad * agg_pad, GstEvent * event)
348 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
350 switch (GST_EVENT_TYPE (event)) {
351 case GST_EVENT_CAPS:{
355 gst_event_parse_caps (event, &caps);
356 s = gst_caps_get_structure (caps, 0);
358 if (strcmp (GST_OBJECT_NAME (agg_pad), "caption") == 0) {
359 self->current_caption_type = gst_video_caption_type_from_caps (caps);
365 gst_structure_get_fraction (s, "framerate", &fps_n, &fps_d);
367 if (fps_n != self->video_fps_n || fps_d != self->video_fps_d) {
368 GstClockTime latency;
/* One frame duration is reported as both min and max latency, since a
 * frame can only be finished once its captions (or the next frame's
 * start) are known.
 * NOTE(review): gst_util_uint64_scale divides by fps_n -- a framerate of
 * 0/1 would divide by zero; confirm a guard exists in the lines not
 * visible in this excerpt. */
370 latency = gst_util_uint64_scale (GST_SECOND, fps_d, fps_n);
371 gst_aggregator_set_latency (aggregator, latency, latency);
374 self->video_fps_n = fps_n;
375 self->video_fps_d = fps_d;
/* NOTE(review): plain assignment takes a new ref without releasing any
 * previously cached caps -- potential leak on caps renegotiation unless
 * the previous value is dropped in code not visible here (stop/finalize
 * do clear it). gst_caps_replace would be the leak-safe form. */
377 self->video_caps = gst_caps_ref (caps);
386 return GST_AGGREGATOR_CLASS (parent_class)->sink_event (aggregator, agg_pad,
/* GstAggregator::stop vfunc: resets all per-stream state (framerate,
 * running-time interval, queued video buffer, cached caps, collected
 * captions, caption type) so the element can be restarted cleanly. */
391 gst_cc_combiner_stop (GstAggregator * aggregator)
393 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
395 self->video_fps_n = self->video_fps_d = 0;
396 self->current_video_running_time = self->current_video_running_time_end =
398 gst_buffer_replace (&self->current_video_buffer, NULL);
399 gst_caps_replace (&self->video_caps, NULL);
/* Shrinking to 0 runs caption_data_clear on every queued caption entry. */
401 g_array_set_size (self->current_frame_captions, 0);
402 self->current_caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
/* GstAggregator::flush vfunc: drops the queued video buffer and any
 * collected captions and resets the timing state, but — unlike stop() —
 * keeps the negotiated framerate, caps and caption type, since the
 * stream continues after the flush. */
408 gst_cc_combiner_flush (GstAggregator * aggregator)
410 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
411 GstAggregatorPad *src_pad =
412 GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (aggregator));
414 self->current_video_running_time = self->current_video_running_time_end =
416 gst_buffer_replace (&self->current_video_buffer, NULL);
418 g_array_set_size (self->current_frame_captions, 0);
/* Invalidate the output position tracked in collect_captions(). */
420 src_pad->segment.position = GST_CLOCK_TIME_NONE;
/* GstAggregator::create_new_pad vfunc: only a single sink request pad
 * named "caption" is allowed; any other direction/presence/template is
 * rejected (the rejection returns are in lines not visible here).
 * The pad is created under the object lock and the caption type is reset
 * until caps arrive on the new pad. */
425 static GstAggregatorPad *
426 gst_cc_combiner_create_new_pad (GstAggregator * aggregator,
427 GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
429 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
430 GstAggregatorPad *agg_pad;
/* Guard clauses: caption input must be a sink request pad from the
 * "caption" template. */
432 if (templ->direction != GST_PAD_SINK)
435 if (templ->presence != GST_PAD_REQUEST)
438 if (strcmp (templ->name_template, "caption") != 0)
441 GST_OBJECT_LOCK (self);
442 agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
443 "name", "caption", "direction", GST_PAD_SINK, "template", templ, NULL);
/* Unknown until the caption pad's CAPS event is handled in sink_event(). */
444 self->current_caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
445 GST_OBJECT_UNLOCK (self);
/* GstAggregator::update_src_caps vfunc: output caps are the downstream
 * caps intersected with the cached video sink caps. Until video caps have
 * been received, returns GST_AGGREGATOR_FLOW_NEED_DATA so negotiation is
 * retried later. */
451 gst_cc_combiner_update_src_caps (GstAggregator * agg,
452 GstCaps * caps, GstCaps ** ret)
454 GstFlowReturn res = GST_AGGREGATOR_FLOW_NEED_DATA;
455 GstCCCombiner *self = GST_CCCOMBINER (agg);
457 if (self->video_caps) {
/* NOTE(review): the success assignment to res is not visible in this
 * excerpt -- presumably set after the intersection; confirm. */
458 *ret = gst_caps_intersect (caps, self->video_caps);
/* Class initializer: registers element metadata, the three pad templates
 * (all backed by GstAggregatorPad), and wires up the GstAggregator
 * vfuncs implemented above. */
466 gst_cc_combiner_class_init (GstCCCombinerClass * klass)
468 GObjectClass *gobject_class;
469 GstElementClass *gstelement_class;
470 GstAggregatorClass *aggregator_class;
472 gobject_class = (GObjectClass *) klass;
473 gstelement_class = (GstElementClass *) klass;
474 aggregator_class = (GstAggregatorClass *) klass;
476 gobject_class->finalize = gst_cc_combiner_finalize;
478 gst_element_class_set_static_metadata (gstelement_class,
479 "Closed Caption Combiner",
481 "Combines GstVideoCaptionMeta with video input stream",
482 "Sebastian Dröge <sebastian@centricular.com>");
/* All pads use the plain GstAggregatorPad GType (no custom pad class). */
484 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
485 &sinktemplate, GST_TYPE_AGGREGATOR_PAD);
486 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
487 &srctemplate, GST_TYPE_AGGREGATOR_PAD);
488 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
489 &captiontemplate, GST_TYPE_AGGREGATOR_PAD);
491 aggregator_class->aggregate = gst_cc_combiner_aggregate;
492 aggregator_class->stop = gst_cc_combiner_stop;
493 aggregator_class->flush = gst_cc_combiner_flush;
494 aggregator_class->create_new_pad = gst_cc_combiner_create_new_pad;
495 aggregator_class->sink_event = gst_cc_combiner_sink_event;
496 aggregator_class->update_src_caps = gst_cc_combiner_update_src_caps;
/* Use the aggregator's stock next-time helper for live/timeout scheduling. */
497 aggregator_class->get_next_time = gst_aggregator_simple_get_next_time;
499 GST_DEBUG_CATEGORY_INIT (gst_cc_combiner_debug, "cccombiner",
500 0, "Closed Caption combiner");
/* Instance initializer: creates the always-present video "sink" pad from
 * its static template and sets up the per-instance caption state. */
504 gst_cc_combiner_init (GstCCCombiner * self)
506 GstPadTemplate *templ;
507 GstAggregatorPad *agg_pad;
509 templ = gst_static_pad_template_get (&sinktemplate);
510 agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
511 "name", "sink", "direction", GST_PAD_SINK, "template", templ, NULL);
512 gst_object_unref (templ);
/* gst_element_add_pad takes ownership of the (floating) pad ref. */
513 gst_element_add_pad (GST_ELEMENT_CAST (self), GST_PAD_CAST (agg_pad));
/* Array of CaptionData entries collected for the current video frame;
 * the clear func unrefs each entry's buffer on removal. */
515 self->current_frame_captions =
516 g_array_new (FALSE, FALSE, sizeof (CaptionData));
517 g_array_set_clear_func (self->current_frame_captions,
518 (GDestroyNotify) caption_data_clear);
/* No video buffer queued yet: timing interval is unknown. */
520 self->current_video_running_time = self->current_video_running_time_end =
523 self->current_caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;