3 * Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
27 #include <gst/base/base.h>
28 #include <gst/video/video.h>
32 #include "gstcccombiner.h"
/* Debug category used by all GST_*_OBJECT logging in this file */
GST_DEBUG_CATEGORY_STATIC (gst_cc_combiner_debug);
#define GST_CAT_DEFAULT gst_cc_combiner_debug

/* Static pad templates: always-present video "sink" and "src" pads plus a
 * request "caption" pad limited to the CEA-608/708 formats listed below.
 * NOTE(review): the caps arguments of the sink/src templates are elided in
 * this chunk — confirm against the complete file. */
static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink",
static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src",
static GstStaticPadTemplate captiontemplate =
    GST_STATIC_PAD_TEMPLATE ("caption",
    ("closedcaption/x-cea-608,format={ (string) raw, (string) s334-1a}; "
        "closedcaption/x-cea-708,format={ (string) cc_data, (string) cdp }"));

/* GObject/GStreamer boilerplate: subclass of GstAggregator */
#define parent_class gst_cc_combiner_parent_class
G_DEFINE_TYPE (GstCCCombiner, gst_cc_combiner, GST_TYPE_AGGREGATOR);
GST_ELEMENT_REGISTER_DEFINE (cccombiner, "cccombiner",
    GST_RANK_NONE, GST_TYPE_CCCOMBINER);

/* Default values of the "max-scheduled", "schedule" and "output-padding"
 * properties */
#define DEFAULT_MAX_SCHEDULED 30
#define DEFAULT_SCHEDULE TRUE
#define DEFAULT_OUTPUT_PADDING TRUE

/* Fields of the caption bookkeeping structs: CaptionData pairs a caption
 * buffer with its caption type; CaptionQueueItem additionally records the
 * buffer's running and stream time for QoS reporting.
 * NOTE(review): the struct declarations themselves are elided here. */
GstVideoCaptionType caption_type;
GstClockTime running_time;
GstClockTime stream_time;
86 caption_data_clear (CaptionData * data)
88 gst_buffer_unref (data->buffer);
92 clear_scheduled (CaptionQueueItem * item)
94 gst_buffer_unref (item->buffer);
98 gst_cc_combiner_finalize (GObject * object)
100 GstCCCombiner *self = GST_CCCOMBINER (object);
102 gst_queue_array_free (self->scheduled[0]);
103 gst_queue_array_free (self->scheduled[1]);
104 g_array_unref (self->current_frame_captions);
105 self->current_frame_captions = NULL;
107 G_OBJECT_CLASS (parent_class)->finalize (object);
/* Custom flow return used internally: aggregate needs more caption data */
#define GST_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS

/* Parse a complete CDP packet and return a pointer into @cdp at the start
 * of the cc_data section, storing its length in @cc_data_len; returns NULL
 * (per the visible initialization) when validation fails.
 * NOTE(review): several validation branches and their error paths are
 * elided in this chunk — the body below is a fragment. */
static const guint8 *
extract_cdp (const guint8 * cdp, guint cdp_len, guint * cc_data_len)
  const guint8 *cc_data = NULL;

  /* Header + footer length */

  gst_byte_reader_init (&br, cdp, cdp_len);
  /* presumably cdp_identifier (0x9669) — TODO confirm against full file */
  u16 = gst_byte_reader_get_uint16_be_unchecked (&br);
  /* presumably cdp_length byte */
  u8 = gst_byte_reader_get_uint8_unchecked (&br);
  /* framerate/reserved byte skipped */
  gst_byte_reader_skip_unchecked (&br, 1);

  flags = gst_byte_reader_get_uint8_unchecked (&br);

  /* ccdata_present flag (0x40) must be set */
  if ((flags & 0x40) == 0) {

  /* cdp_hdr_sequence_cntr */
  gst_byte_reader_skip_unchecked (&br, 2);

  /* time_code_present */
  if (gst_byte_reader_get_remaining (&br) < 5) {
  gst_byte_reader_skip_unchecked (&br, 5);

  /* need ccdata_id + cc_count bytes */
  if (gst_byte_reader_get_remaining (&br) < 2) {
  u8 = gst_byte_reader_get_uint8_unchecked (&br);

  cc_count = gst_byte_reader_get_uint8_unchecked (&br);
  /* top three marker bits of cc_count must all be set */
  if ((cc_count & 0xe0) != 0xe0) {
  if (gst_byte_reader_get_remaining (&br) < len)
  cc_data = gst_byte_reader_get_data_unchecked (&br, len);
193 #define MAX_CDP_PACKET_LEN 256
194 #define MAX_CEA608_LEN 32
195 #define CDP_MODE (GST_CC_CDP_MODE_CC_DATA | GST_CC_CDP_MODE_TIME_CODE)
198 make_cdp (GstCCCombiner * self, const guint8 * cc_data, guint cc_data_len,
199 const struct cdp_fps_entry *fps_entry, const GstVideoTimeCode * tc)
202 GstBuffer *ret = gst_buffer_new_allocate (NULL, MAX_CDP_PACKET_LEN, NULL);
205 gst_buffer_map (ret, &map, GST_MAP_WRITE);
207 len = convert_cea708_cc_data_to_cdp (GST_OBJECT (self), CDP_MODE,
208 self->cdp_hdr_sequence_cntr, cc_data, cc_data_len, map.data, map.size,
210 self->cdp_hdr_sequence_cntr++;
212 gst_buffer_unmap (ret, &map);
214 gst_buffer_set_size (ret, len);
220 make_padding (GstCCCombiner * self, const GstVideoTimeCode * tc, guint field)
222 GstBuffer *ret = NULL;
224 switch (self->caption_type) {
225 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
227 const guint8 cc_data[6] = { 0xfc, 0x80, 0x80, 0xf9, 0x80, 0x80 };
229 ret = make_cdp (self, cc_data, 6, self->cdp_fps_entry, tc);
232 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
236 ret = gst_buffer_new_allocate (NULL, 3, NULL);
238 gst_buffer_map (ret, &map, GST_MAP_WRITE);
240 map.data[0] = 0xfc | (field & 0x01);
244 gst_buffer_unmap (ret, &map);
247 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
251 ret = gst_buffer_new_allocate (NULL, 3, NULL);
253 gst_buffer_map (ret, &map, GST_MAP_WRITE);
255 map.data[0] = field == 0 ? 0x80 : 0x00;
259 gst_buffer_unmap (ret, &map);
262 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
266 ret = gst_buffer_new_allocate (NULL, 2, NULL);
268 gst_buffer_map (ret, &map, GST_MAP_WRITE);
273 gst_buffer_unmap (ret, &map);
284 queue_caption (GstCCCombiner * self, GstBuffer * scheduled, guint field)
286 GstAggregatorPad *caption_pad;
287 CaptionQueueItem item;
289 if (self->progressive && field == 1) {
290 gst_buffer_unref (scheduled);
295 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
298 g_assert (gst_queue_array_get_length (self->scheduled[field]) <=
299 self->max_scheduled);
301 if (gst_queue_array_get_length (self->scheduled[field]) ==
302 self->max_scheduled) {
303 CaptionQueueItem *dropped =
304 gst_queue_array_pop_tail_struct (self->scheduled[field]);
306 GST_WARNING_OBJECT (self,
307 "scheduled queue runs too long, dropping %" GST_PTR_FORMAT, dropped);
309 gst_element_post_message (GST_ELEMENT_CAST (self),
310 gst_message_new_qos (GST_OBJECT_CAST (self), FALSE,
311 dropped->running_time, dropped->stream_time,
312 GST_BUFFER_PTS (dropped->buffer), GST_BUFFER_DURATION (dropped)));
314 gst_buffer_unref (dropped->buffer);
317 gst_object_unref (caption_pad);
319 item.buffer = scheduled;
321 gst_segment_to_running_time (&caption_pad->segment, GST_FORMAT_TIME,
322 GST_BUFFER_PTS (scheduled));
324 gst_segment_to_stream_time (&caption_pad->segment, GST_FORMAT_TIME,
325 GST_BUFFER_PTS (scheduled));
327 gst_queue_array_push_tail_struct (self->scheduled[field], &item);
/* Schedule a CDP caption buffer: unpack the packet, look for any
 * non-padding 608/708 payload, and only re-pack (with this frame's
 * timecode) and queue it when there is something worth injecting.
 * NOTE(review): the inject-decision branches and the buffer-creation path
 * are elided in this chunk — the body below is a fragment. */
static void
schedule_cdp (GstCCCombiner * self, const GstVideoTimeCode * tc,
    const guint8 * data, guint len, GstClockTime pts, GstClockTime duration)
  const guint8 *cc_data;
  gboolean inject = FALSE;

  if ((cc_data = extract_cdp (data, len, &cc_data_len))) {
    /* walk the cc triplets looking for non-padding payload */
    for (i = 0; i < cc_data_len / 3; i++) {
      gboolean cc_valid = (cc_data[i * 3] & 0x04) == 0x04;
      guint8 cc_type = cc_data[i * 3] & 0x03;

      /* 608 pairs (types 0/1) only matter when not 0x80 0x80 padding */
      if (cc_type == 0x00 || cc_type == 0x01) {
        if (cc_data[i * 3 + 1] != 0x80 || cc_data[i * 3 + 2] != 0x80) {

      make_cdp (self, cc_data, cc_data_len, self->cdp_fps_entry, tc);

  /* We only set those for QoS reporting purposes */
  GST_BUFFER_PTS (buf) = pts;
  GST_BUFFER_DURATION (buf) = duration;

  queue_caption (self, buf, 0);
374 schedule_cea608_s334_1a (GstCCCombiner * self, guint8 * data, guint len,
375 GstClockTime pts, GstClockTime duration)
377 guint8 field0_data[3], field1_data[3];
378 guint field0_len = 0, field1_len = 0;
382 GST_WARNING ("Invalid cc_data buffer size %u. Truncating to a multiple "
384 len = len - (len % 3);
387 for (i = 0; i < len / 3; i++) {
388 if (data[i * 3] & 0x80) {
389 if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)
392 field0_data[field0_len++] = data[i * 3];
393 field0_data[field0_len++] = data[i * 3 + 1];
394 field0_data[field0_len++] = data[i * 3 + 2];
396 if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)
399 field1_data[field1_len++] = data[i * 3];
400 field1_data[field1_len++] = data[i * 3 + 1];
401 field1_data[field1_len++] = data[i * 3 + 2];
405 if (field0_len > 0) {
406 GstBuffer *buf = gst_buffer_new_allocate (NULL, field0_len, NULL);
408 gst_buffer_fill (buf, 0, field0_data, field0_len);
409 GST_BUFFER_PTS (buf) = pts;
410 GST_BUFFER_DURATION (buf) = duration;
412 queue_caption (self, buf, 0);
415 if (field1_len > 0) {
416 GstBuffer *buf = gst_buffer_new_allocate (NULL, field1_len, NULL);
418 gst_buffer_fill (buf, 0, field1_data, field1_len);
419 GST_BUFFER_PTS (buf) = pts;
420 GST_BUFFER_DURATION (buf) = duration;
422 queue_caption (self, buf, 1);
/* Split a raw cc_data caption buffer: 608 pairs (cc_type 0/1) are routed
 * to field 0/1 respectively; 708 DTVCC data goes to field 0.
 * NOTE(review): the DTVCC packet-start/continuation handling around
 * started_ccp is elided in this chunk — the body below is a fragment. */
static void
schedule_cea708_raw (GstCCCombiner * self, guint8 * data, guint len,
    GstClockTime pts, GstClockTime duration)
  guint8 field0_data[MAX_CDP_PACKET_LEN], field1_data[3];
  guint field0_len = 0, field1_len = 0;
  /* TRUE once a DTVCC (ccp) packet has started — 608 pairs may not appear
   * after that point in a well-formed stream */
  gboolean started_ccp = FALSE;

  GST_WARNING ("Invalid cc_data buffer size %u. Truncating to a multiple "
  len = len - (len % 3);

  for (i = 0; i < len / 3; i++) {
    gboolean cc_valid = (data[i * 3] & 0x04) == 0x04;
    guint8 cc_type = data[i * 3] & 0x03;

    if (cc_type == 0x00) {
      /* 608 field-0 pair: padding is skipped */
      if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)

      field0_data[field0_len++] = data[i * 3];
      field0_data[field0_len++] = data[i * 3 + 1];
      field0_data[field0_len++] = data[i * 3 + 2];
    } else if (cc_type == 0x01) {
      /* 608 field-1 pair: padding is skipped */
      if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)

      field1_data[field1_len++] = data[i * 3];
      field1_data[field1_len++] = data[i * 3 + 1];
      field1_data[field1_len++] = data[i * 3 + 2];

    /* 708 DTVCC triplets accumulate into the field-0 buffer */
    if (cc_type == 0x00 || cc_type == 0x01)

    field0_data[field0_len++] = data[i * 3];
    field0_data[field0_len++] = data[i * 3 + 1];
    field0_data[field0_len++] = data[i * 3 + 2];

  if (field0_len > 0) {
    GstBuffer *buf = gst_buffer_new_allocate (NULL, field0_len, NULL);

    gst_buffer_fill (buf, 0, field0_data, field0_len);
    /* timestamps only used for QoS reporting */
    GST_BUFFER_PTS (buf) = pts;
    GST_BUFFER_DURATION (buf) = duration;

    queue_caption (self, buf, 0);

  if (field1_len > 0) {
    GstBuffer *buf = gst_buffer_new_allocate (NULL, field1_len, NULL);

    gst_buffer_fill (buf, 0, field1_data, field1_len);
    GST_BUFFER_PTS (buf) = pts;
    GST_BUFFER_DURATION (buf) = duration;

    queue_caption (self, buf, 1);
507 schedule_cea608_raw (GstCCCombiner * self, guint8 * data, guint len,
514 if (data[0] != 0x80 || data[1] != 0x80) {
515 queue_caption (self, gst_buffer_ref (buffer), 0);
521 schedule_caption (GstCCCombiner * self, GstBuffer * caption_buf,
522 const GstVideoTimeCode * tc)
525 GstClockTime pts, duration;
527 pts = GST_BUFFER_PTS (caption_buf);
528 duration = GST_BUFFER_DURATION (caption_buf);
530 gst_buffer_map (caption_buf, &map, GST_MAP_READ);
532 switch (self->caption_type) {
533 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
534 schedule_cdp (self, tc, map.data, map.size, pts, duration);
536 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
537 schedule_cea708_raw (self, map.data, map.size, pts, duration);
539 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
540 schedule_cea608_s334_1a (self, map.data, map.size, pts, duration);
542 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
543 schedule_cea608_raw (self, map.data, map.size, caption_buf);
549 gst_buffer_unmap (caption_buf, &map);
553 dequeue_caption_one_field (GstCCCombiner * self, const GstVideoTimeCode * tc,
554 guint field, gboolean drain)
556 CaptionQueueItem *scheduled;
557 CaptionData caption_data;
559 if ((scheduled = gst_queue_array_pop_head_struct (self->scheduled[field]))) {
560 caption_data.buffer = scheduled->buffer;
561 caption_data.caption_type = self->caption_type;
562 g_array_append_val (self->current_frame_captions, caption_data);
563 } else if (!drain && self->output_padding) {
564 caption_data.caption_type = self->caption_type;
565 caption_data.buffer = make_padding (self, tc, field);
566 g_array_append_val (self->current_frame_captions, caption_data);
571 dequeue_caption_both_fields (GstCCCombiner * self, const GstVideoTimeCode * tc,
574 CaptionQueueItem *field0_scheduled, *field1_scheduled;
575 GstBuffer *field0_buffer = NULL, *field1_buffer = NULL;
576 CaptionData caption_data;
578 field0_scheduled = gst_queue_array_pop_head_struct (self->scheduled[0]);
579 field1_scheduled = gst_queue_array_pop_head_struct (self->scheduled[1]);
581 if (drain && !field0_scheduled && !field1_scheduled) {
585 if (field0_scheduled) {
586 field0_buffer = field0_scheduled->buffer;
587 } else if (self->output_padding) {
588 field0_buffer = make_padding (self, tc, 0);
591 if (field1_scheduled) {
592 field1_buffer = field1_scheduled->buffer;
593 } else if (self->output_padding) {
594 field1_buffer = make_padding (self, tc, 1);
597 if (field0_buffer || field1_buffer) {
598 if (field0_buffer && field1_buffer) {
599 caption_data.buffer = gst_buffer_append (field0_buffer, field1_buffer);
600 } else if (field0_buffer) {
601 caption_data.buffer = field0_buffer;
602 } else if (field1_buffer) {
603 caption_data.buffer = field1_buffer;
605 g_assert_not_reached ();
608 caption_data.caption_type = self->caption_type;
610 g_array_append_val (self->current_frame_captions, caption_data);
/* Pair the queued video buffer with caption buffers from the "caption"
 * pad, attach them as GstVideoCaptionMeta and push the result downstream.
 * Returns GST_FLOW_NEED_DATA when more caption input is required before
 * the current video buffer can be finished.
 * NOTE(review): parts of this function (braces, else-branches and some
 * statements) are elided in this chunk; comments annotate the visible
 * fragment only. */
gst_cc_combiner_collect_captions (GstCCCombiner * self, gboolean timeout)
  GstAggregatorPad *src_pad =
      GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (self));
  GstAggregatorPad *caption_pad;
  GstBuffer *video_buf;
  GstVideoTimeCodeMeta *tc_meta;
  GstVideoTimeCode *tc = NULL;
  gboolean caption_pad_is_eos = FALSE;

  /* aggregate() guarantees a queued video buffer before calling us */
  g_assert (self->current_video_buffer != NULL);

      GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
    /* No caption pad, forward buffer directly */
    GST_LOG_OBJECT (self, "No caption pad, passing through video");
    video_buf = self->current_video_buffer;
    gst_aggregator_selected_samples (GST_AGGREGATOR_CAST (self),
        GST_BUFFER_PTS (video_buf), GST_BUFFER_DTS (video_buf),
        GST_BUFFER_DURATION (video_buf), NULL);
    self->current_video_buffer = NULL;

  /* video frame's timecode, forwarded into CDP timecode sections */
  tc_meta = gst_buffer_get_video_time_code_meta (self->current_video_buffer);

  GST_LOG_OBJECT (self, "Trying to collect captions for queued video buffer");
    GstBuffer *caption_buf;
    GstClockTime caption_time;
    CaptionData caption_data;

    caption_buf = gst_aggregator_pad_peek_buffer (caption_pad);
      if (gst_aggregator_pad_is_eos (caption_pad)) {
        GST_DEBUG_OBJECT (self, "Caption pad is EOS, we're done");
        caption_pad_is_eos = TRUE;
      } else if (!timeout) {
        /* no caption buffer and no timeout: wait for more input */
        GST_DEBUG_OBJECT (self, "Need more caption data");
        gst_object_unref (caption_pad);
        return GST_FLOW_NEED_DATA;
        GST_DEBUG_OBJECT (self, "No caption data on timeout");

    caption_time = GST_BUFFER_PTS (caption_buf);
    if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
      GST_ERROR_OBJECT (self, "Caption buffer without PTS");
      gst_buffer_unref (caption_buf);
      gst_object_unref (caption_pad);
      return GST_FLOW_ERROR;

    /* convert to running time so it is comparable to the video frame */
        gst_segment_to_running_time (&caption_pad->segment, GST_FORMAT_TIME,
    if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
      GST_DEBUG_OBJECT (self, "Caption buffer outside segment, dropping");
      gst_aggregator_pad_drop_buffer (caption_pad);
      gst_buffer_unref (caption_buf);

    if (gst_buffer_get_size (caption_buf) == 0 &&
        GST_BUFFER_FLAG_IS_SET (caption_buf, GST_BUFFER_FLAG_GAP)) {
      /* This is a gap, we can go ahead. We only consume it once its end point
       * is behind the current video running time. Important to note that
       * we can't deal with gaps with no duration (-1) */
      if (!GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DURATION (caption_buf))) {
        GST_ERROR_OBJECT (self, "GAP buffer without a duration");
        gst_buffer_unref (caption_buf);
        gst_object_unref (caption_pad);
        return GST_FLOW_ERROR;

      gst_buffer_unref (caption_buf);

      if (caption_time + GST_BUFFER_DURATION (caption_buf) <
          self->current_video_running_time_end) {
        gst_aggregator_pad_drop_buffer (caption_pad);

    /* Collected all caption buffers for this video buffer */
    if (caption_time >= self->current_video_running_time_end) {
      gst_buffer_unref (caption_buf);
    } else if (!self->schedule) {
      /* without scheduling, stale caption buffers are dropped instead of
       * being attached to a frame they no longer match */
      if (GST_CLOCK_TIME_IS_VALID (self->previous_video_running_time_end)) {
        if (caption_time < self->previous_video_running_time_end) {
          GST_WARNING_OBJECT (self,
              "Caption buffer before end of last video frame, dropping");
          gst_aggregator_pad_drop_buffer (caption_pad);
          gst_buffer_unref (caption_buf);
      } else if (caption_time < self->current_video_running_time) {
        GST_WARNING_OBJECT (self,
            "Caption buffer before current video frame, dropping");
        gst_aggregator_pad_drop_buffer (caption_pad);
        gst_buffer_unref (caption_buf);

    /* This caption buffer has to be collected */
    GST_LOG_OBJECT (self,
        "Collecting caption buffer %p %" GST_TIME_FORMAT " for video buffer %p",
        caption_buf, GST_TIME_ARGS (caption_time), self->current_video_buffer);

    caption_data.caption_type = self->caption_type;

    gst_aggregator_pad_drop_buffer (caption_pad);

    if (!self->schedule) {
      caption_data.buffer = caption_buf;
      g_array_append_val (self->current_frame_captions, caption_data);
      /* scheduling mode: stash the caption in the per-field queues */
      schedule_caption (self, caption_buf, tc);
      gst_buffer_unref (caption_buf);

  /* FIXME pad correctly according to fps */
  if (self->schedule) {
    g_assert (self->current_frame_captions->len == 0);

    switch (self->caption_type) {
      case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
        /* Only relevant in alternate and mixed mode, no need to look at the caps */
        if (GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
                GST_VIDEO_BUFFER_FLAG_INTERLACED)) {
          if (!GST_VIDEO_BUFFER_IS_BOTTOM_FIELD (self->current_video_buffer)) {
            dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
          dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
      case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
      case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
        if (self->progressive) {
          dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
        } else if (GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
                GST_VIDEO_BUFFER_FLAG_INTERLACED) &&
            GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
                GST_VIDEO_BUFFER_FLAG_ONEFIELD)) {
          /* single-field frame: only that field's captions are dequeued */
          if (GST_VIDEO_BUFFER_IS_TOP_FIELD (self->current_video_buffer)) {
            dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
            dequeue_caption_one_field (self, tc, 1, caption_pad_is_eos);
          dequeue_caption_both_fields (self, tc, caption_pad_is_eos);
      case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
        if (self->progressive) {
          dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
        } else if (GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
                GST_VIDEO_BUFFER_FLAG_INTERLACED)) {
          if (!GST_VIDEO_BUFFER_IS_BOTTOM_FIELD (self->current_video_buffer)) {
            dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
          dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);

  /* notify samples-selected subscribers about the chosen video buffer */
  gst_aggregator_selected_samples (GST_AGGREGATOR_CAST (self),
      GST_BUFFER_PTS (self->current_video_buffer),
      GST_BUFFER_DTS (self->current_video_buffer),
      GST_BUFFER_DURATION (self->current_video_buffer), NULL);

  GST_LOG_OBJECT (self, "Attaching %u captions to buffer %p",
      self->current_frame_captions->len, self->current_video_buffer);

  if (self->current_frame_captions->len > 0) {
    /* attach each collected caption as a GstVideoCaptionMeta */
    video_buf = gst_buffer_make_writable (self->current_video_buffer);
    self->current_video_buffer = NULL;

    for (i = 0; i < self->current_frame_captions->len; i++) {
      CaptionData *caption_data =
          &g_array_index (self->current_frame_captions, CaptionData, i);

      gst_buffer_map (caption_data->buffer, &map, GST_MAP_READ);
      gst_buffer_add_video_caption_meta (video_buf, caption_data->caption_type,
      gst_buffer_unmap (caption_data->buffer, &map);

    g_array_set_size (self->current_frame_captions, 0);
    GST_LOG_OBJECT (self, "No captions for buffer %p",
        self->current_video_buffer);
    video_buf = self->current_video_buffer;
    self->current_video_buffer = NULL;

  gst_object_unref (caption_pad);

  /* advance the source segment position past the pushed frame */
  src_pad->segment.position =
      GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);

  return gst_aggregator_finish_buffer (GST_AGGREGATOR_CAST (self), video_buf);
/* GstAggregator::aggregate: make sure a video buffer with a known end
 * running time is queued, then collect captions for it.
 * NOTE(review): parts of this function are elided in this chunk; comments
 * annotate the visible fragment only. */
gst_cc_combiner_aggregate (GstAggregator * aggregator, gboolean timeout)
  GstCCCombiner *self = GST_CCCOMBINER (aggregator);
  GstFlowReturn flow_ret = GST_FLOW_OK;

  /* If we have no current video buffer, queue one. If we have one but
   * its end running time is not known yet, try to determine it from the
   * next video buffer */
  if (!self->current_video_buffer
      || !GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end)) {
    GstAggregatorPad *video_pad;
    GstClockTime video_start;
    GstBuffer *video_buf;

        GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
            (aggregator), "sink"));
    video_buf = gst_aggregator_pad_peek_buffer (video_pad);
      if (gst_aggregator_pad_is_eos (video_pad)) {
        GST_DEBUG_OBJECT (aggregator, "Video pad is EOS, we're done");

        /* Assume that this buffer ends where it started +50ms (25fps) and handle it */
        if (self->current_video_buffer) {
          self->current_video_running_time_end =
              self->current_video_running_time + 50 * GST_MSECOND;
          flow_ret = gst_cc_combiner_collect_captions (self, timeout);

          /* If we collected all captions for the remaining video frame we're
           * done, otherwise get called another time and go directly into the
           * outer branch for finishing the current video frame */
          if (flow_ret == GST_FLOW_NEED_DATA)
            flow_ret = GST_FLOW_OK;
            flow_ret = GST_FLOW_EOS;
          flow_ret = GST_FLOW_OK;

      gst_object_unref (video_pad);

    video_start = GST_BUFFER_PTS (video_buf);
    if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
      gst_buffer_unref (video_buf);
      gst_object_unref (video_pad);

      GST_ERROR_OBJECT (aggregator, "Video buffer without PTS");

      return GST_FLOW_ERROR;

    /* convert to running time for comparison with caption buffers */
        gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
    if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
      GST_DEBUG_OBJECT (aggregator, "Buffer outside segment, dropping");
      gst_aggregator_pad_drop_buffer (video_pad);
      gst_buffer_unref (video_buf);
      gst_object_unref (video_pad);

    if (self->current_video_buffer) {
      /* If we already have a video buffer just update the current end running
       * time accordingly. That's what was missing and why we got here */
      self->current_video_running_time_end = video_start;
      gst_buffer_unref (video_buf);
      GST_LOG_OBJECT (self,
          "Determined end timestamp for video buffer: %p %" GST_TIME_FORMAT
          " - %" GST_TIME_FORMAT, self->current_video_buffer,
          GST_TIME_ARGS (self->current_video_running_time),
          GST_TIME_ARGS (self->current_video_running_time_end));
      /* Otherwise we had no buffer queued currently. Let's do that now
       * so that we can collect captions for it */
      gst_buffer_replace (&self->current_video_buffer, video_buf);
      self->current_video_running_time = video_start;
      gst_aggregator_pad_drop_buffer (video_pad);
      gst_buffer_unref (video_buf);

      /* derive the end running time from duration, framerate, or leave it
       * unknown until the next buffer arrives */
      if (GST_BUFFER_DURATION_IS_VALID (video_buf)) {
        GstClockTime end_time =
            GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);
        if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
          end_time = video_pad->segment.stop;

        self->current_video_running_time_end =
            gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
      } else if (self->video_fps_n != 0 && self->video_fps_d != 0) {
        GstClockTime end_time =
            GST_BUFFER_PTS (video_buf) + gst_util_uint64_scale_int (GST_SECOND,
            self->video_fps_d, self->video_fps_n);
        if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
          end_time = video_pad->segment.stop;

        self->current_video_running_time_end =
            gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
        self->current_video_running_time_end = GST_CLOCK_TIME_NONE;

      GST_LOG_OBJECT (self,
          "Queued new video buffer: %p %" GST_TIME_FORMAT " - %"
          GST_TIME_FORMAT, self->current_video_buffer,
          GST_TIME_ARGS (self->current_video_running_time),
          GST_TIME_ARGS (self->current_video_running_time_end));

    gst_object_unref (video_pad);

  /* At this point we have a video buffer queued and can start collecting
   * caption buffers for it */
  g_assert (self->current_video_buffer != NULL);
  g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time));
  g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end));

  flow_ret = gst_cc_combiner_collect_captions (self, timeout);

  /* Only if we collected all captions we replace the current video buffer
   * with NULL and continue with the next one on the next call */
  if (flow_ret == GST_FLOW_NEED_DATA) {
    flow_ret = GST_FLOW_OK;
    gst_buffer_replace (&self->current_video_buffer, NULL);
    self->previous_video_running_time_end =
        self->current_video_running_time_end;
    self->current_video_running_time = self->current_video_running_time_end =
/* GstAggregator::sink_event: track caps on both sink pads (caption type on
 * the "caption" pad; framerate/interlace mode and latency on the video
 * "sink" pad) and forward the video segment.
 * NOTE(review): parts of this function are elided in this chunk; comments
 * annotate the visible fragment only. */
gst_cc_combiner_sink_event (GstAggregator * aggregator,
    GstAggregatorPad * agg_pad, GstEvent * event)
  GstCCCombiner *self = GST_CCCOMBINER (aggregator);

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_CAPS:{

      gst_event_parse_caps (event, &caps);
      s = gst_caps_get_structure (caps, 0);

      if (strcmp (GST_OBJECT_NAME (agg_pad), "caption") == 0) {
        GstVideoCaptionType caption_type =
            gst_video_caption_type_from_caps (caps);

        /* the caption type is locked in once negotiated */
        if (self->caption_type != GST_VIDEO_CAPTION_TYPE_UNKNOWN &&
            caption_type != self->caption_type) {
          GST_ERROR_OBJECT (self, "Changing caption type is not allowed");

          GST_ELEMENT_ERROR (self, CORE, NEGOTIATION, (NULL),
              ("Changing caption type is not allowed"));

        self->caption_type = caption_type;
        const gchar *interlace_mode;

        gst_structure_get_fraction (s, "framerate", &fps_n, &fps_d);

        interlace_mode = gst_structure_get_string (s, "interlace-mode");

        /* missing interlace-mode is treated as progressive */
        self->progressive = !interlace_mode
            || !g_strcmp0 (interlace_mode, "progressive");

        if (fps_n != self->video_fps_n || fps_d != self->video_fps_d) {
          GstClockTime latency;

          /* one frame of latency to wait for matching captions */
          latency = gst_util_uint64_scale (GST_SECOND, fps_d, fps_n);
          gst_aggregator_set_latency (aggregator, latency, latency);

        self->video_fps_n = fps_n;
        self->video_fps_d = fps_d;

        self->cdp_fps_entry = cdp_fps_entry_from_fps (fps_n, fps_d);

        gst_aggregator_set_src_caps (aggregator, caps);

    case GST_EVENT_SEGMENT:{
      if (strcmp (GST_OBJECT_NAME (agg_pad), "sink") == 0) {
        const GstSegment *segment;

        /* only the video segment drives the output segment */
        gst_event_parse_segment (event, &segment);
        gst_aggregator_update_segment (aggregator, segment);

  return GST_AGGREGATOR_CLASS (parent_class)->sink_event (aggregator, agg_pad,
1073 gst_cc_combiner_stop (GstAggregator * aggregator)
1075 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
1077 self->video_fps_n = self->video_fps_d = 0;
1078 self->current_video_running_time = self->current_video_running_time_end =
1079 self->previous_video_running_time_end = GST_CLOCK_TIME_NONE;
1080 gst_buffer_replace (&self->current_video_buffer, NULL);
1082 g_array_set_size (self->current_frame_captions, 0);
1083 self->caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
1085 gst_queue_array_clear (self->scheduled[0]);
1086 gst_queue_array_clear (self->scheduled[1]);
1087 self->cdp_fps_entry = &null_fps_entry;
1092 static GstFlowReturn
1093 gst_cc_combiner_flush (GstAggregator * aggregator)
1095 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
1096 GstAggregatorPad *src_pad =
1097 GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (aggregator));
1099 self->current_video_running_time = self->current_video_running_time_end =
1100 self->previous_video_running_time_end = GST_CLOCK_TIME_NONE;
1101 gst_buffer_replace (&self->current_video_buffer, NULL);
1103 g_array_set_size (self->current_frame_captions, 0);
1105 src_pad->segment.position = GST_CLOCK_TIME_NONE;
1107 self->cdp_hdr_sequence_cntr = 0;
1108 gst_queue_array_clear (self->scheduled[0]);
1109 gst_queue_array_clear (self->scheduled[1]);
1114 static GstAggregatorPad *
1115 gst_cc_combiner_create_new_pad (GstAggregator * aggregator,
1116 GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
1118 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
1119 GstAggregatorPad *agg_pad;
1121 if (templ->direction != GST_PAD_SINK)
1124 if (templ->presence != GST_PAD_REQUEST)
1127 if (strcmp (templ->name_template, "caption") != 0)
1130 GST_OBJECT_LOCK (self);
1131 agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
1132 "name", "caption", "direction", GST_PAD_SINK, "template", templ, NULL);
1133 self->caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
1134 GST_OBJECT_UNLOCK (self);
/* GstAggregator::src_query: proxy most src-pad queries to the video sink
 * pad's peer; ACCEPT_CAPS is answered against the src template.
 * NOTE(review): switch braces/breaks and the default case are elided in
 * this chunk; comments annotate the visible fragment only. */
gst_cc_combiner_src_query (GstAggregator * aggregator, GstQuery * query)
  GstPad *video_sinkpad =
      gst_element_get_static_pad (GST_ELEMENT_CAST (aggregator), "sink");

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    case GST_QUERY_DURATION:
    case GST_QUERY_CAPS:
    case GST_QUERY_ALLOCATION:
      /* forwarded upstream through the video sink pad */
      ret = gst_pad_peer_query (video_sinkpad, query);
    case GST_QUERY_ACCEPT_CAPS:{
      GstCaps *templ = gst_static_pad_template_get_caps (&srctemplate);

      gst_query_parse_accept_caps (query, &caps);
      gst_query_set_accept_caps_result (query, gst_caps_is_subset (caps,
      gst_caps_unref (templ);
      /* everything else goes through the base class */
      ret = GST_AGGREGATOR_CLASS (parent_class)->src_query (aggregator, query);

  gst_object_unref (video_sinkpad);
/* GstAggregator::sink_query: queries on the video "sink" pad are proxied
 * to the source pad's peer; caption-pad CAPS/ACCEPT_CAPS queries are
 * answered from the caption template.
 * NOTE(review): switch braces/breaks and some else-branches are elided in
 * this chunk; comments annotate the visible fragment only. */
gst_cc_combiner_sink_query (GstAggregator * aggregator,
    GstAggregatorPad * aggpad, GstQuery * query)
  GstPad *video_sinkpad =
      gst_element_get_static_pad (GST_ELEMENT_CAST (aggregator), "sink");
  GstPad *srcpad = GST_AGGREGATOR_SRC_PAD (aggregator);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    case GST_QUERY_DURATION:
    case GST_QUERY_ALLOCATION:
      if (GST_PAD_CAST (aggpad) == video_sinkpad) {
        /* video pad: proxy downstream through the source pad */
        ret = gst_pad_peer_query (srcpad, query);
            GST_AGGREGATOR_CLASS (parent_class)->sink_query (aggregator,
    case GST_QUERY_CAPS:
      if (GST_PAD_CAST (aggpad) == video_sinkpad) {
        ret = gst_pad_peer_query (srcpad, query);
        /* caption pad: answer from the caption template caps */
        GstCaps *templ = gst_static_pad_template_get_caps (&captiontemplate);

        gst_query_parse_caps (query, &filter);
              gst_caps_intersect_full (filter, templ, GST_CAPS_INTERSECT_FIRST);
          gst_query_set_caps_result (query, caps);
          gst_caps_unref (caps);
          gst_query_set_caps_result (query, templ);
        gst_caps_unref (templ);
    case GST_QUERY_ACCEPT_CAPS:
      if (GST_PAD_CAST (aggpad) == video_sinkpad) {
        ret = gst_pad_peer_query (srcpad, query);
        GstCaps *templ = gst_static_pad_template_get_caps (&captiontemplate);

        gst_query_parse_accept_caps (query, &caps);
        gst_query_set_accept_caps_result (query, gst_caps_is_subset (caps,
        gst_caps_unref (templ);
      /* everything else goes through the base class */
      ret = GST_AGGREGATOR_CLASS (parent_class)->sink_query (aggregator,

  gst_object_unref (video_sinkpad);
/* GstAggregator::peek_next_sample: expose the captions collected for the
 * current frame (as a buffer-list sample) on the caption pad, or the
 * queued video buffer on the video pad.  Returns NULL when the pad has
 * nothing pending.
 * NOTE(review): some braces/else lines are elided in this chunk; comments
 * annotate the visible fragment only. */
gst_cc_combiner_peek_next_sample (GstAggregator * agg,
    GstAggregatorPad * aggpad)
  GstAggregatorPad *caption_pad, *video_pad;
  GstCCCombiner *self = GST_CCCOMBINER (agg);
  GstSample *res = NULL;

      GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
          (self), "caption"));
      GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST

  if (aggpad == caption_pad) {
    if (self->current_frame_captions->len > 0) {
      GstCaps *caps = gst_pad_get_current_caps (GST_PAD (aggpad));
      GstBufferList *buflist = gst_buffer_list_new ();

      /* one sample carrying all captions collected for this frame */
      for (i = 0; i < self->current_frame_captions->len; i++) {
        CaptionData *caption_data =
            &g_array_index (self->current_frame_captions, CaptionData, i);

        gst_buffer_list_add (buflist, gst_buffer_ref (caption_data->buffer));

      res = gst_sample_new (NULL, caps, &aggpad->segment, NULL);
      gst_caps_unref (caps);

      gst_sample_set_buffer_list (res, buflist);
      gst_buffer_list_unref (buflist);
  } else if (aggpad == video_pad) {
    if (self->current_video_buffer) {
      GstCaps *caps = gst_pad_get_current_caps (GST_PAD (aggpad));

      res = gst_sample_new (self->current_video_buffer,
          caps, &aggpad->segment, NULL);
      gst_caps_unref (caps);

    gst_object_unref (caption_pad);
    gst_object_unref (video_pad);
1295 static GstStateChangeReturn
1296 gst_cc_combiner_change_state (GstElement * element, GstStateChange transition)
1298 GstCCCombiner *self = GST_CCCOMBINER (element);
1300 switch (transition) {
1301 case GST_STATE_CHANGE_READY_TO_PAUSED:
1302 self->schedule = self->prop_schedule;
1303 self->max_scheduled = self->prop_max_scheduled;
1304 self->output_padding = self->prop_output_padding;
1310 return GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1314 gst_cc_combiner_set_property (GObject * object, guint prop_id,
1315 const GValue * value, GParamSpec * pspec)
1317 GstCCCombiner *self = GST_CCCOMBINER (object);
1321 self->prop_schedule = g_value_get_boolean (value);
1323 case PROP_MAX_SCHEDULED:
1324 self->prop_max_scheduled = g_value_get_uint (value);
1326 case PROP_OUTPUT_PADDING:
1327 self->prop_output_padding = g_value_get_boolean (value);
1330 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1336 gst_cc_combiner_get_property (GObject * object, guint prop_id, GValue * value,
1339 GstCCCombiner *self = GST_CCCOMBINER (object);
1343 g_value_set_boolean (value, self->prop_schedule);
1345 case PROP_MAX_SCHEDULED:
1346 g_value_set_uint (value, self->prop_max_scheduled);
1348 case PROP_OUTPUT_PADDING:
1349 g_value_set_boolean (value, self->prop_output_padding);
1352 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1358 gst_cc_combiner_class_init (GstCCCombinerClass * klass)
1360 GObjectClass *gobject_class;
1361 GstElementClass *gstelement_class;
1362 GstAggregatorClass *aggregator_class;
1364 gobject_class = (GObjectClass *) klass;
1365 gstelement_class = (GstElementClass *) klass;
1366 aggregator_class = (GstAggregatorClass *) klass;
1368 gobject_class->finalize = gst_cc_combiner_finalize;
1369 gobject_class->set_property = gst_cc_combiner_set_property;
1370 gobject_class->get_property = gst_cc_combiner_get_property;
1372 gst_element_class_set_static_metadata (gstelement_class,
1373 "Closed Caption Combiner",
1375 "Combines GstVideoCaptionMeta with video input stream",
1376 "Sebastian Dröge <sebastian@centricular.com>");
1379 * GstCCCombiner:schedule:
1381 * Controls whether caption buffers should be smoothly scheduled
1382 * in order to have exactly one per output video buffer.
1384 * This can involve rewriting input captions, for example when the
1385 * input is CDP sequence counters are rewritten, time codes are dropped
1386 * and potentially re-injected if the input video frame had a time code
1389 * Caption buffers may also get split up in order to assign captions to
1390 * the correct field when the input is interlaced.
1392 * This can also imply that the input will drift from synchronization,
1393 * when there isn't enough padding in the input stream to catch up. In
1394 * that case the element will start dropping old caption buffers once
1395 * the number of buffers in its internal queue reaches
1396 * #GstCCCombiner:max-scheduled.
1398 * When this is set to %FALSE, the behaviour of this element is essentially
1403 g_object_class_install_property (G_OBJECT_CLASS (klass),
1404 PROP_SCHEDULE, g_param_spec_boolean ("schedule",
1406 "Schedule caption buffers so that exactly one is output per video frame",
1408 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1409 GST_PARAM_MUTABLE_READY));
1412 * GstCCCombiner:max-scheduled:
1414 * Controls the number of scheduled buffers after which the element
1415 * will start dropping old buffers from its internal queues. See
1416 * #GstCCCombiner:schedule.
1420 g_object_class_install_property (G_OBJECT_CLASS (klass),
1421 PROP_MAX_SCHEDULED, g_param_spec_uint ("max-scheduled",
1423 "Maximum number of buffers to queue for scheduling", 0, G_MAXUINT,
1424 DEFAULT_MAX_SCHEDULED,
1425 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1426 GST_PARAM_MUTABLE_READY));
1429 * GstCCCombiner:output-padding:
1431 * When #GstCCCombiner:schedule is %TRUE, this property controls
1432 * whether the output closed caption meta stream will be padded.
1436 g_object_class_install_property (G_OBJECT_CLASS (klass),
1437 PROP_OUTPUT_PADDING, g_param_spec_boolean ("output-padding",
1439 "Whether to output padding packets when schedule=true",
1440 DEFAULT_OUTPUT_PADDING,
1441 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1442 GST_PARAM_MUTABLE_READY));
1445 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
1446 &sinktemplate, GST_TYPE_AGGREGATOR_PAD);
1447 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
1448 &srctemplate, GST_TYPE_AGGREGATOR_PAD);
1449 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
1450 &captiontemplate, GST_TYPE_AGGREGATOR_PAD);
1452 gstelement_class->change_state =
1453 GST_DEBUG_FUNCPTR (gst_cc_combiner_change_state);
1455 aggregator_class->aggregate = gst_cc_combiner_aggregate;
1456 aggregator_class->stop = gst_cc_combiner_stop;
1457 aggregator_class->flush = gst_cc_combiner_flush;
1458 aggregator_class->create_new_pad = gst_cc_combiner_create_new_pad;
1459 aggregator_class->sink_event = gst_cc_combiner_sink_event;
1460 aggregator_class->negotiate = NULL;
1461 aggregator_class->get_next_time = gst_aggregator_simple_get_next_time;
1462 aggregator_class->src_query = gst_cc_combiner_src_query;
1463 aggregator_class->sink_query = gst_cc_combiner_sink_query;
1464 aggregator_class->peek_next_sample = gst_cc_combiner_peek_next_sample;
1466 GST_DEBUG_CATEGORY_INIT (gst_cc_combiner_debug, "cccombiner",
1467 0, "Closed Caption combiner");
1471 gst_cc_combiner_init (GstCCCombiner * self)
1473 GstPadTemplate *templ;
1474 GstAggregatorPad *agg_pad;
1476 templ = gst_static_pad_template_get (&sinktemplate);
1477 agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
1478 "name", "sink", "direction", GST_PAD_SINK, "template", templ, NULL);
1479 gst_object_unref (templ);
1480 gst_element_add_pad (GST_ELEMENT_CAST (self), GST_PAD_CAST (agg_pad));
1482 self->current_frame_captions =
1483 g_array_new (FALSE, FALSE, sizeof (CaptionData));
1484 g_array_set_clear_func (self->current_frame_captions,
1485 (GDestroyNotify) caption_data_clear);
1487 self->current_video_running_time = self->current_video_running_time_end =
1488 self->previous_video_running_time_end = GST_CLOCK_TIME_NONE;
1490 self->caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
1492 self->prop_schedule = DEFAULT_SCHEDULE;
1493 self->prop_max_scheduled = DEFAULT_MAX_SCHEDULED;
1494 self->prop_output_padding = DEFAULT_OUTPUT_PADDING;
1495 self->scheduled[0] =
1496 gst_queue_array_new_for_struct (sizeof (CaptionQueueItem), 0);
1497 self->scheduled[1] =
1498 gst_queue_array_new_for_struct (sizeof (CaptionQueueItem), 0);
1499 gst_queue_array_set_clear_func (self->scheduled[0],
1500 (GDestroyNotify) clear_scheduled);
1501 gst_queue_array_set_clear_func (self->scheduled[1],
1502 (GDestroyNotify) clear_scheduled);
1503 self->cdp_hdr_sequence_cntr = 0;
1504 self->cdp_fps_entry = &null_fps_entry;