3 * Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
27 #include <gst/base/base.h>
28 #include <gst/video/video.h>
31 #include "gstcccombiner.h"
/* Debug category for this element; GST_CAT_DEFAULT routes this file's
 * GST_LOG/GST_DEBUG/... macros to it. */
33 GST_DEBUG_CATEGORY_STATIC (gst_cc_combiner_debug);
34 #define GST_CAT_DEFAULT gst_cc_combiner_debug
/* Static pad templates: a video "sink" pad, a "src" pad, and a request
 * "caption" pad restricted to CEA-608 (raw / s334-1a) and CEA-708
 * (cc_data / cdp) caption streams.
 * NOTE(review): the caps arguments of the sink/src templates are not visible
 * in this view — confirm against the full file. */
36 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink",
41 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src",
46 static GstStaticPadTemplate captiontemplate =
47 GST_STATIC_PAD_TEMPLATE ("caption",
51 ("closedcaption/x-cea-608,format={ (string) raw, (string) s334-1a}; "
52 "closedcaption/x-cea-708,format={ (string) cc_data, (string) cdp }"));
/* GObject type boilerplate: GstCCCombiner derives from GstAggregator, and the
 * element is registered under the name "cccombiner" with rank NONE. */
54 #define parent_class gst_cc_combiner_parent_class
55 G_DEFINE_TYPE (GstCCCombiner, gst_cc_combiner, GST_TYPE_AGGREGATOR);
56 GST_ELEMENT_REGISTER_DEFINE (cccombiner, "cccombiner",
57 GST_RANK_NONE, GST_TYPE_CCCOMBINER);
/* Property defaults: cap of 30 queued caption buffers per field (see
 * queue_caption), scheduling enabled, and padding emitted when no caption
 * data is available (see make_padding / dequeue_caption_*). */
67 #define DEFAULT_MAX_SCHEDULED 30
68 #define DEFAULT_SCHEDULE TRUE
69 #define DEFAULT_OUTPUT_PADDING TRUE
/* Fields of the caption bookkeeping structs; the struct headers are sampled
 * out of this view. caption_type tags a collected caption buffer, while
 * running_time/stream_time are kept per queued item for QoS reporting (see
 * queue_caption's gst_message_new_qos call).
 * NOTE(review): presumably these belong to CaptionData and CaptionQueueItem
 * respectively — confirm in the full file. */
73 GstVideoCaptionType caption_type;
80 GstClockTime running_time;
81 GstClockTime stream_time;
/* Clear function for CaptionData entries: drops the ref on the held buffer. */
85 caption_data_clear (CaptionData * data)
87 gst_buffer_unref (data->buffer);
/* Clear function for CaptionQueueItem entries in the scheduled queues:
 * drops the ref on the held buffer. */
91 clear_scheduled (CaptionQueueItem * item)
93 gst_buffer_unref (item->buffer);
/* GObject finalize: frees both per-field scheduled caption queues and the
 * array of captions collected for the current video frame, then chains up
 * to the parent class. */
97 gst_cc_combiner_finalize (GObject * object)
99 GstCCCombiner *self = GST_CCCOMBINER (object);
101 gst_queue_array_free (self->scheduled[0]);
102 gst_queue_array_free (self->scheduled[1]);
103 g_array_unref (self->current_frame_captions);
104 self->current_frame_captions = NULL;
106 G_OBJECT_CLASS (parent_class)->finalize (object);
/* Internal flow return used by collect_captions/aggregate: the current video
 * buffer cannot be finished yet because more caption data is needed. */
109 #define GST_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS
111 static const guint8 *
/* Parses a CDP packet and returns a pointer into @cdp at the start of the
 * cc_data bytes, storing their length in @cc_data_len; returns NULL (the
 * initial value of cc_data) for malformed input. All byte-reader accesses are
 * the _unchecked variants, so each read relies on a preceding remaining-length
 * check; several of those checks fall in lines not visible in this view. */
112 extract_cdp (const guint8 * cdp, guint cdp_len, guint * cc_data_len)
119 const guint8 *cc_data = NULL;
123 /* Header + footer length */
128 gst_byte_reader_init (&br, cdp, cdp_len);
/* cdp_identifier */
129 u16 = gst_byte_reader_get_uint16_be_unchecked (&br);
/* cdp_length */
134 u8 = gst_byte_reader_get_uint8_unchecked (&br);
139 gst_byte_reader_skip_unchecked (&br, 1);
141 flags = gst_byte_reader_get_uint8_unchecked (&br);
/* 0x40 = ccdata_present; without it there is no cc_data to extract */
144 if ((flags & 0x40) == 0) {
148 /* cdp_hdr_sequence_cntr */
149 gst_byte_reader_skip_unchecked (&br, 2);
151 /* time_code_present */
153 if (gst_byte_reader_get_remaining (&br) < 5) {
156 gst_byte_reader_skip_unchecked (&br, 5);
163 if (gst_byte_reader_get_remaining (&br) < 2) {
166 u8 = gst_byte_reader_get_uint8_unchecked (&br);
171 cc_count = gst_byte_reader_get_uint8_unchecked (&br);
/* top 3 bits of cc_count are marker bits and must all be set */
172 if ((cc_count & 0xe0) != 0xe0) {
181 if (gst_byte_reader_get_remaining (&br) < len)
184 cc_data = gst_byte_reader_get_data_unchecked (&br, len);
/* Upper bounds used for buffer allocation: a whole CDP packet is at most
 * 256 bytes; MAX_CEA608_LEN bounds per-frame CEA-608 byte pairs. */
192 #define MAX_CDP_PACKET_LEN 256
193 #define MAX_CEA608_LEN 32
/* Per-framerate CDP parameters: CDP framerate id, fps numerator/denominator,
 * and the maximum cc/cea608 triple counts allowed per CDP packet at that
 * rate (see make_cdp's use of fps_entry->max_cc_count). */
195 static const struct cdp_fps_entry cdp_fps_table[] = {
196 {0x1f, 24000, 1001, 25, 22, 3 /* FIXME: alternating max cea608 count! */ },
197 {0x2f, 24, 1, 25, 22, 2},
198 {0x3f, 25, 1, 24, 22, 2},
199 {0x4f, 30000, 1001, 20, 18, 2},
200 {0x5f, 30, 1, 20, 18, 2},
201 {0x6f, 50, 1, 12, 11, 1},
202 {0x7f, 60000, 1001, 10, 9, 1},
203 {0x8f, 60, 1, 10, 9, 1},
/* Sentinel returned when no table entry matches; trailing members are
 * implicitly zero-initialized. */
205 static const struct cdp_fps_entry null_fps_entry = { 0, 0, 0, 0 };
207 static const struct cdp_fps_entry *
/* Looks up the CDP parameter entry for an exact fps_n/fps_d match; returns
 * the all-zero null_fps_entry sentinel when the framerate is not in the
 * table (no fraction reduction is attempted). */
208 cdp_fps_entry_from_fps (guint fps_n, guint fps_d)
211 for (i = 0; i < G_N_ELEMENTS (cdp_fps_table); i++) {
212 if (cdp_fps_table[i].fps_n == fps_n && cdp_fps_table[i].fps_d == fps_d)
213 return &cdp_fps_table[i];
215 return &null_fps_entry;
/* Builds a complete CDP packet wrapping @cc_data: 0x9669 header with a
 * length byte patched in afterwards, the framerate id from @fps_entry, an
 * optional time_code_section when @tc carries a valid timecode, the ccdata
 * section padded with 0xfa 0x00 0x00 triples up to max_cc_count, and a
 * cdp_footer with the sequence counter and a checksum that makes the packet
 * bytes sum to 0 mod 256. Returns a buffer truncated to the written length.
 * Side effect: increments self->cdp_hdr_sequence_cntr. */
220 make_cdp (GstCCCombiner * self, const guint8 * cc_data, guint cc_data_len,
221 const struct cdp_fps_entry *fps_entry, const GstVideoTimeCode * tc)
224 guint8 flags, checksum;
226 GstBuffer *ret = gst_buffer_new_allocate (NULL, MAX_CDP_PACKET_LEN, NULL);
229 gst_buffer_map (ret, &map, GST_MAP_WRITE);
231 gst_byte_writer_init_with_data (&bw, map.data, MAX_CDP_PACKET_LEN, FALSE);
/* cdp_identifier */
232 gst_byte_writer_put_uint16_be_unchecked (&bw, 0x9669);
233 /* Write a length of 0 for now */
234 gst_byte_writer_put_uint8_unchecked (&bw, 0);
236 gst_byte_writer_put_uint8_unchecked (&bw, fps_entry->fps_idx);
238 /* caption_service_active */
244 if (tc && tc->config.fps_n > 0)
250 gst_byte_writer_put_uint8_unchecked (&bw, flags);
252 gst_byte_writer_put_uint16_be_unchecked (&bw, self->cdp_hdr_sequence_cntr);
/* time_code_section, only when a valid timecode was supplied */
254 if (tc && tc->config.fps_n > 0) {
257 gst_byte_writer_put_uint8_unchecked (&bw, 0x71);
258 /* reserved 11 - 2 bits */
260 /* tens of hours - 2 bits */
261 u8 |= ((tc->hours / 10) & 0x3) << 4;
262 /* units of hours - 4 bits */
263 u8 |= (tc->hours % 10) & 0xf;
264 gst_byte_writer_put_uint8_unchecked (&bw, u8);
266 /* reserved 1 - 1 bit */
268 /* tens of minutes - 3 bits */
269 u8 |= ((tc->minutes / 10) & 0x7) << 4;
270 /* units of minutes - 4 bits */
271 u8 |= (tc->minutes % 10) & 0xf;
272 gst_byte_writer_put_uint8_unchecked (&bw, u8);
274 /* field flag - 1 bit */
275 u8 = tc->field_count < 2 ? 0x00 : 0x80;
276 /* tens of seconds - 3 bits */
277 u8 |= ((tc->seconds / 10) & 0x7) << 4;
278 /* units of seconds - 4 bits */
279 u8 |= (tc->seconds % 10) & 0xf;
280 gst_byte_writer_put_uint8_unchecked (&bw, u8);
282 /* drop frame flag - 1 bit */
283 u8 = (tc->config.flags & GST_VIDEO_TIME_CODE_FLAGS_DROP_FRAME) ? 0x80 :
285 /* reserved0 - 1 bit */
286 /* tens of frames - 2 bits */
287 u8 |= ((tc->frames / 10) & 0x3) << 4;
288 /* units of frames 4 bits */
289 u8 |= (tc->frames % 10) & 0xf;
290 gst_byte_writer_put_uint8_unchecked (&bw, u8);
/* ccdata_section: id, marker bits | cc_count, then the payload */
293 gst_byte_writer_put_uint8_unchecked (&bw, 0x72);
294 gst_byte_writer_put_uint8_unchecked (&bw, 0xe0 | fps_entry->max_cc_count);
295 gst_byte_writer_put_data_unchecked (&bw, cc_data, cc_data_len);
/* Pad with empty triples up to max_cc_count; the loop-body increment of
 * cc_data_len is not visible in this view. */
296 while (fps_entry->max_cc_count > cc_data_len / 3) {
297 gst_byte_writer_put_uint8_unchecked (&bw, 0xfa);
298 gst_byte_writer_put_uint8_unchecked (&bw, 0x00);
299 gst_byte_writer_put_uint8_unchecked (&bw, 0x00);
/* cdp_footer */
303 gst_byte_writer_put_uint8_unchecked (&bw, 0x74);
304 gst_byte_writer_put_uint16_be_unchecked (&bw, self->cdp_hdr_sequence_cntr);
305 self->cdp_hdr_sequence_cntr++;
306 /* We calculate the checksum afterwards */
307 gst_byte_writer_put_uint8_unchecked (&bw, 0);
/* Patch the real packet length into byte 2 of the header */
309 len = gst_byte_writer_get_pos (&bw);
310 gst_byte_writer_set_pos (&bw, 2);
311 gst_byte_writer_put_uint8_unchecked (&bw, len);
314 for (i = 0; i < len; i++) {
315 checksum += map.data[i];
/* Two's-complement checksum: packet bytes including this one sum to 0 mod 256 */
318 checksum = 256 - checksum;
319 map.data[len - 1] = checksum;
321 gst_buffer_unmap (ret, &map);
323 gst_buffer_set_size (ret, len);
/* Creates a padding caption buffer for the configured caption type and the
 * given @field: a full padding CDP for CEA708_CDP, a 3-byte cc_data triple
 * for CEA708_RAW, a 3-byte s334-1a triple for CEA608_S334_1A, or a 2-byte
 * pair for CEA608_RAW. The 0x80 0x80 payload bytes are CEA-608 padding. */
329 make_padding (GstCCCombiner * self, const GstVideoTimeCode * tc, guint field)
331 GstBuffer *ret = NULL;
333 switch (self->caption_type) {
334 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
/* Padding pairs for both 608 fields, wrapped into a CDP */
336 const guint8 cc_data[6] = { 0xfc, 0x80, 0x80, 0xf9, 0x80, 0x80 };
338 ret = make_cdp (self, cc_data, 6, self->cdp_fps_entry, tc);
341 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
345 ret = gst_buffer_new_allocate (NULL, 3, NULL);
347 gst_buffer_map (ret, &map, GST_MAP_WRITE);
/* cc_valid | cc_type carrying the 608 field in the low bit */
349 map.data[0] = 0xfc | (field & 0x01);
353 gst_buffer_unmap (ret, &map);
356 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
360 ret = gst_buffer_new_allocate (NULL, 3, NULL);
362 gst_buffer_map (ret, &map, GST_MAP_WRITE);
/* s334-1a: 0x80 marks field 1 (field==0), 0x00 field 2 */
364 map.data[0] = field == 0 ? 0x80 : 0x00;
368 gst_buffer_unmap (ret, &map);
371 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
375 ret = gst_buffer_new_allocate (NULL, 2, NULL);
377 gst_buffer_map (ret, &map, GST_MAP_WRITE);
382 gst_buffer_unmap (ret, &map);
/* Appends @scheduled (ownership transferred) to the per-field scheduled
 * queue, stamping its running/stream time from the caption pad's segment.
 * Field-1 data is discarded for progressive input. When the queue is already
 * at max_scheduled, the oldest item is dropped and a QoS message is posted. */
393 queue_caption (GstCCCombiner * self, GstBuffer * scheduled, guint field)
395 GstAggregatorPad *caption_pad;
396 CaptionQueueItem item;
/* Progressive video has no second field; drop field-1 captions outright */
398 if (self->progressive && field == 1) {
399 gst_buffer_unref (scheduled);
404 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
407 g_assert (gst_queue_array_get_length (self->scheduled[field]) <=
408 self->max_scheduled);
410 if (gst_queue_array_get_length (self->scheduled[field]) ==
411 self->max_scheduled) {
412 CaptionQueueItem *dropped =
413 gst_queue_array_pop_tail_struct (self->scheduled[field]);
415 GST_WARNING_OBJECT (self,
416 "scheduled queue runs too long, dropping %" GST_PTR_FORMAT, dropped);
418 gst_element_post_message (GST_ELEMENT_CAST (self),
419 gst_message_new_qos (GST_OBJECT_CAST (self), FALSE,
420 dropped->running_time, dropped->stream_time,
/* NOTE(review): GST_BUFFER_DURATION is applied to the CaptionQueueItem
 * pointer here, while GST_BUFFER_PTS on the same line uses dropped->buffer —
 * this looks like it should be GST_BUFFER_DURATION (dropped->buffer). */
421 GST_BUFFER_PTS (dropped->buffer), GST_BUFFER_DURATION (dropped)));
423 gst_buffer_unref (dropped->buffer);
426 gst_object_unref (caption_pad);
428 item.buffer = scheduled;
430 gst_segment_to_running_time (&caption_pad->segment, GST_FORMAT_TIME,
431 GST_BUFFER_PTS (scheduled));
433 gst_segment_to_stream_time (&caption_pad->segment, GST_FORMAT_TIME,
434 GST_BUFFER_PTS (scheduled));
436 gst_queue_array_push_tail_struct (self->scheduled[field], &item);
/* Schedules an incoming CDP caption buffer: extracts its cc_data, scans the
 * 608 triples for actual (non-padding) content, re-wraps the cc_data in a
 * fresh CDP via make_cdp and queues it on field 0. PTS/duration are only
 * carried for QoS reporting, as noted below. */
440 schedule_cdp (GstCCCombiner * self, const GstVideoTimeCode * tc,
441 const guint8 * data, guint len, GstClockTime pts, GstClockTime duration)
443 const guint8 *cc_data;
445 gboolean inject = FALSE;
447 if ((cc_data = extract_cdp (data, len, &cc_data_len))) {
450 for (i = 0; i < cc_data_len / 3; i++) {
/* 0x04 = cc_valid; low two bits select the cc_type */
451 gboolean cc_valid = (cc_data[i * 3] & 0x04) == 0x04;
452 guint8 cc_type = cc_data[i * 3] & 0x03;
/* cc_type 0/1 are CEA-608 field 1/2; 0x80 0x80 pairs are padding */
457 if (cc_type == 0x00 || cc_type == 0x01) {
458 if (cc_data[i * 3 + 1] != 0x80 || cc_data[i * 3 + 2] != 0x80) {
472 make_cdp (self, cc_data, cc_data_len, self->cdp_fps_entry, tc);
474 /* We only set those for QoS reporting purposes */
475 GST_BUFFER_PTS (buf) = pts;
476 GST_BUFFER_DURATION (buf) = duration;
478 queue_caption (self, buf, 0);
/* Splits s334-1a caption triples by field (top bit of the first byte set =
 * field 1 / index 0) and queues a separate buffer per field, skipping
 * 0x80 0x80 padding pairs. Input length is truncated to a multiple of 3. */
483 schedule_cea608_s334_1a (GstCCCombiner * self, guint8 * data, guint len,
484 GstClockTime pts, GstClockTime duration)
486 guint8 field0_data[3], field1_data[3];
487 guint field0_len = 0, field1_len = 0;
489 gboolean field0_608 = FALSE, field1_608 = FALSE;
492 GST_WARNING ("Invalid cc_data buffer size %u. Truncating to a multiple "
494 len = len - (len % 3);
497 for (i = 0; i < len / 3; i++) {
/* 0x80 in the first byte marks field 1 data in s334-1a */
498 if (data[i * 3] & 0x80) {
504 if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)
507 field0_data[field0_len++] = data[i * 3];
508 field0_data[field0_len++] = data[i * 3 + 1];
509 field0_data[field0_len++] = data[i * 3 + 2];
516 if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)
519 field1_data[field1_len++] = data[i * 3];
520 field1_data[field1_len++] = data[i * 3 + 1];
521 field1_data[field1_len++] = data[i * 3 + 2];
/* PTS/duration on the queued buffers exist only for QoS reporting */
525 if (field0_len > 0) {
526 GstBuffer *buf = gst_buffer_new_allocate (NULL, field0_len, NULL);
528 gst_buffer_fill (buf, 0, field0_data, field0_len);
529 GST_BUFFER_PTS (buf) = pts;
530 GST_BUFFER_DURATION (buf) = duration;
532 queue_caption (self, buf, 0);
535 if (field1_len > 0) {
536 GstBuffer *buf = gst_buffer_new_allocate (NULL, field1_len, NULL);
538 gst_buffer_fill (buf, 0, field1_data, field1_len);
539 GST_BUFFER_PTS (buf) = pts;
540 GST_BUFFER_DURATION (buf) = duration;
542 queue_caption (self, buf, 1);
/* Splits raw CEA-708 cc_data triples by destination: 608 field-1 triples
 * (cc_type 0) go to field 0, 608 field-2 triples (cc_type 1) go to field 1,
 * and 708 CCP triples are appended to field 0 after any 608 data. Padding
 * 0x80 0x80 pairs are skipped. Length is truncated to a multiple of 3 and
 * one buffer per non-empty field is queued. */
547 schedule_cea708_raw (GstCCCombiner * self, guint8 * data, guint len,
548 GstClockTime pts, GstClockTime duration)
550 guint8 field0_data[MAX_CDP_PACKET_LEN], field1_data[3];
551 guint field0_len = 0, field1_len = 0;
553 gboolean field0_608 = FALSE, field1_608 = FALSE;
554 gboolean started_ccp = FALSE;
557 GST_WARNING ("Invalid cc_data buffer size %u. Truncating to a multiple "
559 len = len - (len % 3);
562 for (i = 0; i < len / 3; i++) {
/* 0x04 = cc_valid; low two bits select the cc_type */
563 gboolean cc_valid = (data[i * 3] & 0x04) == 0x04;
564 guint8 cc_type = data[i * 3] & 0x03;
/* cc_type 0: CEA-608 field 1 */
567 if (cc_type == 0x00) {
576 if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)
579 field0_data[field0_len++] = data[i * 3];
580 field0_data[field0_len++] = data[i * 3 + 1];
581 field0_data[field0_len++] = data[i * 3 + 2];
/* cc_type 1: CEA-608 field 2 */
582 } else if (cc_type == 0x01) {
591 if (data[i * 3 + 1] == 0x80 && data[i * 3 + 2] == 0x80)
594 field1_data[field1_len++] = data[i * 3];
595 field1_data[field1_len++] = data[i * 3 + 1];
596 field1_data[field1_len++] = data[i * 3 + 2];
/* Remaining cc_types are 708 CCP data, carried on field 0 */
608 if (cc_type == 0x00 || cc_type == 0x01)
611 field0_data[field0_len++] = data[i * 3];
612 field0_data[field0_len++] = data[i * 3 + 1];
613 field0_data[field0_len++] = data[i * 3 + 2];
616 if (field0_len > 0) {
617 GstBuffer *buf = gst_buffer_new_allocate (NULL, field0_len, NULL);
619 gst_buffer_fill (buf, 0, field0_data, field0_len);
620 GST_BUFFER_PTS (buf) = pts;
621 GST_BUFFER_DURATION (buf) = duration;
623 queue_caption (self, buf, 0);
626 if (field1_len > 0) {
627 GstBuffer *buf = gst_buffer_new_allocate (NULL, field1_len, NULL);
629 gst_buffer_fill (buf, 0, field1_data, field1_len);
630 GST_BUFFER_PTS (buf) = pts;
631 GST_BUFFER_DURATION (buf) = duration;
633 queue_caption (self, buf, 1);
/* Queues a raw CEA-608 buffer (field 0 only) unless its first byte pair is
 * the 0x80 0x80 padding pair; takes an extra ref since the caller still
 * owns @buffer. Most of this function's body is not visible in this view. */
638 schedule_cea608_raw (GstCCCombiner * self, guint8 * data, guint len,
645 if (data[0] != 0x80 || data[1] != 0x80) {
646 queue_caption (self, gst_buffer_ref (buffer), 0);
/* Maps @caption_buf read-only and dispatches its data to the scheduler
 * matching self->caption_type (CDP, raw 708, s334-1a, or raw 608). The
 * buffer's PTS/duration are forwarded for QoS bookkeeping. */
652 schedule_caption (GstCCCombiner * self, GstBuffer * caption_buf,
653 const GstVideoTimeCode * tc)
656 GstClockTime pts, duration;
658 pts = GST_BUFFER_PTS (caption_buf);
659 duration = GST_BUFFER_DURATION (caption_buf);
661 gst_buffer_map (caption_buf, &map, GST_MAP_READ);
663 switch (self->caption_type) {
664 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
665 schedule_cdp (self, tc, map.data, map.size, pts, duration);
667 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
668 schedule_cea708_raw (self, map.data, map.size, pts, duration);
670 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
671 schedule_cea608_s334_1a (self, map.data, map.size, pts, duration);
673 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
/* Raw 608 passes the buffer itself so the scheduler can ref it */
674 schedule_cea608_raw (self, map.data, map.size, caption_buf);
680 gst_buffer_unmap (caption_buf, &map);
/* Moves one scheduled caption for @field into current_frame_captions,
 * transferring the queued buffer's ref. If the queue is empty, a padding
 * buffer is generated instead — but only when not draining and the
 * output-padding property is enabled. */
684 dequeue_caption_one_field (GstCCCombiner * self, const GstVideoTimeCode * tc,
685 guint field, gboolean drain)
687 CaptionQueueItem *scheduled;
688 CaptionData caption_data;
690 if ((scheduled = gst_queue_array_pop_head_struct (self->scheduled[field]))) {
691 caption_data.buffer = scheduled->buffer;
692 caption_data.caption_type = self->caption_type;
693 g_array_append_val (self->current_frame_captions, caption_data);
694 } else if (!drain && self->output_padding) {
695 caption_data.caption_type = self->caption_type;
696 caption_data.buffer = make_padding (self, tc, field);
697 g_array_append_val (self->current_frame_captions, caption_data);
/* Pops one scheduled caption per field, substituting padding for a missing
 * field when output_padding is set, concatenates field 0 + field 1 into a
 * single buffer and appends it to current_frame_captions. When draining with
 * both queues empty, nothing is emitted. */
702 dequeue_caption_both_fields (GstCCCombiner * self, const GstVideoTimeCode * tc,
705 CaptionQueueItem *field0_scheduled, *field1_scheduled;
706 GstBuffer *field0_buffer = NULL, *field1_buffer = NULL;
707 CaptionData caption_data;
709 field0_scheduled = gst_queue_array_pop_head_struct (self->scheduled[0]);
710 field1_scheduled = gst_queue_array_pop_head_struct (self->scheduled[1]);
712 if (drain && !field0_scheduled && !field1_scheduled) {
716 if (field0_scheduled) {
717 field0_buffer = field0_scheduled->buffer;
718 } else if (self->output_padding) {
719 field0_buffer = make_padding (self, tc, 0);
722 if (field1_scheduled) {
723 field1_buffer = field1_scheduled->buffer;
724 } else if (self->output_padding) {
725 field1_buffer = make_padding (self, tc, 1);
728 if (field0_buffer || field1_buffer) {
/* gst_buffer_append consumes both refs and returns the combined buffer */
729 if (field0_buffer && field1_buffer) {
730 caption_data.buffer = gst_buffer_append (field0_buffer, field1_buffer);
731 } else if (field0_buffer) {
732 caption_data.buffer = field0_buffer;
733 } else if (field1_buffer) {
734 caption_data.buffer = field1_buffer;
736 g_assert_not_reached ();
739 caption_data.caption_type = self->caption_type;
741 g_array_append_val (self->current_frame_captions, caption_data);
/* Core per-frame work: gathers all caption buffers that overlap the queued
 * video buffer's running-time window, either collecting them directly
 * (schedule off) or feeding them to the scheduler and then dequeuing
 * per-field data according to the video's interlacing flags. Attaches the
 * collected captions as GstVideoCaptionMeta and pushes the video buffer
 * downstream. Returns GST_FLOW_NEED_DATA when more caption input is needed
 * before the frame can be finished. */
746 gst_cc_combiner_collect_captions (GstCCCombiner * self, gboolean timeout)
748 GstAggregatorPad *src_pad =
749 GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (self));
750 GstAggregatorPad *caption_pad;
751 GstBuffer *video_buf;
752 GstVideoTimeCodeMeta *tc_meta;
753 GstVideoTimeCode *tc = NULL;
754 gboolean caption_pad_is_eos = FALSE;
756 g_assert (self->current_video_buffer != NULL);
759 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
761 /* No caption pad, forward buffer directly */
763 GST_LOG_OBJECT (self, "No caption pad, passing through video");
764 video_buf = self->current_video_buffer;
765 gst_aggregator_selected_samples (GST_AGGREGATOR_CAST (self),
766 GST_BUFFER_PTS (video_buf), GST_BUFFER_DTS (video_buf),
767 GST_BUFFER_DURATION (video_buf), NULL);
768 self->current_video_buffer = NULL;
772 tc_meta = gst_buffer_get_video_time_code_meta (self->current_video_buffer);
778 GST_LOG_OBJECT (self, "Trying to collect captions for queued video buffer");
/* Loop over queued caption buffers until one lies beyond this frame */
780 GstBuffer *caption_buf;
781 GstClockTime caption_time;
782 CaptionData caption_data;
784 caption_buf = gst_aggregator_pad_peek_buffer (caption_pad);
786 if (gst_aggregator_pad_is_eos (caption_pad)) {
787 GST_DEBUG_OBJECT (self, "Caption pad is EOS, we're done");
789 caption_pad_is_eos = TRUE;
791 } else if (!timeout) {
792 GST_DEBUG_OBJECT (self, "Need more caption data");
793 gst_object_unref (caption_pad);
794 return GST_FLOW_NEED_DATA;
796 GST_DEBUG_OBJECT (self, "No caption data on timeout");
801 caption_time = GST_BUFFER_PTS (caption_buf);
802 if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
803 GST_ERROR_OBJECT (self, "Caption buffer without PTS");
805 gst_buffer_unref (caption_buf);
806 gst_object_unref (caption_pad);
808 return GST_FLOW_ERROR;
812 gst_segment_to_running_time (&caption_pad->segment, GST_FORMAT_TIME,
815 if (!GST_CLOCK_TIME_IS_VALID (caption_time)) {
816 GST_DEBUG_OBJECT (self, "Caption buffer outside segment, dropping");
818 gst_aggregator_pad_drop_buffer (caption_pad);
819 gst_buffer_unref (caption_buf);
824 if (gst_buffer_get_size (caption_buf) == 0 &&
825 GST_BUFFER_FLAG_IS_SET (caption_buf, GST_BUFFER_FLAG_GAP)) {
826 /* This is a gap, we can go ahead. We only consume it once its end point
827 * is behind the current video running time. Important to note that
828 * we can't deal with gaps with no duration (-1)
830 if (!GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DURATION (caption_buf))) {
831 GST_ERROR_OBJECT (self, "GAP buffer without a duration");
833 gst_buffer_unref (caption_buf);
834 gst_object_unref (caption_pad);
836 return GST_FLOW_ERROR;
/* NOTE(review): caption_buf is unreffed here yet GST_BUFFER_DURATION is
 * read from it on the following lines — verify ordering against the full
 * file; reading after the unref would be a use-after-free hazard. */
839 gst_buffer_unref (caption_buf);
841 if (caption_time + GST_BUFFER_DURATION (caption_buf) <
842 self->current_video_running_time_end) {
843 gst_aggregator_pad_drop_buffer (caption_pad);
850 /* Collected all caption buffers for this video buffer */
851 if (caption_time >= self->current_video_running_time_end) {
852 gst_buffer_unref (caption_buf);
854 } else if (!self->schedule) {
/* Without scheduling, late captions are dropped with a warning */
855 if (GST_CLOCK_TIME_IS_VALID (self->previous_video_running_time_end)) {
856 if (caption_time < self->previous_video_running_time_end) {
857 GST_WARNING_OBJECT (self,
858 "Caption buffer before end of last video frame, dropping");
860 gst_aggregator_pad_drop_buffer (caption_pad);
861 gst_buffer_unref (caption_buf);
864 } else if (caption_time < self->current_video_running_time) {
865 GST_WARNING_OBJECT (self,
866 "Caption buffer before current video frame, dropping");
868 gst_aggregator_pad_drop_buffer (caption_pad);
869 gst_buffer_unref (caption_buf);
874 /* This caption buffer has to be collected */
875 GST_LOG_OBJECT (self,
876 "Collecting caption buffer %p %" GST_TIME_FORMAT " for video buffer %p",
877 caption_buf, GST_TIME_ARGS (caption_time), self->current_video_buffer);
879 caption_data.caption_type = self->caption_type;
881 gst_aggregator_pad_drop_buffer (caption_pad);
883 if (!self->schedule) {
884 caption_data.buffer = caption_buf;
885 g_array_append_val (self->current_frame_captions, caption_data);
887 schedule_caption (self, caption_buf, tc);
888 gst_buffer_unref (caption_buf);
892 /* FIXME pad correctly according to fps */
/* In schedule mode, dequeue per-field captions according to the video
 * buffer's interlacing/field flags */
893 if (self->schedule) {
894 g_assert (self->current_frame_captions->len == 0);
896 switch (self->caption_type) {
897 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
899 /* Only relevant in alternate and mixed mode, no need to look at the caps */
900 if (GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
901 GST_VIDEO_BUFFER_FLAG_INTERLACED)) {
902 if (!GST_VIDEO_BUFFER_IS_BOTTOM_FIELD (self->current_video_buffer)) {
903 dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
906 dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
910 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
911 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
913 if (self->progressive) {
914 dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
915 } else if (GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
916 GST_VIDEO_BUFFER_FLAG_INTERLACED) &&
917 GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
918 GST_VIDEO_BUFFER_FLAG_ONEFIELD)) {
919 if (GST_VIDEO_BUFFER_IS_TOP_FIELD (self->current_video_buffer)) {
920 dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
922 dequeue_caption_one_field (self, tc, 1, caption_pad_is_eos);
925 dequeue_caption_both_fields (self, tc, caption_pad_is_eos);
929 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
931 if (self->progressive) {
932 dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
933 } else if (GST_BUFFER_FLAG_IS_SET (self->current_video_buffer,
934 GST_VIDEO_BUFFER_FLAG_INTERLACED)) {
935 if (!GST_VIDEO_BUFFER_IS_BOTTOM_FIELD (self->current_video_buffer)) {
936 dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
939 dequeue_caption_one_field (self, tc, 0, caption_pad_is_eos);
948 gst_aggregator_selected_samples (GST_AGGREGATOR_CAST (self),
949 GST_BUFFER_PTS (self->current_video_buffer),
950 GST_BUFFER_DTS (self->current_video_buffer),
951 GST_BUFFER_DURATION (self->current_video_buffer), NULL);
953 GST_LOG_OBJECT (self, "Attaching %u captions to buffer %p",
954 self->current_frame_captions->len, self->current_video_buffer);
/* Attach each collected caption as GstVideoCaptionMeta on a writable copy */
956 if (self->current_frame_captions->len > 0) {
959 video_buf = gst_buffer_make_writable (self->current_video_buffer);
960 self->current_video_buffer = NULL;
962 for (i = 0; i < self->current_frame_captions->len; i++) {
963 CaptionData *caption_data =
964 &g_array_index (self->current_frame_captions, CaptionData, i);
967 gst_buffer_map (caption_data->buffer, &map, GST_MAP_READ);
968 gst_buffer_add_video_caption_meta (video_buf, caption_data->caption_type,
970 gst_buffer_unmap (caption_data->buffer, &map);
973 g_array_set_size (self->current_frame_captions, 0);
975 GST_LOG_OBJECT (self, "No captions for buffer %p",
976 self->current_video_buffer);
977 video_buf = self->current_video_buffer;
978 self->current_video_buffer = NULL;
981 gst_object_unref (caption_pad);
/* Advance the src segment position past the pushed buffer */
984 src_pad->segment.position =
985 GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);
987 return gst_aggregator_finish_buffer (GST_AGGREGATOR_CAST (self), video_buf);
/* GstAggregator::aggregate vfunc. First ensures a video buffer is queued
 * with a known running-time window: the window end comes from the next
 * buffer's start, the buffer duration, or the configured framerate (or, on
 * video EOS, a 50ms/25fps assumption). Then delegates caption collection to
 * gst_cc_combiner_collect_captions and resets per-frame state once the
 * frame was fully handled. */
991 gst_cc_combiner_aggregate (GstAggregator * aggregator, gboolean timeout)
993 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
994 GstFlowReturn flow_ret = GST_FLOW_OK;
996 /* If we have no current video buffer, queue one. If we have one but
997 * its end running time is not known yet, try to determine it from the
998 * next video buffer */
999 if (!self->current_video_buffer
1000 || !GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end)) {
1001 GstAggregatorPad *video_pad;
1002 GstClockTime video_start;
1003 GstBuffer *video_buf;
1006 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
1007 (aggregator), "sink"));
1008 video_buf = gst_aggregator_pad_peek_buffer (video_pad);
1010 if (gst_aggregator_pad_is_eos (video_pad)) {
1011 GST_DEBUG_OBJECT (aggregator, "Video pad is EOS, we're done");
1013 /* Assume that this buffer ends where it started +50ms (25fps) and handle it */
1014 if (self->current_video_buffer) {
1015 self->current_video_running_time_end =
1016 self->current_video_running_time + 50 * GST_MSECOND;
1017 flow_ret = gst_cc_combiner_collect_captions (self, timeout);
1020 /* If we collected all captions for the remaining video frame we're
1021 * done, otherwise get called another time and go directly into the
1022 * outer branch for finishing the current video frame */
1023 if (flow_ret == GST_FLOW_NEED_DATA)
1024 flow_ret = GST_FLOW_OK;
1026 flow_ret = GST_FLOW_EOS;
1028 flow_ret = GST_FLOW_OK;
1031 gst_object_unref (video_pad);
/* A video buffer without a valid PTS cannot be placed in time — error out */
1035 video_start = GST_BUFFER_PTS (video_buf);
1036 if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
1037 gst_buffer_unref (video_buf);
1038 gst_object_unref (video_pad);
1040 GST_ERROR_OBJECT (aggregator, "Video buffer without PTS");
1042 return GST_FLOW_ERROR;
1046 gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
1048 if (!GST_CLOCK_TIME_IS_VALID (video_start)) {
1049 GST_DEBUG_OBJECT (aggregator, "Buffer outside segment, dropping");
1050 gst_aggregator_pad_drop_buffer (video_pad);
1051 gst_buffer_unref (video_buf);
1052 gst_object_unref (video_pad);
1056 if (self->current_video_buffer) {
1057 /* If we already have a video buffer just update the current end running
1058 * time accordingly. That's what was missing and why we got here */
1059 self->current_video_running_time_end = video_start;
1060 gst_buffer_unref (video_buf);
1061 GST_LOG_OBJECT (self,
1062 "Determined end timestamp for video buffer: %p %" GST_TIME_FORMAT
1063 " - %" GST_TIME_FORMAT, self->current_video_buffer,
1064 GST_TIME_ARGS (self->current_video_running_time),
1065 GST_TIME_ARGS (self->current_video_running_time_end));
1067 /* Otherwise we had no buffer queued currently. Let's do that now
1068 * so that we can collect captions for it */
1069 gst_buffer_replace (&self->current_video_buffer, video_buf);
1070 self->current_video_running_time = video_start;
1071 gst_aggregator_pad_drop_buffer (video_pad);
/* buffer_replace above took its own ref, so the peeked ref can go;
 * the reads of video_buf below are backed by current_video_buffer's ref */
1072 gst_buffer_unref (video_buf);
1074 if (GST_BUFFER_DURATION_IS_VALID (video_buf)) {
1075 GstClockTime end_time =
1076 GST_BUFFER_PTS (video_buf) + GST_BUFFER_DURATION (video_buf);
1077 if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
1078 end_time = video_pad->segment.stop;
1079 self->current_video_running_time_end =
1080 gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
/* No duration: derive the frame length from the negotiated framerate */
1082 } else if (self->video_fps_n != 0 && self->video_fps_d != 0) {
1083 GstClockTime end_time =
1084 GST_BUFFER_PTS (video_buf) + gst_util_uint64_scale_int (GST_SECOND,
1085 self->video_fps_d, self->video_fps_n);
1086 if (video_pad->segment.stop != -1 && end_time > video_pad->segment.stop)
1087 end_time = video_pad->segment.stop;
1088 self->current_video_running_time_end =
1089 gst_segment_to_running_time (&video_pad->segment, GST_FORMAT_TIME,
/* Unknown end: a later buffer's start will fill this in (outer branch) */
1092 self->current_video_running_time_end = GST_CLOCK_TIME_NONE;
1095 GST_LOG_OBJECT (self,
1096 "Queued new video buffer: %p %" GST_TIME_FORMAT " - %"
1097 GST_TIME_FORMAT, self->current_video_buffer,
1098 GST_TIME_ARGS (self->current_video_running_time),
1099 GST_TIME_ARGS (self->current_video_running_time_end));
1102 gst_object_unref (video_pad);
1105 /* At this point we have a video buffer queued and can start collecting
1106 * caption buffers for it */
1107 g_assert (self->current_video_buffer != NULL);
1108 g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time));
1109 g_assert (GST_CLOCK_TIME_IS_VALID (self->current_video_running_time_end));
1111 flow_ret = gst_cc_combiner_collect_captions (self, timeout);
1113 /* Only if we collected all captions we replace the current video buffer
1114 * with NULL and continue with the next one on the next call */
1115 if (flow_ret == GST_FLOW_NEED_DATA) {
1116 flow_ret = GST_FLOW_OK;
1118 gst_buffer_replace (&self->current_video_buffer, NULL);
1119 self->previous_video_running_time_end =
1120 self->current_video_running_time_end;
1121 self->current_video_running_time = self->current_video_running_time_end =
1122 GST_CLOCK_TIME_NONE;
/* GstAggregator::sink_event vfunc. CAPS on the caption pad fixes the caption
 * type (changing it later is an error); CAPS on the video pad records
 * framerate, interlace mode and the matching CDP fps entry, updates latency
 * to one frame duration, and forwards the caps downstream. SEGMENT events
 * from the video pad update the aggregator's output segment. Everything
 * chains up to the parent class. */
1129 gst_cc_combiner_sink_event (GstAggregator * aggregator,
1130 GstAggregatorPad * agg_pad, GstEvent * event)
1132 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
1134 switch (GST_EVENT_TYPE (event)) {
1135 case GST_EVENT_CAPS:{
1139 gst_event_parse_caps (event, &caps);
1140 s = gst_caps_get_structure (caps, 0);
1142 if (strcmp (GST_OBJECT_NAME (agg_pad), "caption") == 0) {
1143 GstVideoCaptionType caption_type =
1144 gst_video_caption_type_from_caps (caps);
/* Caption type may be set once; renegotiating to a new type is refused */
1146 if (self->caption_type != GST_VIDEO_CAPTION_TYPE_UNKNOWN &&
1147 caption_type != self->caption_type) {
1148 GST_ERROR_OBJECT (self, "Changing caption type is not allowed");
1150 GST_ELEMENT_ERROR (self, CORE, NEGOTIATION, (NULL),
1151 ("Changing caption type is not allowed"));
1155 self->caption_type = caption_type;
1158 const gchar *interlace_mode;
1162 gst_structure_get_fraction (s, "framerate", &fps_n, &fps_d);
1164 interlace_mode = gst_structure_get_string (s, "interlace-mode");
/* Missing interlace-mode defaults to progressive */
1166 self->progressive = !interlace_mode
1167 || !g_strcmp0 (interlace_mode, "progressive");
1169 if (fps_n != self->video_fps_n || fps_d != self->video_fps_d) {
1170 GstClockTime latency;
/* Report one frame duration of latency for the framerate change */
1172 latency = gst_util_uint64_scale (GST_SECOND, fps_d, fps_n);
1173 gst_aggregator_set_latency (aggregator, latency, latency);
1176 self->video_fps_n = fps_n;
1177 self->video_fps_d = fps_d;
1179 self->cdp_fps_entry = cdp_fps_entry_from_fps (fps_n, fps_d);
1181 gst_aggregator_set_src_caps (aggregator, caps);
1186 case GST_EVENT_SEGMENT:{
1187 if (strcmp (GST_OBJECT_NAME (agg_pad), "sink") == 0) {
1188 const GstSegment *segment;
1190 gst_event_parse_segment (event, &segment);
1191 gst_aggregator_update_segment (aggregator, segment);
1199 return GST_AGGREGATOR_CLASS (parent_class)->sink_event (aggregator, agg_pad,
/* GstAggregator::stop vfunc: resets framerate and running-time tracking,
 * releases the queued video buffer, clears collected and scheduled captions,
 * and forgets the negotiated caption type and CDP fps entry. */
1204 gst_cc_combiner_stop (GstAggregator * aggregator)
1206 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
1208 self->video_fps_n = self->video_fps_d = 0;
1209 self->current_video_running_time = self->current_video_running_time_end =
1210 self->previous_video_running_time_end = GST_CLOCK_TIME_NONE;
1211 gst_buffer_replace (&self->current_video_buffer, NULL);
1213 g_array_set_size (self->current_frame_captions, 0);
1214 self->caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
1216 gst_queue_array_clear (self->scheduled[0]);
1217 gst_queue_array_clear (self->scheduled[1]);
1218 self->cdp_fps_entry = &null_fps_entry;
1223 static GstFlowReturn
/* GstAggregator::flush vfunc: like stop but keeps the negotiated framerate
 * and caption type — resets timing state, drops the queued video buffer and
 * all pending captions, and rewinds the CDP sequence counter. */
1224 gst_cc_combiner_flush (GstAggregator * aggregator)
1226 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
1227 GstAggregatorPad *src_pad =
1228 GST_AGGREGATOR_PAD (GST_AGGREGATOR_SRC_PAD (aggregator));
1230 self->current_video_running_time = self->current_video_running_time_end =
1231 self->previous_video_running_time_end = GST_CLOCK_TIME_NONE;
1232 gst_buffer_replace (&self->current_video_buffer, NULL);
1234 g_array_set_size (self->current_frame_captions, 0);
1236 src_pad->segment.position = GST_CLOCK_TIME_NONE;
1238 self->cdp_hdr_sequence_cntr = 0;
1239 gst_queue_array_clear (self->scheduled[0]);
1240 gst_queue_array_clear (self->scheduled[1]);
1245 static GstAggregatorPad *
/* GstAggregator::create_new_pad vfunc: only honors requests for the "caption"
 * sink request-pad template. Creates a plain GstAggregatorPad named "caption"
 * and resets the caption type under the object lock. */
1246 gst_cc_combiner_create_new_pad (GstAggregator * aggregator,
1247 GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
1249 GstCCCombiner *self = GST_CCCOMBINER (aggregator);
1250 GstAggregatorPad *agg_pad;
1252 if (templ->direction != GST_PAD_SINK)
1255 if (templ->presence != GST_PAD_REQUEST)
1258 if (strcmp (templ->name_template, "caption") != 0)
1261 GST_OBJECT_LOCK (self);
1262 agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
1263 "name", "caption", "direction", GST_PAD_SINK, "template", templ, NULL);
1264 self->caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
1265 GST_OBJECT_UNLOCK (self);
/* GstAggregator::src_query vfunc: position/duration/caps/allocation queries
 * are proxied to the video sink pad's peer (the element is transparent for
 * video), ACCEPT_CAPS is answered against the src template, and everything
 * else chains up to the parent class. */
1271 gst_cc_combiner_src_query (GstAggregator * aggregator, GstQuery * query)
1273 GstPad *video_sinkpad =
1274 gst_element_get_static_pad (GST_ELEMENT_CAST (aggregator), "sink");
1277 switch (GST_QUERY_TYPE (query)) {
1278 case GST_QUERY_POSITION:
1279 case GST_QUERY_DURATION:
1281 case GST_QUERY_CAPS:
1282 case GST_QUERY_ALLOCATION:
1283 ret = gst_pad_peer_query (video_sinkpad, query);
1285 case GST_QUERY_ACCEPT_CAPS:{
1287 GstCaps *templ = gst_static_pad_template_get_caps (&srctemplate);
1289 gst_query_parse_accept_caps (query, &caps);
1290 gst_query_set_accept_caps_result (query, gst_caps_is_subset (caps,
1292 gst_caps_unref (templ);
1297 ret = GST_AGGREGATOR_CLASS (parent_class)->src_query (aggregator, query);
1301 gst_object_unref (video_sinkpad);
1307 gst_cc_combiner_sink_query (GstAggregator * aggregator,
1308 GstAggregatorPad * aggpad, GstQuery * query)
1310 GstPad *video_sinkpad =
1311 gst_element_get_static_pad (GST_ELEMENT_CAST (aggregator), "sink");
1312 GstPad *srcpad = GST_AGGREGATOR_SRC_PAD (aggregator);
1316 switch (GST_QUERY_TYPE (query)) {
1317 case GST_QUERY_POSITION:
1318 case GST_QUERY_DURATION:
1320 case GST_QUERY_ALLOCATION:
1321 if (GST_PAD_CAST (aggpad) == video_sinkpad) {
1322 ret = gst_pad_peer_query (srcpad, query);
1325 GST_AGGREGATOR_CLASS (parent_class)->sink_query (aggregator,
1329 case GST_QUERY_CAPS:
1330 if (GST_PAD_CAST (aggpad) == video_sinkpad) {
1331 ret = gst_pad_peer_query (srcpad, query);
1334 GstCaps *templ = gst_static_pad_template_get_caps (&captiontemplate);
1336 gst_query_parse_caps (query, &filter);
1340 gst_caps_intersect_full (filter, templ, GST_CAPS_INTERSECT_FIRST);
1341 gst_query_set_caps_result (query, caps);
1342 gst_caps_unref (caps);
1344 gst_query_set_caps_result (query, templ);
1346 gst_caps_unref (templ);
1350 case GST_QUERY_ACCEPT_CAPS:
1351 if (GST_PAD_CAST (aggpad) == video_sinkpad) {
1352 ret = gst_pad_peer_query (srcpad, query);
1355 GstCaps *templ = gst_static_pad_template_get_caps (&captiontemplate);
1357 gst_query_parse_accept_caps (query, &caps);
1358 gst_query_set_accept_caps_result (query, gst_caps_is_subset (caps,
1360 gst_caps_unref (templ);
1365 ret = GST_AGGREGATOR_CLASS (parent_class)->sink_query (aggregator,
1370 gst_object_unref (video_sinkpad);
1376 gst_cc_combiner_peek_next_sample (GstAggregator * agg,
1377 GstAggregatorPad * aggpad)
1379 GstAggregatorPad *caption_pad, *video_pad;
1380 GstCCCombiner *self = GST_CCCOMBINER (agg);
1381 GstSample *res = NULL;
1384 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
1385 (self), "caption"));
1387 GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
1390 if (aggpad == caption_pad) {
1391 if (self->current_frame_captions->len > 0) {
1392 GstCaps *caps = gst_pad_get_current_caps (GST_PAD (aggpad));
1393 GstBufferList *buflist = gst_buffer_list_new ();
1396 for (i = 0; i < self->current_frame_captions->len; i++) {
1397 CaptionData *caption_data =
1398 &g_array_index (self->current_frame_captions, CaptionData, i);
1399 gst_buffer_list_add (buflist, gst_buffer_ref (caption_data->buffer));
1402 res = gst_sample_new (NULL, caps, &aggpad->segment, NULL);
1403 gst_caps_unref (caps);
1405 gst_sample_set_buffer_list (res, buflist);
1406 gst_buffer_list_unref (buflist);
1408 } else if (aggpad == video_pad) {
1409 if (self->current_video_buffer) {
1410 GstCaps *caps = gst_pad_get_current_caps (GST_PAD (aggpad));
1411 res = gst_sample_new (self->current_video_buffer,
1412 caps, &aggpad->segment, NULL);
1413 gst_caps_unref (caps);
1418 gst_object_unref (caption_pad);
1421 gst_object_unref (video_pad);
1426 static GstStateChangeReturn
1427 gst_cc_combiner_change_state (GstElement * element, GstStateChange transition)
1429 GstCCCombiner *self = GST_CCCOMBINER (element);
1431 switch (transition) {
1432 case GST_STATE_CHANGE_READY_TO_PAUSED:
1433 self->schedule = self->prop_schedule;
1434 self->max_scheduled = self->prop_max_scheduled;
1435 self->output_padding = self->prop_output_padding;
1441 return GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1445 gst_cc_combiner_set_property (GObject * object, guint prop_id,
1446 const GValue * value, GParamSpec * pspec)
1448 GstCCCombiner *self = GST_CCCOMBINER (object);
1452 self->prop_schedule = g_value_get_boolean (value);
1454 case PROP_MAX_SCHEDULED:
1455 self->prop_max_scheduled = g_value_get_uint (value);
1457 case PROP_OUTPUT_PADDING:
1458 self->prop_output_padding = g_value_get_boolean (value);
1461 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1467 gst_cc_combiner_get_property (GObject * object, guint prop_id, GValue * value,
1470 GstCCCombiner *self = GST_CCCOMBINER (object);
1474 g_value_set_boolean (value, self->prop_schedule);
1476 case PROP_MAX_SCHEDULED:
1477 g_value_set_uint (value, self->prop_max_scheduled);
1479 case PROP_OUTPUT_PADDING:
1480 g_value_set_boolean (value, self->prop_output_padding);
1483 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1489 gst_cc_combiner_class_init (GstCCCombinerClass * klass)
1491 GObjectClass *gobject_class;
1492 GstElementClass *gstelement_class;
1493 GstAggregatorClass *aggregator_class;
1495 gobject_class = (GObjectClass *) klass;
1496 gstelement_class = (GstElementClass *) klass;
1497 aggregator_class = (GstAggregatorClass *) klass;
1499 gobject_class->finalize = gst_cc_combiner_finalize;
1500 gobject_class->set_property = gst_cc_combiner_set_property;
1501 gobject_class->get_property = gst_cc_combiner_get_property;
1503 gst_element_class_set_static_metadata (gstelement_class,
1504 "Closed Caption Combiner",
1506 "Combines GstVideoCaptionMeta with video input stream",
1507 "Sebastian Dröge <sebastian@centricular.com>");
1510 * GstCCCombiner:schedule:
1512 * Controls whether caption buffers should be smoothly scheduled
1513 * in order to have exactly one per output video buffer.
1515 * This can involve rewriting input captions, for example when the
1516 * input is CDP sequence counters are rewritten, time codes are dropped
1517 * and potentially re-injected if the input video frame had a time code
1520 * Caption buffers may also get split up in order to assign captions to
1521 * the correct field when the input is interlaced.
1523 * This can also imply that the input will drift from synchronization,
1524 * when there isn't enough padding in the input stream to catch up. In
1525 * that case the element will start dropping old caption buffers once
1526 * the number of buffers in its internal queue reaches
1527 * #GstCCCombiner:max-scheduled.
1529 * When this is set to %FALSE, the behaviour of this element is essentially
1534 g_object_class_install_property (G_OBJECT_CLASS (klass),
1535 PROP_SCHEDULE, g_param_spec_boolean ("schedule",
1537 "Schedule caption buffers so that exactly one is output per video frame",
1539 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1540 GST_PARAM_MUTABLE_READY));
1543 * GstCCCombiner:max-scheduled:
1545 * Controls the number of scheduled buffers after which the element
1546 * will start dropping old buffers from its internal queues. See
1547 * #GstCCCombiner:schedule.
1551 g_object_class_install_property (G_OBJECT_CLASS (klass),
1552 PROP_MAX_SCHEDULED, g_param_spec_uint ("max-scheduled",
1554 "Maximum number of buffers to queue for scheduling", 0, G_MAXUINT,
1555 DEFAULT_MAX_SCHEDULED,
1556 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1557 GST_PARAM_MUTABLE_READY));
1560 * GstCCCombiner:output-padding:
1562 * When #GstCCCombiner:schedule is %TRUE, this property controls
1563 * whether the output closed caption meta stream will be padded.
1567 g_object_class_install_property (G_OBJECT_CLASS (klass),
1568 PROP_OUTPUT_PADDING, g_param_spec_boolean ("output-padding",
1570 "Whether to output padding packets when schedule=true",
1571 DEFAULT_OUTPUT_PADDING,
1572 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
1573 GST_PARAM_MUTABLE_READY));
1576 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
1577 &sinktemplate, GST_TYPE_AGGREGATOR_PAD);
1578 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
1579 &srctemplate, GST_TYPE_AGGREGATOR_PAD);
1580 gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
1581 &captiontemplate, GST_TYPE_AGGREGATOR_PAD);
1583 gstelement_class->change_state =
1584 GST_DEBUG_FUNCPTR (gst_cc_combiner_change_state);
1586 aggregator_class->aggregate = gst_cc_combiner_aggregate;
1587 aggregator_class->stop = gst_cc_combiner_stop;
1588 aggregator_class->flush = gst_cc_combiner_flush;
1589 aggregator_class->create_new_pad = gst_cc_combiner_create_new_pad;
1590 aggregator_class->sink_event = gst_cc_combiner_sink_event;
1591 aggregator_class->negotiate = NULL;
1592 aggregator_class->get_next_time = gst_aggregator_simple_get_next_time;
1593 aggregator_class->src_query = gst_cc_combiner_src_query;
1594 aggregator_class->sink_query = gst_cc_combiner_sink_query;
1595 aggregator_class->peek_next_sample = gst_cc_combiner_peek_next_sample;
1597 GST_DEBUG_CATEGORY_INIT (gst_cc_combiner_debug, "cccombiner",
1598 0, "Closed Caption combiner");
1602 gst_cc_combiner_init (GstCCCombiner * self)
1604 GstPadTemplate *templ;
1605 GstAggregatorPad *agg_pad;
1607 templ = gst_static_pad_template_get (&sinktemplate);
1608 agg_pad = g_object_new (GST_TYPE_AGGREGATOR_PAD,
1609 "name", "sink", "direction", GST_PAD_SINK, "template", templ, NULL);
1610 gst_object_unref (templ);
1611 gst_element_add_pad (GST_ELEMENT_CAST (self), GST_PAD_CAST (agg_pad));
1613 self->current_frame_captions =
1614 g_array_new (FALSE, FALSE, sizeof (CaptionData));
1615 g_array_set_clear_func (self->current_frame_captions,
1616 (GDestroyNotify) caption_data_clear);
1618 self->current_video_running_time = self->current_video_running_time_end =
1619 self->previous_video_running_time_end = GST_CLOCK_TIME_NONE;
1621 self->caption_type = GST_VIDEO_CAPTION_TYPE_UNKNOWN;
1623 self->prop_schedule = DEFAULT_SCHEDULE;
1624 self->prop_max_scheduled = DEFAULT_MAX_SCHEDULED;
1625 self->prop_output_padding = DEFAULT_OUTPUT_PADDING;
1626 self->scheduled[0] =
1627 gst_queue_array_new_for_struct (sizeof (CaptionQueueItem), 0);
1628 self->scheduled[1] =
1629 gst_queue_array_new_for_struct (sizeof (CaptionQueueItem), 0);
1630 gst_queue_array_set_clear_func (self->scheduled[0],
1631 (GDestroyNotify) clear_scheduled);
1632 gst_queue_array_set_clear_func (self->scheduled[1],
1633 (GDestroyNotify) clear_scheduled);
1634 self->cdp_hdr_sequence_cntr = 0;
1635 self->cdp_fps_entry = &null_fps_entry;