3 * Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
27 #include <gst/base/base.h>
28 #include <gst/video/video.h>
32 #include "gstccconverter.h"
/* Element boilerplate for "ccconverter": converts between closed-caption
 * formats (CEA-608 raw, CEA-608 S334-1A, CEA-708 cc_data, CEA-708 CDP).
 * Sink and src templates share CC_CAPS, ordered from most to least
 * information-preserving so caps negotiation prefers lossless paths. */
34 GST_DEBUG_CATEGORY_STATIC (gst_cc_converter_debug);
35 #define GST_CAT_DEFAULT gst_cc_converter_debug
38 * GstCCConverterCDPMode:
39 * @GST_CC_CONVERTER_CDP_MODE_TIME_CODE: Store time code information in CDP packets
40 * @GST_CC_CONVERTER_CDP_MODE_CC_DATA: Store CC data in CDP packets
41 * @GST_CC_CONVERTER_CDP_MODE_CC_SVC_INFO: Store CC service information in CDP packets
/* Default: emit every optional CDP section (time code, cc_data, svc info). */
52 #define DEFAULT_CDP_MODE (GST_CC_CONVERTER_CDP_MODE_TIME_CODE | GST_CC_CONVERTER_CDP_MODE_CC_DATA | GST_CC_CONVERTER_CDP_MODE_CC_SVC_INFO)
54 /* Ordered by the amount of information they can contain */
56 "closedcaption/x-cea-708,format=(string) cdp; " \
57 "closedcaption/x-cea-708,format=(string) cc_data; " \
58 "closedcaption/x-cea-608,format=(string) s334-1a; " \
59 "closedcaption/x-cea-608,format=(string) raw"
/* Dereference @v when non-NULL, otherwise yield 0. */
61 #define VAL_OR_0(v) ((v) ? (*(v)) : 0)
63 static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE ("sink",
66 GST_STATIC_CAPS (CC_CAPS));
68 static GstStaticPadTemplate srctemplate = GST_STATIC_PAD_TEMPLATE ("src",
71 GST_STATIC_CAPS (CC_CAPS));
73 #define parent_class gst_cc_converter_parent_class
74 G_DEFINE_TYPE (GstCCConverter, gst_cc_converter, GST_TYPE_BASE_TRANSFORM);
75 GST_ELEMENT_REGISTER_DEFINE (ccconverter, "ccconverter",
76 GST_RANK_NONE, GST_TYPE_CCCONVERTER);
78 #define GST_TYPE_CC_CONVERTER_CDP_MODE (gst_cc_converter_cdp_mode_get_type())
/* Lazily registers the GstCCConverterCDPMode flags GType (thread-safe via
 * g_once_init_enter/leave) and returns it.  Flag nicks: "time-code",
 * "cc-data", "cc-svc-info". */
80 gst_cc_converter_cdp_mode_get_type (void)
82 static const GFlagsValue values[] = {
83 {GST_CC_CDP_MODE_TIME_CODE,
84 "Store time code information in CDP packets", "time-code"},
85 {GST_CC_CDP_MODE_CC_DATA, "Store CC data in CDP packets",
87 {GST_CC_CDP_MODE_CC_SVC_INFO,
88 "Store CC service information in CDP packets", "cc-svc-info"},
93 if (g_once_init_enter ((gsize *) & id)) {
96 _id = g_flags_register_static ("GstCCConverterCDPMode", values);
98 g_once_init_leave ((gsize *) & id, _id);
/* GstBaseTransform::transform_size — only the sink direction is supported
 * (an output size cannot be mapped back to an input size).  Always reports
 * the worst case, MAX_CDP_PACKET_LEN; ::transform() later shrinks the
 * output buffer to the actual size. */
105 gst_cc_converter_transform_size (GstBaseTransform * base,
106 GstPadDirection direction,
107 GstCaps * caps, gsize size, GstCaps * othercaps, gsize * othersize)
109 /* We can't really convert from an output size to an input size */
110 if (direction != GST_PAD_SINK)
113 /* Assume worst-case here and over-allocate, and in ::transform() we then
114 * downsize the buffer as needed. The worst-case is one CDP packet, which
115 * can be up to MAX_CDP_PACKET_LEN bytes large */
117 *othersize = MAX_CDP_PACKET_LEN;
/* GstBaseTransform::transform_caps — computes which caps the opposite pad
 * can support for each structure in @caps.  CDP output is restricted to the
 * framerates listed in cdp_caps_framerate; non-CDP formats pass any input
 * framerate through unchanged.  The result is finally intersected with
 * @filter and the template caps, in template order, so negotiation prefers
 * the most information-preserving format. */
123 gst_cc_converter_transform_caps (GstBaseTransform * base,
124 GstPadDirection direction, GstCaps * caps, GstCaps * filter)
126 static GstStaticCaps non_cdp_caps =
127 GST_STATIC_CAPS ("closedcaption/x-cea-708, format=(string)cc_data; "
128 "closedcaption/x-cea-608,format=(string) s334-1a; "
129 "closedcaption/x-cea-608,format=(string) raw");
130 static GstStaticCaps cdp_caps =
131 GST_STATIC_CAPS ("closedcaption/x-cea-708, format=(string)cdp");
132 static GstStaticCaps cdp_caps_framerate =
133 GST_STATIC_CAPS ("closedcaption/x-cea-708, format=(string)cdp, "
134 "framerate=(fraction){60/1, 60000/1001, 50/1, 30/1, 30000/1001, 25/1, 24/1, 24000/1001}");
136 GstCCConverter *self = GST_CCCONVERTER (base);
138 GstCaps *res, *templ;
140 templ = gst_pad_get_pad_template_caps (base->srcpad);
142 GST_DEBUG_OBJECT (self, "direction %s from caps %" GST_PTR_FORMAT,
143 direction == GST_PAD_SRC ? "src" : "sink", caps);
145 res = gst_caps_new_empty ();
146 n = gst_caps_get_size (caps);
147 for (i = 0; i < n; i++) {
148 const GstStructure *s = gst_caps_get_structure (caps, i);
149 const GValue *framerate = gst_structure_get_value (s, "framerate");
151 if (gst_structure_has_name (s, "closedcaption/x-cea-608")) {
153 if (direction == GST_PAD_SRC) {
154 /* SRC direction: We produce upstream caps
156 * Downstream wanted CEA608 caps. If it had a framerate, we
157 * also need upstream to provide exactly that same framerate
158 * and otherwise we don't care.
160 * We can convert everything to CEA608.
162 res = gst_caps_merge (res, gst_static_caps_get (&cdp_caps_framerate));
164 /* we can only keep the same framerate for non-cdp */
167 tmp = gst_caps_make_writable (gst_static_caps_get (&non_cdp_caps));
168 gst_caps_set_value (tmp, "framerate", framerate);
169 res = gst_caps_merge (res, tmp);
171 res = gst_caps_merge (res, gst_static_caps_get (&non_cdp_caps));
174 /* SINK: We produce downstream caps
176 * Upstream provided CEA608 caps. We can convert that to CDP if
177 * also a CDP compatible framerate was provided, and we can convert
178 * it to anything else regardless.
180 * If upstream provided a framerate we can pass that through, possibly
181 * filtered for the CDP case.
187 /* Create caps that contain the intersection of all framerates with
188 * the CDP allowed framerates */
190 gst_caps_make_writable (gst_static_caps_get
191 (&cdp_caps_framerate));
192 t = gst_caps_get_structure (tmp, 0);
193 gst_structure_set_name (t, "closedcaption/x-cea-608");
194 gst_structure_remove_field (t, "format");
195 if (gst_structure_can_intersect (s, t)) {
196 gst_caps_unref (tmp);
199 gst_caps_make_writable (gst_static_caps_get
200 (&cdp_caps_framerate));
202 res = gst_caps_merge (res, tmp);
204 gst_caps_unref (tmp);
206 /* And we can convert to everything else with the given framerate */
207 tmp = gst_caps_make_writable (gst_static_caps_get (&non_cdp_caps));
208 gst_caps_set_value (tmp, "framerate", framerate);
209 res = gst_caps_merge (res, tmp);
211 res = gst_caps_merge (res, gst_static_caps_get (&non_cdp_caps));
214 } else if (gst_structure_has_name (s, "closedcaption/x-cea-708")) {
215 if (direction == GST_PAD_SRC) {
216 /* SRC direction: We produce upstream caps
218 * Downstream wanted CEA708 caps. If downstream wants *only* CDP we
219 * either need CDP from upstream, or anything else with a CDP
221 * If downstream also wants non-CDP we can accept anything.
223 * We pass through any framerate as-is, except for filtering
224 * for CDP framerates if downstream wants only CDP.
227 if (g_strcmp0 (gst_structure_get_string (s, "format"), "cdp") == 0) {
228 /* Downstream wants only CDP */
230 /* We need CDP from upstream in that case */
231 res = gst_caps_merge (res, gst_static_caps_get (&cdp_caps_framerate));
233 /* Or anything else with a CDP framerate */
237 const GValue *cdp_framerate;
239 /* Create caps that contain the intersection of all framerates with
240 * the CDP allowed framerates */
242 gst_caps_make_writable (gst_static_caps_get
243 (&cdp_caps_framerate));
244 t = gst_caps_get_structure (tmp, 0);
246 /* There's an intersection between the framerates so we can convert
247 * into CDP with exactly those framerates from anything else */
248 cdp_framerate = gst_structure_get_value (t, "framerate");
249 tmp = gst_caps_make_writable (gst_static_caps_get (&non_cdp_caps));
250 gst_caps_set_value (tmp, "framerate", cdp_framerate);
251 res = gst_caps_merge (res, tmp);
253 GstCaps *tmp, *cdp_caps;
254 const GValue *cdp_framerate;
256 /* Get all CDP framerates, we can accept anything that has those
258 cdp_caps = gst_static_caps_get (&cdp_caps_framerate);
260 gst_structure_get_value (gst_caps_get_structure (cdp_caps, 0),
263 tmp = gst_caps_make_writable (gst_static_caps_get (&non_cdp_caps));
264 gst_caps_set_value (tmp, "framerate", cdp_framerate);
265 gst_caps_unref (cdp_caps);
267 res = gst_caps_merge (res, tmp);
270 /* Downstream wants not only CDP, we can do everything */
271 res = gst_caps_merge (res, gst_static_caps_get (&cdp_caps_framerate));
273 /* we can only keep the same framerate for non-cdp */
276 tmp = gst_caps_make_writable (gst_static_caps_get (&non_cdp_caps));
277 gst_caps_set_value (tmp, "framerate", framerate);
278 res = gst_caps_merge (res, tmp);
280 res = gst_caps_merge (res, gst_static_caps_get (&non_cdp_caps));
286 /* SINK: We produce downstream caps
288 * Upstream provided CEA708 caps. If upstream provided CDP we can
289 * output CDP, no matter what (-> passthrough). If upstream did not
290 * provide CDP, we can output CDP only if the framerate fits.
291 * We can always produce everything else apart from CDP.
293 * If upstream provided a framerate we pass that through for non-CDP
294 * output, and pass it through filtered for CDP output.
297 if (gst_structure_can_intersect (s,
298 gst_caps_get_structure (gst_static_caps_get (&cdp_caps), 0))) {
299 /* Upstream provided CDP caps, we can do everything independent of
301 res = gst_caps_merge (res, gst_static_caps_get (&cdp_caps_framerate));
302 } else if (framerate) {
303 const GValue *cdp_framerate;
306 /* Upstream did not provide CDP. We can only do CDP if upstream
307 * happened to have a CDP framerate */
309 /* Create caps that contain the intersection of all framerates with
310 * the CDP allowed framerates */
312 gst_caps_make_writable (gst_static_caps_get
313 (&cdp_caps_framerate));
314 t = gst_caps_get_structure (tmp, 0);
316 /* There's an intersection between the framerates so we can convert
317 * into CDP with exactly those framerates */
318 cdp_framerate = gst_structure_get_value (t, "framerate");
319 if (gst_value_intersect (NULL, cdp_framerate, framerate)) {
320 gst_caps_set_value (tmp, "framerate", cdp_framerate);
322 res = gst_caps_merge (res, tmp);
324 gst_clear_caps (&tmp);
327 /* We can always convert CEA708 to all non-CDP formats */
329 /* we can only keep the same framerate for non-cdp */
332 tmp = gst_caps_make_writable (gst_static_caps_get (&non_cdp_caps));
333 gst_caps_set_value (tmp, "framerate", framerate);
334 res = gst_caps_merge (res, tmp);
336 res = gst_caps_merge (res, gst_static_caps_get (&non_cdp_caps));
340 g_assert_not_reached ();
344 GST_DEBUG_OBJECT (self, "pre filter caps %" GST_PTR_FORMAT, res);
346 /* We can convert anything into anything but it might involve loss of
347 * information so always filter according to the order in our template caps
/* NOTE(review): @filter is reassigned below after intersecting with the
 * template caps; the original filter ref handling happens in elided lines —
 * verify ref counting against the full file. */
351 filter = gst_caps_intersect_full (templ, filter, GST_CAPS_INTERSECT_FIRST);
353 tmp = gst_caps_intersect_full (filter, res, GST_CAPS_INTERSECT_FIRST);
354 gst_caps_unref (res);
358 gst_caps_unref (templ);
360 GST_DEBUG_OBJECT (self, "Transformed in direction %s caps %" GST_PTR_FORMAT,
361 direction == GST_PAD_SRC ? "src" : "sink", caps);
362 GST_DEBUG_OBJECT (self, "filter %" GST_PTR_FORMAT, filter);
363 GST_DEBUG_OBJECT (self, "to %" GST_PTR_FORMAT, res);
365 gst_clear_caps (&filter);
/* GstBaseTransform::fixate_caps — prefers passthrough (incaps subset of
 * outcaps), otherwise picks output caps in template order, then restores
 * the input framerate on the fixated output: exact passthrough when the
 * output structure has no framerate field, nearest-fraction fixation when
 * it does. */
371 gst_cc_converter_fixate_caps (GstBaseTransform * base,
372 GstPadDirection direction, GstCaps * incaps, GstCaps * outcaps)
374 GstCCConverter *self = GST_CCCONVERTER (base);
375 const GstStructure *s;
377 const GValue *framerate;
378 GstCaps *intersection, *templ;
380 GST_DEBUG_OBJECT (self, "Fixating in direction %s incaps %" GST_PTR_FORMAT,
381 direction == GST_PAD_SRC ? "src" : "sink", incaps);
382 GST_DEBUG_OBJECT (self, "and outcaps %" GST_PTR_FORMAT, outcaps);
384 /* Prefer passthrough if we can */
385 if (gst_caps_is_subset (incaps, outcaps)) {
386 gst_caps_unref (outcaps);
387 return GST_BASE_TRANSFORM_CLASS (parent_class)->fixate_caps (base,
388 direction, incaps, gst_caps_ref (incaps));
391 /* Otherwise prefer caps in the order of our template caps */
392 templ = gst_pad_get_pad_template_caps (base->srcpad);
394 gst_caps_intersect_full (templ, outcaps, GST_CAPS_INTERSECT_FIRST);
395 gst_caps_unref (outcaps);
396 outcaps = intersection;
399 GST_BASE_TRANSFORM_CLASS (parent_class)->fixate_caps (base, direction,
402 s = gst_caps_get_structure (incaps, 0);
403 framerate = gst_structure_get_value (s, "framerate");
404 outcaps = gst_caps_make_writable (outcaps);
405 t = gst_caps_get_structure (outcaps, 0);
407 /* remove any output framerate that might've been added by basetransform
408 * due to intersecting with downstream */
409 gst_structure_remove_field (t, "framerate");
411 /* or passthrough the input framerate if possible */
414 n = gst_value_get_fraction_numerator (framerate);
415 d = gst_value_get_fraction_denominator (framerate);
417 if (gst_structure_has_field (t, "framerate"))
418 gst_structure_fixate_field_nearest_fraction (t, "framerate", n, d);
420 gst_structure_set (t, "framerate", GST_TYPE_FRACTION, n, d, NULL);
423 GST_DEBUG_OBJECT (self,
424 "Fixated caps %" GST_PTR_FORMAT " to %" GST_PTR_FORMAT, incaps, outcaps);
/* GstBaseTransform::set_caps — caches the negotiated in/out caption types
 * and framerates on @self (0/0 when no framerate field is present), resets
 * the running output timecode, and enables passthrough whenever in/out caps
 * can intersect (same media type and format). */
430 gst_cc_converter_set_caps (GstBaseTransform * base, GstCaps * incaps,
433 GstCCConverter *self = GST_CCCONVERTER (base);
434 const GstStructure *s;
435 gboolean passthrough;
437 self->input_caption_type = gst_video_caption_type_from_caps (incaps);
438 self->output_caption_type = gst_video_caption_type_from_caps (outcaps);
440 if (self->input_caption_type == GST_VIDEO_CAPTION_TYPE_UNKNOWN ||
441 self->output_caption_type == GST_VIDEO_CAPTION_TYPE_UNKNOWN)
444 s = gst_caps_get_structure (incaps, 0);
445 if (!gst_structure_get_fraction (s, "framerate", &self->in_fps_n,
447 self->in_fps_n = self->in_fps_d = 0;
449 s = gst_caps_get_structure (outcaps, 0);
450 if (!gst_structure_get_fraction (s, "framerate", &self->out_fps_n,
452 self->out_fps_n = self->out_fps_d = 0;
454 gst_video_time_code_clear (&self->current_output_timecode);
456 /* Caps can be different but we can passthrough as long as they can
457 * intersect, i.e. have same caps name and format */
458 passthrough = gst_caps_can_intersect (incaps, outcaps);
459 gst_base_transform_set_passthrough (base, passthrough);
461 GST_DEBUG_OBJECT (self,
462 "Got caps %" GST_PTR_FORMAT " to %" GST_PTR_FORMAT " (passthrough %d)",
463 incaps, outcaps, passthrough);
/* Error path (label elided in this view): invalid/unknown caption caps. */
469 GST_ERROR_OBJECT (self,
470 "Invalid caps: in %" GST_PTR_FORMAT " out: %" GST_PTR_FORMAT, incaps,
/* Computes the in->out frame-count scale factor as the fraction
 * (in_fps_d/in_fps_n) * (out_fps_n/out_fps_d).  When either rate is
 * unset the scale is the identity (handled in the elided early-return).
 * NOTE(review): the guard mixes in_fps_n with out_fps_d; equivalent in
 * practice since set_caps() zeroes n and d together, but asymmetric —
 * verify intent. */
477 get_framerate_output_scale (GstCCConverter * self,
478 const struct cdp_fps_entry *in_fps_entry, gint * scale_n, gint * scale_d)
480 if (self->in_fps_n == 0 || self->out_fps_d == 0) {
486 /* compute the relative rates of the two framerates */
487 if (!gst_util_fraction_multiply (in_fps_entry->fps_d, in_fps_entry->fps_n,
488 self->out_fps_n, self->out_fps_d, scale_n, scale_d))
489 /* we should never overflow */
490 g_assert_not_reached ();
/* Rescales time code @tc by scale_n/scale_d into @out at out_fps_n/d.
 * Falls back to @tc's own framerate when out_fps is 0/0 (only legal with
 * identity scale, enforced by the g_return_val_if_fail).  Drop-frame flag
 * is added/removed to match the 29.97/59.94 convention, and the loop below
 * skips forward over invalid (dropped) frame numbers.  Returns FALSE
 * (elided) when @tc is absent or has no framerate. */
494 interpolate_time_code_with_framerate (GstCCConverter * self,
495 const GstVideoTimeCode * tc, gint out_fps_n, gint out_fps_d,
496 gint scale_n, gint scale_d, GstVideoTimeCode * out)
499 gint output_n, output_d;
501 GstVideoTimeCodeFlags flags;
503 g_return_val_if_fail (out != NULL, FALSE);
504 /* out_n/d can only be 0 if scale_n/d are 1/1 */
505 g_return_val_if_fail ((scale_n == 1 && scale_d == 1) || (out_fps_n != 0
506 && out_fps_d != 0), FALSE);
508 if (!tc || tc->config.fps_n == 0)
511 if (!gst_util_fraction_multiply (tc->frames, 1, scale_n, scale_d, &output_n,
513 /* we should never overflow */
514 g_assert_not_reached ();
/* tc_str is freed in an elided line after the trace below. */
516 tc_str = gst_video_time_code_to_string (tc);
517 GST_TRACE_OBJECT (self, "interpolating time code %s with scale %d/%d "
518 "to frame %d/%d", tc_str, scale_n, scale_d, output_n, output_d);
521 if (out_fps_n == 0 || out_fps_d == 0) {
522 out_fps_n = tc->config.fps_n;
523 out_fps_d = tc->config.fps_d;
526 flags = tc->config.flags;
527 if ((flags & GST_VIDEO_TIME_CODE_FLAGS_DROP_FRAME) != 0 && out_fps_d != 1001
528 && out_fps_n != 60000 && out_fps_n != 30000) {
529 flags &= ~GST_VIDEO_TIME_CODE_FLAGS_DROP_FRAME;
530 } else if ((flags & GST_VIDEO_TIME_CODE_FLAGS_DROP_FRAME) == 0
531 && out_fps_d == 1001 && (out_fps_n == 60000 || out_fps_n == 30000)) {
532 /* XXX: theoretically, not quite correct however this is an assumption
533 * we have elsewhere that these framerates are always drop-framed */
534 flags |= GST_VIDEO_TIME_CODE_FLAGS_DROP_FRAME;
537 output_frame = output_n / output_d;
539 *out = (GstVideoTimeCode) GST_VIDEO_TIME_CODE_INIT;
541 /* here we try to find the next available valid timecode. The dropped
542 * (when they exist) frames in time codes are that the beginning of each
544 gst_video_time_code_clear (out);
545 gst_video_time_code_init (out, out_fps_n, out_fps_d,
546 tc->config.latest_daily_jam, flags, tc->hours, tc->minutes,
547 tc->seconds, output_frame, tc->field_count);
/* output_frame is incremented in an elided line inside this do/while. */
549 } while ((flags & GST_VIDEO_TIME_CODE_FLAGS_DROP_FRAME) != 0
550 && output_frame < 10 && !gst_video_time_code_is_valid (out));
552 tc_str = gst_video_time_code_to_string (out);
553 GST_TRACE_OBJECT (self, "interpolated to %s", tc_str);
/* Decides whether enough input frames have accumulated (relative to output
 * frames produced, compared at their respective framerates) to emit one
 * output buffer; on success interpolates @in_tc into @out_tc at the output
 * rate.  Resets both frame counters when input and output positions align
 * exactly; returns FALSE (elided) while output would run ahead of input. */
560 can_take_buffer (GstCCConverter * self,
561 const struct cdp_fps_entry *in_fps_entry,
562 const struct cdp_fps_entry *out_fps_entry,
563 const GstVideoTimeCode * in_tc, GstVideoTimeCode * out_tc)
565 int input_frame_n, input_frame_d, output_frame_n, output_frame_d;
566 int output_time_cmp, scale_n, scale_d;
568 /* TODO: handle input discont */
570 if (self->in_fps_n == 0) {
571 input_frame_n = self->input_frames;
574 /* compute the relative frame count for each */
575 if (!gst_util_fraction_multiply (self->in_fps_d, self->in_fps_n,
576 self->input_frames, 1, &input_frame_n, &input_frame_d))
577 /* we should never overflow */
578 g_assert_not_reached ();
/* BUG(review): this branch computes the OUTPUT frame position from
 * self->output_frames / self->out_fps_*, yet the condition tests
 * self->in_fps_n.  It almost certainly should be self->out_fps_n == 0,
 * mirroring the input branch above — with the current check, an unset
 * output rate combined with a set input rate feeds out_fps_n/d = 0 into
 * gst_util_fraction_multiply below.  Fix and verify against upstream. */
581 if (self->in_fps_n == 0) {
582 output_frame_n = self->output_frames;
585 if (!gst_util_fraction_multiply (self->out_fps_d, self->out_fps_n,
586 self->output_frames, 1, &output_frame_n, &output_frame_d))
587 /* we should never overflow */
588 g_assert_not_reached ();
591 output_time_cmp = gst_util_fraction_compare (input_frame_n, input_frame_d,
592 output_frame_n, output_frame_d);
594 if (output_time_cmp == 0) {
595 self->output_frames = 0;
596 self->input_frames = 0;
599 in_fps_entry = cdp_fps_entry_from_fps (self->in_fps_n, self->in_fps_d);
600 if (!in_fps_entry || in_fps_entry->fps_n == 0)
601 g_assert_not_reached ();
603 /* compute the relative rates of the two framerates */
604 get_framerate_output_scale (self, in_fps_entry, &scale_n, &scale_d);
606 GST_TRACE_OBJECT (self, "performing conversion at scale %d/%d, "
607 "time comparison %i", scale_n, scale_d, output_time_cmp);
609 if (output_time_cmp < 0) {
610 /* we can't generate an output yet */
613 interpolate_time_code_with_framerate (self, in_tc, out_fps_entry->fps_n,
614 out_fps_entry->fps_d, scale_n, scale_d, out_tc);
/* Wraps @cc_data into a CDP packet in @cdp using the element's cdp_mode,
 * stamping the current header sequence counter and incrementing it for the
 * next packet.  Returns the packet length (via the elided return of @ret). */
620 convert_cea708_cc_data_cea708_cdp_internal (GstCCConverter * self,
621 const guint8 * cc_data, guint cc_data_len, guint8 * cdp, guint cdp_len,
622 const GstVideoTimeCode * tc, const struct cdp_fps_entry *fps_entry)
626 ret = convert_cea708_cc_data_to_cdp (GST_OBJECT (self),
627 (GstCCCDPMode) self->cdp_mode, self->cdp_hdr_sequence_cntr, cc_data,
628 cc_data_len, cdp, cdp_len, tc, fps_entry);
629 self->cdp_hdr_sequence_cntr++;
/* Parses one incoming CDP buffer: extracts cc_data (plus optional time code
 * into @out_tc and the detected frame rate into @in_fps_entry), queues the
 * cc_data on self->cc_buffer, and counts the input frame.  The input buffer
 * is mapped read-only for the duration. */
635 push_cdp_buffer (GstCCConverter * self, GstBuffer * inbuf,
636 GstVideoTimeCode * out_tc, const struct cdp_fps_entry **in_fps_entry)
638 guint8 cc_data[MAX_CDP_PACKET_LEN];
639 guint cc_data_len = 0;
643 gst_buffer_map (inbuf, &in, GST_MAP_READ);
646 convert_cea708_cdp_to_cc_data (GST_OBJECT (self), in.data, in.size,
647 cc_data, out_tc, in_fps_entry);
649 cc_buffer_push_cc_data (self->cc_buffer, cc_data, cc_data_len);
651 gst_buffer_unmap (inbuf, &in);
652 self->input_frames++;
/* raw CEA-608 (2-byte pairs) -> S334-1A (3-byte triplets).  Each pair gains
 * a leading 0x80 flag byte (field 1, line offset unknown).  Oversized input
 * is truncated with a warning; invalid sizes yield an empty output buffer. */
659 convert_cea608_raw_cea608_s334_1a (GstCCConverter * self, GstBuffer * inbuf,
665 n = gst_buffer_get_size (inbuf);
667 GST_WARNING_OBJECT (self, "Invalid raw CEA608 buffer size");
668 gst_buffer_set_size (outbuf, 0);
675 GST_WARNING_OBJECT (self, "Too many CEA608 pairs %u. Truncating to %u", n,
680 gst_buffer_set_size (outbuf, 3 * n);
682 gst_buffer_map (inbuf, &in, GST_MAP_READ);
683 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
685 /* We have to assume that each value is from the first field and
686 * don't know from which line offset it originally is */
687 for (i = 0; i < n; i++) {
688 out.data[i * 3] = 0x80;
689 out.data[i * 3 + 1] = in.data[i * 2];
690 out.data[i * 3 + 2] = in.data[i * 2 + 1];
693 gst_buffer_unmap (inbuf, &in);
694 gst_buffer_unmap (outbuf, &out);
/* raw CEA-608 (2-byte pairs) -> CEA-708 cc_data triplets.  Each pair gains
 * the 0xfc marker byte (cc_valid, cc_type = field-1 608).  Structure mirrors
 * convert_cea608_raw_cea608_s334_1a above, differing only in the marker. */
700 convert_cea608_raw_cea708_cc_data (GstCCConverter * self, GstBuffer * inbuf,
706 n = gst_buffer_get_size (inbuf);
708 GST_WARNING_OBJECT (self, "Invalid raw CEA608 buffer size");
709 gst_buffer_set_size (outbuf, 0);
716 GST_WARNING_OBJECT (self, "Too many CEA608 pairs %u. Truncating to %u", n,
721 gst_buffer_set_size (outbuf, 3 * n);
723 gst_buffer_map (inbuf, &in, GST_MAP_READ);
724 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
726 /* We have to assume that each value is from the first field and
727 * don't know from which line offset it originally is */
728 for (i = 0; i < n; i++) {
729 out.data[i * 3] = 0xfc;
730 out.data[i * 3 + 1] = in.data[i * 2];
731 out.data[i * 3 + 2] = in.data[i * 2 + 1];
734 gst_buffer_unmap (inbuf, &in);
735 gst_buffer_unmap (outbuf, &out);
/* raw CEA-608 -> CEA-708 CDP.  Queues the input pairs as field-1 data on
 * self->cc_buffer, then — once can_take_buffer() says the in/out framerate
 * accounting allows an output — drains cc_data for one output frame and
 * wraps it into a CDP packet sized to the actual payload.  Both in and out
 * framerates must map to valid CDP fps table entries (asserted). */
741 convert_cea608_raw_cea708_cdp (GstCCConverter * self, GstBuffer * inbuf,
742 GstBuffer * outbuf, const GstVideoTimeCodeMeta * tc_meta)
745 const struct cdp_fps_entry *in_fps_entry, *out_fps_entry;
746 guint cc_data_len = MAX_CDP_PACKET_LEN;
747 guint8 cc_data[MAX_CDP_PACKET_LEN];
749 in_fps_entry = cdp_fps_entry_from_fps (self->in_fps_n, self->in_fps_d);
750 if (!in_fps_entry || in_fps_entry->fps_n == 0)
751 g_assert_not_reached ();
756 n = gst_buffer_get_size (inbuf);
758 GST_WARNING_OBJECT (self, "Invalid raw CEA608 buffer size");
759 gst_buffer_set_size (outbuf, 0);
765 if (n > in_fps_entry->max_cea608_count) {
766 GST_WARNING_OBJECT (self, "Too many CEA608 pairs %u. Truncating to %u",
767 n, in_fps_entry->max_cea608_count);
768 n = in_fps_entry->max_cea608_count;
771 gst_buffer_map (inbuf, &in, GST_MAP_READ);
772 cc_buffer_push_separated (self->cc_buffer, in.data, in.size, NULL, 0, NULL,
774 gst_buffer_unmap (inbuf, &in);
775 self->input_frames++;
778 out_fps_entry = cdp_fps_entry_from_fps (self->out_fps_n, self->out_fps_d);
779 if (!out_fps_entry || out_fps_entry->fps_n == 0)
780 g_assert_not_reached ();
782 if (!can_take_buffer (self, in_fps_entry, out_fps_entry,
783 tc_meta ? &tc_meta->tc : NULL, &self->current_output_timecode))
786 cc_buffer_take_cc_data (self->cc_buffer, out_fps_entry, cc_data,
789 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
791 convert_cea708_cc_data_cea708_cdp_internal (self, cc_data, cc_data_len,
792 out.data, out.size, &self->current_output_timecode, out_fps_entry);
793 self->output_frames++;
794 gst_buffer_unmap (outbuf, &out);
797 gst_buffer_set_size (outbuf, cc_data_len);
/* S334-1A triplets -> raw CEA-608 pairs.  Only field-1 triplets (flag bit
 * 0x80 set) are kept, since raw CEA-608 cannot signal the field; the
 * `cea608` counter (incremented in an elided line) tracks how many pairs
 * were emitted and sizes the output to 2 bytes per kept pair. */
807 convert_cea608_s334_1a_cea608_raw (GstCCConverter * self, GstBuffer * inbuf,
814 n = gst_buffer_get_size (inbuf);
816 GST_WARNING_OBJECT (self, "Invalid S334-1A CEA608 buffer size");
823 GST_WARNING_OBJECT (self, "Too many S334-1A CEA608 triplets %u", n);
827 gst_buffer_map (inbuf, &in, GST_MAP_READ);
828 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
830 for (i = 0; i < n; i++) {
831 if (in.data[i * 3] & 0x80) {
832 out.data[i * 2] = in.data[i * 3 + 1];
833 out.data[i * 2 + 1] = in.data[i * 3 + 2];
838 gst_buffer_unmap (inbuf, &in);
839 gst_buffer_unmap (outbuf, &out);
841 gst_buffer_set_size (outbuf, 2 * cea608);
/* NOTE(review): the write index uses i while the final size uses cea608;
 * if any non-field-1 triplet is skipped these diverge, leaving gaps —
 * confirm against the unelided loop body. */
/* S334-1A triplets -> CEA-708 cc_data triplets.  The S334-1A field flag
 * (bit 0x80 of byte 0) maps to cc_type: 0xfc for field 1, 0xfd for field 2;
 * the two payload bytes pass through unchanged.  Output is 3 bytes per
 * input triplet. */
847 convert_cea608_s334_1a_cea708_cc_data (GstCCConverter * self, GstBuffer * inbuf,
853 n = gst_buffer_get_size (inbuf);
855 GST_WARNING_OBJECT (self, "Invalid S334-1A CEA608 buffer size");
862 GST_WARNING_OBJECT (self, "Too many S334-1A CEA608 triplets %u", n);
866 gst_buffer_set_size (outbuf, 3 * n);
868 gst_buffer_map (inbuf, &in, GST_MAP_READ);
869 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
871 for (i = 0; i < n; i++) {
872 out.data[i * 3] = (in.data[i * 3] & 0x80) ? 0xfc : 0xfd;
873 out.data[i * 3 + 1] = in.data[i * 3 + 1];
874 out.data[i * 3 + 2] = in.data[i * 3 + 2];
877 gst_buffer_unmap (inbuf, &in);
878 gst_buffer_unmap (outbuf, &out);
/* S334-1A -> CEA-708 CDP.  Splits the input triplets into field-1 and
 * field-2 byte streams (dropping 0x80/0x80 padding pairs), queues them on
 * self->cc_buffer, then emits one CDP packet per output frame once
 * can_take_buffer() permits, same drain/wrap pattern as
 * convert_cea608_raw_cea708_cdp. */
884 convert_cea608_s334_1a_cea708_cdp (GstCCConverter * self, GstBuffer * inbuf,
885 GstBuffer * outbuf, const GstVideoTimeCodeMeta * tc_meta)
888 const struct cdp_fps_entry *in_fps_entry, *out_fps_entry;
889 guint cc_data_len = MAX_CDP_PACKET_LEN;
890 guint cea608_1_len = 0, cea608_2_len = 0;
891 guint8 cc_data[MAX_CDP_PACKET_LEN];
892 guint8 cea608_1[MAX_CEA608_LEN], cea608_2[MAX_CEA608_LEN];
895 in_fps_entry = cdp_fps_entry_from_fps (self->in_fps_n, self->in_fps_d);
896 if (!in_fps_entry || in_fps_entry->fps_n == 0)
897 g_assert_not_reached ();
900 n = gst_buffer_get_size (inbuf);
902 GST_WARNING_OBJECT (self, "Invalid S334-1A CEA608 buffer size");
908 if (n > in_fps_entry->max_cea608_count) {
909 GST_WARNING_OBJECT (self, "Too many S334-1A CEA608 triplets %u", n);
910 n = in_fps_entry->max_cea608_count;
913 gst_buffer_map (inbuf, &in, GST_MAP_READ);
915 for (i = 0; i < n; i++) {
916 guint byte1 = in.data[i * 3 + 1];
917 guint byte2 = in.data[i * 3 + 2];
919 if (in.data[i * 3] & 0x80) {
/* 0x80/0x80 is CEA-608 null padding — skip it in both fields. */
920 if (byte1 != 0x80 || byte2 != 0x80) {
921 cea608_1[cea608_1_len++] = byte1;
922 cea608_1[cea608_1_len++] = byte2;
925 if (byte1 != 0x80 || byte2 != 0x80) {
926 cea608_2[cea608_2_len++] = byte1;
927 cea608_2[cea608_2_len++] = byte2;
931 gst_buffer_unmap (inbuf, &in);
933 cc_buffer_push_separated (self->cc_buffer, cea608_1, cea608_1_len,
934 cea608_2, cea608_2_len, NULL, 0);
935 self->input_frames++;
938 out_fps_entry = cdp_fps_entry_from_fps (self->out_fps_n, self->out_fps_d);
939 if (!out_fps_entry || out_fps_entry->fps_n == 0)
940 g_assert_not_reached ();
942 if (!can_take_buffer (self, in_fps_entry, out_fps_entry,
943 tc_meta ? &tc_meta->tc : NULL, &self->current_output_timecode))
946 cc_buffer_take_cc_data (self->cc_buffer, out_fps_entry, cc_data,
949 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
951 convert_cea708_cc_data_cea708_cdp_internal (self, cc_data, cc_data_len,
952 out.data, out.size, &self->current_output_timecode, out_fps_entry);
953 self->output_frames++;
954 gst_buffer_unmap (outbuf, &out);
957 gst_buffer_set_size (outbuf, cc_data_len);
/* CEA-708 cc_data triplets -> raw CEA-608 pairs.  Keeps only 0xfc triplets
 * (cc_valid, field-1 608); field-2 and DTVCC packets are discarded because
 * raw CEA-608 cannot represent them.  `cea608` counts emitted pairs
 * (incremented in an elided line) and sizes the output. */
967 convert_cea708_cc_data_cea608_raw (GstCCConverter * self, GstBuffer * inbuf,
974 n = gst_buffer_get_size (inbuf);
976 GST_WARNING_OBJECT (self, "Invalid raw CEA708 buffer size");
983 GST_WARNING_OBJECT (self, "Too many CEA708 triplets %u", n);
987 gst_buffer_map (inbuf, &in, GST_MAP_READ);
988 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
990 for (i = 0; i < n; i++) {
991 /* We can only really copy the first field here as there can't be any
992 * signalling in raw CEA608 and we must not mix the streams of different
995 if (in.data[i * 3] == 0xfc) {
996 out.data[cea608 * 2] = in.data[i * 3 + 1];
997 out.data[cea608 * 2 + 1] = in.data[i * 3 + 2];
1002 gst_buffer_unmap (inbuf, &in);
1003 gst_buffer_unmap (outbuf, &out);
1005 gst_buffer_set_size (outbuf, 2 * cea608);
/* CEA-708 cc_data triplets -> S334-1A.  Keeps CEA-608 triplets only
 * (cc_type 0xfc = field 1 -> flag 0x80, 0xfd = field 2 -> flag 0x00,
 * line offset assumed 0); DTVCC packet data is dropped.  `cea608` counts
 * kept triplets (incremented in an elided line) and sizes the output. */
1010 static GstFlowReturn
1011 convert_cea708_cc_data_cea608_s334_1a (GstCCConverter * self, GstBuffer * inbuf,
1018 n = gst_buffer_get_size (inbuf);
1020 GST_WARNING_OBJECT (self, "Invalid raw CEA708 buffer size");
1027 GST_WARNING_OBJECT (self, "Too many CEA708 triplets %u", n);
1031 gst_buffer_map (inbuf, &in, GST_MAP_READ);
1032 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
1034 for (i = 0; i < n; i++) {
1035 if (in.data[i * 3] == 0xfc || in.data[i * 3] == 0xfd) {
1036 /* We have to assume a line offset of 0 */
1037 out.data[cea608 * 3] = in.data[i * 3] == 0xfc ? 0x80 : 0x00;
1038 out.data[cea608 * 3 + 1] = in.data[i * 3 + 1];
1039 out.data[cea608 * 3 + 2] = in.data[i * 3 + 2];
1044 gst_buffer_unmap (inbuf, &in);
1045 gst_buffer_unmap (outbuf, &out);
1047 gst_buffer_set_size (outbuf, 3 * cea608);
/* CEA-708 cc_data -> CDP.  Queues the whole input cc_data on
 * self->cc_buffer (counting one input frame), then once can_take_buffer()
 * allows an output, drains one frame's worth of cc_data and wraps it into
 * a CDP packet with the current output timecode.  Both negotiated
 * framerates must exist in the CDP fps table (asserted). */
1052 static GstFlowReturn
1053 convert_cea708_cc_data_cea708_cdp (GstCCConverter * self, GstBuffer * inbuf,
1054 GstBuffer * outbuf, const GstVideoTimeCodeMeta * tc_meta)
1057 const struct cdp_fps_entry *in_fps_entry, *out_fps_entry;
1058 guint in_cc_data_len;
1059 guint cc_data_len = MAX_CDP_PACKET_LEN;
1060 guint8 cc_data[MAX_CDP_PACKET_LEN];
1064 gst_buffer_map (inbuf, &in, GST_MAP_READ);
1065 in_cc_data = in.data;
1066 in_cc_data_len = in.size;
1067 self->input_frames++;
1073 in_fps_entry = cdp_fps_entry_from_fps (self->in_fps_n, self->in_fps_d);
1074 if (!in_fps_entry || in_fps_entry->fps_n == 0)
1075 g_assert_not_reached ();
1077 out_fps_entry = cdp_fps_entry_from_fps (self->out_fps_n, self->out_fps_d);
1078 if (!out_fps_entry || out_fps_entry->fps_n == 0)
1079 g_assert_not_reached ();
1081 cc_buffer_push_cc_data (self->cc_buffer, in_cc_data, in_cc_data_len);
1083 gst_buffer_unmap (inbuf, &in);
1085 if (!can_take_buffer (self, in_fps_entry, out_fps_entry,
1086 tc_meta ? &tc_meta->tc : NULL, &self->current_output_timecode))
1089 cc_buffer_take_cc_data (self->cc_buffer, out_fps_entry, cc_data,
1092 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
1094 convert_cea708_cc_data_cea708_cdp_internal (self, cc_data, cc_data_len,
1095 out.data, out.size, &self->current_output_timecode, out_fps_entry);
1096 self->output_frames++;
1097 gst_buffer_unmap (outbuf, &out);
1100 gst_buffer_set_size (outbuf, cc_data_len);
/* CDP -> raw CEA-608.  Parses the CDP via push_cdp_buffer() (which also
 * yields the embedded time code and detected fps); falls back to the input
 * fps entry when the output framerate is not a CDP rate.  Drains only the
 * field-1 CEA-608 channel into the output buffer, and attaches/advances
 * the interpolated output timecode when the input had no timecode meta. */
1109 static GstFlowReturn
1110 convert_cea708_cdp_cea608_raw (GstCCConverter * self, GstBuffer * inbuf,
1111 GstBuffer * outbuf, const GstVideoTimeCodeMeta * tc_meta)
1114 GstVideoTimeCode tc = GST_VIDEO_TIME_CODE_INIT;
1116 const struct cdp_fps_entry *in_fps_entry = NULL, *out_fps_entry;
1118 if (!push_cdp_buffer (self, inbuf, &tc, &in_fps_entry)) {
1119 gst_buffer_set_size (outbuf, 0);
1123 out_fps_entry = cdp_fps_entry_from_fps (self->out_fps_n, self->out_fps_d);
1124 if (!out_fps_entry || out_fps_entry->fps_n == 0)
1125 out_fps_entry = in_fps_entry;
1127 if (!can_take_buffer (self, in_fps_entry, out_fps_entry, &tc,
1128 &self->current_output_timecode))
1131 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
1132 cea608_1_len = out.size;
1133 cc_buffer_take_separated (self->cc_buffer, out_fps_entry, out.data,
1134 &cea608_1_len, NULL, 0, NULL, 0);
1135 gst_buffer_unmap (outbuf, &out);
1136 self->output_frames++;
1138 if (self->current_output_timecode.config.fps_n != 0 && !tc_meta) {
1139 gst_buffer_add_video_time_code_meta (outbuf,
1140 &self->current_output_timecode);
1141 gst_video_time_code_increment_frame (&self->current_output_timecode);
1145 gst_buffer_set_size (outbuf, cea608_1_len);
/* CDP -> S334-1A.  Parses the CDP, drains cc_data for one output frame,
 * strips DTVCC (CCP) triplets with drop_ccp_from_cc_data(), then rewrites
 * each remaining marker byte into the S334-1A field flag: 0xfc/0xf8
 * (field 1, valid/invalid) -> 0x80, anything else -> 0x00, line offset
 * assumed 0.  Output timecode meta is added as in the other CDP paths;
 * the error path (label elided) empties the output buffer. */
1153 static GstFlowReturn
1154 convert_cea708_cdp_cea608_s334_1a (GstCCConverter * self, GstBuffer * inbuf,
1155 GstBuffer * outbuf, const GstVideoTimeCodeMeta * tc_meta)
1158 GstVideoTimeCode tc = GST_VIDEO_TIME_CODE_INIT;
1159 const struct cdp_fps_entry *in_fps_entry = NULL, *out_fps_entry;
1164 if (!push_cdp_buffer (self, inbuf, &tc, &in_fps_entry))
1167 out_fps_entry = cdp_fps_entry_from_fps (self->out_fps_n, self->out_fps_d);
1168 if (!out_fps_entry || out_fps_entry->fps_n == 0)
1169 out_fps_entry = in_fps_entry;
1171 if (!can_take_buffer (self, in_fps_entry, out_fps_entry, &tc,
1172 &self->current_output_timecode))
1175 gst_buffer_map (outbuf, &out, GST_MAP_READWRITE);
1177 cc_data_len = out.size;
1178 cc_buffer_take_cc_data (self->cc_buffer, out_fps_entry, out.data,
1180 s334_len = drop_ccp_from_cc_data (out.data, cc_data_len);
1184 for (i = 0; i < s334_len / 3; i++) {
1185 guint byte = out.data[i * 3];
1186 /* We have to assume a line offset of 0 */
1187 out.data[i * 3] = (byte == 0xfc || byte == 0xf8) ? 0x80 : 0x00;
1190 gst_buffer_unmap (outbuf, &out);
1191 self->output_frames++;
1193 gst_buffer_set_size (outbuf, s334_len);
1195 if (self->current_output_timecode.config.fps_n != 0 && !tc_meta) {
1196 gst_buffer_add_video_time_code_meta (outbuf,
1197 &self->current_output_timecode);
1198 gst_video_time_code_increment_frame (&self->current_output_timecode);
1204 gst_buffer_set_size (outbuf, 0);
/* CDP -> CEA-708 cc_data.  Parses the CDP, then drains one output frame's
 * worth of cc_data straight into the (pre-sized) output buffer and shrinks
 * it to the actual length.  Falls back to the input fps entry when the
 * output rate is not in the CDP table; adds/advances the interpolated
 * output timecode when no timecode meta came in. */
1208 static GstFlowReturn
1209 convert_cea708_cdp_cea708_cc_data (GstCCConverter * self, GstBuffer * inbuf,
1210 GstBuffer * outbuf, const GstVideoTimeCodeMeta * tc_meta)
1213 GstVideoTimeCode tc = GST_VIDEO_TIME_CODE_INIT;
1214 const struct cdp_fps_entry *in_fps_entry = NULL, *out_fps_entry;
1217 if (!push_cdp_buffer (self, inbuf, &tc, &in_fps_entry))
1220 out_fps_entry = cdp_fps_entry_from_fps (self->out_fps_n, self->out_fps_d);
1221 if (!out_fps_entry || out_fps_entry->fps_n == 0)
1222 out_fps_entry = in_fps_entry;
1224 if (!can_take_buffer (self, in_fps_entry, out_fps_entry, &tc,
1225 &self->current_output_timecode))
1228 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
1229 out_len = (guint) out.size;
1230 cc_buffer_take_cc_data (self->cc_buffer, out_fps_entry, out.data, &out_len);
1232 gst_buffer_unmap (outbuf, &out);
1233 self->output_frames++;
1235 if (self->current_output_timecode.config.fps_n != 0 && !tc_meta) {
1236 gst_buffer_add_video_time_code_meta (outbuf,
1237 &self->current_output_timecode);
1238 gst_video_time_code_increment_frame (&self->current_output_timecode);
1242 gst_buffer_set_size (outbuf, out_len);
/* convert_cea708_cdp_cea708_cdp:
 * @self: the converter
 * @inbuf: input buffer holding one CEA-708 CDP packet
 * @outbuf: pre-allocated output buffer; resized to the produced payload
 *
 * CDP-to-CDP conversion: re-packetizes the input to the configured
 * output framerate (and the configured cdp-mode sections) by unwrapping
 * the cc_data and wrapping it into a freshly built CDP.
 * NOTE(review): out_len appears to be assigned from
 * convert_cea708_cc_data_cea708_cdp_internal () on a line not visible in
 * this excerpt — confirm against the full file.
 */
1247 static GstFlowReturn
1248 convert_cea708_cdp_cea708_cdp (GstCCConverter * self, GstBuffer * inbuf,
1252 GstVideoTimeCode tc = GST_VIDEO_TIME_CODE_INIT;
1253 const struct cdp_fps_entry *in_fps_entry = NULL, *out_fps_entry;
/* Scratch space for the unwrapped cc_data between the two CDPs */
1254 guint8 cc_data[MAX_CDP_PACKET_LEN];
1255 guint cc_data_len = MAX_CDP_PACKET_LEN;
/* Parse the incoming CDP and stash its cc_data internally */
1258 if (!push_cdp_buffer (self, inbuf, &tc, &in_fps_entry))
/* Fall back to the input rate when no valid output framerate is known */
1261 out_fps_entry = cdp_fps_entry_from_fps (self->out_fps_n, self->out_fps_d);
1262 if (!out_fps_entry || out_fps_entry->fps_n == 0)
1263 out_fps_entry = in_fps_entry;
/* May need to accumulate more input before an output frame is due */
1265 if (!can_take_buffer (self, in_fps_entry, out_fps_entry, &tc,
1266 &self->current_output_timecode))
1269 cc_buffer_take_cc_data (self->cc_buffer, out_fps_entry, cc_data,
1272 gst_buffer_map (outbuf, &out, GST_MAP_WRITE);
/* Wrap the cc_data into a new CDP at the output rate, embedding the
 * current output time code per the configured cdp-mode */
1274 convert_cea708_cc_data_cea708_cdp_internal (self, cc_data, cc_data_len,
1275 out.data, out.size, &self->current_output_timecode, out_fps_entry);
1277 gst_buffer_unmap (outbuf, &out);
1278 self->output_frames++;
1281 gst_buffer_set_size (outbuf, out_len);
/* gst_cc_converter_transform:
 * @self: the converter
 * @inbuf: input caption buffer (NULL while draining — see drain_input ())
 * @outbuf: pre-allocated output buffer filled by the per-format helpers
 *
 * Central dispatcher: sets up the output time code if needed, then
 * routes to the correct convert_*_*() helper based on the negotiated
 * input/output caption types.  A zero-sized @outbuf on return means
 * "no output produced this round".  NOTE(review): `break`s, braces and
 * some `ret =` assignments are outside this excerpt.
 */
1286 static GstFlowReturn
1287 gst_cc_converter_transform (GstCCConverter * self, GstBuffer * inbuf,
1290 GstVideoTimeCodeMeta *tc_meta = NULL;
1291 GstFlowReturn ret = GST_FLOW_OK;
1293 GST_DEBUG_OBJECT (self, "Converting %" GST_PTR_FORMAT " from %u to %u", inbuf,
1294 self->input_caption_type, self->output_caption_type);
1297 tc_meta = gst_buffer_get_video_time_code_meta (inbuf);
/* First buffer with a time code: seed current_output_timecode by
 * rescaling the input time code to the output framerate */
1300 if (self->current_output_timecode.config.fps_n <= 0) {
1301 /* XXX: this assumes the input time codes are well-formed and increase
1302 * at the rate of one frame for each input buffer */
1303 const struct cdp_fps_entry *in_fps_entry;
1304 gint scale_n, scale_d;
1306 in_fps_entry = cdp_fps_entry_from_fps (self->in_fps_n, self->in_fps_d);
/* Unknown input rate: no scaling (1/1) */
1307 if (!in_fps_entry || in_fps_entry->fps_n == 0)
1308 scale_n = scale_d = 1;
1310 get_framerate_output_scale (self, in_fps_entry, &scale_n, &scale_d);
1312 interpolate_time_code_with_framerate (self, &tc_meta->tc,
1313 self->out_fps_n, self->out_fps_d, scale_n, scale_d,
1314 &self->current_output_timecode);
/* Outer switch on input type, inner switch on output type.  Identical
 * in/out types are handled by basetransform passthrough, hence the
 * g_assert_not_reached () on the diagonal cases. */
1318 switch (self->input_caption_type) {
1319 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
1321 switch (self->output_caption_type) {
1322 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
1323 ret = convert_cea608_raw_cea608_s334_1a (self, inbuf, outbuf);
1325 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
1326 ret = convert_cea608_raw_cea708_cc_data (self, inbuf, outbuf);
1328 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
1329 ret = convert_cea608_raw_cea708_cdp (self, inbuf, outbuf, tc_meta);
1331 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
1333 g_assert_not_reached ();
1338 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
1340 switch (self->output_caption_type) {
1341 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
1342 ret = convert_cea608_s334_1a_cea608_raw (self, inbuf, outbuf);
1344 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
1345 ret = convert_cea608_s334_1a_cea708_cc_data (self, inbuf, outbuf);
1347 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
1349 convert_cea608_s334_1a_cea708_cdp (self, inbuf, outbuf, tc_meta);
1351 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
1353 g_assert_not_reached ();
1358 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
1360 switch (self->output_caption_type) {
1361 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
1362 ret = convert_cea708_cc_data_cea608_raw (self, inbuf, outbuf);
1364 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
1365 ret = convert_cea708_cc_data_cea608_s334_1a (self, inbuf, outbuf);
1367 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
1369 convert_cea708_cc_data_cea708_cdp (self, inbuf, outbuf, tc_meta);
1371 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
1373 g_assert_not_reached ();
1378 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
1380 switch (self->output_caption_type) {
1381 case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
1382 ret = convert_cea708_cdp_cea608_raw (self, inbuf, outbuf, tc_meta);
1384 case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
1386 convert_cea708_cdp_cea608_s334_1a (self, inbuf, outbuf, tc_meta);
1388 case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
1390 convert_cea708_cdp_cea708_cc_data (self, inbuf, outbuf, tc_meta);
1392 case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
1393 ret = convert_cea708_cdp_cea708_cdp (self, inbuf, outbuf);
1396 g_assert_not_reached ();
1402 g_assert_not_reached ();
1406 if (ret != GST_FLOW_OK) {
1407 GST_DEBUG_OBJECT (self, "returning %s", gst_flow_get_name (ret));
1411 GST_DEBUG_OBJECT (self, "Converted to %" GST_PTR_FORMAT, outbuf);
/* Conversions that did not stamp a time code themselves get one here;
 * zero-sized buffers (nothing produced) are left untouched */
1413 if (gst_buffer_get_size (outbuf) > 0) {
1414 if (self->current_output_timecode.config.fps_n > 0) {
1415 gst_buffer_add_video_time_code_meta (outbuf,
1416 &self->current_output_timecode);
1417 gst_video_time_code_increment_frame (&self->current_output_timecode);
/* gst_cc_converter_transform_meta:
 * GstBaseTransform::transform_meta vfunc — decides which metas are
 * copied from @inbuf to @outbuf.  Time code metas are excluded here
 * because this element re-creates them itself when scaling framerates
 * (see gst_cc_converter_transform ()); everything else is delegated to
 * the parent class.
 */
1427 gst_cc_converter_transform_meta (GstBaseTransform * base, GstBuffer * outbuf,
1428 GstMeta * meta, GstBuffer * inbuf)
1430 const GstMetaInfo *info = meta->info;
1432 /* we do this manually for framerate scaling */
1433 if (info->api == GST_VIDEO_TIME_CODE_META_API_TYPE)
1436 return GST_BASE_TRANSFORM_CLASS (parent_class)->transform_meta (base, outbuf,
/* can_generate_output:
 * Returns whether another output frame is due, by comparing stream
 * positions as exact fractions: input position = input_frames * in_fps_d
 * / in_fps_n, output position = output_frames * out_fps_d / out_fps_n.
 * Requires both framerates to be known.  When the positions coincide
 * the counters are reset to keep the numbers small.
 * NOTE(review): the `return` statements live on lines not visible in
 * this excerpt.
 */
1441 can_generate_output (GstCCConverter * self)
1443 int input_frame_n, input_frame_d, output_frame_n, output_frame_d;
1444 int output_time_cmp;
/* Can't compare positions without both framerates */
1446 if (self->in_fps_n == 0 || self->out_fps_n == 0)
1449 /* compute the relative frame count for each */
1450 if (!gst_util_fraction_multiply (self->in_fps_d, self->in_fps_n,
1451 self->input_frames, 1, &input_frame_n, &input_frame_d))
1452 /* we should never overflow */
1453 g_assert_not_reached ();
1455 if (!gst_util_fraction_multiply (self->out_fps_d, self->out_fps_n,
1456 self->output_frames, 1, &output_frame_n, &output_frame_d))
1457 /* we should never overflow */
1458 g_assert_not_reached ();
1460 output_time_cmp = gst_util_fraction_compare (input_frame_n, input_frame_d,
1461 output_frame_n, output_frame_d);
/* Positions coincide exactly: rebase both counters to zero */
1463 if (output_time_cmp == 0) {
1464 self->output_frames = 0;
1465 self->input_frames = 0;
1468 /* if the next output frame is at or before the current input frame */
1469 if (output_time_cmp >= 0)
/* reset_counters:
 * Resets all conversion state: frame counters, the running output time
 * code, the buffer remembered for metadata copying while draining, and
 * any cc_data queued in the internal cc_buffer.  Called on start,
 * flush and discontinuities.
 * NOTE(review): output_frames is deliberately reset to 1 (not 0) —
 * presumably so the first input frame does not immediately count as
 * overdue output in can_generate_output (); confirm against history.
 */
1476 reset_counters (GstCCConverter * self)
1478 self->input_frames = 0;
1479 self->output_frames = 1;
1480 gst_video_time_code_clear (&self->current_output_timecode);
1481 gst_clear_buffer (&self->previous_buffer);
1482 cc_buffer_discard (self->cc_buffer);
/* drain_input:
 * Flushes everything still queued in the internal cc_buffer (plus any
 * output owed by framerate conversion) by repeatedly running the
 * transform with a NULL input buffer and pushing the results downstream.
 * Metadata for the synthesized buffers is copied from the last input
 * buffer seen (previous_buffer).  Called on EOS and on discontinuities.
 * NOTE(review): loop braces, `break`s and the final return are outside
 * this excerpt; the warning string "Attempt to draining" contains a
 * grammar typo but is runtime-visible text, so it is left untouched in
 * this doc-only pass.
 */
1485 static GstFlowReturn
1486 drain_input (GstCCConverter * self)
1488 GstBaseTransformClass *bclass = GST_BASE_TRANSFORM_GET_CLASS (self);
1489 GstBaseTransform *trans = GST_BASE_TRANSFORM (self);
1490 GstFlowReturn ret = GST_FLOW_OK;
1491 guint cea608_1_len, cea608_2_len, ccp_len;
1493 cc_buffer_get_stored_size (self->cc_buffer, &cea608_1_len, &cea608_2_len,
/* Keep generating output while any field-1/field-2/CCP data remains
 * stored, or the frame counters say an output frame is still owed */
1496 while (ccp_len > 0 || cea608_1_len > 0 || cea608_2_len > 0
1497 || can_generate_output (self)) {
/* Without a previous buffer we have nothing to copy metadata from */
1500 if (!self->previous_buffer) {
1501 GST_WARNING_OBJECT (self, "Attempt to draining without a previous "
1502 "buffer. Aborting");
/* MAX_CDP_PACKET_LEN is the largest any output format can need */
1506 outbuf = gst_buffer_new_allocate (NULL, MAX_CDP_PACKET_LEN, NULL);
1508 if (bclass->copy_metadata) {
1509 if (!bclass->copy_metadata (trans, self->previous_buffer, outbuf)) {
1510 /* something failed, post a warning */
1511 GST_ELEMENT_WARNING (self, STREAM, NOT_IMPLEMENTED,
1512 ("could not copy metadata"), (NULL));
/* NULL inbuf: transform consumes only internally stored data */
1516 ret = gst_cc_converter_transform (self, NULL, outbuf);
1517 cc_buffer_get_stored_size (self->cc_buffer, &cea608_1_len, &cea608_2_len,
/* Zero-sized result means "nothing due yet": bump input_frames to
 * advance the framerate-conversion schedule and retry */
1519 if (gst_buffer_get_size (outbuf) <= 0) {
1520 /* try to move the output along */
1521 self->input_frames++;
1522 gst_buffer_unref (outbuf);
1524 } else if (ret != GST_FLOW_OK) {
1525 gst_buffer_unref (outbuf);
/* Push directly downstream — we are outside the normal chain here */
1529 ret = gst_pad_push (GST_BASE_TRANSFORM_SRC_PAD (trans), outbuf);
1530 if (ret != GST_FLOW_OK) {
/* gst_cc_converter_generate_output:
 * GstBaseTransform::generate_output vfunc.  Takes the queued input
 * buffer (may be NULL when only internally stored data remains),
 * allocates a maximally sized output buffer, copies metadata from the
 * input, and runs the conversion.  Drains and resets on DISCONT input.
 * A zero-sized conversion result is translated into "no output" rather
 * than pushing an empty buffer.
 * NOTE(review): several returns/braces and the GST_BASE_TRANSFORM_FLOW_
 * DROPPED paths are outside this excerpt.
 */
1538 static GstFlowReturn
1539 gst_cc_converter_generate_output (GstBaseTransform * base, GstBuffer ** outbuf)
1541 GstBaseTransformClass *bclass = GST_BASE_TRANSFORM_GET_CLASS (base);
1542 GstCCConverter *self = GST_CCCONVERTER (base);
/* Take ownership of the buffer basetransform queued for us */
1543 GstBuffer *inbuf = base->queued_buf;
1547 base->queued_buf = NULL;
/* No input and nothing owed internally: nothing to do */
1548 if (!inbuf && !can_generate_output (self)) {
/* Same caps on both sides: basetransform passes buffers through */
1552 if (gst_base_transform_is_passthrough (base)) {
/* Discontinuity: flush pending data against the old timeline first */
1556 if (inbuf && GST_BUFFER_IS_DISCONT (inbuf)) {
1557 ret = drain_input (self);
1558 reset_counters (self);
1559 if (ret != GST_FLOW_OK)
/* MAX_CDP_PACKET_LEN is the largest any output format can need */
1563 *outbuf = gst_buffer_new_allocate (NULL, MAX_CDP_PACKET_LEN, NULL);
1564 if (*outbuf == NULL)
/* Remember the input so drain_input () can copy metadata later */
1568 gst_buffer_replace (&self->previous_buffer, inbuf);
1570 if (bclass->copy_metadata) {
1571 if (!bclass->copy_metadata (base, self->previous_buffer, *outbuf)) {
1572 /* something failed, post a warning */
1573 GST_ELEMENT_WARNING (self, STREAM, NOT_IMPLEMENTED,
1574 ("could not copy metadata"), (NULL));
1578 ret = gst_cc_converter_transform (self, inbuf, *outbuf);
/* Empty result: discard it and report "no output" upstream */
1579 if (gst_buffer_get_size (*outbuf) <= 0) {
1580 gst_buffer_unref (*outbuf);
1586 gst_buffer_unref (inbuf);
/* Error path: release the input and fail the allocation */
1594 gst_buffer_unref (inbuf);
1596 GST_WARNING_OBJECT (self, "could not allocate buffer");
1597 return GST_FLOW_ERROR;
/* gst_cc_converter_sink_event:
 * GstBaseTransform::sink_event vfunc.  On EOS the internally queued
 * caption data is drained downstream (handled in the case body, partly
 * outside this excerpt); FLUSH_START discards all pending state.  All
 * events are then forwarded to the parent class.
 */
1602 gst_cc_converter_sink_event (GstBaseTransform * trans, GstEvent * event)
1604 GstCCConverter *self = GST_CCCONVERTER (trans);
1606 switch (GST_EVENT_TYPE (event)) {
1608 GST_DEBUG_OBJECT (self, "received EOS");
1613 case GST_EVENT_FLUSH_START:
/* Flushing invalidates everything queued — drop it */
1614 reset_counters (self);
1620 return GST_BASE_TRANSFORM_CLASS (parent_class)->sink_event (trans, event);
/* gst_cc_converter_start:
 * GstBaseTransform::start vfunc — puts the element into a pristine
 * state before streaming begins.
 */
1624 gst_cc_converter_start (GstBaseTransform * base)
1626 GstCCConverter *self = GST_CCCONVERTER (base);
1628 /* Resetting this is not really needed but makes debugging easier */
1629 self->cdp_hdr_sequence_cntr = 0;
1630 self->current_output_timecode = (GstVideoTimeCode) GST_VIDEO_TIME_CODE_INIT;
1631 reset_counters (self);
/* gst_cc_converter_stop:
 * GstBaseTransform::stop vfunc — releases per-stream state (time code
 * and the remembered previous buffer) when streaming ends.
 */
1637 gst_cc_converter_stop (GstBaseTransform * base)
1639 GstCCConverter *self = GST_CCCONVERTER (base);
1641 gst_video_time_code_clear (&self->current_output_timecode);
1642 gst_clear_buffer (&self->previous_buffer);
/* gst_cc_converter_set_property:
 * GObject property setter — only the "cdp-mode" flags property is
 * writable (which CDP sections to emit).
 */
1648 gst_cc_converter_set_property (GObject * object, guint prop_id,
1649 const GValue * value, GParamSpec * pspec)
1651 GstCCConverter *filter = GST_CCCONVERTER (object);
1655 filter->cdp_mode = g_value_get_flags (value);
1658 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* gst_cc_converter_get_property:
 * GObject property getter — mirror of the setter; exposes "cdp-mode".
 */
1664 gst_cc_converter_get_property (GObject * object, guint prop_id, GValue * value,
1667 GstCCConverter *filter = GST_CCCONVERTER (object);
1671 g_value_set_flags (value, filter->cdp_mode);
1674 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* gst_cc_converter_finalize:
 * GObject finalize — drops the cc_buffer created in _init () and chains
 * up to the parent class.
 */
1680 gst_cc_converter_finalize (GObject * object)
1682 GstCCConverter *self = GST_CCCONVERTER (object);
1684 gst_clear_object (&self->cc_buffer);
1686 G_OBJECT_CLASS (parent_class)->finalize (object);
/* gst_cc_converter_class_init:
 * Standard GObject class initializer: wires GObject property/finalize
 * handlers, installs the "cdp-mode" property, registers element
 * metadata and pad templates, and hooks up all GstBaseTransform vfuncs.
 */
1690 gst_cc_converter_class_init (GstCCConverterClass * klass)
1692 GObjectClass *gobject_class;
1693 GstElementClass *gstelement_class;
1694 GstBaseTransformClass *basetransform_class;
1696 gobject_class = (GObjectClass *) klass;
1697 gstelement_class = (GstElementClass *) klass;
1698 basetransform_class = (GstBaseTransformClass *) klass;
1700 gobject_class->set_property = gst_cc_converter_set_property;
1701 gobject_class->get_property = gst_cc_converter_get_property;
1702 gobject_class->finalize = gst_cc_converter_finalize;
1705 * GstCCConverter:cdp-mode
1707 * Only insert the selection sections into CEA 708 CDP packets.
1709 * Various software does not handle any other information than CC data
1710 * contained in CDP packets and might fail parsing the packets otherwise.
1714 g_object_class_install_property (G_OBJECT_CLASS (klass),
1715 PROP_CDP_MODE, g_param_spec_flags ("cdp-mode",
1717 "Select which CDP sections to store in CDP packets",
1718 GST_TYPE_CC_CONVERTER_CDP_MODE, DEFAULT_CDP_MODE,
1719 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
1721 gst_element_class_set_static_metadata (gstelement_class,
1722 "Closed Caption Converter",
1723 "Filter/ClosedCaption",
1724 "Converts Closed Captions between different formats",
1725 "Sebastian Dröge <sebastian@centricular.com>");
1727 gst_element_class_add_static_pad_template (gstelement_class, &sinktemplate);
1728 gst_element_class_add_static_pad_template (gstelement_class, &srctemplate);
1730 basetransform_class->start = GST_DEBUG_FUNCPTR (gst_cc_converter_start);
1731 basetransform_class->stop = GST_DEBUG_FUNCPTR (gst_cc_converter_stop);
1732 basetransform_class->sink_event =
1733 GST_DEBUG_FUNCPTR (gst_cc_converter_sink_event);
1734 basetransform_class->transform_size =
1735 GST_DEBUG_FUNCPTR (gst_cc_converter_transform_size);
1736 basetransform_class->transform_caps =
1737 GST_DEBUG_FUNCPTR (gst_cc_converter_transform_caps);
1738 basetransform_class->fixate_caps =
1739 GST_DEBUG_FUNCPTR (gst_cc_converter_fixate_caps);
1740 basetransform_class->set_caps = GST_DEBUG_FUNCPTR (gst_cc_converter_set_caps);
1741 basetransform_class->transform_meta =
1742 GST_DEBUG_FUNCPTR (gst_cc_converter_transform_meta);
1743 basetransform_class->generate_output =
1744 GST_DEBUG_FUNCPTR (gst_cc_converter_generate_output);
/* Identical caps on both pads means no conversion work is needed */
1745 basetransform_class->passthrough_on_same_caps = TRUE;
1747 GST_DEBUG_CATEGORY_INIT (gst_cc_converter_debug, "ccconverter",
1748 0, "Closed Caption converter");
/* Expose the flags type for introspection/documentation tooling */
1750 gst_type_mark_as_plugin_api (GST_TYPE_CC_CONVERTER_CDP_MODE, 0);
/* gst_cc_converter_init:
 * Instance initializer — sets the default cdp-mode and creates the
 * internal cc_buffer used to queue caption data across framerates.
 */
1754 gst_cc_converter_init (GstCCConverter * self)
1756 self->cdp_mode = DEFAULT_CDP_MODE;
1757 self->cc_buffer = cc_buffer_new ();