GST_PAD_SINK,
GST_PAD_REQUEST,
GST_STATIC_CAPS
- ("closedcaption/x-cea-608,format={ (string) raw, (string) cc_data}; "
+ ("closedcaption/x-cea-608,format={ (string) raw, (string) s334-1a}; "
"closedcaption/x-cea-708,format={ (string) cc_data, (string) cdp }"));
G_DEFINE_TYPE (GstCCCombiner, gst_cc_combiner, GST_TYPE_AGGREGATOR);
if (gst_structure_has_name (s, "closedcaption/x-cea-608")) {
if (strcmp (format, "raw") == 0) {
self->current_caption_type = GST_VIDEO_CAPTION_TYPE_CEA608_RAW;
- } else if (strcmp (format, "cc_data") == 0) {
- self->current_caption_type =
- GST_VIDEO_CAPTION_TYPE_CEA608_IN_CEA708_RAW;
+ } else if (strcmp (format, "s334-1a") == 0) {
+ self->current_caption_type = GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A;
} else {
g_assert_not_reached ();
}
GST_PAD_SRC,
GST_PAD_SOMETIMES,
GST_STATIC_CAPS
- ("closedcaption/x-cea-608,format={ (string) raw, (string) cc_data}; "
+ ("closedcaption/x-cea-608,format={ (string) raw, (string) s334-1a}; "
"closedcaption/x-cea-708,format={ (string) cc_data, (string) cdp }"));
G_DEFINE_TYPE (GstCCExtractor, gst_cc_extractor, GST_TYPE_ELEMENT);
caption_caps = gst_caps_new_simple ("closedcaption/x-cea-608",
"format", G_TYPE_STRING, "raw", NULL);
break;
- case GST_VIDEO_CAPTION_TYPE_CEA608_IN_CEA708_RAW:
+ case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:
caption_caps = gst_caps_new_simple ("closedcaption/x-cea-608",
- "format", G_TYPE_STRING, "cc_data", NULL);
+ "format", G_TYPE_STRING, "s334-1a", NULL);
break;
case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
caption_caps = gst_caps_new_simple ("closedcaption/x-cea-708",
GST_DEBUG_OBJECT (self, "No CC found");
self->line21_offset = -1;
} else {
- guint8 ccdata[6] = { 0xfc, 0x80, 0x80, 0xfd, 0x80, 0x80 }; /* Initialize the ccdata */
+ guint base_line1 = 0, base_line2 = 0;
+ guint8 ccdata[6] = { 0x80, 0x80, 0x80, 0x00, 0x80, 0x80 }; /* One S334-1A triplet per field: header byte plus two CC bytes */
+
+ if (GST_VIDEO_FRAME_HEIGHT (frame) == 525) {
+ base_line1 = 9;
+ base_line2 = 272;
+ } else if (GST_VIDEO_FRAME_HEIGHT (frame) == 625) {
+ base_line1 = 5;
+ base_line2 = 318;
+ }
+
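+ /* Header byte of each triplet: field flag in the top bit, line offset
+  * (relative to the base lines above) in the low 5 bits */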
+ ccdata[0] |= (base_line1 < i ? i - base_line1 : 0) & 0x1f;
ccdata[1] = sliced[0].data[0];
ccdata[2] = sliced[0].data[1];
+ ccdata[3] |= (base_line2 < i ? i - base_line2 : 0) & 0x1f;
ccdata[4] = sliced[1].data[0];
ccdata[5] = sliced[1].data[1];
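+ /* Attach both triplets to the frame as S334-1A caption meta */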
gst_buffer_add_video_caption_meta (frame->buffer,
- GST_VIDEO_CAPTION_TYPE_CEA608_IN_CEA708_RAW, ccdata, 6);
+ GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A, ccdata, 6);
GST_TRACE_OBJECT (self,
"Got CC 0x%02x 0x%02x / 0x%02x 0x%02x '%c%c / %c%c'", ccdata[1],
ccdata[2], ccdata[4], ccdata[5],
break;
}
- case GST_VIDEO_CAPTION_TYPE_CEA608_IN_CEA708_RAW:{
- guint8 data[3];
-
- /* This is the offset from line 9 for 525-line fields and from line
- * 5 for 625-line fields.
- *
- * The highest bit is set for field 1 but not for field 0
- */
- data[0] =
- self->info.height ==
- 525 ? self->caption_line - 9 : self->caption_line - 5;
- if (cc_meta->data[0] == 0xFD)
- data[0] |= 0x80;
- data[1] = cc_meta->data[1];
- data[2] = cc_meta->data[2];
-
+ case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:{
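+ /* The S334-1A caption meta already matches the ancillary payload layout
+  * (field/line-offset byte followed by the CC byte pair), so it can be
+  * written out as-is */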
if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
FALSE,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 >> 8,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 & 0xff, data, 3))
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 & 0xff, cc_meta->data,
+ cc_meta->size))
GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
got_captions = TRUE;
GST_DEBUG_OBJECT (self,
"Adding CEA-608 meta to buffer for line %d", fi);
GST_MEMDUMP_OBJECT (self, "CEA608", gstanc.data, gstanc.data_count);
- /* The first byte actually contains the field and line offset but
- * for CEA608-in-CEA708 we can't store the line offset, and it's
- * generally not needed
- */
- gstanc.data[0] = (gstanc.data[0] & 0x80) ? 0xFD : 0xFC;
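+ /* The ancillary payload is already S334-1A, so attach it unmodified */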
gst_buffer_add_video_caption_meta (*buffer,
- GST_VIDEO_CAPTION_TYPE_CEA608_IN_CEA708_RAW, gstanc.data,
+ GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A, gstanc.data,
gstanc.data_count);
break;
default: