{
MEDIAMUXER_FENTER();
int ret = MX_ERROR_NONE;
+ media_format_text_type_e text_type_e = 0;
MEDIAMUXER_CHECK_NULL(pHandle);
mxgst_handle_t *mx_handle_gst = (mxgst_handle_t *) pHandle;
media_format_mimetype_e mimetype = 0;
MX_I("Audio track added successfully: %p, with head: %p \n",
current->media_format, mx_handle_gst->track_info.track_head->media_format);
-
} else {
MX_E("Unsupported/Mismatched audio MIME Type: %x\n", mimetype);
}
+ } else if (media_format_get_text_info(media_format, &mimetype, &text_type_e) !=
+ MEDIA_FORMAT_ERROR_INVALID_OPERATION) {
+ if (mx_handle_gst->muxed_format == MEDIAMUXER_CONTAINER_FORMAT_MP4
+ && (mimetype == MEDIA_FORMAT_TEXT_MP4)) {
+
+ current->track_index = 2 + NO_OF_TRACK_TYPES*(mx_handle_gst->track_info.subtitle_track_cnt);
+ (mx_handle_gst->track_info.subtitle_track_cnt)++;
+ (mx_handle_gst->track_info.total_track_cnt)++;
+ *track_index = current->track_index;
+
+ MX_I("Text track added successfully: %p, with head: %p \n",
+ current->media_format, mx_handle_gst->track_info.track_head->media_format);
+ } else {
+ MX_E("Unsupported/Mismatched subtitle MIME Type: %x\n", mimetype);
+ }
+
} else {
- MX_E("Unsupported A/V MIME Type: %x\n", mimetype);
+ MX_E("Unsupported A/V/Subs MIME Type: %x\n", mimetype);
}
MEDIAMUXER_FLEAVE();
return ret;
MX_I("Video start feed called, however, current is null\n");
}
+/* Appsrc "need-data" callback for a subtitle track: switch the track back
+ * into feeding mode so the writer thread resumes pushing buffers. */
+static void _text_start_feed(GstElement *source, guint size, mx_gst_track *current)
+{
+	if (!current) {
+		MX_I("Text start feed called, however, current is null\n");
+		return;
+	}
+	MX_I("Subtitle Start feeding cb... current->track_index = %d\n", current->track_index);
+	current->stop_feed = 0;
+	current->start_feed = 1;
+}
+
+
/*
* This callback triggers when appsrc has enough data and we can stop sending.
* We remove the idle handler from the mainloop.
MX_I("Video stop feed called, however, current is null\n");
}
+/* Appsrc "enough-data" callback for a subtitle track: pause feeding until
+ * the appsrc asks for data again via "need-data". */
+static void _text_stop_feed(GstElement *source, mx_gst_track *current)
+{
+	if (!current) {
+		MX_I("Text stop feed called, however, current is null\n");
+		return;
+	}
+	MX_I("\nText Stop feeding... current->track_index = %d\n", current->track_index);
+	current->stop_feed = 1;
+	current->start_feed = 0;
+}
+
+
mx_ret_e _gst_create_pipeline(mxgst_handle_t *gst_handle)
{
MEDIAMUXER_FENTER();
GstBus *bus = NULL;
/* Note: Use a loop, if needed. GMainLoop *loop; */
GstPad *audio_pad, *video_pad, *aud_src, *vid_src;
+ GstPad *text_pad, *text_src;
char str_appsrc[MAX_STRING_LENGTH];
char str_parser[MAX_STRING_LENGTH];
char track_no[MAX_STRING_LENGTH];
int vid_track_cnt = 0;
int aud_track_cnt = 0;
+ int text_track_cnt = 0;
mx_gst_track *current = NULL;
media_format_mimetype_e mimetype = 0;
}
}
}
+
+ if (gst_handle->track_info.subtitle_track_cnt) { /* Text track(s) exist */
+ for (current = gst_handle->track_info.track_head; current; current = current->next) {
+ if (current->track_index%NO_OF_TRACK_TYPES == 2) {
+
+ snprintf(str_appsrc, MAX_STRING_LENGTH, "subtitle_appsrc%d", current->track_index);
+
+ current->appsrc = gst_element_factory_make("appsrc", str_appsrc);
+
+ if (!current->appsrc) {
+
+ MX_E("One element (text_appsrc) could not be created. Exiting.\n");
+ ret = MEDIAMUXER_ERROR_RESOURCE_LIMIT;
+ goto ERROR;
+ }
+
+ gst_bin_add_many(GST_BIN(gst_handle->pipeline), current->appsrc, NULL);
+
+ /* Set text caps for corresponding src elements */
+ g_object_set(current->appsrc, "caps", gst_caps_from_string(current->caps), NULL);
+ g_object_set (current->appsrc, "format", GST_FORMAT_TIME, NULL);
+
+#ifdef ASYCHRONOUS_WRITE
+ /* ToDo: Use a function pointer, and create independent functions to each track */
+ MX_I("\nRegistering text callback for cur->tr_ind = %d\n", current->track_index);
+ g_signal_connect(current->appsrc, "need-data", G_CALLBACK(_text_start_feed), current);
+ g_signal_connect(current->appsrc, "enough-data", G_CALLBACK(_text_stop_feed), current);
+#else
+ g_object_set(current->appsrc, "block", TRUE, NULL);
+ gst_app_src_set_stream_type((GstAppSrc *)current->appsrc, GST_APP_STREAM_TYPE_STREAM);
+#endif
+
+ snprintf(track_no, MAX_STRING_LENGTH, "subtitle_%.2d", text_track_cnt++); /* snprintf(track_no,"subtitle_00"); */
+ text_pad = gst_element_get_request_pad(gst_handle->muxer, track_no);
+ text_src = gst_element_get_static_pad(current->appsrc, "src");
+ MX_I("Linking subtitle-appsrc to muxersubtitle static-pad\n");
+ if (gst_pad_link(text_src, text_pad) != GST_PAD_LINK_OK)
+ MX_E("text_src and text_pad link failed");
+
+ gst_object_unref(GST_OBJECT(text_src));
+ gst_object_unref(GST_OBJECT(text_pad));
+ }
+ }
+ }
+
}
MX_I("Output_uri= %s\n", gst_handle->output_uri);
gst_caps_unref(new_cap);
}
break;
-	case MEDIA_FORMAT_CONTAINER:
	case MEDIA_FORMAT_TEXT:
+		/* Following check is safe but not mandatory. */
+		if ((current->track_index)%NO_OF_TRACK_TYPES != 2) {
+			MX_E("Subtitle track_index is not in 3*n+2 format\n");
+			goto ERROR;
+		}
+		if (media_packet_get_codec_data(packet,
+			(void **)&codec_data, &codec_data_size)) {
+			MX_E("media_packet_get_codec_data call failed\n");
+			ret = MX_ERROR_UNKNOWN;
+			break;
+		}
+		MX_I("codec data for text = %s size is %d\n", codec_data, codec_data_size);
+		if (current->caps == NULL ||
+			g_strcmp0(codec_data, current->caps) != 0) {
+
+#ifdef SEND_FULL_CAPS_VIA_CODEC_DATA
+			/* Debugging purpose. The whole caps filter can be sent via codec_data */
+			new_cap = gst_caps_from_string(codec_data);
+			MX_I("New cap is=%s\n", codec_data);
+			g_object_set(current->appsrc, "caps", new_cap, NULL);
+			/* g_strdup() replaces the earlier g_malloc+g_stpcpy pair: copying
+			 * into a previously allocated (possibly shorter) caps buffer could
+			 * overflow it when the new codec_data string is longer. */
+			g_free(current->caps);
+			current->caps = g_strdup(codec_data);
+			if (current->caps == NULL) {
+				MX_E("[%s][%d] memory allocation failed\n", __func__, __LINE__);
+				gst_caps_unref(new_cap);
+				ret = MX_ERROR_UNKNOWN;
+				break;
+			}
+#else
+			media_format_mimetype_e mimetype = -1;
+			media_format_text_type_e text_type_e = -1;
+			/* BUGFIX: previously passed '&text_type', an undeclared
+			 * identifier (the declared variable is text_type_e). */
+			if (media_format_get_text_info(format,
+				&mimetype, &text_type_e)) {
+				MX_E("media_format_get_text_info call failed\n");
+				ret = MX_ERROR_UNKNOWN;
+				break;
+			}
+			/* Replace (never reuse) the cached caps string; the old
+			 * g_malloc+g_stpcpy sequence could overflow a shorter prior
+			 * allocation and left uninitialized memory behind on the
+			 * error path below. */
+			g_free(current->caps);
+			current->caps = g_strdup(codec_data);
+			if (current->caps == NULL) {
+				MX_E("[%s][%d] memory allocation failed\n", __func__, __LINE__);
+				ret = MX_ERROR_UNKNOWN;
+				break;
+			}
+			new_cap = gst_caps_from_string(codec_data);
+			MX_I("New cap set by codec data is=%s\n", codec_data);
+			if (__gst_codec_specific_caps(new_cap, mimetype)) {
+				MX_E("Setting Subtitle caps failed\n");
+				gst_caps_unref(new_cap);
+				/* Drop the cached caps so the next packet retries
+				 * instead of taking the "already set" path. */
+				g_free(current->caps);
+				current->caps = NULL;
+				ret = MX_ERROR_UNKNOWN;
+				break;
+			}
+			caps_string = gst_caps_to_string(new_cap);
+			MX_I("New cap set by codec data is = %s\n",
+				caps_string);
+			if (caps_string)
+				g_free(caps_string);
+			g_object_set(current->appsrc, "caps", new_cap, NULL);
+#endif
+			gst_caps_unref(new_cap);
+		} else if (current != NULL)
+			MX_I("appsrc caps already set to %s\n",current->caps);
+		break;
+
+ case MEDIA_FORMAT_CONTAINER:
case MEDIA_FORMAT_UNKNOWN:
default:
MX_E("Unknown format type\n");
/* We got some error, stop sending data */
MX_E("--audio appsrc push failed--\n");
}
+ } else if (track_index%NO_OF_TRACK_TYPES == 2) { /* NO_OF_TRACK_TYPES*n+2 for subtitle */
+ MX_I(" Waiting till start_feed of current subtitle, index=%d is active\n", current->track_index);
+#ifdef ASYCHRONOUS_WRITE
+ while (current->start_feed == 0)
+ g_usleep(WRITE_POLL_PERIOD);
+ MX_I("End of sleep, pushing subtitle data\n");
+ g_signal_emit_by_name(current->appsrc, "push-buffer", gst_inbuf2, &ret);
+#else
+ ret = gst_app_src_push_buffer((GstAppSrc *)current->appsrc, gst_inbuf2);
+#endif
+ MX_I("Attempted subtitle-buf push\n");
+ if (ret != GST_FLOW_OK) {
+ /* We got some error, stop sending data */
+ MX_E("--subtitle appsrc push failed--\n");
+ }
} else {
MX_E("Unsupported track index=%d. track_index-mod3= %d. Only 0/1/2 track index is vaild\n", track_index, track_index%NO_OF_TRACK_TYPES);
ret = MX_ERROR_INVALID_ARGUMENT;
} else if (track_index%NO_OF_TRACK_TYPES == 1) {
MX_I("\n-----EOS for audioappsrc-----\n");
gst_app_src_end_of_stream((GstAppSrc *)(current->appsrc));
+ } else if (track_index%NO_OF_TRACK_TYPES == 2) {
+ MX_I("\n-----EOS for textappsrc-----\n");
+ gst_app_src_end_of_stream((GstAppSrc *)(current->appsrc));
} else {
MX_E("Invalid track Index[%d].\n", track_index);
goto ERROR;
mediamuxer_h myMuxer = 0;
media_format_h media_format = NULL;
media_format_h media_format_a = NULL;
+media_format_h media_format_t = NULL;
media_format_h input_fmt = NULL;
#if DUMP_OUTBUF
bool have_mp4 = false;
bool have_vid_track = false;
bool have_aud_track = false;
+bool have_text_track = false;
int track_index_vid = -1;
int track_index_aud = -1;
int track_index_aud2 = -1;
+int track_index_text = -1;
int g_menu_state = CURRENT_STATUS_MAINMENU;
int demux_mp4();
return 0;
}
+/* Interactive test: create a text (subtitle) media_format and add a text
+ * track to the muxer. Only the MP4-family data sinks (menu choices
+ * 11/12/13) support a text track. Returns 0 in all cases, matching the
+ * test-suite convention for the other add_track helpers. */
+int test_mediamuxer_add_track_text()
+{
+	media_format_mimetype_e mimetype;
+	media_format_text_type_e text_type;
+
+	/* BUGFIX: trace previously printed the wrong function name
+	 * ("test_mediamuxer_text_track_text"). */
+	g_print("test_mediamuxer_add_track_text\n");
+	if (media_format_create(&media_format_t) != MEDIA_FORMAT_ERROR_NONE) {
+		g_print("media_format_create failed for media_format_t\n");
+		return 0;
+	}
+
+	/* NOTE(review): strncmp with length 1 only compares the leading '1',
+	 * so "11", "12" and "13" are indistinguishable here and the three
+	 * conditions are redundant — confirm the intended sink set. */
+	if (strncmp(data_sink, "11", 1) == 0 || strncmp(data_sink, "12", 1) == 0
+		|| strncmp(data_sink, "13", 1) == 0) {
+		if (media_format_set_text_mime(media_format_t, MEDIA_FORMAT_TEXT_MP4) == MEDIA_FORMAT_ERROR_INVALID_OPERATION)
+			g_print("Problem during media_format_set_text_mime operation in MP4\n");
+	} else {
+		g_print("Currently text is not supported for this format\n");
+		return 0;
+	}
+
+	media_format_get_text_info(media_format_t, &mimetype, &text_type);
+
+	g_print("Text Mimetype trying to set: %x (text : %x), type = %x\n", (int)(mimetype), (int)(MEDIA_FORMAT_TEXT_MP4), text_type);
+
+	/* To add text track; report (but don't abort on) failure so the
+	 * interactive session can continue. */
+	if (mediamuxer_add_track(myMuxer, media_format_t, &track_index_text) != MEDIAMUXER_ERROR_NONE)
+		g_print("mediamuxer_add_track for text track failed\n");
+
+	g_print("Text Track index returned is: %d\n", track_index_text);
+	return 0;
+}
+
int test_mediamuxer_prepare()
{
g_print("test_mediamuxer_prepare\n");
test_mediamuxer_add_track_video();
else
g_print("Ignoring, data_sink=%s doesnt need video track testing\n", data_sink);
+ } else if (strncmp(cmd, "x", 1) == 0) {
+ if (!validate_with_codec) {
+ have_text_track = true;
+ if (have_mp4 == false) {
+ g_menu_state = CURRENT_STATUS_MP4_FILENAME;
+ have_mp4 = true;
+ }
+ }
+ test_mediamuxer_add_track_text();
} else if (strncmp(cmd, "m", 1) == 0) {
test_mediamuxer_write_sample();
} else if (strncmp(cmd, "t", 1) == 0) {
g_print("c. Create \t");
g_print("o. Set Data Sink \n");
g_print("a. AddAudioTrack \t");
- g_print("v. AddVideoTrack \n");
+ g_print("v. AddVideoTrack \t");
+ g_print("x. AddTextTrack \n");
g_print("e. Prepare \t");
g_print("s. Start \n");
g_print("m. StartMuxing \n");
| GLOBAL VARIABLE DEFINITIONS: |
---------------------------------------------------------------------- */
char *aud_caps, *vid_caps;
+char *text_caps;
bool aud_eos = 0;
bool vid_eos = 0;
+bool text_eos = 0;
extern int track_index_vid, track_index_aud, track_index_aud2;
+extern int track_index_text;
extern mediamuxer_h myMuxer;
extern bool validate_multitrack;
extern char media_file[2048];
extern char media_file[2048];
extern bool have_aud_track;
extern bool have_vid_track;
+extern bool have_text_track;
const gchar *new_pad_type_aud = NULL; /* demuxer pad type for audio */
const gchar *new_pad_type_vid = NULL; /* demuxer pad type for video */
+const gchar *new_pad_type_text = NULL; /* demuxer pad type for subtitle */
/* demuxer sturcture for demux_mp4() */
typedef struct _CustomData {
GstElement *pipeline;
GstElement *demuxer;
GstElement *audioqueue;
GstElement *videoqueue;
+ GstElement *textqueue;
GstElement *audio_appsink; /* o/p of demuxer */
GstElement *video_appsink;
+ GstElement *text_appsink;
GstElement *dummysink;
char *saveLocation_audio; /* aac stream */
mediamuxer_close_track(myMuxer, track_index_aud2);
g_print("audio EOS cb reached \n");
aud_eos = 1;
- if (!have_vid_track || vid_eos == 1)
+ if ((!have_vid_track || vid_eos == 1) && (!have_text_track || text_eos == 1)) {
+ g_print("Audio initiated quit_main_loop\n");
g_main_loop_quit(data->loop);
+ }
}
/* demuxer video appsink eos callback */
mediamuxer_close_track(myMuxer, track_index_vid);
g_print("Encoded video EOS cb reached \n");
vid_eos = 1;
- if (!have_aud_track || aud_eos == 1)
+ if ((!have_aud_track || aud_eos == 1) && (!have_text_track || text_eos == 1)) {
+ g_print("Video initiated quit_main_loop \n");
g_main_loop_quit(data->loop);
+ }
}
+/* demuxer text appsink eos callback */
+/* EOS callback on the demuxer's text appsink: close the muxer's text
+ * track and quit the demux main loop once every selected track is done. */
+void __text_app_sink_eos_callback(GstElement *sink, CustomData *data)
+{
+	gboolean video_done;
+	gboolean audio_done;
+
+	g_print("__text_app_sink_eos_callback, closing track_index = %d\n", track_index_text);
+	mediamuxer_close_track(myMuxer, track_index_text);
+	g_print("Encoded text EOS cb reached \n");
+	text_eos = 1;
+
+	/* A track counts as done when it was never selected or has hit EOS. */
+	video_done = (!have_vid_track || vid_eos == 1);
+	audio_done = (!have_aud_track || aud_eos == 1);
+	if (video_done && audio_done) {
+		g_print("Text initiated quit_main_loop \n");
+		g_main_loop_quit(data->loop);
+	}
+}
+
+
+/* Demuxer text-appsink buffer receive callback */
+/* Demuxer text-appsink "new-sample" callback: pulls one encoded subtitle
+ * buffer, wraps it into a media_packet and pushes it to the muxer under
+ * test via mediamuxer_write_sample().
+ * BUGFIX: the GstSample from "pull-sample", the gst_buffer_map() mapping,
+ * the media_format and the media_packet are now released on every path;
+ * the previous revision leaked all four for each received buffer. */
+void __text_app_sink_callback(GstElement *sink, CustomData *data)
+{
+	GstBuffer *buffer;
+	media_format_h textfmt = NULL;
+	media_packet_h text_pkt = NULL;
+	GstState state;
+	uint64_t ns;
+	int key;
+	static int count = 0;
+	guint8 *dptr;
+	GstMapInfo map;
+	GstSample *sample = NULL;
+
+	if (count == 0)
+		g_print("Called __text_app_sink_callback\n");
+
+	gst_element_get_state(data->text_appsink, &state, NULL, GST_CLOCK_TIME_NONE);
+	g_print((state == GST_STATE_PLAYING) ? "text_appsink GST_STATE_PLAYING\n" : "text appsink not playing\n");
+
+	g_signal_emit_by_name(sink, "pull-sample", &sample);
+	if (sample == NULL)
+		return; /* nothing pulled (e.g. EOS race); avoid NULL deref */
+	buffer = gst_sample_get_buffer(sample);
+
+	if (buffer && gst_buffer_map(buffer, &map, GST_MAP_READ)) {
+		if (media_format_create(&textfmt)) {
+			g_print("media_format_create(&textfmt) failed\n");
+			goto UNMAP;
+		}
+
+		if (media_format_set_text_mime(textfmt, MEDIA_FORMAT_TEXT_MP4)) {
+			g_print("media_format_set_text_mime failed\n");
+			goto FMT_DONE;
+		}
+
+		/* A buffer without the DELTA_UNIT flag is a sync (key) sample. */
+		key = GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DELTA_UNIT) ? 0 : 1;
+
+		if (media_packet_create(textfmt, NULL, NULL, &text_pkt)) {
+			g_print("create text media_packet failed\n");
+			goto FMT_DONE;
+		}
+
+		if (media_packet_alloc(text_pkt)) {
+			g_print("text media_packet_alloc failed\n");
+			goto PKT_DONE;
+		}
+
+		media_packet_get_buffer_data_ptr(text_pkt, (void **)&dptr);
+		memcpy((char *)dptr, map.data, map.size);
+
+		if (media_packet_set_buffer_size(text_pkt, (uint64_t)(map.size))) {
+			g_print("text set_buffer_size failed\n");
+			goto PKT_DONE;
+		}
+
+		if (media_packet_get_buffer_size(text_pkt, &ns)) {
+			g_print("unable to get the buffer size actual");
+			goto PKT_DONE;
+		}
+
+		/* text_caps is captured in __on_pad_added when the demuxer exposes
+		 * the subtitle pad — assumed non-NULL by the time samples arrive. */
+		if (media_packet_set_codec_data(text_pkt, text_caps, strlen(text_caps)+1)) {
+			g_print("unable to set the text codec data e\n");
+			goto PKT_DONE;
+		}
+
+		if (media_packet_set_pts(text_pkt, buffer->pts)) {
+			g_print("unable to set the pts\n");
+			goto PKT_DONE;
+		}
+
+		if (media_packet_set_dts(text_pkt, buffer->dts)) {
+			g_print("unable to set the dts\n");
+			goto PKT_DONE;
+		}
+
+		if (media_packet_set_duration(text_pkt, buffer->duration)) {
+			g_print("unable to set the duration\n");
+			goto PKT_DONE;
+		}
+
+		if (media_packet_set_flags(text_pkt, key)) {
+			g_print("unable to set the flag size\n");
+			goto PKT_DONE;
+		}
+
+		/* Print count and size to indicate a received buffer */
+		g_print("Received text buffer count : %4d (size : %5"PRIu64", pts : %12"PRIu64")\n",
+			++count, ns, buffer->pts);
+
+		mediamuxer_write_sample(myMuxer, track_index_text, text_pkt);
+
+PKT_DONE:
+		media_packet_destroy(text_pkt);
+FMT_DONE:
+		/* NOTE(review): media_format is ref-counted; unref (not destroy)
+		 * releases our creation reference — confirm against the other
+		 * appsink callbacks in this suite. */
+		media_format_unref(textfmt);
+UNMAP:
+		gst_buffer_unmap(buffer, &map);
+	}
+	gst_sample_unref(sample);
+}
+
+
/* demuxer on_pad callback */
static void __on_pad_added(GstElement *element, GstPad *pad, CustomData *data)
{
GstPadLinkReturn ret;
GstPad *sink_pad_audioqueue = gst_element_get_static_pad(data->audioqueue, "sink");
GstPad *sink_pad_videoqueue = gst_element_get_static_pad(data->videoqueue, "sink");
+ GstPad *sink_pad_textqueue = gst_element_get_static_pad(data->textqueue, "sink");
GstCaps *new_pad_aud_caps = NULL;
GstCaps *new_pad_vid_caps = NULL;
+ GstCaps *new_pad_text_caps = NULL;
GstCaps *new_pad_caps = NULL;
GstStructure *new_pad_struct = NULL;
const gchar *new_pad_type = NULL;
/* Link videoqueue->audio_appsink and save/Give to appsrc of muxer */
gst_element_set_state(data->video_appsink, GST_STATE_PLAYING);
/* one has to set the newly added element to the same state as the rest of the elements. */
+ } else if (have_text_track && g_str_has_prefix(new_pad_type, "text/x-raw")) {
+ new_pad_text_caps = gst_pad_get_current_caps(pad);
+ caps = gst_caps_to_string(new_pad_text_caps);
+ g_print("Subtitle caps :%s\n", caps);
+ text_caps = caps;
+
+ /* Link demuxer-pad with textqueue */
+ ret = gst_pad_link(pad, sink_pad_textqueue);
+ if (GST_PAD_LINK_FAILED(ret))
+ g_print("Type is '%s' but link failed.\n", new_pad_type);
+ else
+ g_print("Link succeeded (type '%s').\n", new_pad_type);
+ new_pad_type_text = new_pad_type;
+ gst_element_link(data->textqueue, data->text_appsink);
+ g_object_set(data->text_appsink, "emit-signals", TRUE, NULL);
+ g_signal_connect(data->text_appsink, "new-sample", G_CALLBACK(__text_app_sink_callback), data);
+ g_signal_connect(data->text_appsink, "eos", G_CALLBACK(__text_app_sink_eos_callback), data);
+ /* Link textqueue->text_appsink and save/Give to appsrc of muxer */
+ gst_element_set_state(data->text_appsink, GST_STATE_PLAYING);
+ /* one has to set the newly added element to the same state as the rest of the elements. */
} else {
- g_print(" It has type '%s' which is not raw A/V. Ignoring.\n", new_pad_type);
+ g_print(" It has type '%s' which is not raw A/V/Subs. Ignoring.\n", new_pad_type);
goto exit;
}
gst_object_unref(sink_pad_audioqueue);
gst_object_unref(sink_pad_videoqueue);
+ gst_object_unref(sink_pad_textqueue);
}
/* Demuxer bus_call */
data.demuxer = gst_element_factory_make("qtdemux", "mp4-demuxer");
data.audioqueue = gst_element_factory_make("queue", "audio-queue");
data.videoqueue = gst_element_factory_make("queue", "video-queue");
+ data.textqueue = gst_element_factory_make("queue", "subtitle-queue");
data.dummysink = gst_element_factory_make("fakesink", "fakesink");
data.video_appsink = gst_element_factory_make("appsink", "encoded_video_appsink");
data.audio_appsink = gst_element_factory_make("appsink", "encoded_audio_appsink");
+ data.text_appsink = gst_element_factory_make("appsink", "text_appsink");
if (!data.pipeline || !data.source || !data.demuxer || !data.audioqueue
- || !data.dummysink || !data.videoqueue || !data.audio_appsink || !data.video_appsink) {
+ || !data.dummysink || !data.videoqueue || !data.audio_appsink || !data.video_appsink
+ || !data.textqueue || !data.text_appsink) {
g_print("Test-Suite: One gst-element can't be created. Exiting\n");
return -1;
}
/* Add gstreamer-elements into gst-pipeline */
gst_bin_add_many(GST_BIN(data.pipeline), data.source, data.demuxer, data.dummysink, \
- data.audioqueue, data.videoqueue, data.audio_appsink, data.video_appsink, NULL);
+ data.audioqueue, data.videoqueue, data.audio_appsink, data.video_appsink, data.textqueue, data.text_appsink, NULL);
/* we set the input filename to the source element */
g_object_set(G_OBJECT(data.source), "location", media_file, NULL);