From: Sangchul Lee
Date: Mon, 12 Jul 2021 04:40:46 +0000 (+0900)
Subject: Add API for audio source loopback rendering
X-Git-Tag: submit/tizen/20210729.023123~15
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=661ed9e40e3ede473521767751cb086d90264bc3;p=platform%2Fcore%2Fapi%2Fwebrtc.git

Add API for audio source loopback rendering

webrtc_media_source_set_audio_loopback() is added. It is used to render
the audio source locally with the given sound stream information before
the data is sent to the remote peer.

[Version] 0.2.45
[Issue Type] API

Change-Id: Iab4815b3b41da3cc529fa4fe29cdfca7537bacaa
Signed-off-by: Sangchul Lee
---

diff --git a/include/webrtc.h b/include/webrtc.h
index 88ea1b42..a4ca636b 100644
--- a/include/webrtc.h
+++ b/include/webrtc.h
@@ -1101,6 +1101,30 @@ int webrtc_set_encoded_video_frame_cb(webrtc_h webrtc, webrtc_encoded_frame_cb c
  */
 int webrtc_unset_encoded_video_frame_cb(webrtc_h webrtc);
 
+/**
+ * @brief Sets an audio loopback to render the audio frames of the media source.
+ * @details The following media source types are available for this function:\n
+ *          #WEBRTC_MEDIA_SOURCE_TYPE_AUDIOTEST\n
+ *          #WEBRTC_MEDIA_SOURCE_TYPE_MIC\n
+ *          #WEBRTC_MEDIA_SOURCE_TYPE_FILE
+ * @since_tizen 6.5
+ * @remarks The following sound stream types can be used for the @a stream_info:\n
+ *          #SOUND_STREAM_TYPE_MEDIA\n
+ *          #SOUND_STREAM_TYPE_VOIP\n
+ *          #SOUND_STREAM_TYPE_MEDIA_EXTERNAL_ONLY
+ * @param[in] webrtc      WebRTC handle
+ * @param[in] source_id   The audio source id
+ * @param[in] stream_info The sound stream information
+ * @return @c 0 on success,
+ *         otherwise a negative error value
+ * @retval #WEBRTC_ERROR_NONE Successful
+ * @retval #WEBRTC_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #WEBRTC_ERROR_INVALID_OPERATION Invalid operation
+ * @pre Add media source to @a webrtc to get @a source_id by calling webrtc_add_media_source().
+ * @see webrtc_media_source_set_video_loopback()
+ */
+int webrtc_media_source_set_audio_loopback(webrtc_h webrtc, unsigned source_id, sound_stream_info_h stream_info);
+
 /**
  * @brief Sets a video loopback to render the video frames of the media source.
  * @details The following media source types are available for this function:\n
@@ -1119,6 +1143,7 @@ int webrtc_unset_encoded_video_frame_cb(webrtc_h webrtc);
  * @retval #WEBRTC_ERROR_INVALID_PARAMETER Invalid parameter
  * @retval #WEBRTC_ERROR_INVALID_OPERATION Invalid operation
  * @pre Add media source to @a webrtc to get @a source_id by calling webrtc_add_media_source().
+ * @see webrtc_media_source_set_audio_loopback()
  */
 int webrtc_media_source_set_video_loopback(webrtc_h webrtc, unsigned source_id, webrtc_display_type_e type, webrtc_display_h display);
 
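For context (not part of this commit), a minimal caller-side sketch of the new API, following the pattern the test app uses below. webrtc_add_media_source() and the MIC source type come from the existing public API; error handling is trimmed:

    #include <webrtc.h>
    #include <sound_manager.h>

    /* illustrative sketch: render a microphone source locally with a
     * MEDIA-type sound stream before it is sent to the remote peer */
    static int start_mic_loopback(webrtc_h webrtc)
    {
        unsigned int source_id = 0;
        sound_stream_info_h stream_info = NULL;
        int ret;

        ret = webrtc_add_media_source(webrtc, WEBRTC_MEDIA_SOURCE_TYPE_MIC, &source_id);
        if (ret != WEBRTC_ERROR_NONE)
            return ret;

        ret = sound_manager_create_stream_information(SOUND_STREAM_TYPE_MEDIA,
                                                      NULL, NULL, &stream_info);
        if (ret != SOUND_MANAGER_ERROR_NONE)
            return WEBRTC_ERROR_INVALID_OPERATION;

        /* the new API: hook the audio source up to a local render path;
         * the caller keeps ownership of stream_info and destroys it at teardown */
        return webrtc_media_source_set_audio_loopback(webrtc, source_id, stream_info);
    }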
diff --git a/include/webrtc_private.h b/include/webrtc_private.h
index 74c3d3c2..66691b65 100644
--- a/include/webrtc_private.h
+++ b/include/webrtc_private.h
@@ -208,7 +208,10 @@ do { \
 } while (0)
 
 #define DEFAULT_ELEMENT_FAKESINK "fakesink"
+#define DEFAULT_ELEMENT_AUDIOCONVERT "audioconvert"
+#define DEFAULT_ELEMENT_AUDIORESAMPLE "audioresample"
 #define DEFAULT_ELEMENT_VIDEOCONVERT "videoconvert"
+#define DEFAULT_ELEMENT_CAPSFILTER "capsfilter"
 #define DEFAULT_VIDEO_SINK_ELEMENT "tizenwlsink"
 #define DEFAULT_AUDIO_SINK_ELEMENT "pulsesink"
 
@@ -566,6 +569,7 @@ int _add_rendering_sink_bin(webrtc_s *webrtc, GstPad *src_pad);
 int _add_forwarding_sink_bin(webrtc_s *webrtc, GstPad *src_pad, bool is_video);
 int _set_stream_info_to_sink(webrtc_s *webrtc, unsigned int track_id, sound_stream_info_h stream_info);
 int _set_display_to_sink(webrtc_s *webrtc, unsigned int track_id, unsigned int type, void *display);
+int _set_audio_loopback(webrtc_s *webrtc, unsigned int source_id, sound_stream_info_h stream_info);
 int _set_video_loopback(webrtc_s *webrtc, unsigned int source_id, unsigned int type, void *display);
 int _decodebin_autoplug_select_cb(GstElement *decodebin, GstPad *pad, GstCaps *caps, GstElementFactory *factory, gpointer user_data);
 bool _is_owner_of_track_build_context(webrtc_s *webrtc, unsigned int track_id);

diff --git a/packaging/capi-media-webrtc.spec b/packaging/capi-media-webrtc.spec
index 01c2799c..8cc4499e 100644
--- a/packaging/capi-media-webrtc.spec
+++ b/packaging/capi-media-webrtc.spec
@@ -1,6 +1,6 @@
 Name:       capi-media-webrtc
 Summary:    A WebRTC library in Tizen Native API
-Version:    0.2.44
+Version:    0.2.45
 Release:    0
 Group:      Multimedia/API
 License:    Apache-2.0

diff --git a/src/webrtc.c b/src/webrtc.c
index 0384723b..672b486a 100644
--- a/src/webrtc.c
+++ b/src/webrtc.c
@@ -733,6 +733,24 @@ int webrtc_unset_encoded_video_frame_cb(webrtc_h webrtc)
 	return WEBRTC_ERROR_NONE;
 }
 
+int webrtc_media_source_set_audio_loopback(webrtc_h webrtc, unsigned source_id, sound_stream_info_h stream_info)
+{
+	int ret = WEBRTC_ERROR_NONE;
+	webrtc_s *_webrtc = (webrtc_s*)webrtc;
+
+	RET_VAL_IF(_webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+	RET_VAL_IF(source_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "source_id is 0");
+	RET_VAL_IF(stream_info == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_info is NULL");
+
+	g_mutex_lock(&_webrtc->mutex);
+
+	ret = _set_audio_loopback(webrtc, source_id, stream_info);
+
+	g_mutex_unlock(&_webrtc->mutex);
+
+	return ret;
+}
+
 int webrtc_media_source_set_video_loopback(webrtc_h webrtc, unsigned source_id, webrtc_display_type_e type, webrtc_display_h display)
 {
 	int ret = WEBRTC_ERROR_NONE;
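The wrapper above follows the module's usual entry pattern: validate arguments with RET_VAL_IF, then do the real work under the handle-wide mutex. A generic stand-alone sketch of that pattern (names hypothetical, not from this codebase):

    #include <glib.h>

    typedef struct {
        GMutex mutex;   /* initialized elsewhere with g_mutex_init() */
        int value;      /* stand-in for the handle's real state */
    } handle_s;

    /* hypothetical setter: reject bad input first, then mutate under the lock */
    static int handle_set_value(handle_s *h, int value)
    {
        if (h == NULL)
            return -1;              /* cf. WEBRTC_ERROR_INVALID_PARAMETER */

        g_mutex_lock(&h->mutex);
        h->value = value;           /* cf. _set_audio_loopback() doing the work */
        g_mutex_unlock(&h->mutex);

        return 0;                   /* cf. WEBRTC_ERROR_NONE */
    }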
diff --git a/src/webrtc_display.c b/src/webrtc_display.c
index 6ce20615..803eb73d 100644
--- a/src/webrtc_display.c
+++ b/src/webrtc_display.c
@@ -139,7 +139,7 @@ static int __get_tbm_surface_format(int pixel_format, uint32_t *tbm_format)
 		*tbm_format = TBM_FORMAT_ARGB8888;
 		break;
 	default:
-		LOGE("not suppported pixel_format(%d)", pixel_format);
+		LOGE("not supported pixel_format(%d)", pixel_format);
 		return WEBRTC_ERROR_INVALID_PARAMETER;
 	}
 
diff --git a/src/webrtc_sink.c b/src/webrtc_sink.c
index 2a01dc27..999d2eff 100644
--- a/src/webrtc_sink.c
+++ b/src/webrtc_sink.c
@@ -19,10 +19,6 @@
 #include
 #include
 
-#define DEFAULT_ELEMENT_CAPSFILTER "capsfilter"
-#define DEFAULT_ELEMENT_AUDIOCONVERT "audioconvert"
-#define DEFAULT_ELEMENT_AUDIORESAMPLE "audioresample"
-
 bool _is_owner_of_track_build_context(webrtc_s *webrtc, unsigned int track_id)
 {
 	GMainContext *context;

diff --git a/src/webrtc_source.c b/src/webrtc_source.c
index dca3ff72..ec6e8d2c 100644
--- a/src/webrtc_source.c
+++ b/src/webrtc_source.c
@@ -34,8 +34,6 @@
 #define DEFAULT_ELEMENT_AUDIOTESTSRC "audiotestsrc"
 #define DEFAULT_ELEMENT_APPSRC "appsrc"
 #define DEFAULT_ELEMENT_SCREENSRC "waylandsrc"
-#define DEFAULT_ELEMENT_VIDEOCONVERT "videoconvert"
-#define DEFAULT_ELEMENT_CAPSFILTER "capsfilter"
 #define DEFAULT_ELEMENT_QUEUE "queue"
 #define DEFAULT_ELEMENT_VOLUME "volume"
 #define DEFAULT_ELEMENT_INPUT_SELECTOR "input-selector"
@@ -714,22 +712,25 @@ static GstPadProbeReturn __source_data_probe_cb(GstPad *pad, GstPadProbeInfo *in
 
 	switch (probe_data->av_idx) {
 	case AV_IDX_AUDIO:
-		/* TODO: implementation */
+		if (!probe_data->source->sound_stream_info.type)
+			return GST_PAD_PROBE_OK;
 		break;
 	case AV_IDX_VIDEO:
 		if (!probe_data->source->display)
 			return GST_PAD_PROBE_OK;
-		appsrc = probe_data->source->av[probe_data->av_idx].render.appsrc;
-		if (appsrc) {
-			buffer = gst_pad_probe_info_get_buffer(info);
-			LOG_DEBUG("push buffer[%p] to the render pipeline, appsrc[%p]", buffer, appsrc);
-			g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
-			if (gst_ret != GST_FLOW_OK)
-				LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
-		}
 		break;
 	default:
-		break;
+		LOG_ERROR_IF_REACHED("av_idx(%d)", probe_data->av_idx);
+		return GST_PAD_PROBE_OK;
+	}
+
+	appsrc = probe_data->source->av[probe_data->av_idx].render.appsrc;
+	if (appsrc) {
+		buffer = gst_pad_probe_info_get_buffer(info);
+		LOG_DEBUG("push buffer[%p] to the render pipeline, appsrc[%p]", buffer, appsrc);
+		g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
+		if (gst_ret != GST_FLOW_OK)
+			LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
 	}
 
 	return GST_PAD_PROBE_OK;
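The probe above is the heart of the loopback path: every buffer flowing out of the source is also mirrored into a separate render pipeline through its appsrc. A self-contained sketch of that mechanism (illustrative names; not the module's actual code):

    #include <gst/gst.h>

    /* appsrc of a separate, already-running render pipeline */
    static GstElement *render_appsrc;

    /* attached with:
     *   gst_pad_add_probe(src_pad, GST_PAD_PROBE_TYPE_BUFFER,
     *                     mirror_probe_cb, NULL, NULL);                  */
    static GstPadProbeReturn mirror_probe_cb(GstPad *pad, GstPadProbeInfo *info, gpointer user_data)
    {
        GstBuffer *buffer = gst_pad_probe_info_get_buffer(info);
        GstFlowReturn flow = GST_FLOW_OK;

        /* "push-buffer" takes its own reference, so the original buffer
         * keeps flowing downstream to the remote peer untouched.
         * GstFlowReturn error values are negative, which is why this
         * commit switches the log format from 0x%x to %d. */
        g_signal_emit_by_name(render_appsrc, "push-buffer", buffer, &flow, NULL);
        if (flow != GST_FLOW_OK)
            g_printerr("push-buffer failed: %d\n", flow);

        return GST_PAD_PROBE_OK; /* never block the sending path */
    }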
@@ -1893,6 +1894,9 @@ void _source_slot_destroy_cb(gpointer data)
 	if (source->display)
 		_release_display(source->display);
 
+	if (source->sound_stream_info.type)
+		free(source->sound_stream_info.type);
+
 	g_free(source);
 }
 
@@ -2582,7 +2586,7 @@ int _push_media_packet(webrtc_s *webrtc, unsigned int source_id, media_packet_h
 		LOG_INFO("packet is NULL, emit EOS signal");
 		g_signal_emit_by_name(G_OBJECT(appsrc), "end-of-stream", &gst_ret, NULL);
 		if (gst_ret != GST_FLOW_OK) {
-			LOG_ERROR("failed to 'end-of-stream', gst_ret[0x%x]", gst_ret);
+			LOG_ERROR("failed to 'end-of-stream', gst_ret[%d]", gst_ret);
 			return WEBRTC_ERROR_INVALID_OPERATION;
 		}
 		return WEBRTC_ERROR_NONE;
@@ -2597,7 +2601,7 @@ int _push_media_packet(webrtc_s *webrtc, unsigned int source_id, media_packet_h
 
 	g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
 	if (gst_ret != GST_FLOW_OK) {
-		LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
+		LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
 		gst_buffer_unref(buffer);
 		return WEBRTC_ERROR_INVALID_OPERATION;
 	}
@@ -2621,7 +2625,7 @@ int _push_media_packet(webrtc_s *webrtc, unsigned int source_id, media_packet_h
 		LOG_DEBUG("external gst buffer[%p]", buffer);
 		g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
 		if (gst_ret != GST_FLOW_OK) {
-			LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
+			LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
 			return WEBRTC_ERROR_INVALID_OPERATION;
 		}
 		media_packet_destroy(packet);
@@ -2669,7 +2673,7 @@ int _push_media_packet(webrtc_s *webrtc, unsigned int source_id, media_packet_h
 	g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
 	if (gst_ret != GST_FLOW_OK) {
-		LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
+		LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
 		ret = WEBRTC_ERROR_INVALID_OPERATION;
 	}
 
@@ -2944,6 +2948,68 @@ int _get_audio_mute(webrtc_s *webrtc, unsigned int source_id, bool *muted)
 	return WEBRTC_ERROR_NONE;
 }
 
+static int __build_loopback_audiosink(webrtc_gst_slot_s *source, GstElement *link_with)
+{
+	webrtc_s *webrtc;
+	GstElement *audiosink;
+	GstElement *audioconvert;
+	GstElement *audioresample;
+	int ret = WEBRTC_ERROR_NONE;
+
+	RET_VAL_IF(source == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "source is NULL");
+	RET_VAL_IF(link_with == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "link_with is NULL");
+	RET_VAL_IF(source->webrtc == NULL, WEBRTC_ERROR_INVALID_OPERATION, "webrtc is NULL");
+
+	webrtc = source->webrtc;
+
+	if (!(audiosink = _create_element(webrtc->ini.rendering_sink.a_sink_element, NULL)))
+		return WEBRTC_ERROR_INVALID_OPERATION;
+
+	if (g_object_class_find_property(G_OBJECT_GET_CLASS(G_OBJECT(audiosink)), "stream-properties")) {
+		if (source->sound_stream_info.type) {
+			ret = _apply_stream_info(audiosink, source->sound_stream_info.type, source->sound_stream_info.index);
+			if (ret != WEBRTC_ERROR_NONE) {
+				SAFE_GST_OBJECT_UNREF(audiosink);
+				return WEBRTC_ERROR_INVALID_OPERATION;
+			}
+		}
+	}
+
+	if (!(audioconvert = _create_element(DEFAULT_ELEMENT_AUDIOCONVERT, NULL))) {
+		SAFE_GST_OBJECT_UNREF(audiosink);
+		return WEBRTC_ERROR_INVALID_OPERATION;
+	}
+
+	if (!(audioresample = _create_element(DEFAULT_ELEMENT_AUDIORESAMPLE, NULL))) {
+		SAFE_GST_OBJECT_UNREF(audiosink);
+		SAFE_GST_OBJECT_UNREF(audioconvert);
+		return WEBRTC_ERROR_INVALID_OPERATION;
+	}
+
+	gst_bin_add_many(GST_BIN(source->av[AV_IDX_AUDIO].render.pipeline), audioconvert, audioresample, audiosink, NULL);
+
+	if (!gst_element_sync_state_with_parent(audioconvert)) {
+		LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(audioconvert));
+		return WEBRTC_ERROR_INVALID_OPERATION;
+	}
+
+	if (!gst_element_sync_state_with_parent(audioresample)) {
+		LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(audioresample));
+		return WEBRTC_ERROR_INVALID_OPERATION;
+	}
+
+	if (!gst_element_sync_state_with_parent(audiosink)) {
+		LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(audiosink));
+		return WEBRTC_ERROR_INVALID_OPERATION;
+	}
+
+	if (!gst_element_link_many(link_with, audioconvert, audioresample, audiosink, NULL)) {
+		LOG_ERROR("failed to gst_element_link_many()");
+		return WEBRTC_ERROR_INVALID_OPERATION;
+	}
+
+	return WEBRTC_ERROR_NONE;
+}
+
 static int __build_loopback_videosink(webrtc_gst_slot_s *source, GstElement *link_with)
 {
 	webrtc_s *webrtc;
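The branch built above amounts to appsrc ! audioconvert ! audioresample ! audiosink, with the sink's stream-properties set from the caller's sound stream info. For reference, a minimal free-standing pipeline demonstrating the same chain (pulsesink is the module's default audio sink per the defines in webrtc_private.h; this is an illustration, not the module's code):

    #include <gst/gst.h>

    int main(int argc, char **argv)
    {
        gst_init(&argc, &argv);

        GstElement *pipeline = gst_pipeline_new("loopback-sketch");
        GstElement *appsrc   = gst_element_factory_make("appsrc", NULL);
        GstElement *convert  = gst_element_factory_make("audioconvert", NULL);
        GstElement *resample = gst_element_factory_make("audioresample", NULL);
        GstElement *sink     = gst_element_factory_make("pulsesink", NULL);

        if (!pipeline || !appsrc || !convert || !resample || !sink)
            return -1;

        gst_bin_add_many(GST_BIN(pipeline), appsrc, convert, resample, sink, NULL);
        if (!gst_element_link_many(appsrc, convert, resample, sink, NULL))
            return -1;

        /* a real program would also set the "caps" property on appsrc;
         * buffers pushed into it (e.g. from a pad probe as sketched
         * earlier) are then converted/resampled and rendered locally */
        gst_element_set_state(pipeline, GST_STATE_PLAYING);

        /* ... push buffers, run a main loop, then shut down ... */
        gst_element_set_state(pipeline, GST_STATE_NULL);
        gst_object_unref(pipeline);
        return 0;
    }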
LOG_ERROR("failed to gst_element_link_many()"); - goto error; + return WEBRTC_ERROR_INVALID_OPERATION; } return WEBRTC_ERROR_NONE; - -error: - SAFE_GST_OBJECT_UNREF(source->av[AV_IDX_VIDEO].render.pipeline); - return WEBRTC_ERROR_INVALID_OPERATION; } static void __loopback_decodebin_pad_added_cb(GstElement *decodebin, GstPad *new_pad, gpointer user_data) @@ -3037,11 +3099,15 @@ static void __loopback_decodebin_pad_added_cb(GstElement *decodebin, GstPad *new media_type = gst_structure_get_name(gst_caps_get_structure(gst_pad_get_current_caps(new_pad), 0)); LOG_INFO("source_id[%u], media_type[%s], new_pad[%s]", source->id, media_type, GST_PAD_NAME(new_pad)); - if (g_strrstr(media_type, "video")) { - ret = __build_loopback_videosink(source, decodebin); + if (g_strrstr(media_type, "audio")) { + ret = __build_loopback_audiosink(source, decodebin); + if (ret != WEBRTC_ERROR_NONE) + SAFE_GST_OBJECT_UNREF(source->av[AV_IDX_AUDIO].render.pipeline); - } else if (g_strrstr(media_type, "audio")) { - /* TODO : Implementation */ + } else if (g_strrstr(media_type, "video")) { + ret = __build_loopback_videosink(source, decodebin); + if (ret != WEBRTC_ERROR_NONE) + SAFE_GST_OBJECT_UNREF(source->av[AV_IDX_VIDEO].render.pipeline); } else { LOG_ERROR("not supported media type[%s]", media_type); @@ -3062,8 +3128,10 @@ static int __build_loopback_render_pipeline(webrtc_s *webrtc, webrtc_gst_slot_s RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL"); RET_VAL_IF(source == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "source is NULL"); - RET_VAL_IF(source->display == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display is NULL"); - RET_VAL_IF(source->display->object == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display->object is NULL"); + if (type == MEDIA_TYPE_VIDEO) { + RET_VAL_IF(source->display == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display is NULL"); + RET_VAL_IF(source->display->object == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display->object is NULL"); + } pipeline_name = g_strdup_printf("webrtc-source_%u-%s-render-pipeline", source->id, type == MEDIA_TYPE_AUDIO ? 
"audio" : "video"); source->av[idx].render.pipeline = gst_pipeline_new(pipeline_name); @@ -3097,8 +3165,15 @@ static int __build_loopback_render_pipeline(webrtc_s *webrtc, webrtc_gst_slot_s g_signal_connect(decodebin, "autoplug-select", G_CALLBACK(_decodebin_autoplug_select_cb), webrtc); } else { + int ret = WEBRTC_ERROR_NONE; + gst_bin_add(GST_BIN(source->av[idx].render.pipeline), appsrc); - if (__build_loopback_videosink(source, appsrc) != WEBRTC_ERROR_NONE) { + + if (type == MEDIA_TYPE_AUDIO) + ret = __build_loopback_audiosink(source, appsrc); + else + ret = __build_loopback_videosink(source, appsrc); + if (ret != WEBRTC_ERROR_NONE) { SAFE_GST_OBJECT_UNREF(appsrc); goto error; } @@ -3118,6 +3193,49 @@ error: return WEBRTC_ERROR_INVALID_OPERATION; } +int _set_audio_loopback(webrtc_s *webrtc, unsigned int source_id, sound_stream_info_h stream_info) +{ + webrtc_gst_slot_s *source; + char *stream_type; + int stream_index; + bool available; + int ret = SOUND_MANAGER_ERROR_NONE; + + RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL"); + RET_VAL_IF(source_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "source_id is 0"); + + RET_VAL_IF((source = _get_slot_by_id(webrtc->gst.source_slots, source_id)) == NULL, + WEBRTC_ERROR_INVALID_PARAMETER, "could not find source"); + RET_VAL_IF((source->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_PARAMETER, + "invalid media_type for source[media_types:0x%x, id:%u]", source->media_types, source_id); + RET_VAL_IF((source->type == WEBRTC_MEDIA_SOURCE_TYPE_MEDIA_PACKET), WEBRTC_ERROR_INVALID_PARAMETER, + "this API does not support the media packet source"); + RET_VAL_IF(stream_info == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_info is NULL"); + + LOG_INFO("source_id[%u] stream_info[%p]", source_id, stream_info); + + sound_manager_get_type_from_stream_information(stream_info, &stream_type); + sound_manager_get_index_from_stream_information(stream_info, &stream_index); + + ret = sound_manager_is_available_stream_information(stream_info, NATIVE_API_WEBRTC, &available); + if (ret != SOUND_MANAGER_ERROR_NONE) { + LOG_ERROR("failed to sound_manager_is_available_stream_information()"); + return WEBRTC_ERROR_INVALID_OPERATION; + } + + if (!available) { + LOG_ERROR("this stream info[%p, type:%s, index:%d] is not allowed to this framework", stream_info, stream_type, stream_index); + return WEBRTC_ERROR_INVALID_PARAMETER; + } + + source->sound_stream_info.type = strdup(stream_type); + source->sound_stream_info.index = stream_index; + + LOG_INFO("source_id[%u] stream_info[%p, type:%s, index:%d]", source_id, stream_info, stream_type, stream_index); + + return __build_loopback_render_pipeline(webrtc, source, MEDIA_TYPE_AUDIO); +} + int _set_video_loopback(webrtc_s *webrtc, unsigned int source_id, unsigned int type, void *display) { int ret = WEBRTC_ERROR_NONE; diff --git a/test/webrtc_test.c b/test/webrtc_test.c index 456b49e0..36e7f021 100644 --- a/test/webrtc_test.c +++ b/test/webrtc_test.c @@ -72,6 +72,7 @@ enum { CURRENT_STATUS_MEDIA_PACKET_SOURCE_UNSET_BUFFER_STATE_CHANGED_CB, CURRENT_STATUS_MEDIA_PACKET_SOURCE_SET_FORMAT, CURRENT_STATUS_SET_DISPLAY_TYPE, + CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK, CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK, CURRENT_STATUS_DATA_CHANNEL_SEND_STRING, CURRENT_STATUS_DATA_CHANNEL_SEND_STRING_AS_BYTES, @@ -474,16 +475,6 @@ static void _webrtc_stop(int index) g_conns[index].recv_channels[i] = NULL; } - if (g_conns[index].source.stream_info) { - 
diff --git a/test/webrtc_test.c b/test/webrtc_test.c
index 456b49e0..36e7f021 100644
--- a/test/webrtc_test.c
+++ b/test/webrtc_test.c
@@ -72,6 +72,7 @@ enum {
 	CURRENT_STATUS_MEDIA_PACKET_SOURCE_UNSET_BUFFER_STATE_CHANGED_CB,
 	CURRENT_STATUS_MEDIA_PACKET_SOURCE_SET_FORMAT,
 	CURRENT_STATUS_SET_DISPLAY_TYPE,
+	CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK,
 	CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK,
 	CURRENT_STATUS_DATA_CHANNEL_SEND_STRING,
 	CURRENT_STATUS_DATA_CHANNEL_SEND_STRING_AS_BYTES,
@@ -474,16 +475,6 @@ static void _webrtc_stop(int index)
 		g_conns[index].recv_channels[i] = NULL;
 	}
 
-	if (g_conns[index].source.stream_info) {
-		sound_manager_destroy_stream_information(g_conns[index].source.stream_info);
-		g_conns[index].source.stream_info = NULL;
-	}
-
-	if (g_conns[index].render.stream_info) {
-		sound_manager_destroy_stream_information(g_conns[index].render.stream_info);
-		g_conns[index].render.stream_info = NULL;
-	}
-
 #ifdef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
 	if (g_conns[index].render_pipeline) {
 		GstStateChangeReturn state_change_ret = gst_element_set_state(g_conns[index].render_pipeline, GST_STATE_NULL);
@@ -610,6 +601,11 @@ static void _webrtc_add_media_source(int index, int value)
 	RET_IF(ret != WEBRTC_ERROR_NONE, "ret[0x%x]", ret);
 
 	if (type == WEBRTC_MEDIA_SOURCE_TYPE_MIC) {
+		if (g_conns[index].source.stream_info) {
+			sound_manager_destroy_stream_information(g_conns[index].source.stream_info);
+			g_conns[index].source.stream_info = NULL;
+		}
+
 		if (__get_sound_stream_info(&g_conns[index].source.stream_info) < 0) {
 			g_printerr("failed to __get_sound_stream_info()\n");
 
@@ -833,6 +829,21 @@ static void _webrtc_set_display_type(int index, int type)
 	g_print("display type[%d] is set, it'll be applied when starting rendering video.\n", type);
 }
 
+static void _webrtc_media_source_set_audio_loopback(int index, unsigned int source_id)
+{
+	int ret = WEBRTC_ERROR_NONE;
+
+	if (!g_conns[index].render.stream_info) {
+		ret = sound_manager_create_stream_information(SOUND_STREAM_TYPE_MEDIA, NULL, NULL, &g_conns[index].render.stream_info);
+		RET_IF(ret != SOUND_MANAGER_ERROR_NONE, "failed to sound_manager_create_stream_information(), ret[0x%x]", ret);
+	}
+
+	ret = webrtc_media_source_set_audio_loopback(g_conns[index].webrtc, source_id, g_conns[index].render.stream_info);
+	RET_IF(ret != WEBRTC_ERROR_NONE, "ret[0x%x]", ret);
+
+	g_print("webrtc_media_source_set_audio_loopback() success, source_id[%u]\n", source_id);
+}
+
 static void _webrtc_media_source_set_video_loopback(int index, unsigned int source_id)
 {
 	int ret = WEBRTC_ERROR_NONE;
@@ -3096,6 +3107,9 @@ void _interpret_main_menu(char *cmd)
 		} else if (strncmp(cmd, "dt", 2) == 0) {
 			g_conns[g_conn_index].menu_state = CURRENT_STATUS_SET_DISPLAY_TYPE;
 
+		} else if (strncmp(cmd, "al", 2) == 0) {
+			g_conns[g_conn_index].menu_state = CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK;
+
 		} else if (strncmp(cmd, "vl", 2) == 0) {
 			g_conns[g_conn_index].menu_state = CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK;
 
@@ -3353,6 +3367,7 @@ void display_sub_basic()
 	g_print("gd. Get transceiver direction\n");
 	g_print("sf. Set media format to media packet source\n");
 	g_print("dt. Set display type\n");
+	g_print("al. Set audio loopback\t");
 	g_print("vl. Set video loopback\n");
 	g_print("cd. Create data channel\t");
 	g_print("dd. Destroy data channel\n");
@@ -3479,6 +3494,9 @@ static void displaymenu()
 	} else if (g_conns[g_conn_index].menu_state == CURRENT_STATUS_SET_DISPLAY_TYPE) {
 		g_print("*** input display type.(1:overlay, 2:evas)\n");
 
+	} else if (g_conns[g_conn_index].menu_state == CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK) {
+		g_print("*** input source id.\n");
+
 	} else if (g_conns[g_conn_index].menu_state == CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK) {
 		g_print("*** input source id.\n");
 
@@ -3738,6 +3756,12 @@ static void interpret(char *cmd)
 			reset_menu_state();
 			break;
 		}
+		case CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK: {
+			value = atoi(cmd);
+			_webrtc_media_source_set_audio_loopback(g_conn_index, value);
+			reset_menu_state();
+			break;
+		}
 		case CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK: {
 			value = atoi(cmd);
 			_webrtc_media_source_set_video_loopback(g_conn_index, value);