From: Sangchul Lee
Date: Fri, 2 Jul 2021 01:30:25 +0000 (+0900)
Subject: Add API to set sound stream info. to audio track received by the remote peer
X-Git-Tag: submit/tizen/20210729.023123~30
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ecafca77520feb0e46e759d58ebc6fff8ebfd1b5;p=platform%2Fcore%2Fapi%2Fwebrtc.git

Add API to set sound stream info. to audio track received by the remote peer

webrtc_set_sound_stream_info() is added.

When this new API is called with a stream info handle, the audio policy of
the audio track, including routing and volume, is controlled by that handle.

[Version] 0.2.30
[Issue Type] API

Change-Id: I3ba47c6f84d00023ef2b0bf09511a6d019444e20
Signed-off-by: Sangchul Lee
---
diff --git a/include/webrtc.h b/include/webrtc.h
index 83b30813..4f3acb4b 100644
--- a/include/webrtc.h
+++ b/include/webrtc.h
@@ -958,7 +958,30 @@ int webrtc_media_packet_source_push_packet(webrtc_h webrtc, unsigned int source_
  */
 
 /**
- * @brief Set a display to the media track.
+ * @brief Sets a sound manager stream information to the audio track.
+ * @since_tizen 6.5
+ * @remarks Call this function within webrtc_track_added_cb(), otherwise #WEBRTC_ERROR_INVALID_OPERATION will be returned.\n
+ * If webrtc_set_encoded_audio_frame_cb() has been called, it will return #WEBRTC_ERROR_INVALID_OPERATION.\n
+ * The following sound stream types can be used for the @a stream_info:\n
+ * #SOUND_STREAM_TYPE_MEDIA\n
+ * #SOUND_STREAM_TYPE_VOIP\n
+ * #SOUND_STREAM_TYPE_MEDIA_EXTERNAL_ONLY
+ * @param[in] webrtc WebRTC handle
+ * @param[in] track_id The track id
+ * @param[in] stream_info The sound stream information
+ * @return @c 0 on success,
+ * otherwise a negative error value
+ * @retval #WEBRTC_ERROR_NONE Successful
+ * @retval #WEBRTC_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #WEBRTC_ERROR_INVALID_OPERATION Invalid operation
+ * @pre webrtc_track_added_cb() must be set by calling webrtc_set_track_added_cb().
+ * @see webrtc_set_track_added_cb()
+ * @see webrtc_unset_track_added_cb()
+ */
+int webrtc_set_sound_stream_info(webrtc_h webrtc, unsigned int track_id, sound_stream_info_h stream_info);
+
+/**
+ * @brief Sets a display to the video track.
  * @since_tizen 6.5
  * @remarks Call this function within webrtc_track_added_cb(), otherwise #WEBRTC_ERROR_INVALID_OPERATION will be returned.\n
  * If webrtc_set_encoded_video_frame_cb() has been called, it will return #WEBRTC_ERROR_INVALID_OPERATION.
@@ -1540,7 +1563,7 @@ int webrtc_unset_data_channel_cb(webrtc_h webrtc);
  * 'max-retransmits' of type int : The number of times data will be attempted to be transmitted without acknowledgement before dropping. The default value is -1.\n
  * 'protocol' of type string : The subprotocol used by this channel. The default value is NULL.\n
  * 'id' of type int : Override the default identifier selection of this channel. The default value is -1.\n
- * 'priority' of type int : The priority to use for this channel(1:very low, 2:low, 3:medium, 4:high). The default value is 2.\n
+ * 'priority' of type int : The priority to use for this channel(1:very low, 2:low, 3:medium, 4:high). The default value is 2.
  * @param[in] webrtc WebRTC handle
  * @param[in] label Name for the channel
  * @param[in] options Configuration options for creating the data channel (optional, this can be NULL)
diff --git a/include/webrtc_private.h b/include/webrtc_private.h
index d0e1e41d..13c88a0d 100644
--- a/include/webrtc_private.h
+++ b/include/webrtc_private.h
@@ -207,6 +207,9 @@ do { \
     g_free(dot_name); \
 } while (0)
 
+#define DEFAULT_VIDEO_SINK_ELEMENT "tizenwlsink"
+#define DEFAULT_AUDIO_SINK_ELEMENT "pulsesink"
+
 #define MEDIA_TYPE_AUDIO_RAW "audio/x-raw"
 #define MEDIA_TYPE_AUDIO_OPUS "audio/x-opus"
 #define MEDIA_TYPE_AUDIO_VORBIS "audio/x-vorbis"
@@ -435,6 +438,10 @@ typedef struct _webrtc_gst_slot_s {
         int width;
         int height;
     } video_info;
+    struct {
+        char *type;
+        int index;
+    } sound_stream_info;
     media_format_h media_format;
     bool zerocopy_enabled;
     GstAllocator *allocator;
@@ -526,6 +533,7 @@ int _set_video_mute(webrtc_s *webrtc, unsigned int source_id, bool mute);
 int _get_video_mute(webrtc_s *webrtc, unsigned int source_id, bool *muted);
 int _set_video_resolution(webrtc_s *webrtc, unsigned int source_id, int width, int height);
 int _get_video_resolution(webrtc_s *webrtc, unsigned int source_id, int *width, int *height);
+int _apply_stream_info(GstElement *element, const char *stream_type, int stream_index);
 int _set_sound_stream_info(webrtc_s *webrtc, unsigned int source_id, sound_stream_info_h stream_info);
 int _set_media_format(webrtc_s *webrtc, unsigned int source_id, media_format_h format);
 bool _check_if_format_is_set_to_packet_sources(webrtc_s *webrtc);
@@ -543,6 +551,7 @@ int _add_no_target_ghostpad_to_slot(webrtc_gst_slot_s *slot, bool is_src, GstPad
 int _set_ghost_pad_target(GstPad *ghost_pad, GstElement *target_element, bool is_src);
 int _add_rendering_sink_bin(webrtc_s *webrtc, GstPad *src_pad);
 int _add_forwarding_sink_bin(webrtc_s *webrtc, GstPad *src_pad, bool is_video);
+int _set_stream_info_to_sink(webrtc_s *webrtc, unsigned int track_id, sound_stream_info_h stream_info);
 int _set_display_to_sink(webrtc_s *webrtc, unsigned int track_id, unsigned int type, void *display);
 bool _is_owner_of_track_build_context(webrtc_s *webrtc, unsigned int track_id);
 void _track_build_context_destroy_cb(gpointer data);
diff --git a/packaging/capi-media-webrtc.spec b/packaging/capi-media-webrtc.spec
index f00ffad4..06ddbac1 100644
--- a/packaging/capi-media-webrtc.spec
+++ b/packaging/capi-media-webrtc.spec
@@ -1,6 +1,6 @@
 Name: capi-media-webrtc
 Summary: A WebRTC library in Tizen Native API
-Version: 0.2.29
+Version: 0.2.30
 Release: 0
 Group: Multimedia/API
 License: Apache-2.0
diff --git a/src/webrtc.c b/src/webrtc.c
index 42f499b0..d8df02e9 100644
--- a/src/webrtc.c
+++ b/src/webrtc.c
@@ -596,6 +596,29 @@ int webrtc_media_packet_source_push_packet(webrtc_h webrtc, unsigned int source_
     return ret;
 }
 
+int webrtc_set_sound_stream_info(webrtc_h webrtc, unsigned int track_id, sound_stream_info_h stream_info)
+{
+    int ret = WEBRTC_ERROR_NONE;
+    webrtc_s *_webrtc = (webrtc_s*)webrtc;
+
+    RET_VAL_IF(_webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+    RET_VAL_IF(track_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "track id is 0");
+    RET_VAL_IF(stream_info == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_info is NULL");
+
+    g_mutex_lock(&_webrtc->mutex);
+
+    RET_VAL_WITH_UNLOCK_IF(_webrtc->track_added_cb.callback == NULL, WEBRTC_ERROR_INVALID_OPERATION, &_webrtc->mutex, "track added callback was not set");
+    RET_VAL_WITH_UNLOCK_IF(_webrtc->encoded_audio_frame_cb.callback, WEBRTC_ERROR_INVALID_OPERATION, &_webrtc->mutex, "encoded audio frame callback was set");
+    RET_VAL_WITH_UNLOCK_IF(!_is_owner_of_track_build_context(_webrtc, track_id), WEBRTC_ERROR_INVALID_OPERATION, &_webrtc->mutex,
+        "this function should be called within the track added callback");
+
+    ret = _set_stream_info_to_sink(webrtc, track_id, stream_info);
+
+    g_mutex_unlock(&_webrtc->mutex);
+
+    return ret;
+}
+
 int webrtc_set_display(webrtc_h webrtc, unsigned int track_id, webrtc_display_type_e type, webrtc_display_h display)
 {
     int ret = WEBRTC_ERROR_NONE;
@@ -614,8 +637,6 @@ int webrtc_set_display(webrtc_h webrtc, unsigned int track_id, webrtc_display_ty
         "this function should be called within the track added callback");
 
     ret = _set_display_to_sink(webrtc, track_id, (unsigned int)type, (void *)display);
-    if (ret == WEBRTC_ERROR_NONE)
-        LOG_INFO("track_id[%u] type[%d] display[%p]", track_id, type, display);
 
     g_mutex_unlock(&_webrtc->mutex);
 
diff --git a/src/webrtc_ini.c b/src/webrtc_ini.c
index b02b0802..25548d70 100644
--- a/src/webrtc_ini.c
+++ b/src/webrtc_ini.c
@@ -85,9 +85,6 @@
 #define INI_ITEM_AUDIO_HW_DECODER_ELEMENTS "audio hw decoder elements"
 #define INI_ITEM_VIDEO_HW_DECODER_ELEMENTS "video hw decoder elements"
 
-#define DEFAULT_VIDEO_SINK_ELEMENT "tizenwlsink"
-#define DEFAULT_AUDIO_SINK_ELEMENT "pulsesink"
-
 /* items for resource acquisition */
 #define INI_ITEM_RESOURCE_CAMERA "camera"
 #define INI_ITEM_RESOURCE_VIDEO_ENCODER "video encoder"
diff --git a/src/webrtc_private.c b/src/webrtc_private.c
index 13de5aca..13d13757 100644
--- a/src/webrtc_private.c
+++ b/src/webrtc_private.c
@@ -1488,3 +1488,25 @@ int _webrtc_stop(webrtc_s *webrtc)
 
     return ret;
 }
+
+int _apply_stream_info(GstElement *element, const char *stream_type, int stream_index)
+{
+    GstStructure *structure;
+    char values[64] = {'\0',};
+
+    RET_VAL_IF(element == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "element is NULL");
+    RET_VAL_IF(stream_type == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_type is NULL");
+    RET_VAL_IF(!g_object_class_find_property(G_OBJECT_GET_CLASS(G_OBJECT(element)), "stream-properties"),
+        WEBRTC_ERROR_INVALID_OPERATION, "could not find 'stream-properties'");
+
+    snprintf(values, sizeof(values) - 1, "props,media.role=%s, media.parent_id=%d", stream_type, stream_index);
+    RET_VAL_IF(!(structure = gst_structure_from_string(values, NULL)),
+        WEBRTC_ERROR_INVALID_OPERATION, "failed to gst_structure_from_string(), [%s]", values);
+
+    LOG_INFO("stream-properties[%s]", values);
+
+    g_object_set(G_OBJECT(element), "stream-properties", structure, NULL);
+    gst_structure_free(structure);
+
+    return WEBRTC_ERROR_NONE;
+}
\ No newline at end of file
diff --git a/src/webrtc_sink.c b/src/webrtc_sink.c
index 65e06c72..caed09bd 100644
--- a/src/webrtc_sink.c
+++ b/src/webrtc_sink.c
@@ -132,7 +132,7 @@ static int __build_videosink(webrtc_s *webrtc, GstElement *decodebin, GstPad *sr
     RET_VAL_IF(sink == NULL, WEBRTC_ERROR_INVALID_OPERATION, "could not find an item by [%s] in sink slots", GST_ELEMENT_NAME(decodebin));
     RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
 
-    sink->media_types |= MEDIA_TYPE_VIDEO;
+    sink->media_types = MEDIA_TYPE_VIDEO;
 
     if (!(videosink_factory_name = __get_videosink_factory_name(sink->display, &webrtc->ini, &display_is_set)))
         return WEBRTC_ERROR_INVALID_OPERATION;
@@ -189,6 +189,7 @@ static int __build_audiosink(webrtc_s *webrtc, GstElement *decodebin, GstPad *sr
     GstElement *audioconvert;
     GstElement *audioresample;
     GstElement *audiosink;
+    int ret = WEBRTC_ERROR_NONE;
 
     RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
     RET_VAL_IF(decodebin == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "decodebin is NULL");
@@ -198,7 +199,7 @@ static int __build_audiosink(webrtc_s *webrtc, GstElement *decodebin, GstPad *sr
     RET_VAL_IF(sink == NULL, WEBRTC_ERROR_INVALID_OPERATION, "could not find an item by [%s] in sink slots", GST_ELEMENT_NAME(decodebin));
     RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
 
-    sink->media_types |= MEDIA_TYPE_AUDIO;
+    sink->media_types = MEDIA_TYPE_AUDIO;
 
     if (!(audioconvert = _create_element(DEFAULT_ELEMENT_AUDIOCONVERT, NULL))) {
         LOG_ERROR("failed to create audioconvert");
@@ -215,6 +216,14 @@ static int __build_audiosink(webrtc_s *webrtc, GstElement *decodebin, GstPad *sr
         return WEBRTC_ERROR_INVALID_OPERATION;
     }
 
+    if (g_object_class_find_property(G_OBJECT_GET_CLASS(G_OBJECT(audiosink)), "stream-properties")) {
+        if (sink->sound_stream_info.type) {
+            ret = _apply_stream_info(audiosink, sink->sound_stream_info.type, sink->sound_stream_info.index);
+            if (ret != WEBRTC_ERROR_NONE) /* FIXME: unref all the created elements */
+                return WEBRTC_ERROR_INVALID_OPERATION;
+        }
+    }
+
     gst_bin_add_many(sink->bin, audioconvert, audioresample, audiosink, NULL);
 
     if (!gst_element_sync_state_with_parent(audioconvert)) {
@@ -436,6 +445,9 @@ void _sink_slot_destroy_cb(gpointer data)
     if (sink->display)
         _release_display(sink->display);
 
+    if (sink->sound_stream_info.type)
+        free(sink->sound_stream_info.type);
+
     g_free(sink);
 }
 
@@ -866,6 +878,57 @@ error_before_insert:
     return WEBRTC_ERROR_INVALID_OPERATION;
 }
 
+int _set_stream_info_to_sink(webrtc_s *webrtc, unsigned int track_id, sound_stream_info_h stream_info)
+{
+    webrtc_gst_slot_s *sink;
+    gchar *track_name;
+    char *stream_type;
+    int stream_index;
+    bool available;
+    int ret;
+
+    RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+    RET_VAL_IF(track_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "track id is 0");
+    RET_VAL_IF(stream_info == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_info is NULL");
+
+    track_name = g_strdup_printf("track_%u", track_id);
+
+    sink = __find_sink_slot(webrtc, track_name);
+    if (sink == NULL) {
+        LOG_ERROR("could not find an item by [%s] in sink slots", track_name);
+        g_free(track_name);
+        return WEBRTC_ERROR_INVALID_PARAMETER;
+    }
+    g_free(track_name);
+
+    RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
+    RET_VAL_IF(sink->encoded_frame_cb != NULL, WEBRTC_ERROR_INVALID_OPERATION, "it may be a forwarding sink for encoded frame callback");
+    RET_VAL_IF((sink->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_OPERATION, "it's not a audio track");
+    RET_VAL_IF(strcmp(webrtc->ini.rendering_sink.a_sink_element, DEFAULT_AUDIO_SINK_ELEMENT), WEBRTC_ERROR_INVALID_OPERATION,
+        "it requires [%s] as an audio renderer", DEFAULT_AUDIO_SINK_ELEMENT);
+
+    sound_manager_get_type_from_stream_information(stream_info, &stream_type);
+    sound_manager_get_index_from_stream_information(stream_info, &stream_index);
+
+    ret = sound_manager_is_available_stream_information(stream_info, NATIVE_API_WEBRTC, &available);
+    if (ret != SOUND_MANAGER_ERROR_NONE) {
+        LOG_ERROR("failed to sound_manager_is_available_stream_information()");
+        return WEBRTC_ERROR_INVALID_OPERATION;
+    }
+
+    if (!available) {
+        LOG_ERROR("this stream info[%p, type:%s, index:%d] is not allowed to this framework", stream_info, stream_type, stream_index);
+        return WEBRTC_ERROR_INVALID_PARAMETER;
+    }
+
+    sink->sound_stream_info.type = strdup(stream_type);
+    sink->sound_stream_info.index = stream_index;
+
+    LOG_INFO("track_id[%u] stream_info[%p, type:%s, index:%d]", track_id, stream_info, stream_type, stream_index);
+
+    return WEBRTC_ERROR_NONE;
+}
+
 int _set_display_to_sink(webrtc_s *webrtc, unsigned int track_id, unsigned int type, void *display)
 {
     webrtc_gst_slot_s *sink;
@@ -894,7 +957,7 @@ int _set_display_to_sink(webrtc_s *webrtc, unsigned int track_id, unsigned int t
         RET_VAL_IF(sink->display == NULL, WEBRTC_ERROR_INVALID_OPERATION, "sink->display is NULL");
     }
 
-    LOG_INFO("type[%d] object[%p]", type, display);
+    LOG_INFO("track_id[%u] type[%d] object[%p]", track_id, type, display);
 
     sink->display->type = type;
     sink->display->object = display;
diff --git a/src/webrtc_source.c b/src/webrtc_source.c
index 0935ac9c..5009ec85 100644
--- a/src/webrtc_source.c
+++ b/src/webrtc_source.c
@@ -2117,12 +2117,10 @@ int _set_sound_stream_info(webrtc_s *webrtc, unsigned int source_id, sound_strea
 {
     webrtc_gst_slot_s *source;
     GstElement *element;
-    int ret = SOUND_MANAGER_ERROR_NONE;
+    int ret;
     bool available = false;
     char *stream_type;
     int stream_index;
-    GstStructure *structure;
-    char values[64] = {'\0',};
 
     RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
     RET_VAL_IF((source = _get_slot_by_id(webrtc->gst.source_slots, source_id)) == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "could not find source");
@@ -2148,18 +2146,7 @@ int _set_sound_stream_info(webrtc_s *webrtc, unsigned int source_id, sound_strea
 
     LOG_INFO("source_id[%u], stream_info[%p, type:%s, index:%d]", source_id, stream_info, stream_type, stream_index);
 
-    snprintf(values, sizeof(values) - 1, "props,media.role=%s, media.parent_id=%d", stream_type, stream_index);
-    structure = gst_structure_from_string(values, NULL);
-    if (!structure) {
-        LOG_ERROR("failed to gst_structure_from_string(), [%s]", values);
-        return WEBRTC_ERROR_INVALID_OPERATION;
-    }
-
-    LOG_INFO("stream-properties[%s]", values);
-    g_object_set(G_OBJECT(element), "stream-properties", structure, NULL);
-    gst_structure_free(structure);
-
-    return WEBRTC_ERROR_NONE;
+    return _apply_stream_info(element, stream_type, stream_index);
 }
 
 int _set_media_format(webrtc_s *webrtc, unsigned int source_id, media_format_h format)
diff --git a/test/webrtc_test.c b/test/webrtc_test.c
index fd9fa918..e46deaff 100644
--- a/test/webrtc_test.c
+++ b/test/webrtc_test.c
@@ -181,8 +181,11 @@ typedef struct _connection_s {
     gint64 expected_size;
     char* receive_buffer;
 
-    webrtc_display_type_e display_type;
-    Evas_Object *eo;
+    struct {
+        sound_stream_info_h stream_info;
+        webrtc_display_type_e display_type;
+        Evas_Object *eo;
+    } render;
 
 #ifdef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
     GstElement *render_pipeline;
@@ -292,7 +295,7 @@ static int app_create(void *data)
      * | eo (remote1)       | eo (remote2)       | *
      * |____________________|____________________| */
     for (i = 0; i < MAX_CONNECTION_LEN + 1; i++) {
-        eo = (i == 0) ? &g_eo_mine : &g_conns[i - 1].eo;
+        eo = (i == 0) ? &g_eo_mine : &g_conns[i - 1].render.eo;
         *eo = create_image_object(ad->win);
         evas_object_image_size_set(*eo, ad->win_width / 2, ad->win_height / 2);
         evas_object_image_fill_set(*eo, 0, 0, ad->win_width / 2, ad->win_height / 2);
@@ -311,9 +314,9 @@ static int app_terminate(void *data)
     int i;
 
     for (i = 0; i < MAX_CONNECTION_LEN; i++) {
-        if (g_conns[i].eo) {
-            evas_object_del(g_conns[i].eo);
-            g_conns[i].eo = NULL;
+        if (g_conns[i].render.eo) {
+            evas_object_del(g_conns[i].render.eo);
+            g_conns[i].render.eo = NULL;
         }
     }
 
@@ -415,6 +418,11 @@ static void _webrtc_destroy(int index)
 
     for (i = 0; i < MAX_MEDIA_PACKET_SOURCE_LEN; i++)
         __release_packet_source(index, i);
+
+    if (g_conns[index].render.stream_info) {
+        sound_manager_destroy_stream_information(g_conns[index].render.stream_info);
+        g_conns[index].render.stream_info = NULL;
+    }
 }
 
 static void _webrtc_start(int index)
@@ -444,6 +452,12 @@ static void _webrtc_stop(int index)
         if (g_conns[index].recv_channels[i] != NULL)
             g_conns[index].recv_channels[i] = NULL;
     }
+
+    if (g_conns[index].render.stream_info) {
+        sound_manager_destroy_stream_information(g_conns[index].render.stream_info);
+        g_conns[index].render.stream_info = NULL;
+    }
+
 #ifdef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
     if (g_conns[index].render_pipeline) {
         GstStateChangeReturn state_change_ret = gst_element_set_state(g_conns[index].render_pipeline, GST_STATE_NULL);
@@ -733,7 +747,7 @@ static void _webrtc_media_source_set_transceiver_direction(int index, unsigned i
 
 static void _webrtc_set_display_type(int index, int type)
 {
-    g_conns[index].display_type = type;
+    g_conns[index].render.display_type = type;
 
     g_print("display type[%d] is set, it'll be applied when starting rendering video.\n", type);
 }
@@ -1454,16 +1468,16 @@ static void __track_added_cb(webrtc_h webrtc, webrtc_media_type_e type, unsigned
 
     if (type == WEBRTC_MEDIA_TYPE_VIDEO) {
 #ifndef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
-        if (conn->display_type == WEBRTC_DISPLAY_TYPE_OVERLAY) {
+        if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY) {
             g_print("Video track is added, set display - overlay, object[%p]\n", g_win_id);
-            webrtc_set_display(conn->webrtc, id, WEBRTC_DISPLAY_TYPE_OVERLAY, g_win_id);
+            webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_OVERLAY, g_win_id);
 
-        } else if (conn->display_type == WEBRTC_DISPLAY_TYPE_EVAS) {
-            g_print("Video track is added, set display - evas object[%p]\n", conn->eo);
-            webrtc_set_display(conn->webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, conn->eo);
+        } else if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_EVAS) {
+            g_print("Video track is added, set display - evas object[%p]\n", conn->render.eo);
+            webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, conn->render.eo);
 
         } else {
-            g_print("Video track is added, invalid display type[%d]\n", conn->display_type);
+            g_print("Video track is added, invalid display type[%d]\n", conn->render.display_type);
         }
 #else
         g_print("Video track is added\n");
@@ -1474,7 +1488,17 @@ static void __track_added_cb(webrtc_h webrtc, webrtc_media_type_e type, unsigned
         }
 #endif
     } else if (type == WEBRTC_MEDIA_TYPE_AUDIO) {
+        int ret;
         g_print("Audio track is added\n");
+
+        if (!conn->render.stream_info) {
+            ret = sound_manager_create_stream_information(SOUND_STREAM_TYPE_MEDIA, NULL, NULL, &conn->render.stream_info);
+            RET_IF(ret != SOUND_MANAGER_ERROR_NONE, "failed to sound_manager_create_stream_information(), ret[0x%x]", ret);
+        }
+
+        ret = webrtc_set_sound_stream_info(webrtc, id, conn->render.stream_info);
+        if (ret != WEBRTC_ERROR_NONE)
+            g_printerr("failed to webrtc_set_sound_stream_info(), ret[0x%x]\n", ret);
     }
 }
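
For reference, below is a minimal usage sketch of the new API that mirrors the pattern added to test/webrtc_test.c above. It is illustrative only: the header names (<webrtc.h>, <sound_manager.h>), the global handle and the simplified error handling are assumptions rather than part of this patch, and the callback is assumed to have been registered earlier with webrtc_set_track_added_cb().

#include <stdio.h>
#include <webrtc.h>
#include <sound_manager.h>

static sound_stream_info_h g_stream_info;

/* Track-added callback, registered beforehand with
 * webrtc_set_track_added_cb(webrtc, __track_added_cb, NULL). */
static void __track_added_cb(webrtc_h webrtc, webrtc_media_type_e type, unsigned int id, void *user_data)
{
    int ret;

    if (type != WEBRTC_MEDIA_TYPE_AUDIO)
        return;

    /* Create the stream information handle once; SOUND_STREAM_TYPE_MEDIA, VOIP and
     * MEDIA_EXTERNAL_ONLY are the stream types accepted by webrtc_set_sound_stream_info(). */
    if (!g_stream_info)
        sound_manager_create_stream_information(SOUND_STREAM_TYPE_MEDIA, NULL, NULL, &g_stream_info);

    /* Must be called within the track added callback,
     * otherwise WEBRTC_ERROR_INVALID_OPERATION is returned. */
    ret = webrtc_set_sound_stream_info(webrtc, id, g_stream_info);
    if (ret != WEBRTC_ERROR_NONE)
        printf("failed to webrtc_set_sound_stream_info(), ret[0x%x]\n", ret);
}

Routing and volume of the rendered audio track then follow the sound manager policy of the given stream information; the handle should be destroyed with sound_manager_destroy_stream_information() once the connection is stopped or destroyed, as the test code does.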