webrtc_set_sound_stream_info() is added.
When this new API is called with a stream info handle, the audio policy
of the audio track, including routing and volume, is controlled by that
handle.
[Version] 0.2.30
[Issue Type] API
Change-Id: I3ba47c6f84d00023ef2b0bf09511a6d019444e20
Signed-off-by: Sangchul Lee <sc11.lee@samsung.com>
*/
/**
- * @brief Set a display to the media track.
+ * @brief Sets a sound manager stream information to the audio track.
+ * @since_tizen 6.5
+ * @remarks Call this function within webrtc_track_added_cb(), otherwise #WEBRTC_ERROR_INVALID_OPERATION will be returned.\n
+ * If webrtc_set_encoded_audio_frame_cb() has been called, it will return #WEBRTC_ERROR_INVALID_OPERATION.\n
+ * The following sound stream types can be used for the @a stream_info:\n
+ * #SOUND_STREAM_TYPE_MEDIA\n
+ * #SOUND_STREAM_TYPE_VOIP\n
+ * #SOUND_STREAM_TYPE_MEDIA_EXTERNAL_ONLY
+ * @param[in] webrtc WebRTC handle
+ * @param[in] track_id The track id
+ * @param[in] stream_info The sound stream information
+ * @return @c 0 on success,
+ * otherwise a negative error value
+ * @retval #WEBRTC_ERROR_NONE Successful
+ * @retval #WEBRTC_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #WEBRTC_ERROR_INVALID_OPERATION Invalid operation
+ * @pre webrtc_track_added_cb() must be set by calling webrtc_set_track_added_cb().
+ * @see webrtc_set_track_added_cb()
+ * @see webrtc_unset_track_added_cb()
+ */
+int webrtc_set_sound_stream_info(webrtc_h webrtc, unsigned int track_id, sound_stream_info_h stream_info);
+
+/**
+ * @brief Sets a display to the video track.
* @since_tizen 6.5
* @remarks Call this function within webrtc_track_added_cb(), otherwise #WEBRTC_ERROR_INVALID_OPERATION will be returned.\n
* If webrtc_set_encoded_video_frame_cb() has been called, it will return #WEBRTC_ERROR_INVALID_OPERATION.
* 'max-retransmits' of type int : The number of times data will be attempted to be transmitted without acknowledgement before dropping. The default value is -1.\n
* 'protocol' of type string : The subprotocol used by this channel. The default value is NULL.\n
* 'id' of type int : Override the default identifier selection of this channel. The default value is -1.\n
- * 'priority' of type int : The priority to use for this channel(1:very low, 2:low, 3:medium, 4:high). The default value is 2.\n
+ * 'priority' of type int : The priority to use for this channel(1:very low, 2:low, 3:medium, 4:high). The default value is 2.
* @param[in] webrtc WebRTC handle
* @param[in] label Name for the channel
* @param[in] options Configuration options for creating the data channel (optional, this can be NULL)
g_free(dot_name); \
} while (0)
+#define DEFAULT_VIDEO_SINK_ELEMENT "tizenwlsink"
+#define DEFAULT_AUDIO_SINK_ELEMENT "pulsesink"
+
#define MEDIA_TYPE_AUDIO_RAW "audio/x-raw"
#define MEDIA_TYPE_AUDIO_OPUS "audio/x-opus"
#define MEDIA_TYPE_AUDIO_VORBIS "audio/x-vorbis"
int width;
int height;
} video_info;
+ struct {
+ char *type;
+ int index;
+ } sound_stream_info;
media_format_h media_format;
bool zerocopy_enabled;
GstAllocator *allocator;
int _get_video_mute(webrtc_s *webrtc, unsigned int source_id, bool *muted);
int _set_video_resolution(webrtc_s *webrtc, unsigned int source_id, int width, int height);
int _get_video_resolution(webrtc_s *webrtc, unsigned int source_id, int *width, int *height);
+int _apply_stream_info(GstElement *element, const char *stream_type, int stream_index);
int _set_sound_stream_info(webrtc_s *webrtc, unsigned int source_id, sound_stream_info_h stream_info);
int _set_media_format(webrtc_s *webrtc, unsigned int source_id, media_format_h format);
bool _check_if_format_is_set_to_packet_sources(webrtc_s *webrtc);
int _set_ghost_pad_target(GstPad *ghost_pad, GstElement *target_element, bool is_src);
int _add_rendering_sink_bin(webrtc_s *webrtc, GstPad *src_pad);
int _add_forwarding_sink_bin(webrtc_s *webrtc, GstPad *src_pad, bool is_video);
+int _set_stream_info_to_sink(webrtc_s *webrtc, unsigned int track_id, sound_stream_info_h stream_info);
int _set_display_to_sink(webrtc_s *webrtc, unsigned int track_id, unsigned int type, void *display);
bool _is_owner_of_track_build_context(webrtc_s *webrtc, unsigned int track_id);
void _track_build_context_destroy_cb(gpointer data);
Name: capi-media-webrtc
Summary: A WebRTC library in Tizen Native API
-Version: 0.2.29
+Version: 0.2.30
Release: 0
Group: Multimedia/API
License: Apache-2.0
return ret;
}
+/* Public API: stores the sound stream information for the audio track.
+ * Must be called within the track added callback (checked via ownership of
+ * the track build context) and is rejected when the encoded audio frame
+ * callback has been set, since no rendering sink exists in that case. */
+int webrtc_set_sound_stream_info(webrtc_h webrtc, unsigned int track_id, sound_stream_info_h stream_info)
+{
+	int ret = WEBRTC_ERROR_NONE;
+	webrtc_s *_webrtc = (webrtc_s*)webrtc;
+
+	RET_VAL_IF(_webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+	RET_VAL_IF(track_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "track id is 0");
+	RET_VAL_IF(stream_info == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_info is NULL");
+
+	g_mutex_lock(&_webrtc->mutex);
+
+	RET_VAL_WITH_UNLOCK_IF(_webrtc->track_added_cb.callback == NULL, WEBRTC_ERROR_INVALID_OPERATION, &_webrtc->mutex, "track added callback was not set");
+	RET_VAL_WITH_UNLOCK_IF(_webrtc->encoded_audio_frame_cb.callback, WEBRTC_ERROR_INVALID_OPERATION, &_webrtc->mutex, "encoded audio frame callback was set");
+	RET_VAL_WITH_UNLOCK_IF(!_is_owner_of_track_build_context(_webrtc, track_id), WEBRTC_ERROR_INVALID_OPERATION, &_webrtc->mutex,
+		"this function should be called within the track added callback");
+
+	/* pass the casted handle for consistency with the accesses above */
+	ret = _set_stream_info_to_sink(_webrtc, track_id, stream_info);
+
+	g_mutex_unlock(&_webrtc->mutex);
+
+	return ret;
+}
+
int webrtc_set_display(webrtc_h webrtc, unsigned int track_id, webrtc_display_type_e type, webrtc_display_h display)
{
int ret = WEBRTC_ERROR_NONE;
"this function should be called within the track added callback");
ret = _set_display_to_sink(webrtc, track_id, (unsigned int)type, (void *)display);
- if (ret == WEBRTC_ERROR_NONE)
- LOG_INFO("track_id[%u] type[%d] display[%p]", track_id, type, display);
g_mutex_unlock(&_webrtc->mutex);
#define INI_ITEM_AUDIO_HW_DECODER_ELEMENTS "audio hw decoder elements"
#define INI_ITEM_VIDEO_HW_DECODER_ELEMENTS "video hw decoder elements"
-#define DEFAULT_VIDEO_SINK_ELEMENT "tizenwlsink"
-#define DEFAULT_AUDIO_SINK_ELEMENT "pulsesink"
-
/* items for resource acquisition */
#define INI_ITEM_RESOURCE_CAMERA "camera"
#define INI_ITEM_RESOURCE_VIDEO_ENCODER "video encoder"
return ret;
}
+
+/* Applies the sound stream type/index to @element through its
+ * 'stream-properties' property (exposed by pulsesink).
+ *
+ * Returns WEBRTC_ERROR_NONE on success, WEBRTC_ERROR_INVALID_PARAMETER for
+ * NULL arguments, or WEBRTC_ERROR_INVALID_OPERATION when the element does not
+ * expose 'stream-properties' or the property string cannot be parsed. */
+int _apply_stream_info(GstElement *element, const char *stream_type, int stream_index)
+{
+	GstStructure *structure;
+	char values[64] = {'\0',};
+
+	RET_VAL_IF(element == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "element is NULL");
+	RET_VAL_IF(stream_type == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_type is NULL");
+	RET_VAL_IF(!g_object_class_find_property(G_OBJECT_GET_CLASS(G_OBJECT(element)), "stream-properties"),
+		WEBRTC_ERROR_INVALID_OPERATION, "could not find 'stream-properties'");
+
+	/* snprintf() always NUL-terminates within the given size, so the full buffer size can be used */
+	snprintf(values, sizeof(values), "props,media.role=%s, media.parent_id=%d", stream_type, stream_index);
+	RET_VAL_IF(!(structure = gst_structure_from_string(values, NULL)),
+		WEBRTC_ERROR_INVALID_OPERATION, "failed to gst_structure_from_string(), [%s]", values);
+
+	LOG_INFO("stream-properties[%s]", values);
+
+	g_object_set(G_OBJECT(element), "stream-properties", structure, NULL);
+	gst_structure_free(structure);
+
+	return WEBRTC_ERROR_NONE;
+}
\ No newline at end of file
RET_VAL_IF(sink == NULL, WEBRTC_ERROR_INVALID_OPERATION, "could not find an item by [%s] in sink slots", GST_ELEMENT_NAME(decodebin));
RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
- sink->media_types |= MEDIA_TYPE_VIDEO;
+ sink->media_types = MEDIA_TYPE_VIDEO;
if (!(videosink_factory_name = __get_videosink_factory_name(sink->display, &webrtc->ini, &display_is_set)))
return WEBRTC_ERROR_INVALID_OPERATION;
GstElement *audioconvert;
GstElement *audioresample;
GstElement *audiosink;
+ int ret = WEBRTC_ERROR_NONE;
RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
RET_VAL_IF(decodebin == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "decodebin is NULL");
RET_VAL_IF(sink == NULL, WEBRTC_ERROR_INVALID_OPERATION, "could not find an item by [%s] in sink slots", GST_ELEMENT_NAME(decodebin));
RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
- sink->media_types |= MEDIA_TYPE_AUDIO;
+ sink->media_types = MEDIA_TYPE_AUDIO;
if (!(audioconvert = _create_element(DEFAULT_ELEMENT_AUDIOCONVERT, NULL))) {
LOG_ERROR("failed to create audioconvert");
return WEBRTC_ERROR_INVALID_OPERATION;
}
+ if (g_object_class_find_property(G_OBJECT_GET_CLASS(G_OBJECT(audiosink)), "stream-properties")) {
+ if (sink->sound_stream_info.type) {
+ ret = _apply_stream_info(audiosink, sink->sound_stream_info.type, sink->sound_stream_info.index);
+ if (ret != WEBRTC_ERROR_NONE) /* FIXME: unref all the created elements */
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+ }
+
gst_bin_add_many(sink->bin, audioconvert, audioresample, audiosink, NULL);
if (!gst_element_sync_state_with_parent(audioconvert)) {
if (sink->display)
_release_display(sink->display);
+ if (sink->sound_stream_info.type)
+ free(sink->sound_stream_info.type);
+
g_free(sink);
}
return WEBRTC_ERROR_INVALID_OPERATION;
}
+/* Stores the sound stream type/index from @stream_info into the audio sink
+ * slot identified by @track_id; the values are applied to the audio sink
+ * element when the rendering bin is built.
+ * Requires the default audio renderer (pulsesink) to be configured in the ini. */
+int _set_stream_info_to_sink(webrtc_s *webrtc, unsigned int track_id, sound_stream_info_h stream_info)
+{
+	webrtc_gst_slot_s *sink;
+	gchar *track_name;
+	char *stream_type;
+	int stream_index;
+	bool available;
+	int ret;
+
+	RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+	RET_VAL_IF(track_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "track id is 0");
+	RET_VAL_IF(stream_info == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_info is NULL");
+
+	track_name = g_strdup_printf("track_%u", track_id);
+
+	sink = __find_sink_slot(webrtc, track_name);
+	if (sink == NULL) {
+		LOG_ERROR("could not find an item by [%s] in sink slots", track_name);
+		g_free(track_name);
+		return WEBRTC_ERROR_INVALID_PARAMETER;
+	}
+	g_free(track_name);
+
+	RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
+	RET_VAL_IF(sink->encoded_frame_cb != NULL, WEBRTC_ERROR_INVALID_OPERATION, "it may be a forwarding sink for encoded frame callback");
+	RET_VAL_IF((sink->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_OPERATION, "it's not an audio track");
+	RET_VAL_IF(strcmp(webrtc->ini.rendering_sink.a_sink_element, DEFAULT_AUDIO_SINK_ELEMENT), WEBRTC_ERROR_INVALID_OPERATION,
+		"it requires [%s] as an audio renderer", DEFAULT_AUDIO_SINK_ELEMENT);
+
+	/* check the getter results before using stream_type/stream_index, they are not initialized on failure */
+	ret = sound_manager_get_type_from_stream_information(stream_info, &stream_type);
+	RET_VAL_IF(ret != SOUND_MANAGER_ERROR_NONE, WEBRTC_ERROR_INVALID_OPERATION, "failed to sound_manager_get_type_from_stream_information()");
+
+	ret = sound_manager_get_index_from_stream_information(stream_info, &stream_index);
+	RET_VAL_IF(ret != SOUND_MANAGER_ERROR_NONE, WEBRTC_ERROR_INVALID_OPERATION, "failed to sound_manager_get_index_from_stream_information()");
+
+	ret = sound_manager_is_available_stream_information(stream_info, NATIVE_API_WEBRTC, &available);
+	if (ret != SOUND_MANAGER_ERROR_NONE) {
+		LOG_ERROR("failed to sound_manager_is_available_stream_information()");
+		return WEBRTC_ERROR_INVALID_OPERATION;
+	}
+
+	if (!available) {
+		LOG_ERROR("this stream info[%p, type:%s, index:%d] is not allowed to this framework", stream_info, stream_type, stream_index);
+		return WEBRTC_ERROR_INVALID_PARAMETER;
+	}
+
+	/* release any previously stored type to avoid leaking it on repeated calls */
+	free(sink->sound_stream_info.type);
+	sink->sound_stream_info.type = strdup(stream_type);
+	sink->sound_stream_info.index = stream_index;
+
+	LOG_INFO("track_id[%u] stream_info[%p, type:%s, index:%d]", track_id, stream_info, stream_type, stream_index);
+
+	return WEBRTC_ERROR_NONE;
+}
+
int _set_display_to_sink(webrtc_s *webrtc, unsigned int track_id, unsigned int type, void *display)
{
webrtc_gst_slot_s *sink;
RET_VAL_IF(sink->display == NULL, WEBRTC_ERROR_INVALID_OPERATION, "sink->display is NULL");
}
- LOG_INFO("type[%d] object[%p]", type, display);
+ LOG_INFO("track_id[%u] type[%d] object[%p]", track_id, type, display);
sink->display->type = type;
sink->display->object = display;
{
webrtc_gst_slot_s *source;
GstElement *element;
- int ret = SOUND_MANAGER_ERROR_NONE;
+ int ret;
bool available = false;
char *stream_type;
int stream_index;
- GstStructure *structure;
- char values[64] = {'\0',};
RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
RET_VAL_IF((source = _get_slot_by_id(webrtc->gst.source_slots, source_id)) == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "could not find source");
LOG_INFO("source_id[%u], stream_info[%p, type:%s, index:%d]", source_id, stream_info, stream_type, stream_index);
- snprintf(values, sizeof(values) - 1, "props,media.role=%s, media.parent_id=%d", stream_type, stream_index);
- structure = gst_structure_from_string(values, NULL);
- if (!structure) {
- LOG_ERROR("failed to gst_structure_from_string(), [%s]", values);
- return WEBRTC_ERROR_INVALID_OPERATION;
- }
-
- LOG_INFO("stream-properties[%s]", values);
- g_object_set(G_OBJECT(element), "stream-properties", structure, NULL);
- gst_structure_free(structure);
-
- return WEBRTC_ERROR_NONE;
+ return _apply_stream_info(element, stream_type, stream_index);
}
int _set_media_format(webrtc_s *webrtc, unsigned int source_id, media_format_h format)
gint64 expected_size;
char* receive_buffer;
- webrtc_display_type_e display_type;
- Evas_Object *eo;
+ struct {
+ sound_stream_info_h stream_info;
+ webrtc_display_type_e display_type;
+ Evas_Object *eo;
+ } render;
#ifdef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
GstElement *render_pipeline;
* | eo (remote1) | eo (remote2) | *
* |____________________|____________________| */
for (i = 0; i < MAX_CONNECTION_LEN + 1; i++) {
- eo = (i == 0) ? &g_eo_mine : &g_conns[i - 1].eo;
+ eo = (i == 0) ? &g_eo_mine : &g_conns[i - 1].render.eo;
*eo = create_image_object(ad->win);
evas_object_image_size_set(*eo, ad->win_width / 2, ad->win_height / 2);
evas_object_image_fill_set(*eo, 0, 0, ad->win_width / 2, ad->win_height / 2);
int i;
for (i = 0; i < MAX_CONNECTION_LEN; i++) {
- if (g_conns[i].eo) {
- evas_object_del(g_conns[i].eo);
- g_conns[i].eo = NULL;
+ if (g_conns[i].render.eo) {
+ evas_object_del(g_conns[i].render.eo);
+ g_conns[i].render.eo = NULL;
}
}
for (i = 0; i < MAX_MEDIA_PACKET_SOURCE_LEN; i++)
__release_packet_source(index, i);
+
+ if (g_conns[index].render.stream_info) {
+ sound_manager_destroy_stream_information(g_conns[index].render.stream_info);
+ g_conns[index].render.stream_info = NULL;
+ }
}
static void _webrtc_start(int index)
if (g_conns[index].recv_channels[i] != NULL)
g_conns[index].recv_channels[i] = NULL;
}
+
+ if (g_conns[index].render.stream_info) {
+ sound_manager_destroy_stream_information(g_conns[index].render.stream_info);
+ g_conns[index].render.stream_info = NULL;
+ }
+
#ifdef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
if (g_conns[index].render_pipeline) {
GstStateChangeReturn state_change_ret = gst_element_set_state(g_conns[index].render_pipeline, GST_STATE_NULL);
static void _webrtc_set_display_type(int index, int type)
{
- g_conns[index].display_type = type;
+ g_conns[index].render.display_type = type;
g_print("display type[%d] is set, it'll be applied when starting rendering video.\n", type);
}
if (type == WEBRTC_MEDIA_TYPE_VIDEO) {
#ifndef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
- if (conn->display_type == WEBRTC_DISPLAY_TYPE_OVERLAY) {
+ if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY) {
g_print("Video track is added, set display - overlay, object[%p]\n", g_win_id);
- webrtc_set_display(conn->webrtc, id, WEBRTC_DISPLAY_TYPE_OVERLAY, g_win_id);
+ webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_OVERLAY, g_win_id);
- } else if (conn->display_type == WEBRTC_DISPLAY_TYPE_EVAS) {
- g_print("Video track is added, set display - evas object[%p]\n", conn->eo);
- webrtc_set_display(conn->webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, conn->eo);
+ } else if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_EVAS) {
+ g_print("Video track is added, set display - evas object[%p]\n", conn->render.eo);
+ webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, conn->render.eo);
} else {
- g_print("Video track is added, invalid display type[%d]\n", conn->display_type);
+ g_print("Video track is added, invalid display type[%d]\n", conn->render.display_type);
}
#else
g_print("Video track is added\n");
}
#endif
} else if (type == WEBRTC_MEDIA_TYPE_AUDIO) {
+ int ret;
g_print("Audio track is added\n");
+
+ if (!conn->render.stream_info) {
+ ret = sound_manager_create_stream_information(SOUND_STREAM_TYPE_MEDIA, NULL, NULL, &conn->render.stream_info);
+ RET_IF(ret != SOUND_MANAGER_ERROR_NONE, "failed to sound_manager_create_stream_information(), ret[0x%x]", ret);
+ }
+
+ ret = webrtc_set_sound_stream_info(webrtc, id, conn->render.stream_info);
+ if (ret != WEBRTC_ERROR_NONE)
+ g_printerr("failed to webrtc_set_sound_stream_info(), ret[0x%x]\n", ret);
}
}