#define DEFAULT_ELEMENT_AUDIOTESTSRC "audiotestsrc"
#define DEFAULT_ELEMENT_APPSRC "appsrc"
#define DEFAULT_ELEMENT_SCREENSRC "waylandsrc"
#define DEFAULT_ELEMENT_VIDEOCONVERT "videoconvert"
#define DEFAULT_ELEMENT_CAPSFILTER "capsfilter"
+#define DEFAULT_ELEMENT_AUDIOCONVERT "audioconvert"
+#define DEFAULT_ELEMENT_AUDIORESAMPLE "audioresample"
#define DEFAULT_ELEMENT_QUEUE "queue"
#define DEFAULT_ELEMENT_VOLUME "volume"
#define DEFAULT_ELEMENT_INPUT_SELECTOR "input-selector"
switch (probe_data->av_idx) {
case AV_IDX_AUDIO:
- /* TODO: implementation */
+ if (!probe_data->source->sound_stream_info.type)
+ return GST_PAD_PROBE_OK;
break;
case AV_IDX_VIDEO:
if (!probe_data->source->display)
return GST_PAD_PROBE_OK;
- appsrc = probe_data->source->av[probe_data->av_idx].render.appsrc;
- if (appsrc) {
- buffer = gst_pad_probe_info_get_buffer(info);
- LOG_DEBUG("push buffer[%p] to the render pipeline, appsrc[%p]", buffer, appsrc);
- g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
- if (gst_ret != GST_FLOW_OK)
- LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
- }
break;
default:
- break;
+ LOG_ERROR_IF_REACHED("av_idx(%d)", probe_data->av_idx);
+ return GST_PAD_PROBE_OK;
+ }
+
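+ /* Common path: hand the probed buffer over to the matching render pipeline through its appsrc. */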
+ appsrc = probe_data->source->av[probe_data->av_idx].render.appsrc;
+ if (appsrc) {
+ buffer = gst_pad_probe_info_get_buffer(info);
+ LOG_DEBUG("push buffer[%p] to the render pipeline, appsrc[%p]", buffer, appsrc);
+ g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
+ if (gst_ret != GST_FLOW_OK)
+ LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
}
return GST_PAD_PROBE_OK;
if (source->display)
_release_display(source->display);
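+ /* sound_stream_info.type was strdup'd in _set_audio_loopback(), so release it with free(). */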
+ if (source->sound_stream_info.type)
+ free(source->sound_stream_info.type);
+
g_free(source);
}
LOG_INFO("packet is NULL, emit EOS signal");
g_signal_emit_by_name(G_OBJECT(appsrc), "end-of-stream", &gst_ret, NULL);
if (gst_ret != GST_FLOW_OK) {
- LOG_ERROR("failed to 'end-of-stream', gst_ret[0x%x]", gst_ret);
+ LOG_ERROR("failed to 'end-of-stream', gst_ret[%d]", gst_ret);
return WEBRTC_ERROR_INVALID_OPERATION;
}
return WEBRTC_ERROR_NONE;
g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
if (gst_ret != GST_FLOW_OK) {
- LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
+ LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
gst_buffer_unref(buffer);
return WEBRTC_ERROR_INVALID_OPERATION;
}
LOG_DEBUG("external gst buffer[%p]", buffer);
g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
if (gst_ret != GST_FLOW_OK) {
- LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
+ LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
return WEBRTC_ERROR_INVALID_OPERATION;
}
media_packet_destroy(packet);
g_signal_emit_by_name(G_OBJECT(appsrc), "push-buffer", buffer, &gst_ret, NULL);
if (gst_ret != GST_FLOW_OK) {
- LOG_ERROR("failed to 'push-buffer', gst_ret[0x%x]", gst_ret);
+ LOG_ERROR("failed to 'push-buffer', gst_ret[%d]", gst_ret);
ret = WEBRTC_ERROR_INVALID_OPERATION;
}
return WEBRTC_ERROR_NONE;
}
+static int __build_loopback_audiosink(webrtc_gst_slot_s *source, GstElement *link_with)
+{
+ webrtc_s *webrtc;
+ GstElement *audiosink;
+ GstElement *audioconvert;
+ GstElement *audioresample;
+ int ret = WEBRTC_ERROR_NONE;
+
+ RET_VAL_IF(source == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "source is NULL");
+ RET_VAL_IF(link_with == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "link_with is NULL");
+ RET_VAL_IF(source->webrtc == NULL, WEBRTC_ERROR_INVALID_OPERATION, "webrtc is NULL");
+
+ webrtc = source->webrtc;
+
+ if (!(audiosink = _create_element(webrtc->ini.rendering_sink.a_sink_element, NULL)))
+ return WEBRTC_ERROR_INVALID_OPERATION;
+
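+ /* Apply the sound stream info only if the sink exposes the "stream-properties" property (pulsesink does). */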
+ if (g_object_class_find_property(G_OBJECT_GET_CLASS(G_OBJECT(audiosink)), "stream-properties")) {
+ if (source->sound_stream_info.type) {
+ ret = _apply_stream_info(audiosink, source->sound_stream_info.type, source->sound_stream_info.index);
+ if (ret != WEBRTC_ERROR_NONE) {
+ SAFE_GST_OBJECT_UNREF(audiosink);
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+ }
+ }
+
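+ /* audioconvert and audioresample adapt the decoded stream to the format and rate the sink accepts. */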
+ if (!(audioconvert = _create_element(DEFAULT_ELEMENT_AUDIOCONVERT, NULL))) {
+ SAFE_GST_OBJECT_UNREF(audiosink);
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+
+ if (!(audioresample = _create_element(DEFAULT_ELEMENT_AUDIORESAMPLE, NULL))) {
+ SAFE_GST_OBJECT_UNREF(audiosink);
+ SAFE_GST_OBJECT_UNREF(audioconvert);
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+
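+ /* The bin takes ownership of the elements from here on; on later failures the caller unrefs the whole render pipeline. */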
+ gst_bin_add_many(GST_BIN(source->av[AV_IDX_AUDIO].render.pipeline), audioconvert, audioresample, audiosink, NULL);
+
+ if (!gst_element_sync_state_with_parent(audioconvert)) {
+ LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(audioconvert));
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+ if (!gst_element_sync_state_with_parent(audioresample)) {
+ LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(audioresample));
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+
+ if (!gst_element_sync_state_with_parent(audiosink)) {
+ LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(audiosink));
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+
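+ /* Link the chain: link_with -> audioconvert -> audioresample -> audiosink. */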
+ if (!gst_element_link_many(link_with, audioconvert, audioresample, audiosink, NULL)) {
+ LOG_ERROR("failed to gst_element_link_many()");
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+
+ return WEBRTC_ERROR_NONE;
+}
+
static int __build_loopback_videosink(webrtc_gst_slot_s *source, GstElement *link_with)
{
webrtc_s *webrtc;
if (!gst_element_sync_state_with_parent(videoconvert)) {
LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(videoconvert));
- goto error;
+ return WEBRTC_ERROR_INVALID_OPERATION;
}
if (!gst_element_sync_state_with_parent(videosink)) {
LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(videosink));
- goto error;
+ return WEBRTC_ERROR_INVALID_OPERATION;
}
if (!gst_element_link_many(link_with, videoconvert, videosink, NULL)) {
LOG_ERROR("failed to gst_element_link_many()");
- goto error;
+ return WEBRTC_ERROR_INVALID_OPERATION;
}
return WEBRTC_ERROR_NONE;
-
-error:
- SAFE_GST_OBJECT_UNREF(source->av[AV_IDX_VIDEO].render.pipeline);
- return WEBRTC_ERROR_INVALID_OPERATION;
}
static void __loopback_decodebin_pad_added_cb(GstElement *decodebin, GstPad *new_pad, gpointer user_data)
media_type = gst_structure_get_name(gst_caps_get_structure(gst_pad_get_current_caps(new_pad), 0));
LOG_INFO("source_id[%u], media_type[%s], new_pad[%s]", source->id, media_type, GST_PAD_NAME(new_pad));
- if (g_strrstr(media_type, "video")) {
- ret = __build_loopback_videosink(source, decodebin);
- } else if (g_strrstr(media_type, "audio")) {
- /* TODO : Implementation */
+ if (g_strrstr(media_type, "audio")) {
+ ret = __build_loopback_audiosink(source, decodebin);
+ if (ret != WEBRTC_ERROR_NONE)
+ SAFE_GST_OBJECT_UNREF(source->av[AV_IDX_AUDIO].render.pipeline);
+ } else if (g_strrstr(media_type, "video")) {
+ ret = __build_loopback_videosink(source, decodebin);
+ if (ret != WEBRTC_ERROR_NONE)
+ SAFE_GST_OBJECT_UNREF(source->av[AV_IDX_VIDEO].render.pipeline);
} else {
LOG_ERROR("not supported media type[%s]", media_type);
RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
RET_VAL_IF(source == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "source is NULL");
- RET_VAL_IF(source->display == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display is NULL");
- RET_VAL_IF(source->display->object == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display->object is NULL");
+ if (type == MEDIA_TYPE_VIDEO) {
+ RET_VAL_IF(source->display == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display is NULL");
+ RET_VAL_IF(source->display->object == NULL, WEBRTC_ERROR_INVALID_OPERATION, "display->object is NULL");
+ }
pipeline_name = g_strdup_printf("webrtc-source_%u-%s-render-pipeline", source->id, type == MEDIA_TYPE_AUDIO ? "audio" : "video");
source->av[idx].render.pipeline = gst_pipeline_new(pipeline_name);
g_signal_connect(decodebin, "autoplug-select", G_CALLBACK(_decodebin_autoplug_select_cb), webrtc);
} else {
+ int ret = WEBRTC_ERROR_NONE;
+
gst_bin_add(GST_BIN(source->av[idx].render.pipeline), appsrc);
- if (__build_loopback_videosink(source, appsrc) != WEBRTC_ERROR_NONE) {
+
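+ /* Link the appsrc to the loopback sink chain that matches the source's media type. */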
+ if (type == MEDIA_TYPE_AUDIO)
+ ret = __build_loopback_audiosink(source, appsrc);
+ else
+ ret = __build_loopback_videosink(source, appsrc);
+ if (ret != WEBRTC_ERROR_NONE) {
SAFE_GST_OBJECT_UNREF(appsrc);
goto error;
}
return WEBRTC_ERROR_INVALID_OPERATION;
}
+int _set_audio_loopback(webrtc_s *webrtc, unsigned int source_id, sound_stream_info_h stream_info)
+{
+ webrtc_gst_slot_s *source;
+ char *stream_type;
+ int stream_index;
+ bool available;
+ int ret = SOUND_MANAGER_ERROR_NONE;
+
+ RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+ RET_VAL_IF(source_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "source_id is 0");
+
+ RET_VAL_IF((source = _get_slot_by_id(webrtc->gst.source_slots, source_id)) == NULL,
+ WEBRTC_ERROR_INVALID_PARAMETER, "could not find source");
+ RET_VAL_IF((source->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_PARAMETER,
+ "invalid media_type for source[media_types:0x%x, id:%u]", source->media_types, source_id);
+ RET_VAL_IF((source->type == WEBRTC_MEDIA_SOURCE_TYPE_MEDIA_PACKET), WEBRTC_ERROR_INVALID_PARAMETER,
+ "this API does not support the media packet source");
+ RET_VAL_IF(stream_info == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "stream_info is NULL");
+
+ sound_manager_get_type_from_stream_information(stream_info, &stream_type);
+ sound_manager_get_index_from_stream_information(stream_info, &stream_index);
+
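+ /* Ask the sound manager whether this stream type may be used by the WebRTC framework. */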
+ ret = sound_manager_is_available_stream_information(stream_info, NATIVE_API_WEBRTC, &available);
+ if (ret != SOUND_MANAGER_ERROR_NONE) {
+ LOG_ERROR("failed to sound_manager_is_available_stream_information()");
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+
+ if (!available) {
+ LOG_ERROR("this stream info[%p, type:%s, index:%d] is not allowed to this framework", stream_info, stream_type, stream_index);
+ return WEBRTC_ERROR_INVALID_PARAMETER;
+ }
+
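+ /* Keep a copy of the type and index; __build_loopback_audiosink() applies them to the sink. */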
+ if (source->sound_stream_info.type)
+ free(source->sound_stream_info.type);
+ source->sound_stream_info.type = strdup(stream_type);
+ source->sound_stream_info.index = stream_index;
+
+ LOG_INFO("source_id[%u] stream_info[%p, type:%s, index:%d]", source_id, stream_info, stream_type, stream_index);
+
+ return __build_loopback_render_pipeline(webrtc, source, MEDIA_TYPE_AUDIO);
+}
+
int _set_video_loopback(webrtc_s *webrtc, unsigned int source_id, unsigned int type, void *display)
{
int ret = WEBRTC_ERROR_NONE;
CURRENT_STATUS_MEDIA_PACKET_SOURCE_UNSET_BUFFER_STATE_CHANGED_CB,
CURRENT_STATUS_MEDIA_PACKET_SOURCE_SET_FORMAT,
CURRENT_STATUS_SET_DISPLAY_TYPE,
+ CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK,
CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK,
CURRENT_STATUS_DATA_CHANNEL_SEND_STRING,
CURRENT_STATUS_DATA_CHANNEL_SEND_STRING_AS_BYTES,
g_conns[index].recv_channels[i] = NULL;
}
- if (g_conns[index].source.stream_info) {
- sound_manager_destroy_stream_information(g_conns[index].source.stream_info);
- g_conns[index].source.stream_info = NULL;
- }
-
- if (g_conns[index].render.stream_info) {
- sound_manager_destroy_stream_information(g_conns[index].render.stream_info);
- g_conns[index].render.stream_info = NULL;
- }
-
#ifdef __DEBUG_VALIDATE_ENCODED_FRAME_CB__
if (g_conns[index].render_pipeline) {
GstStateChangeReturn state_change_ret = gst_element_set_state(g_conns[index].render_pipeline, GST_STATE_NULL);
RET_IF(ret != WEBRTC_ERROR_NONE, "ret[0x%x]", ret);
if (type == WEBRTC_MEDIA_SOURCE_TYPE_MIC) {
+ if (g_conns[index].source.stream_info) {
+ sound_manager_destroy_stream_information(g_conns[index].source.stream_info);
+ g_conns[index].source.stream_info = NULL;
+ }
+
if (__get_sound_stream_info(&g_conns[index].source.stream_info) < 0) {
g_printerr("failed to __get_sound_stream_info()\n");
g_print("display type[%d] is set, it'll be applied when starting rendering video.\n", type);
}
+static void _webrtc_media_source_set_audio_loopback(int index, unsigned int source_id)
+{
+ int ret = WEBRTC_ERROR_NONE;
+
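+ /* Lazily create one SOUND_STREAM_TYPE_MEDIA stream info and reuse it for this connection. */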
+ if (!g_conns[index].render.stream_info) {
+ ret = sound_manager_create_stream_information(SOUND_STREAM_TYPE_MEDIA, NULL, NULL, &g_conns[index].render.stream_info);
+ RET_IF(ret != SOUND_MANAGER_ERROR_NONE, "failed to sound_manager_create_stream_information(), ret[0x%x]", ret);
+ }
+
+ ret = webrtc_media_source_set_audio_loopback(g_conns[index].webrtc, source_id, g_conns[index].render.stream_info);
+ RET_IF(ret != WEBRTC_ERROR_NONE, "ret[0x%x]", ret);
+
+ g_print("webrtc_media_source_set_audio_loopback() success, source_id[%u]\n", source_id);
+}
+
static void _webrtc_media_source_set_video_loopback(int index, unsigned int source_id)
{
int ret = WEBRTC_ERROR_NONE;
} else if (strncmp(cmd, "dt", 2) == 0) {
g_conns[g_conn_index].menu_state = CURRENT_STATUS_SET_DISPLAY_TYPE;
+ } else if (strncmp(cmd, "al", 2) == 0) {
+ g_conns[g_conn_index].menu_state = CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK;
} else if (strncmp(cmd, "vl", 2) == 0) {
g_conns[g_conn_index].menu_state = CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK;
g_print("gd. Get transceiver direction\n");
g_print("sf. Set media format to media packet source\n");
g_print("dt. Set display type\n");
+ g_print("al. Set audio loopback\t");
g_print("vl. Set video loopback\n");
g_print("cd. Create data channel\t");
g_print("dd. Destroy data channel\n");
} else if (g_conns[g_conn_index].menu_state == CURRENT_STATUS_SET_DISPLAY_TYPE) {
g_print("*** input display type.(1:overlay, 2:evas)\n");
+ } else if (g_conns[g_conn_index].menu_state == CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK) {
+ g_print("*** input source id.\n");
} else if (g_conns[g_conn_index].menu_state == CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK) {
g_print("*** input source id.\n");
reset_menu_state();
break;
}
+ case CURRENT_STATUS_MEDIA_SOURCE_SET_AUDIO_LOOPBACK: {
+ value = atoi(cmd);
+ _webrtc_media_source_set_audio_loopback(g_conn_index, value);
+ reset_menu_state();
+ break;
+ }
case CURRENT_STATUS_MEDIA_SOURCE_SET_VIDEO_LOOPBACK: {
value = atoi(cmd);
_webrtc_media_source_set_video_loopback(g_conn_index, value);