g_free(dot_name); \
} while (0)
+#define GET_MEDIA_TYPE_NAME(x_is_audio) ((x_is_audio) ? "audio" : "video")
+
#define DEFAULT_ELEMENT_FAKESINK "fakesink"
#define DEFAULT_ELEMENT_AUDIOCONVERT "audioconvert"
#define DEFAULT_ELEMENT_AUDIORESAMPLE "audioresample"
int _add_no_target_ghostpad_to_slot(webrtc_gst_slot_s *slot, bool is_src, GstPad **new_pad);
int _set_ghost_pad_target(GstPad *ghost_pad, GstElement *target_element, bool is_src);
int _add_rendering_sink_bin(webrtc_s *webrtc, GstPad *src_pad);
-int _add_forwarding_sink_bin(webrtc_s *webrtc, GstPad *src_pad, bool is_video);
+int _add_forwarding_sink_bin(webrtc_s *webrtc, GstPad *src_pad, bool is_audio);
int _set_stream_info_to_sink(webrtc_s *webrtc, unsigned int track_id, sound_stream_info_h stream_info);
int _set_display_to_sink(webrtc_s *webrtc, unsigned int track_id, unsigned int type, void *display);
int _set_display_mode_to_sink(webrtc_s *webrtc, unsigned int track_id, webrtc_display_mode_e mode);
return (unsigned int)id;
}
-static void __invoke_track_added_cb(webrtc_s *webrtc, const gchar *name, bool is_video, bool build_track_context)
+static void __invoke_track_added_cb(webrtc_s *webrtc, const gchar *name, bool is_audio, bool build_track_context)
{
RET_IF(webrtc == NULL, "webrtc is NULL");
RET_IF(name == NULL, "name is NULL");
- LOG_INFO("webrtc[%p] [%s] track[%s] is added", webrtc, is_video ? "video" : "audio", name);
+ LOG_INFO("webrtc[%p] [%s] track[%s] is added", webrtc, GET_MEDIA_TYPE_NAME(is_audio), name);
if (webrtc->track_added_cb.callback) {
unsigned int id = __get_id_from_name(name);
LOG_DEBUG(">>> callback[%p], user_data[%p]", webrtc->track_added_cb.callback, webrtc->track_added_cb.user_data);
((webrtc_track_added_cb)(webrtc->track_added_cb.callback))((webrtc_h)webrtc,
- is_video ? WEBRTC_MEDIA_TYPE_VIDEO : WEBRTC_MEDIA_TYPE_AUDIO, id, webrtc->track_added_cb.user_data);
+ is_audio ? WEBRTC_MEDIA_TYPE_AUDIO : WEBRTC_MEDIA_TYPE_VIDEO, id, webrtc->track_added_cb.user_data);
LOG_DEBUG("<<< end of the callback");
if (build_track_context)
if (_is_audio_media_type(media_type)) {
sink->media_types |= MEDIA_TYPE_AUDIO;
- __invoke_track_added_cb(webrtc, GST_ELEMENT_NAME(decodebin), false, true);
+ __invoke_track_added_cb(webrtc, GST_ELEMENT_NAME(decodebin), true, true);
ret = __build_audiosink(webrtc, decodebin, new_pad);
} else {
sink->media_types |= MEDIA_TYPE_VIDEO;
- __invoke_track_added_cb(webrtc, GST_ELEMENT_NAME(decodebin), true, true);
+ __invoke_track_added_cb(webrtc, GST_ELEMENT_NAME(decodebin), false, true);
ret = __build_videosink(webrtc, decodebin, new_pad);
}
return new_caps;
}
-int _add_forwarding_sink_bin(webrtc_s *webrtc, GstPad *src_pad, bool is_video)
+int _add_forwarding_sink_bin(webrtc_s *webrtc, GstPad *src_pad, bool is_audio)
{
int ret = WEBRTC_ERROR_NONE;
unsigned int id;
return WEBRTC_ERROR_INVALID_OPERATION;
}
- sink->encoded_frame_cb = is_video ? &(webrtc->encoded_video_frame_cb) : &(webrtc->encoded_audio_frame_cb);
- sink->media_types = is_video ? MEDIA_TYPE_VIDEO : MEDIA_TYPE_AUDIO;
+ sink->encoded_frame_cb = is_audio ? &(webrtc->encoded_audio_frame_cb) : &(webrtc->encoded_video_frame_cb);
+ sink->media_types = is_audio ? MEDIA_TYPE_AUDIO : MEDIA_TYPE_VIDEO;
- __invoke_track_added_cb(webrtc, track_name, is_video, false);
+ __invoke_track_added_cb(webrtc, track_name, is_audio, false);
if (!gst_element_sync_state_with_parent(GST_ELEMENT(sink->bin))) {
LOG_ERROR("failed to gst_element_sync_state_with_parent() for [%s]", GST_ELEMENT_NAME(sink->bin));