*/
int webrtc_set_ecore_wl_display(webrtc_h webrtc, unsigned int track_id, void *ecore_wl_window);
+/**
+ * @internal
+ * @brief Sets mute to the audio track.
+ * @since_tizen 6.5
+ * @remarks If @a mute is set to @c true, playback of audio track received from a remote peer will be muted.
+ * @param[in] webrtc WebRTC handle
+ * @param[in] track_id The track id
+ * @param[in] mute Mute or not (@c true = mute, @c false = not mute)
+ * @return @c 0 on success,
+ * otherwise a negative error value
+ * @retval #WEBRTC_ERROR_NONE Successful
+ * @retval #WEBRTC_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #WEBRTC_ERROR_INVALID_OPERATION Invalid operation
+ * @pre Call webrtc_set_sound_stream_info() before calling this function.
+ * @see webrtc_get_audio_mute()
+ */
+int webrtc_set_audio_mute(webrtc_h webrtc, unsigned int track_id, bool mute);
+
+/**
+ * @internal
+ * @brief Gets the mute state of the audio track.
+ * @since_tizen 6.5
+ * @remarks The default value is @c false.
+ * @param[in] webrtc WebRTC handle
+ * @param[in] track_id The track id
+ * @param[out] muted Muted or not (@c true = muted, @c false = not muted)
+ * @return @c 0 on success,
+ * otherwise a negative error value
+ * @retval #WEBRTC_ERROR_NONE Successful
+ * @retval #WEBRTC_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #WEBRTC_ERROR_INVALID_OPERATION Invalid operation
+ * @pre Call webrtc_set_sound_stream_info() before calling this function.
+ * @see webrtc_set_audio_mute()
+ */
+int webrtc_get_audio_mute(webrtc_h webrtc, unsigned int track_id, bool *muted);
+
/**
* @internal
* @brief Sets a video loopback to render the video frames of the media source to an ecore wayland display.
int _get_display_visible_from_sink(webrtc_s *webrtc, unsigned int track_id, bool *visible);
int _set_display_visible_to_loopback(webrtc_s *webrtc, unsigned int track_id, bool visible);
int _get_display_visible_from_loopback(webrtc_s *webrtc, unsigned int track_id, bool *visible);
+int _set_audio_mute_to_sink(webrtc_s *webrtc, unsigned int track_id, bool mute);
+int _get_audio_mute_from_sink(webrtc_s *webrtc, unsigned int track_id, bool *muted);
int _set_audio_loopback(webrtc_s *webrtc, unsigned int source_id, sound_stream_info_h stream_info, unsigned int *track_id);
int _set_video_loopback(webrtc_s *webrtc, unsigned int source_id, unsigned int type, void *display, unsigned int *track_id);
int _decodebin_autoplug_select_cb(GstElement *decodebin, GstPad *pad, GstCaps *caps, GstElementFactory *factory, gpointer user_data);
return ret;
}
+/* Public API: mutes/unmutes the audio track rendered for @track_id.
+ * Validates the handle, takes the API mutex for the duration of the call
+ * (released automatically via g_autoptr when the locker goes out of scope),
+ * then delegates to _set_audio_mute_to_sink(). */
+int webrtc_set_audio_mute(webrtc_h webrtc, unsigned int track_id, bool mute)
+{
+	g_autoptr(GMutexLocker) locker = NULL;
+	webrtc_s *_webrtc = (webrtc_s *)webrtc;
+
+	RET_VAL_IF(_webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+
+	/* Lock must be held before inspecting handle state below. */
+	locker = g_mutex_locker_new(&_webrtc->mutex);
+
+	/* NOTE(review): presumably audio sink slots only exist for tracks delivered
+	 * through the track added callback path — confirm against sink creation code. */
+	RET_VAL_IF(_webrtc->track_added_cb.callback == NULL, WEBRTC_ERROR_INVALID_OPERATION, "track added callback was not set");
+
+	return _set_audio_mute_to_sink(_webrtc, track_id, mute);
+}
+
+/* Public API: queries the mute state of the audio track for @track_id.
+ * Validates the handle and the out-parameter, takes the API mutex for the
+ * duration of the call (released via g_autoptr at scope exit), then
+ * delegates to _get_audio_mute_from_sink(). */
+int webrtc_get_audio_mute(webrtc_h webrtc, unsigned int track_id, bool *muted)
+{
+	g_autoptr(GMutexLocker) locker = NULL;
+	webrtc_s *_webrtc = (webrtc_s *)webrtc;
+
+	RET_VAL_IF(_webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+	RET_VAL_IF(muted == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "muted is NULL");
+
+	/* Lock must be held before inspecting handle state below. */
+	locker = g_mutex_locker_new(&_webrtc->mutex);
+
+	RET_VAL_IF(_webrtc->track_added_cb.callback == NULL, WEBRTC_ERROR_INVALID_OPERATION, "track added callback was not set");
+
+	return _get_audio_mute_from_sink(_webrtc, track_id, muted);
+}
+
int webrtc_media_source_set_video_loopback_to_ecore_wl(webrtc_h webrtc, unsigned int source_id, void *ecore_wl_window, unsigned int *track_id)
{
int ret = WEBRTC_ERROR_NONE;
#include <gst/video/videooverlay.h>
#include <media_packet_internal.h>
+#define ELEMENT_NAME_AUDIO_SINK "audioSink"
+
//LCOV_EXCL_START
bool _is_owner_of_track_build_context(webrtc_s *webrtc, unsigned int track_id)
{
if (!(audioresample = _create_element(DEFAULT_ELEMENT_AUDIORESAMPLE, NULL)))
return WEBRTC_ERROR_INVALID_OPERATION;
- if (!(audiosink = _create_element(webrtc->ini.rendering_sink.a_sink_element, NULL)))
+ if (!(audiosink = _create_element(webrtc->ini.rendering_sink.a_sink_element, ELEMENT_NAME_AUDIO_SINK)))
return WEBRTC_ERROR_INVALID_OPERATION;
if (g_object_class_find_property(G_OBJECT_GET_CLASS(G_OBJECT(audiosink)), "stream-properties")) {
RET_VAL_IF(sink == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "sink is NULL");
RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
RET_VAL_IF(sink->encoded_frame_cb != NULL, WEBRTC_ERROR_INVALID_OPERATION, "it may be a forwarding sink for encoded frame callback");
- RET_VAL_IF((sink->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_OPERATION, "it's not a audio track");
+ RET_VAL_IF((sink->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_OPERATION, "it's not an audio track");
RET_VAL_IF(strcmp(webrtc->ini.rendering_sink.a_sink_element, DEFAULT_AUDIO_SINK_ELEMENT), WEBRTC_ERROR_INVALID_OPERATION,
"it requires [%s] as an audio renderer", DEFAULT_AUDIO_SINK_ELEMENT);
return WEBRTC_ERROR_NONE;
}
+
+/* Internal: applies @mute to the "mute" property of the audio rendering sink
+ * element inside the sink bin of the track identified by @track_id.
+ * The slot must be a rendering (non-forwarding) audio sink with sound stream
+ * info already applied. */
+int _set_audio_mute_to_sink(webrtc_s *webrtc, unsigned int track_id, bool mute)
+{
+	webrtc_gst_slot_s *sink;
+	GstElement *audiosink;
+
+	RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+	RET_VAL_IF(track_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "track id is 0");
+
+	sink = __find_sink_slot_by_id(webrtc, track_id);
+	RET_VAL_IF(sink == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "sink is NULL");
+	RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
+	RET_VAL_IF(sink->encoded_frame_cb != NULL, WEBRTC_ERROR_INVALID_OPERATION, "it may be a forwarding sink for encoded frame callback");
+	RET_VAL_IF((sink->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_OPERATION, "it's not an audio track");
+	RET_VAL_IF(sink->sound_stream_info.type == NULL, WEBRTC_ERROR_INVALID_OPERATION, "sound_stream_info is not set");
+	RET_VAL_IF(!(audiosink = gst_bin_get_by_name(sink->bin, ELEMENT_NAME_AUDIO_SINK)), WEBRTC_ERROR_INVALID_OPERATION, "could not find audio sink element");
+
+	/* The property is a gboolean; cast explicitly for the varargs call. */
+	g_object_set(G_OBJECT(audiosink), "mute", (gboolean)mute, NULL);
+
+	/* gst_bin_get_by_name() returns a new reference (transfer full); release it
+	 * to avoid leaking a ref on the sink element every call. */
+	gst_object_unref(audiosink);
+
+	LOG_INFO("webrtc[%p] track_id[%u] mute[%u]", webrtc, track_id, mute);
+
+	return WEBRTC_ERROR_NONE;
+}
+
+/* Internal: reads the "mute" property of the audio rendering sink element of
+ * the track identified by @track_id into @muted.
+ * The slot must be a rendering (non-forwarding) audio sink with sound stream
+ * info already applied. */
+int _get_audio_mute_from_sink(webrtc_s *webrtc, unsigned int track_id, bool *muted)
+{
+	webrtc_gst_slot_s *sink;
+	GstElement *audiosink;
+	gboolean value = FALSE;
+
+	RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+	RET_VAL_IF(track_id == 0, WEBRTC_ERROR_INVALID_PARAMETER, "track id is 0");
+	RET_VAL_IF(muted == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "muted is NULL");
+
+	sink = __find_sink_slot_by_id(webrtc, track_id);
+	RET_VAL_IF(sink == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "sink is NULL");
+	RET_VAL_IF(sink->bin == NULL, WEBRTC_ERROR_INVALID_OPERATION, "bin is NULL");
+	RET_VAL_IF(sink->encoded_frame_cb != NULL, WEBRTC_ERROR_INVALID_OPERATION, "it may be a forwarding sink for encoded frame callback");
+	RET_VAL_IF((sink->media_types & MEDIA_TYPE_AUDIO) == 0x0, WEBRTC_ERROR_INVALID_OPERATION, "it's not an audio track");
+	RET_VAL_IF(sink->sound_stream_info.type == NULL, WEBRTC_ERROR_INVALID_OPERATION, "sound_stream_info is not set");
+	RET_VAL_IF(!(audiosink = gst_bin_get_by_name(sink->bin, ELEMENT_NAME_AUDIO_SINK)), WEBRTC_ERROR_INVALID_OPERATION, "could not find audio sink element");
+
+	/* "mute" is a gboolean (gint) property: read into a gboolean local instead
+	 * of writing 4 bytes through the (typically 1-byte) bool out-pointer. */
+	g_object_get(G_OBJECT(audiosink), "mute", &value, NULL);
+
+	/* gst_bin_get_by_name() returns a new reference (transfer full); release it
+	 * to avoid leaking a ref on the sink element every call. */
+	gst_object_unref(audiosink);
+
+	*muted = (value != FALSE);
+
+	LOG_INFO("webrtc[%p] track_id[%u] muted[%u]", webrtc, track_id, *muted);
+
+	return WEBRTC_ERROR_NONE;
+}
//LCOV_EXCL_STOP