return caps;
}
-static GstCaps *__make_default_raw_caps(webrtc_gst_slot_s *source, webrtc_ini_s *ini)
-{
- GstCaps *caps = NULL;
- const ini_item_media_source_s *ini_source;
- GstAudioInfo info;
- GstAudioFormat format;
-
- RET_VAL_IF(source == NULL, NULL, "source is NULL");
- RET_VAL_IF(ini == NULL, NULL, "ini is NULL");
-
- ini_source = _ini_get_source_by_type(ini, source->type);
- if (ini_source == NULL)
- ini_source = &ini->media_source;
-
- switch (source->type) {
- case WEBRTC_MEDIA_SOURCE_TYPE_VIDEOTEST:
- case WEBRTC_MEDIA_SOURCE_TYPE_CAMERA:
- case WEBRTC_MEDIA_SOURCE_TYPE_SCREEN:
- case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_VIDEO: {
- caps = gst_caps_new_simple(MEDIA_TYPE_VIDEO_RAW,
- "format", G_TYPE_STRING, ini_source->v_raw_format,
- "framerate", GST_TYPE_FRACTION, source->video_info.framerate, 1,
- "width", G_TYPE_INT, source->video_info.width,
- "height", G_TYPE_INT, source->video_info.height,
- NULL);
- break;
- }
- case WEBRTC_MEDIA_SOURCE_TYPE_AUDIOTEST:
- case WEBRTC_MEDIA_SOURCE_TYPE_MIC:
- case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_AUDIO:
- format = _get_gst_audio_raw_format_from_string(ini_source->a_raw_format);
- RET_VAL_IF(format == GST_AUDIO_FORMAT_UNKNOWN, NULL, "not supported raw format");
-
- gst_audio_info_set_format(&info, format, ini_source->a_samplerate, ini_source->a_channels, NULL);
- caps = gst_audio_info_to_caps(&info);
- break;
-
- case WEBRTC_MEDIA_SOURCE_TYPE_MEDIA_PACKET: {
- RET_VAL_IF(source->media_format == NULL, NULL, "media_format is NULL");
-
- caps = _make_mediapacketsrc_raw_caps_from_media_format(source);
- break;
- }
- default:
- LOG_ERROR_IF_REACHED("type(%d)", source->type);
- break;
- }
-
- return caps;
-}
-
-/* Use g_free() to free the media_type parameter. */
-static GstCaps *__make_default_encoded_caps(webrtc_gst_slot_s *source, webrtc_ini_s *ini, gchar **media_type)
-{
- GstCaps *caps;
- const ini_item_media_source_s *ini_source;
- const char *_media_type;
-
- RET_VAL_IF(source == NULL, NULL, "source is NULL");
- RET_VAL_IF(ini == NULL, NULL, "ini is NULL");
-
- ini_source = _ini_get_source_by_type(ini, source->type);
- if (ini_source == NULL)
- ini_source = &ini->media_source;
-
- if (source->media_types == MEDIA_TYPE_AUDIO)
- RET_VAL_IF(ini_source->a_codecs == NULL, NULL, "a_codecs is NULL");
- else if (source->media_types == MEDIA_TYPE_VIDEO)
- RET_VAL_IF(ini_source->v_codecs == NULL, NULL, "v_codecs is NULL");
-
- switch (source->type) {
- case WEBRTC_MEDIA_SOURCE_TYPE_VIDEOTEST:
- case WEBRTC_MEDIA_SOURCE_TYPE_CAMERA:
- case WEBRTC_MEDIA_SOURCE_TYPE_SCREEN:
- case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_VIDEO:
- _media_type = _get_video_media_type(source->av[AV_IDX_VIDEO].codec);
- RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
- caps = _get_caps_from_encoded_video_media_type(_media_type, source->video_info.width, source->video_info.height);
- break;
-
- case WEBRTC_MEDIA_SOURCE_TYPE_AUDIOTEST:
- case WEBRTC_MEDIA_SOURCE_TYPE_MIC:
- case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_AUDIO:
- _media_type = _get_audio_media_type(source->av[AV_IDX_AUDIO].codec);
- RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
-
- caps = _get_caps_from_encoded_audio_media_type(_media_type, ini_source->a_channels, ini_source->a_samplerate);
- break;
-
- case WEBRTC_MEDIA_SOURCE_TYPE_MEDIA_PACKET:
- if (source->media_types == MEDIA_TYPE_AUDIO) {
- _media_type = _get_audio_media_type(source->av[AV_IDX_AUDIO].codec);
- RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
-
- caps = _get_caps_from_encoded_audio_media_type(_media_type, ini_source->a_channels, ini_source->a_samplerate);
-
- } else if (source->media_types == MEDIA_TYPE_VIDEO) {
- _media_type = _get_video_media_type(source->av[AV_IDX_VIDEO].codec);
- RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
-
- caps = _get_caps_from_encoded_video_media_type(_media_type, ini_source->v_width, ini_source->v_height);
-
- } else {
- LOG_ERROR_IF_REACHED("source->media_types(0x%x)", source->media_types);
- return NULL;
- }
- break;
-
- default:
- LOG_ERROR_IF_REACHED("type(%d)", source->type);
- return NULL;
- }
-
- if (media_type)
- *media_type = g_strdup(_media_type);
-
- return caps;
-}
-
-//LCOV_EXCL_START
-static GstElement *__get_hw_encoder_element(webrtc_s *webrtc, webrtc_gst_slot_s *source)
-{
- const ini_item_media_source_s *ini_source;
- GstElement *encoder = NULL;
-
- RET_VAL_IF(webrtc == NULL, NULL, "webrtc is NULL");
- RET_VAL_IF(source == NULL, NULL, "source is NULL");
-
- ini_source = _ini_get_source_by_type(&webrtc->ini, source->type);
-
- switch (source->media_types) {
- case MEDIA_TYPE_AUDIO:
- if (ini_source && ini_source->a_hw_encoder_element)
- return _create_element(ini_source->a_hw_encoder_element, NULL);
- else if (webrtc->ini.media_source.a_hw_encoder_element)
- return _create_element(webrtc->ini.media_source.a_hw_encoder_element, NULL);
- break;
- case MEDIA_TYPE_VIDEO:
- if (ini_source && ini_source->v_hw_encoder_element)
- encoder = _create_element(ini_source->v_hw_encoder_element, NULL);
- else if (webrtc->ini.media_source.v_hw_encoder_element)
- encoder = _create_element(webrtc->ini.media_source.v_hw_encoder_element, NULL);
- break;
- default:
- LOG_ERROR_IF_REACHED("type(0x%x)", source->media_types);
- }
-
- if (encoder) {
-#ifndef TIZEN_TV
- if (webrtc->ini.resource_acquisition.video_encoder)
- webrtc->resource.need_to_acquire[MM_RESOURCE_MANAGER_RES_TYPE_VIDEO_ENCODER] = true;
-#endif
- LOG_WARNING("hw encoder element [%s]", GST_ELEMENT_NAME(encoder));
- return encoder;
- }
-
- LOG_DEBUG("no hw encoder is requested, source_type(%d), media_types(0x%x)", source->type, source->media_types);
- return NULL;
-}
-//LCOV_EXCL_STOP
-
static void __return_payload_type(webrtc_s *webrtc, unsigned int payload_type)
{
int i;
webrtc->payload_types ^= bitmask;
}
-static GstElement * __prepare_encoder(webrtc_s *webrtc, webrtc_gst_slot_s *source, bool is_audio)
-{
- GstElement *encoder = NULL;
- element_info_s elem_info;
- const gchar *encoder_klass_name = NULL;
- gchar *encoder_name = NULL;
-
- RET_VAL_IF(webrtc == NULL, NULL, "webrtc is NULL");
- RET_VAL_IF(source == NULL, NULL, "source is NULL");
-
- encoder_klass_name = is_audio ? GST_KLASS_NAME_ENCODER_AUDIO : GST_KLASS_NAME_ENCODER_VIDEO;
-
- if (source->zerocopy_enabled)
- encoder = __get_hw_encoder_element(webrtc, source);
- else
- CREATE_ELEMENT_FROM_REGISTRY(elem_info, encoder_klass_name,
- __make_default_raw_caps(source, &webrtc->ini),
- __make_default_encoded_caps(source, &webrtc->ini, NULL),
- webrtc->ini.general.gst_excluded_elements,
- encoder);
-
- RET_VAL_IF(encoder == NULL, NULL, "encoder is NULL");
-
- encoder_name = gst_element_get_name(encoder);
- if (g_strrstr(encoder_name, "vp8enc") || g_strrstr(encoder_name, "vp9enc")) {
- g_object_set(G_OBJECT(encoder),
- "threads", webrtc->ini.vpxenc_params.threads,
- "end-usage", webrtc->ini.vpxenc_params.end_usage,
- "cpu-used", webrtc->ini.vpxenc_params.cpu_used,
- "target-bitrate", webrtc->ini.vpxenc_params.target_bitrate,
- "keyframe-max-dist", webrtc->ini.vpxenc_params.keyframe_max_dist,
- "max-quantizer", webrtc->ini.vpxenc_params.max_quantizer,
- "min-quantizer", webrtc->ini.vpxenc_params.min_quantizer,
- "undershoot", webrtc->ini.vpxenc_params.undershoot,
- NULL);
-
- LOG_DEBUG("[%s] end-usage(%d) cpu-used(%d) target-bitrate(%d) keyframe-max-dist(%d) max-quantizer(%d) min-quantizer(%d) undershoot(%d)",
- encoder_name, webrtc->ini.vpxenc_params.end_usage, webrtc->ini.vpxenc_params.cpu_used, webrtc->ini.vpxenc_params.target_bitrate,
- webrtc->ini.vpxenc_params.keyframe_max_dist, webrtc->ini.vpxenc_params.max_quantizer, webrtc->ini.vpxenc_params.min_quantizer,
- webrtc->ini.vpxenc_params.undershoot);
-
- } else if (g_strrstr(encoder_name, "opusenc")) {
- if (source->av[AV_IDX_AUDIO].inbandfec) {
- g_object_set(G_OBJECT(encoder),
- "inband-fec", TRUE,
- "packet-loss-percentage", source->av[AV_IDX_AUDIO].packet_loss_percentage,
- NULL);
-
- LOG_DEBUG("[%s] inband-fec(%d), packet-loss-percentage(%d)",
- encoder_name, TRUE, source->av[AV_IDX_AUDIO].packet_loss_percentage);
- }
- }
-
- g_free(encoder_name);
- return encoder;
-}
-
static bool __link_switch_srcs(GstElement *switch_element, GList *switch_src_list)
{
GstElement *element;
}
//LCOV_EXCL_STOP
-int __create_rest_of_elements(webrtc_s *webrtc, webrtc_gst_slot_s *source, bool need_capsfilter, GList **element_list, bool is_audio)
-{
- GstElement *encoder = NULL;
- GstElement *payloader;
- GstElement *queue;
- GstElement *capsfilter2;
- GstElement *videocrop;
- GstCaps *sink_caps;
- element_info_s elem_info;
- gchar *media_type = NULL;
- int idx;
-
- RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
- RET_VAL_IF(source == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "source is NULL");
- RET_VAL_IF(element_list == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "element_list is NULL");
-
- idx = GET_AV_IDX(is_audio);
-
- if (need_capsfilter) {
- GstElement *capsfilter = _create_element(DEFAULT_ELEMENT_CAPSFILTER, ELEMENT_NAME_FIRST_CAPSFILTER);
- if (!capsfilter)
- return WEBRTC_ERROR_INVALID_OPERATION;
- APPEND_ELEMENT(*element_list, capsfilter);
-
- if (_is_encoded_format_supported(source->type, &webrtc->ini)) {
- if ((sink_caps = __make_default_encoded_caps(source, &webrtc->ini, NULL))) {
- PRINT_CAPS(sink_caps, "capsfilter");
- g_object_set(G_OBJECT(capsfilter), "caps", sink_caps, NULL);
- source->av[idx].render.appsrc_caps = sink_caps;
- if (source->av[idx].render.appsrc)
- g_object_set(G_OBJECT(source->av[idx].render.appsrc), "caps", sink_caps, NULL);
- }
-
- source->av[idx].render.need_decoding = true;
- _add_probe_to_pad_for_render(source, idx, gst_element_get_static_pad(capsfilter, "src"), _source_data_probe_cb);
-
- goto skip_encoder;
- }
-
- if ((sink_caps = __make_default_raw_caps(source, &webrtc->ini))) {
- PRINT_CAPS(sink_caps, "capsfilter");
- g_object_set(G_OBJECT(capsfilter), "caps", sink_caps, NULL);
- source->av[idx].render.appsrc_caps = sink_caps;
- if (source->av[idx].render.appsrc)
- g_object_set(G_OBJECT(source->av[idx].render.appsrc), "caps", sink_caps, NULL);
- }
-
- _add_probe_to_pad_for_render(source, idx, gst_element_get_static_pad(capsfilter, "src"), _source_data_probe_cb);
- }
-
- if (source->type == WEBRTC_MEDIA_SOURCE_TYPE_SCREEN && !source->zerocopy_enabled) {
- if (!(videocrop = _create_element(DEFAULT_ELEMENT_VIDEOCROP, ELEMENT_NAME_VIDEOCROP)))
- goto error;
- APPEND_ELEMENT(*element_list, videocrop);
- }
-
- encoder = __prepare_encoder(webrtc, source, is_audio);
- if (encoder == NULL) {
- _remove_probe_from_pad_for_render(source, idx);
- return WEBRTC_ERROR_INVALID_OPERATION;
- }
- APPEND_ELEMENT(*element_list, encoder);
-
- source->av[idx].render.need_decoding = false;
-
-skip_encoder:
- CREATE_ELEMENT_FROM_REGISTRY(elem_info, GST_KLASS_NAME_PAYLOADER_RTP,
- __make_default_encoded_caps(source, &webrtc->ini, &media_type),
- NULL,
- NULL,
- payloader);
- if (payloader == NULL)
- goto error;
- APPEND_ELEMENT(*element_list, payloader);
-
- if (!(queue = _create_element(DEFAULT_ELEMENT_QUEUE, NULL)))
- goto error;
- APPEND_ELEMENT(*element_list, queue);
-
- if (!(capsfilter2 = _create_element(DEFAULT_ELEMENT_CAPSFILTER, ELEMENT_NAME_RTP_CAPSFILTER)))
- goto error;
- APPEND_ELEMENT(*element_list, capsfilter2);
-
- if(_set_payload_type(webrtc, source, idx, media_type) != WEBRTC_ERROR_NONE)
- goto error;
-
- if ((sink_caps = _make_rtp_caps(media_type, source->av[idx].pt, source))) {
- g_object_set(G_OBJECT(capsfilter2), "caps", sink_caps, NULL);
- gst_caps_unref(sink_caps);
- }
-
- g_free(media_type);
-
- return WEBRTC_ERROR_NONE;
-
-error:
- _remove_probe_from_pad_for_render(source, idx);
- g_free(media_type);
-
- return WEBRTC_ERROR_INVALID_OPERATION;
-}
-
static int __complete_rest_of_videosrc(webrtc_s *webrtc, webrtc_gst_slot_s *source)
{
GList *element_list = NULL;
}
}
- if (__create_rest_of_elements(webrtc, source, true, &element_list, false) != WEBRTC_ERROR_NONE)
+ if (_create_rest_of_elements(webrtc, source, true, &element_list, false) != WEBRTC_ERROR_NONE)
goto exit;
if (!_add_elements_to_bin(source->bin, element_list)) {
source->av[AV_IDX_AUDIO].inbandfec = ini_source->use_inbandfec;
source->av[AV_IDX_AUDIO].packet_loss_percentage = ini_source->packet_loss_percentage;
- if (__create_rest_of_elements(webrtc, source, true, &element_list, true) != WEBRTC_ERROR_NONE)
+ if (_create_rest_of_elements(webrtc, source, true, &element_list, true) != WEBRTC_ERROR_NONE)
goto exit;
if (!_add_elements_to_bin(source->bin, element_list)) {
source->zerocopy_enabled = _is_hw_encoder_used(webrtc, source->type, source->media_types);
- if ((ret = __create_rest_of_elements(webrtc, source, false, &element_list, (source->media_types == MEDIA_TYPE_AUDIO))) != WEBRTC_ERROR_NONE)
+ if ((ret = _create_rest_of_elements(webrtc, source, false, &element_list, (source->media_types == MEDIA_TYPE_AUDIO))) != WEBRTC_ERROR_NONE)
goto exit;
if (!(sink_caps = _make_mediapacketsrc_raw_caps_from_media_format(source))) {
}
return ret;
}
+
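+/* Builds default raw caps for the slot: video sources use the ini raw format plus the
+ * slot's width/height/framerate, audio sources use the ini sample rate/channels, and
+ * media-packet sources derive caps from the attached media format. Returns NULL on error. */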
+static GstCaps *__make_default_raw_caps(webrtc_gst_slot_s *source, webrtc_ini_s *ini)
+{
+ GstCaps *caps = NULL;
+ const ini_item_media_source_s *ini_source;
+ GstAudioInfo info;
+ GstAudioFormat format;
+
+ RET_VAL_IF(source == NULL, NULL, "source is NULL");
+ RET_VAL_IF(ini == NULL, NULL, "ini is NULL");
+
+ ini_source = _ini_get_source_by_type(ini, source->type);
+ if (ini_source == NULL)
+ ini_source = &ini->media_source;
+
+ switch (source->type) {
+ case WEBRTC_MEDIA_SOURCE_TYPE_VIDEOTEST:
+ case WEBRTC_MEDIA_SOURCE_TYPE_CAMERA:
+ case WEBRTC_MEDIA_SOURCE_TYPE_SCREEN:
+ case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_VIDEO: {
+ caps = gst_caps_new_simple(MEDIA_TYPE_VIDEO_RAW,
+ "format", G_TYPE_STRING, ini_source->v_raw_format,
+ "framerate", GST_TYPE_FRACTION, source->video_info.framerate, 1,
+ "width", G_TYPE_INT, source->video_info.width,
+ "height", G_TYPE_INT, source->video_info.height,
+ NULL);
+ break;
+ }
+ case WEBRTC_MEDIA_SOURCE_TYPE_AUDIOTEST:
+ case WEBRTC_MEDIA_SOURCE_TYPE_MIC:
+ case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_AUDIO:
+ format = _get_gst_audio_raw_format_from_string(ini_source->a_raw_format);
+ RET_VAL_IF(format == GST_AUDIO_FORMAT_UNKNOWN, NULL, "unsupported raw format");
+
+ gst_audio_info_set_format(&info, format, ini_source->a_samplerate, ini_source->a_channels, NULL);
+ caps = gst_audio_info_to_caps(&info);
+ break;
+
+ case WEBRTC_MEDIA_SOURCE_TYPE_MEDIA_PACKET: {
+ RET_VAL_IF(source->media_format == NULL, NULL, "media_format is NULL");
+
+ caps = _make_mediapacketsrc_raw_caps_from_media_format(source);
+ break;
+ }
+ default:
+ LOG_ERROR_IF_REACHED("type(%d)", source->type);
+ break;
+ }
+
+ return caps;
+}
+
+/* Builds default caps describing the encoded output of the given source type, based on the
+ * codec selected for the slot and the ini settings. The string returned via the media_type
+ * out parameter must be released by the caller with g_free(). */
+static GstCaps *__make_default_encoded_caps(webrtc_gst_slot_s *source, webrtc_ini_s *ini, gchar **media_type)
+{
+ GstCaps *caps;
+ const ini_item_media_source_s *ini_source;
+ const char *_media_type;
+
+ RET_VAL_IF(source == NULL, NULL, "source is NULL");
+ RET_VAL_IF(ini == NULL, NULL, "ini is NULL");
+
+ ini_source = _ini_get_source_by_type(ini, source->type);
+ if (ini_source == NULL)
+ ini_source = &ini->media_source;
+
+ if (source->media_types == MEDIA_TYPE_AUDIO)
+ RET_VAL_IF(ini_source->a_codecs == NULL, NULL, "a_codecs is NULL");
+ else if (source->media_types == MEDIA_TYPE_VIDEO)
+ RET_VAL_IF(ini_source->v_codecs == NULL, NULL, "v_codecs is NULL");
+
+ switch (source->type) {
+ case WEBRTC_MEDIA_SOURCE_TYPE_VIDEOTEST:
+ case WEBRTC_MEDIA_SOURCE_TYPE_CAMERA:
+ case WEBRTC_MEDIA_SOURCE_TYPE_SCREEN:
+ case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_VIDEO:
+ _media_type = _get_video_media_type(source->av[AV_IDX_VIDEO].codec);
+ RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
+ caps = _get_caps_from_encoded_video_media_type(_media_type, source->video_info.width, source->video_info.height);
+ break;
+
+ case WEBRTC_MEDIA_SOURCE_TYPE_AUDIOTEST:
+ case WEBRTC_MEDIA_SOURCE_TYPE_MIC:
+ case WEBRTC_MEDIA_SOURCE_TYPE_CUSTOM_AUDIO:
+ _media_type = _get_audio_media_type(source->av[AV_IDX_AUDIO].codec);
+ RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
+
+ caps = _get_caps_from_encoded_audio_media_type(_media_type, ini_source->a_channels, ini_source->a_samplerate);
+ break;
+
+ case WEBRTC_MEDIA_SOURCE_TYPE_MEDIA_PACKET:
+ if (source->media_types == MEDIA_TYPE_AUDIO) {
+ _media_type = _get_audio_media_type(source->av[AV_IDX_AUDIO].codec);
+ RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
+
+ caps = _get_caps_from_encoded_audio_media_type(_media_type, ini_source->a_channels, ini_source->a_samplerate);
+
+ } else if (source->media_types == MEDIA_TYPE_VIDEO) {
+ _media_type = _get_video_media_type(source->av[AV_IDX_VIDEO].codec);
+ RET_VAL_IF(_media_type == NULL, NULL, "_media_type is NULL");
+
+ caps = _get_caps_from_encoded_video_media_type(_media_type, ini_source->v_width, ini_source->v_height);
+
+ } else {
+ LOG_ERROR_IF_REACHED("source->media_types(0x%x)", source->media_types);
+ return NULL;
+ }
+ break;
+
+ default:
+ LOG_ERROR_IF_REACHED("type(%d)", source->type);
+ return NULL;
+ }
+
+ if (media_type)
+ *media_type = g_strdup(_media_type);
+
+ return caps;
+}
+
+//LCOV_EXCL_START
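+/* Creates the H/W encoder element configured in the ini for this source type, falling back
+ * to the common media_source section. Returns NULL when no H/W encoder is configured. */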
+static GstElement *__get_hw_encoder_element(webrtc_s *webrtc, webrtc_gst_slot_s *source)
+{
+ const ini_item_media_source_s *ini_source;
+ GstElement *encoder = NULL;
+
+ RET_VAL_IF(webrtc == NULL, NULL, "webrtc is NULL");
+ RET_VAL_IF(source == NULL, NULL, "source is NULL");
+
+ ini_source = _ini_get_source_by_type(&webrtc->ini, source->type);
+
+ switch (source->media_types) {
+ case MEDIA_TYPE_AUDIO:
+ if (ini_source && ini_source->a_hw_encoder_element)
+ return _create_element(ini_source->a_hw_encoder_element, NULL);
+ else if (webrtc->ini.media_source.a_hw_encoder_element)
+ return _create_element(webrtc->ini.media_source.a_hw_encoder_element, NULL);
+ break;
+ case MEDIA_TYPE_VIDEO:
+ if (ini_source && ini_source->v_hw_encoder_element)
+ encoder = _create_element(ini_source->v_hw_encoder_element, NULL);
+ else if (webrtc->ini.media_source.v_hw_encoder_element)
+ encoder = _create_element(webrtc->ini.media_source.v_hw_encoder_element, NULL);
+ break;
+ default:
+ LOG_ERROR_IF_REACHED("type(0x%x)", source->media_types);
+ }
+
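+ /* Only the video path can reach here with a non-NULL encoder; flag the video encoder
+ * resource for acquisition when enabled in the ini (skipped on the TV profile). */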
+ if (encoder) {
+#ifndef TIZEN_TV
+ if (webrtc->ini.resource_acquisition.video_encoder)
+ webrtc->resource.need_to_acquire[MM_RESOURCE_MANAGER_RES_TYPE_VIDEO_ENCODER] = true;
+#endif
+ LOG_WARNING("hw encoder element [%s]", GST_ELEMENT_NAME(encoder));
+ return encoder;
+ }
+
+ LOG_DEBUG("no hw encoder is requested, source_type(%d), media_types(0x%x)", source->type, source->media_types);
+ return NULL;
+}
+//LCOV_EXCL_STOP
+
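+/* Prepares an encoder element for the source: the H/W encoder from the ini when zerocopy
+ * is enabled, otherwise an encoder found in the GStreamer registry by klass name that
+ * handles the default raw and encoded caps. */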
+static GstElement *__prepare_encoder(webrtc_s *webrtc, webrtc_gst_slot_s *source, bool is_audio)
+{
+ GstElement *encoder = NULL;
+ element_info_s elem_info;
+ const gchar *encoder_klass_name = NULL;
+ gchar *encoder_name = NULL;
+
+ RET_VAL_IF(webrtc == NULL, NULL, "webrtc is NULL");
+ RET_VAL_IF(source == NULL, NULL, "source is NULL");
+
+ encoder_klass_name = is_audio ? GST_KLASS_NAME_ENCODER_AUDIO : GST_KLASS_NAME_ENCODER_VIDEO;
+
+ if (source->zerocopy_enabled)
+ encoder = __get_hw_encoder_element(webrtc, source);
+ else
+ CREATE_ELEMENT_FROM_REGISTRY(elem_info, encoder_klass_name,
+ __make_default_raw_caps(source, &webrtc->ini),
+ __make_default_encoded_caps(source, &webrtc->ini, NULL),
+ webrtc->ini.general.gst_excluded_elements,
+ encoder);
+
+ RET_VAL_IF(encoder == NULL, NULL, "encoder is NULL");
+
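+ /* Apply encoder-specific tuning from the ini: vpxenc parameters for VP8/VP9 and, when
+ * enabled on the source, in-band FEC with the configured packet loss percentage for Opus. */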
+ encoder_name = gst_element_get_name(encoder);
+ if (g_strrstr(encoder_name, "vp8enc") || g_strrstr(encoder_name, "vp9enc")) {
+ g_object_set(G_OBJECT(encoder),
+ "threads", webrtc->ini.vpxenc_params.threads,
+ "end-usage", webrtc->ini.vpxenc_params.end_usage,
+ "cpu-used", webrtc->ini.vpxenc_params.cpu_used,
+ "target-bitrate", webrtc->ini.vpxenc_params.target_bitrate,
+ "keyframe-max-dist", webrtc->ini.vpxenc_params.keyframe_max_dist,
+ "max-quantizer", webrtc->ini.vpxenc_params.max_quantizer,
+ "min-quantizer", webrtc->ini.vpxenc_params.min_quantizer,
+ "undershoot", webrtc->ini.vpxenc_params.undershoot,
+ NULL);
+
+ LOG_DEBUG("[%s] end-usage(%d) cpu-used(%d) target-bitrate(%d) keyframe-max-dist(%d) max-quantizer(%d) min-quantizer(%d) undershoot(%d)",
+ encoder_name, webrtc->ini.vpxenc_params.end_usage, webrtc->ini.vpxenc_params.cpu_used, webrtc->ini.vpxenc_params.target_bitrate,
+ webrtc->ini.vpxenc_params.keyframe_max_dist, webrtc->ini.vpxenc_params.max_quantizer, webrtc->ini.vpxenc_params.min_quantizer,
+ webrtc->ini.vpxenc_params.undershoot);
+
+ } else if (g_strrstr(encoder_name, "opusenc")) {
+ if (source->av[AV_IDX_AUDIO].inbandfec) {
+ g_object_set(G_OBJECT(encoder),
+ "inband-fec", TRUE,
+ "packet-loss-percentage", source->av[AV_IDX_AUDIO].packet_loss_percentage,
+ NULL);
+
+ LOG_DEBUG("[%s] inband-fec(%d), packet-loss-percentage(%d)",
+ encoder_name, TRUE, source->av[AV_IDX_AUDIO].packet_loss_percentage);
+ }
+ }
+
+ g_free(encoder_name);
+ return encoder;
+}
+
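+/* Appends the source's common downstream elements to element_list:
+ * [capsfilter] -> [videocrop] -> encoder -> RTP payloader -> queue -> RTP capsfilter.
+ * The capsfilter, videocrop and encoder stages are conditional (see below); the encoder is
+ * skipped entirely when the source already provides an encoded format. */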
+int _create_rest_of_elements(webrtc_s *webrtc, webrtc_gst_slot_s *source, bool need_capsfilter, GList **element_list, bool is_audio)
+{
+ GstElement *encoder = NULL;
+ GstElement *payloader;
+ GstElement *queue;
+ GstElement *capsfilter2;
+ GstElement *videocrop;
+ GstCaps *sink_caps;
+ element_info_s elem_info;
+ gchar *media_type = NULL;
+ int idx;
+
+ RET_VAL_IF(webrtc == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "webrtc is NULL");
+ RET_VAL_IF(source == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "source is NULL");
+ RET_VAL_IF(element_list == NULL, WEBRTC_ERROR_INVALID_PARAMETER, "element_list is NULL");
+
+ idx = GET_AV_IDX(is_audio);
+
+ if (need_capsfilter) {
+ GstElement *capsfilter = _create_element(DEFAULT_ELEMENT_CAPSFILTER, ELEMENT_NAME_FIRST_CAPSFILTER);
+ if (!capsfilter)
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ APPEND_ELEMENT(*element_list, capsfilter);
+
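+ /* Sources that can deliver already-encoded frames get the encoded caps on the capsfilter,
+ * are marked as needing decoding for local rendering, and skip the encoder below. */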
+ if (_is_encoded_format_supported(source->type, &webrtc->ini)) {
+ if ((sink_caps = __make_default_encoded_caps(source, &webrtc->ini, NULL))) {
+ PRINT_CAPS(sink_caps, "capsfilter");
+ g_object_set(G_OBJECT(capsfilter), "caps", sink_caps, NULL);
+ source->av[idx].render.appsrc_caps = sink_caps;
+ if (source->av[idx].render.appsrc)
+ g_object_set(G_OBJECT(source->av[idx].render.appsrc), "caps", sink_caps, NULL);
+ }
+
+ source->av[idx].render.need_decoding = true;
+ _add_probe_to_pad_for_render(source, idx, gst_element_get_static_pad(capsfilter, "src"), _source_data_probe_cb);
+
+ goto skip_encoder;
+ }
+
+ if ((sink_caps = __make_default_raw_caps(source, &webrtc->ini))) {
+ PRINT_CAPS(sink_caps, "capsfilter");
+ g_object_set(G_OBJECT(capsfilter), "caps", sink_caps, NULL);
+ source->av[idx].render.appsrc_caps = sink_caps;
+ if (source->av[idx].render.appsrc)
+ g_object_set(G_OBJECT(source->av[idx].render.appsrc), "caps", sink_caps, NULL);
+ }
+
+ _add_probe_to_pad_for_render(source, idx, gst_element_get_static_pad(capsfilter, "src"), _source_data_probe_cb);
+ }
+
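+ /* Screen sources on the non-zerocopy path get a videocrop element in the chain. */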
+ if (source->type == WEBRTC_MEDIA_SOURCE_TYPE_SCREEN && !source->zerocopy_enabled) {
+ if (!(videocrop = _create_element(DEFAULT_ELEMENT_VIDEOCROP, ELEMENT_NAME_VIDEOCROP)))
+ goto error;
+ APPEND_ELEMENT(*element_list, videocrop);
+ }
+
+ encoder = __prepare_encoder(webrtc, source, is_audio);
+ if (encoder == NULL) {
+ _remove_probe_from_pad_for_render(source, idx);
+ return WEBRTC_ERROR_INVALID_OPERATION;
+ }
+ APPEND_ELEMENT(*element_list, encoder);
+
+ source->av[idx].render.need_decoding = false;
+
+skip_encoder:
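+ /* Look up an RTP payloader in the registry for the encoded caps; this also yields the
+ * media type string used for the payload type and the RTP caps below. */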
+ CREATE_ELEMENT_FROM_REGISTRY(elem_info, GST_KLASS_NAME_PAYLOADER_RTP,
+ __make_default_encoded_caps(source, &webrtc->ini, &media_type),
+ NULL,
+ NULL,
+ payloader);
+ if (payloader == NULL)
+ goto error;
+ APPEND_ELEMENT(*element_list, payloader);
+
+ if (!(queue = _create_element(DEFAULT_ELEMENT_QUEUE, NULL)))
+ goto error;
+ APPEND_ELEMENT(*element_list, queue);
+
+ if (!(capsfilter2 = _create_element(DEFAULT_ELEMENT_CAPSFILTER, ELEMENT_NAME_RTP_CAPSFILTER)))
+ goto error;
+ APPEND_ELEMENT(*element_list, capsfilter2);
+
+ if (_set_payload_type(webrtc, source, idx, media_type) != WEBRTC_ERROR_NONE)
+ goto error;
+
+ if ((sink_caps = _make_rtp_caps(media_type, source->av[idx].pt, source))) {
+ g_object_set(G_OBJECT(capsfilter2), "caps", sink_caps, NULL);
+ gst_caps_unref(sink_caps);
+ }
+
+ g_free(media_type);
+
+ return WEBRTC_ERROR_NONE;
+
+error:
+ _remove_probe_from_pad_for_render(source, idx);
+ g_free(media_type);
+
+ return WEBRTC_ERROR_INVALID_OPERATION;
+}