}
//LCOV_EXCL_STOP
-static GstCaps *__make_rtp_caps(const gchar *media_type, unsigned int payload_id)
+static GstCaps *__make_rtp_caps(const gchar *media_type, unsigned int payload_id, webrtc_gst_slot_s *source)
{
GstCaps *caps;
bool is_audio;

is_audio = (g_strrstr(media_type, "audio")) ? true : false;
caps = gst_caps_new_simple("application/x-rtp",
"media", G_TYPE_STRING, GET_MEDIA_TYPE_NAME(is_audio),
- "payload", G_TYPE_INT, payload_id, NULL);
+ "payload", G_TYPE_INT, payload_id,
+ NULL);
+
+ if (is_audio && source->av[AV_IDX_AUDIO].inbandfec)
+ /* NOTE: set this as a string due to the parsing logic in gstwebrtcbin.c */
+ gst_structure_set(gst_caps_get_structure(caps, 0), "useinbandfec", G_TYPE_STRING, "1", NULL);
PRINT_CAPS(caps, "RTP");
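/* For illustration: with an audio source that has inband FEC enabled, the
 * caps built above would serialize to something like the following (the
 * payload id 111 is a hypothetical example; it is assigned at runtime):
 *   application/x-rtp, media=(string)audio, payload=(int)111, useinbandfec=(string)1
 */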
"min-quantizer", webrtc->ini.vpxenc_params.min_quantizer,
"undershoot", webrtc->ini.vpxenc_params.undershoot,
NULL);
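/* These ini keys mirror the vp8enc/vp9enc element properties of the same
 * names: end-usage selects the libvpx rate-control mode (0: vbr, 1: cbr,
 * 2: cq), target-bitrate is in bits per second, and min-/max-quantizer
 * bound the allowed quantization range.
 */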
+
+ LOG_DEBUG("[%s] end-usage(%d) cpu-used(%d) target-bitrate(%d) keyframe-max-dist(%d) max-quantizer(%d) min-quantizer(%d) undershoot(%d)",
+ encoder_name, webrtc->ini.vpxenc_params.end_usage, webrtc->ini.vpxenc_params.cpu_used, webrtc->ini.vpxenc_params.target_bitrate,
+ webrtc->ini.vpxenc_params.keyframe_max_dist, webrtc->ini.vpxenc_params.max_quantizer, webrtc->ini.vpxenc_params.min_quantizer,
+ webrtc->ini.vpxenc_params.undershoot);
+
+ } else if (g_strrstr(encoder_name, "opusenc")) {
+ if (source->av[AV_IDX_AUDIO].inbandfec) {
+ g_object_set(G_OBJECT(encoder), "inband-fec", TRUE, NULL);
+ g_object_set(G_OBJECT(encoder), "packet-loss-percentage", 10, NULL); /* TODO: set this value from ini or API */
+ LOG_DEBUG("[%s] inband-fec(%d)", encoder_name, TRUE);
+ }
}
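/* A note on the Opus branch above: inband-fec makes opusenc embed a
 * low-bitrate copy of each frame in the following packet so the decoder
 * can conceal single packet losses, and packet-loss-percentage is the
 * loss rate the encoder should budget redundancy for (10 is provisional,
 * see the TODO). The equivalent standalone pipeline fragment would be
 * roughly:
 *   opusenc inband-fec=true packet-loss-percentage=10 ! rtpopuspay
 */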
- g_free(encoder_name);
+ g_free(encoder_name);
return encoder;
}
source->av[idx].payload_id = payload_id;
- if ((sink_caps = __make_rtp_caps(media_type, payload_id))) {
+ if ((sink_caps = __make_rtp_caps(media_type, payload_id, source))) {
g_object_set(G_OBJECT(capsfilter2), "caps", sink_caps, NULL);
gst_caps_unref(sink_caps);
}
source->av[GET_AV_IDX_BY_TYPE(source->media_types)].payload_id = payload_id;
- if ((sink_caps = __make_rtp_caps(media_type, payload_id))) {
+ if ((sink_caps = __make_rtp_caps(media_type, payload_id, source))) {
g_object_set(G_OBJECT(capsfilter), "caps", sink_caps, NULL);
gst_caps_unref(sink_caps);
}
APPEND_ELEMENT(element_list, appsrc);
g_object_set(G_OBJECT(appsrc),
- "is-live", TRUE,
- "format", GST_FORMAT_TIME,
- NULL);
+ "is-live", TRUE,
+ "format", GST_FORMAT_TIME,
+ NULL);
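/* Both properties matter for a live push source: is-live=TRUE keeps the
 * pipeline running in real time instead of consuming data as fast as
 * possible, and format=GST_FORMAT_TIME means pushed buffers are expected
 * to carry timestamps. A minimal (hypothetical) push from application
 * code would look like:
 *   GST_BUFFER_PTS(buffer) = pts;
 *   gst_app_src_push_buffer(GST_APP_SRC(appsrc), buffer);
 */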
if (!(queue = _create_element(DEFAULT_ELEMENT_QUEUE, _av_tbl[av_idx].queue_name)))
goto exit;
source->av[GET_AV_IDX(is_audio)].payload_id = payload_id;
- if ((sink_caps = __make_rtp_caps(GET_MEDIA_TYPE_NAME(is_audio), payload_id))) {
+ if ((sink_caps = __make_rtp_caps(GET_MEDIA_TYPE_NAME(is_audio), payload_id, source))) {
g_object_set(G_OBJECT(capsfilter), "caps", sink_caps, NULL);
gst_caps_unref(sink_caps);
}
LOG_INFO("set source crop x:%d, y:%d, width:%d, height:%d, mode:%s", x, y, w, h, (portrait_mode) ? "portrait" : "landscape");
g_object_get(G_OBJECT(screen_source),
- portrait_mode ? "mirroring-v-src-width" : "mirroring-h-src-width", &src_width,
- portrait_mode ? "mirroring-v-src-height" : "mirroring-h-src-height", &src_height,
- portrait_mode ? "mirroring-v-x" : "mirroring-h-x", &mirroring_x,
- portrait_mode ? "mirroring-v-y" : "mirroring-h-y", &mirroring_y,
- portrait_mode ? "mirroring-v-width" : "mirroring-h-width", &mirroring_width,
- portrait_mode ? "mirroring-v-height" : "mirroring-h-height", &mirroring_height,
- "output-width", &output_width,
- "output-height", &output_height,
- NULL);
+ portrait_mode ? "mirroring-v-src-width" : "mirroring-h-src-width", &src_width,
+ portrait_mode ? "mirroring-v-src-height" : "mirroring-h-src-height", &src_height,
+ portrait_mode ? "mirroring-v-x" : "mirroring-h-x", &mirroring_x,
+ portrait_mode ? "mirroring-v-y" : "mirroring-h-y", &mirroring_y,
+ portrait_mode ? "mirroring-v-width" : "mirroring-h-width", &mirroring_width,
+ portrait_mode ? "mirroring-v-height" : "mirroring-h-height", &mirroring_height,
+ "output-width", &output_width,
+ "output-height", &output_height,
+ NULL);
rw = (float)src_width / mirroring_width;
rh = (float)src_height / mirroring_height;
"mirroring[x:%d y:%d width:%d, height:%d", src_width, src_height, output_width,
output_height, mirroring_x, mirroring_y, mirroring_width, mirroring_height);
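/* A sketch of what the ratios are for, assuming x/y/w/h hold the crop
 * rectangle logged above in mirrored coordinates: rw and rh scale those
 * values back into source coordinates before they are handed to
 * videocrop, e.g.
 *   left = (x - mirroring_x) * rw;
 *   top  = (y - mirroring_y) * rh;
 * with right and bottom derived analogously from src_width and src_height.
 */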
- g_object_set(G_OBJECT(videocrop), "left", left, "right", right, "top", top,
- "bottom", bottom, NULL);
+ g_object_set(G_OBJECT(videocrop),
+ "left", left,
+ "right", right,
+ "top", top,
+ "bottom", bottom,
+ NULL);
LOG_INFO("cropped: left:%d, right:%d, top:%d, bottom:%d", left, right, top, bottom);
videocrop = gst_bin_get_by_name(source->bin, ELEMENT_NAME_VIDEOCROP);
RET_VAL_IF(videocrop == NULL, WEBRTC_ERROR_INVALID_OPERATION, "videocrop is NULL");
- g_object_get(G_OBJECT(videocrop), "left", &left, "right", &right, "top", &top, "bottom", &bottom, NULL);
+ g_object_get(G_OBJECT(videocrop),
+ "left", &left,
+ "right", &right,
+ "top", &top,
+ "bottom", &bottom,
+ NULL);
RET_VAL_IF(left == 0 && right == 0 && top == 0 && bottom == 0, WEBRTC_ERROR_INVALID_OPERATION, "webrtc_screen_source_set_crop was not set");
- g_object_set(G_OBJECT(videocrop), "left", 0, "right", 0, "top", 0, "bottom", 0, NULL);
+ g_object_set(G_OBJECT(videocrop),
+ "left", 0,
+ "right", 0,
+ "top", 0,
+ "bottom", 0,
+ NULL);
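/* videocrop's left/right/top/bottom properties are the number of pixels
 * removed from each edge, so writing 0 to all four restores the full
 * frame; the g_object_get() above only verifies that a crop had actually
 * been applied before clearing it.
 */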
return WEBRTC_ERROR_NONE;
}