Name: capi-media-webrtc
Summary: A WebRTC library in Tizen Native API
-Version: 0.4.25
+Version: 0.4.26
Release: 0
Group: Multimedia/API
License: Apache-2.0
RET_IF(ret != WEBRTC_ERROR_NONE, "ret[0x%x]", ret);
g_ad.conns[index].index = index;
+ g_mutex_init(&g_ad.conns[index].render.video_track_mutex);
+
g_print("webrtc[%p, index:%d] is created\n", g_ad.conns[index].webrtc, index);
}
}
g_ad.conns[index].render.loopback_track_id = 0;
+ g_mutex_clear(&g_ad.conns[index].render.video_track_mutex);
g_ad.conns[index].encoded_audio_frame_cb_is_set = false;
g_ad.conns[index].encoded_video_frame_cb_is_set = false;
g_print("__track_added_cb() is invoked, webrtc[%p], type[%d], id[%u], conn[%p]\n", webrtc, type, id, conn);
if (type == WEBRTC_MEDIA_TYPE_VIDEO) {
+ g_mutex_lock(&conn->render.video_track_mutex);
if (!g_ad.validate_encoded_frame_cb) {
+ void *eo;
+ if (conn->render.video_track_count == MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN) {
+ g_print("%d video tracks are already receiving, skip this new one\n", MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN);
+ g_mutex_unlock(&conn->render.video_track_mutex);
+ return;
+ }
+ eo = conn->render.eo[conn->render.video_track_count];
g_print("Video track is added, set display - display_type[%d], display[%p]\n",
- conn->render.display_type, conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY ? g_ad.win : conn->render.eo);
+ conn->render.display_type, conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY ? g_ad.win : eo);
#ifdef TIZEN_FEATURE_ESPP
if (conn->render.espp.handle) {
if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY)
esplusplayer_set_display(conn->render.espp.handle, ESPLUSPLAYER_DISPLAY_TYPE_OVERLAY, g_ad.win);
else if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_EVAS)
- esplusplayer_set_display(conn->render.espp.handle, ESPLUSPLAYER_DISPLAY_TYPE_EVAS, conn->render.eo);
+ esplusplayer_set_display(conn->render.espp.handle, ESPLUSPLAYER_DISPLAY_TYPE_EVAS, eo);
else
g_print("invalid display type[%d]\n", conn->render.display_type);
} else
webrtc_set_display_mode(webrtc, id, WEBRTC_DISPLAY_MODE_LETTER_BOX);
webrtc_set_display_visible(webrtc, id, true);
} else if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_EVAS) {
- webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, conn->render.eo);
+ webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, eo);
webrtc_set_display_mode(webrtc, id, WEBRTC_DISPLAY_MODE_LETTER_BOX);
webrtc_set_display_visible(webrtc, id, true);
} else {
g_printerr("failed to set state to PLAYING to video render pipeline\n");
}
}
+ conn->render.video_track_count++;
+ g_mutex_unlock(&conn->render.video_track_mutex);
} else if (type == WEBRTC_MEDIA_TYPE_AUDIO) {
if (!g_ad.validate_encoded_frame_cb) {
#define MAX_CONNECTION_LEN 3
#define MAX_CHANNEL_LEN 10
#define MAX_MEDIA_PACKET_SOURCE_LEN 4
+#define MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN 3
typedef struct {
unsigned int source_id;
struct {
sound_stream_info_h stream_info;
webrtc_display_type_e display_type;
- void *eo;
+ void *eo[MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN];
void *text_eo;
unsigned int loopback_track_id;
+ int video_track_count;
+ GMutex video_track_mutex;
#ifdef TIZEN_FEATURE_ESPP
struct {
esplusplayer_handle handle;
evas_object_show(win);
}
+static void __update_eo_setting(Evas_Object *eo, int i, int win_width, int win_height) /* place eo into 2x2 grid cell i: column = i % 2, row = i / 2; each cell is half the window in both dimensions */
+{
+	int width = win_width / 2;   /* cell width: half of the window width */
+	int height = win_height / 2; /* cell height: half of the window height */
+
+	RET_IF(!eo, "eo is NULL"); /* guard: nothing to lay out without an object */
+
+	evas_object_image_size_set(eo, width, height);       /* set the image data size to the cell size */
+	evas_object_image_fill_set(eo, 0, 0, width, height); /* fill the whole object area with the image */
+	evas_object_resize(eo, width, height);               /* resize the object itself to one cell */
+	evas_object_move(eo, (i % 2) * width, (i / 2) * height); /* move to (column, row) position of cell i */
+}
+
static int app_create(void *data)
{
appdata_s *ad = data;
Evas_Object *win = NULL;
Evas_Object **eo;
- int i;
+ int i, j;
/* use gl backend */
elm_config_accel_preference_set("opengl");
* | eo (remote1) | eo (remote2) | *
* |____________________|____________________| */
for (i = 0; i < MAX_CONNECTION_LEN + 1; i++) {
- eo = (i == 0) ? (Evas_Object **)&ad->eo_mine : (Evas_Object **)&ad->conns[i - 1].render.eo;
+ if (i == 0) {
+ /* For local peer loopback video track */
+ eo = (Evas_Object **)&ad->eo_mine;
+
+ } else if (i == 1) {
+ /* For only one remote peer scenario with multiple remote video tracks */
+ for (j = 0; j < MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN; j++) {
+ eo = (Evas_Object **)&ad->conns[0].render.eo[j];
+ *eo = create_image_object(ad->win);
+ if (!*eo) {
+ g_print("failed to create evas image object\n");
+ continue;
+ }
+ __update_eo_setting(*eo, j + 1, ad->win_width, ad->win_height);
+ }
+ continue;
+
+ } else {
+ /* For each remote video track from each remote peer with room scenario */
+ eo = (Evas_Object **)&ad->conns[i - 1].render.eo[0];
+ }
+
*eo = create_image_object(ad->win);
if (!*eo) {
g_print("failed to create evas image object\n");
continue;
}
- evas_object_image_size_set(*eo, ad->win_width / 2, ad->win_height / 2);
- evas_object_image_fill_set(*eo, 0, 0, ad->win_width / 2, ad->win_height / 2);
- evas_object_resize(*eo, ad->win_width / 2, ad->win_height / 2);
- evas_object_move(*eo, (i % 2) * (ad->win_width / 2), (i / 2) * (ad->win_height / 2));
+ __update_eo_setting(*eo, i, ad->win_width, ad->win_height);
}
elm_win_activate(win);
evas_object_show(win);
static int app_terminate(void *data)
{
appdata_s *ad = data;
- int i;
+ int i, j;
for (i = 0; i < MAX_CONNECTION_LEN; i++) {
- if (ad->conns[i].render.eo) {
- evas_object_del(ad->conns[i].render.eo);
- ad->conns[i].render.eo = NULL;
+ for (j = 0; j < MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN; j++) {
+ if (ad->conns[i].render.eo[j]) {
+ evas_object_del(ad->conns[i].render.eo[j]);
+ ad->conns[i].render.eo[j] = NULL;
+ }
}
if (ad->conns[i].render.text_eo) {
evas_object_del(ad->conns[i].render.text_eo);
void _app_stop(void)
{
elm_exit();
-}
\ No newline at end of file
+}