webrtc_test: Add support to show multiple video tracks from one remote peer 23/302223/1 accepted/tizen/unified/20231205.171423 accepted/tizen/unified/riscv/20231226.211541
authorSangchul Lee <sc11.lee@samsung.com>
Fri, 1 Dec 2023 02:57:35 +0000 (11:57 +0900)
committerSangchul Lee <sc11.lee@samsung.com>
Mon, 4 Dec 2023 08:15:32 +0000 (08:15 +0000)
Test conditions:
 1. peer-to-peer test (not using the room menu)
 2. display type is EVAS
 3. the number of remote video tracks must not exceed 3

[Version] 0.4.26
[Issue Type] Testsuite

Change-Id: Idd081077d97a6bab538d49a0f5a3c282079224b9
Signed-off-by: Sangchul Lee <sc11.lee@samsung.com>
(cherry picked from commit e3dd9012c7ec6781b59b989fc3616819df17c3e0)

packaging/capi-media-webrtc.spec
test/webrtc_test.c
test/webrtc_test_priv.h
test/webrtc_test_ui.c

index 387183f6429d42110b0577e44e794fc859fcee67..66071e92c75cb3bef199758346e4cb511fe0d143 100644 (file)
@@ -1,6 +1,6 @@
 Name:       capi-media-webrtc
 Summary:    A WebRTC library in Tizen Native API
-Version:    0.4.25
+Version:    0.4.26
 Release:    0
 Group:      Multimedia/API
 License:    Apache-2.0
index 8a6c0fe62bf1d628cfc0a52c1b600eb6bfd63ed5..b0ce35a2b1d7404676f5aa9b92f250487cc412ae 100644 (file)
@@ -181,6 +181,8 @@ void _webrtc_create(int index)
        RET_IF(ret != WEBRTC_ERROR_NONE, "ret[0x%x]", ret);
 
        g_ad.conns[index].index = index;
+       g_mutex_init(&g_ad.conns[index].render.video_track_mutex);
+
        g_print("webrtc[%p, index:%d] is created\n", g_ad.conns[index].webrtc, index);
 }
 
@@ -236,6 +238,7 @@ void _webrtc_destroy(int index)
        }
 
        g_ad.conns[index].render.loopback_track_id = 0;
+       g_mutex_clear(&g_ad.conns[index].render.video_track_mutex);
 
        g_ad.conns[index].encoded_audio_frame_cb_is_set = false;
        g_ad.conns[index].encoded_video_frame_cb_is_set = false;
@@ -1936,15 +1939,23 @@ static void __track_added_cb(webrtc_h webrtc, webrtc_media_type_e type, unsigned
        g_print("__track_added_cb() is invoked, webrtc[%p], type[%d], id[%u], conn[%p]\n", webrtc, type, id, conn);
 
        if (type == WEBRTC_MEDIA_TYPE_VIDEO) {
+               g_mutex_lock(&conn->render.video_track_mutex);
                if (!g_ad.validate_encoded_frame_cb) {
+                       void *eo;
+                       if (conn->render.video_track_count == MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN) {
+                               g_print("%d video tracks are already receiving, skip this new one\n", MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN);
+                               g_mutex_unlock(&conn->render.video_track_mutex);
+                               return;
+                       }
+                       eo = conn->render.eo[conn->render.video_track_count];
                        g_print("Video track is added, set display - display_type[%d], display[%p]\n",
-                               conn->render.display_type, conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY ? g_ad.win : conn->render.eo);
+                               conn->render.display_type, conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY ? g_ad.win : eo);
 #ifdef TIZEN_FEATURE_ESPP
                        if (conn->render.espp.handle) {
                                if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_OVERLAY)
                                        esplusplayer_set_display(conn->render.espp.handle, ESPLUSPLAYER_DISPLAY_TYPE_OVERLAY, g_ad.win);
                                else if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_EVAS)
-                                       esplusplayer_set_display(conn->render.espp.handle, ESPLUSPLAYER_DISPLAY_TYPE_EVAS, conn->render.eo);
+                                       esplusplayer_set_display(conn->render.espp.handle, ESPLUSPLAYER_DISPLAY_TYPE_EVAS, eo);
                                else
                                        g_print("invalid display type[%d]\n", conn->render.display_type);
                        } else
@@ -1955,7 +1966,7 @@ static void __track_added_cb(webrtc_h webrtc, webrtc_media_type_e type, unsigned
                                        webrtc_set_display_mode(webrtc, id, WEBRTC_DISPLAY_MODE_LETTER_BOX);
                                        webrtc_set_display_visible(webrtc, id, true);
                                } else if (conn->render.display_type == WEBRTC_DISPLAY_TYPE_EVAS) {
-                                       webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, conn->render.eo);
+                                       webrtc_set_display(webrtc, id, WEBRTC_DISPLAY_TYPE_EVAS, eo);
                                        webrtc_set_display_mode(webrtc, id, WEBRTC_DISPLAY_MODE_LETTER_BOX);
                                        webrtc_set_display_visible(webrtc, id, true);
                                } else {
@@ -1970,6 +1981,8 @@ static void __track_added_cb(webrtc_h webrtc, webrtc_media_type_e type, unsigned
                                        g_printerr("failed to set state to PLAYING to video render pipeline\n");
                        }
                }
+               conn->render.video_track_count++;
+               g_mutex_unlock(&conn->render.video_track_mutex);
 
        } else if (type == WEBRTC_MEDIA_TYPE_AUDIO) {
                if (!g_ad.validate_encoded_frame_cb) {
index 163544f83cba15d3fe64df7078b8705049d17feb..1dac28ab1d006693eb17b1a884746e9f0b89aefe 100644 (file)
@@ -175,6 +175,7 @@ enum {
 #define MAX_CONNECTION_LEN 3
 #define MAX_CHANNEL_LEN 10
 #define MAX_MEDIA_PACKET_SOURCE_LEN 4
+#define MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN 3
 
 typedef struct {
        unsigned int source_id;
@@ -230,9 +231,11 @@ typedef struct _connection_s {
        struct {
                sound_stream_info_h stream_info;
                webrtc_display_type_e display_type;
-               void *eo;
+               void *eo[MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN];
                void *text_eo;
                unsigned int loopback_track_id;
+               int video_track_count;
+               GMutex video_track_mutex;
 #ifdef TIZEN_FEATURE_ESPP
                struct {
                        esplusplayer_handle handle;
index b4c64db8e57ba747386797c1f3deb7bb676e7ad2..5f69b771b223fce7b4df5c659b5af7552782cf3c 100644 (file)
@@ -110,12 +110,25 @@ static void create_render_rect_and_bg(Evas_Object *win)
        evas_object_show(win);
 }
 
+static void __update_eo_setting(Evas_Object *eo, int i, int win_width, int win_height)
+{
+       int width = win_width / 2;
+       int height = win_height / 2;
+
+       RET_IF(!eo, "eo is NULL");
+
+       evas_object_image_size_set(eo, width, height);
+       evas_object_image_fill_set(eo, 0, 0, width, height);
+       evas_object_resize(eo, width, height);
+       evas_object_move(eo, (i % 2) * width, (i / 2) * height);
+}
+
 static int app_create(void *data)
 {
        appdata_s *ad = data;
        Evas_Object *win = NULL;
        Evas_Object **eo;
-       int i;
+       int i, j;
 
        /* use gl backend */
        elm_config_accel_preference_set("opengl");
@@ -134,16 +147,34 @@ static int app_create(void *data)
         * |    eo (remote1)    |    eo (remote2)    | *
         * |____________________|____________________| */
        for (i = 0; i < MAX_CONNECTION_LEN + 1; i++) {
-               eo = (i == 0) ? (Evas_Object **)&ad->eo_mine : (Evas_Object **)&ad->conns[i - 1].render.eo;
+               if (i == 0) {
+                       /* For local peer loopback video track */
+                       eo = (Evas_Object **)&ad->eo_mine;
+
+               } else if (i == 1) {
+                       /* For only one remote peer scenario with multiple remote video tracks */
+                       for (j = 0; j < MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN; j++) {
+                               eo = (Evas_Object **)&ad->conns[0].render.eo[j];
+                               *eo = create_image_object(ad->win);
+                               if (!*eo) {
+                                       g_print("failed to create evas image object\n");
+                                       continue;
+                               }
+                               __update_eo_setting(*eo, j + 1, ad->win_width, ad->win_height);
+                       }
+                       continue;
+
+               } else {
+                       /* For each remote video track from each remote peer with room scenario */
+                       eo = (Evas_Object **)&ad->conns[i - 1].render.eo[0];
+               }
+
                *eo = create_image_object(ad->win);
                if (!*eo) {
                        g_print("failed to create evas image object\n");
                        continue;
                }
-               evas_object_image_size_set(*eo, ad->win_width / 2, ad->win_height / 2);
-               evas_object_image_fill_set(*eo, 0, 0, ad->win_width / 2, ad->win_height / 2);
-               evas_object_resize(*eo, ad->win_width / 2, ad->win_height / 2);
-               evas_object_move(*eo, (i % 2) * (ad->win_width / 2), (i / 2) * (ad->win_height / 2));
+               __update_eo_setting(*eo, i, ad->win_width, ad->win_height);
        }
        elm_win_activate(win);
        evas_object_show(win);
@@ -154,12 +185,14 @@ static int app_create(void *data)
 static int app_terminate(void *data)
 {
        appdata_s *ad = data;
-       int i;
+       int i, j;
 
        for (i = 0; i < MAX_CONNECTION_LEN; i++) {
-               if (ad->conns[i].render.eo) {
-                       evas_object_del(ad->conns[i].render.eo);
-                       ad->conns[i].render.eo = NULL;
+               for (j = 0; j < MAX_REMOTE_VIDEO_TRACKS_PER_PEER_LEN; j++) {
+                       if (ad->conns[i].render.eo[j]) {
+                               evas_object_del(ad->conns[i].render.eo[j]);
+                               ad->conns[i].render.eo[j] = NULL;
+                       }
                }
                if (ad->conns[i].render.text_eo) {
                        evas_object_del(ad->conns[i].render.text_eo);
@@ -221,4 +254,4 @@ void _app_start(int *argc, char **argv)
 void _app_stop(void)
 {
        elm_exit();
-}
\ No newline at end of file
+}