Add mutex and check whether rpc handle destruction is requested 53/314053/8
author sooyeon <sooyeon.kim@samsung.com>
Thu, 4 Jul 2024 10:40:13 +0000 (19:40 +0900)
committer sooyeon <sooyeon.kim@samsung.com>
Tue, 9 Jul 2024 00:32:01 +0000 (09:32 +0900)
- Issue:
A double free of the rpc handle occurs.

- Solution:
We check whether destruction of the rpc handle has already been requested before scheduling it again, and we add a mutex to protect access to the rpc handle.
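
As a rough illustration, the guard works like the minimal sketch below. The names handle_info_s, schedule_destroy, and request_destroy are illustrative placeholders, not the actual client symbols; the real code keeps the handle in tts_tidl_info_s and queues destruction in an Ecore idler.

    /* Illustrative sketch only; not the actual TTS client implementation. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <pthread.h>

    typedef struct {
        void *rpc_h;                 /* proxy handle owned by this info */
        bool destruction_requesting; /* set once destruction is scheduled */
    } handle_info_s;

    static pthread_mutex_t g_rpc_h_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Placeholder for queueing the handle so it is destroyed later. */
    static void schedule_destroy(void *rpc_h)
    {
        printf("schedule destroy of %p\n", rpc_h);
    }

    static void request_destroy(handle_info_s *info)
    {
        pthread_mutex_lock(&g_rpc_h_mutex);
        if (NULL == info->rpc_h || info->destruction_requesting) {
            /* Already destroyed or already scheduled: do nothing. */
            pthread_mutex_unlock(&g_rpc_h_mutex);
            return;
        }

        schedule_destroy(info->rpc_h);
        info->destruction_requesting = true;
        pthread_mutex_unlock(&g_rpc_h_mutex);
    }

    int main(void)
    {
        handle_info_s info = { .rpc_h = (void *)0x1, .destruction_requesting = false };
        request_destroy(&info); /* schedules destruction once */
        request_destroy(&info); /* no-op: the flag prevents a second schedule */
        return 0;
    }

With the mutex held around both the check and the update, a second caller sees the flag and returns, so the same handle is never queued for destruction twice.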

Change-Id: I6db79b254b4c7283ad69f40d8a9d878c9ee24c0f
Signed-off-by: sooyeon <sooyeon.kim@samsung.com>
client/tts.c
client/tts_client.c
client/tts_client.h
client/tts_core.c
client/tts_tidl.c

index fc97a36ad29e09714b5061157d474c8e5cfd3197..01d0f25c5075d927e4208a832d008815ff25f619 100644 (file)
@@ -280,6 +280,9 @@ static int destroy_tts_handle(tts_h tts)
                return TTS_ERROR_OPERATION_FAILED;
        }
 
+       /* Mark that this client has been requested to be destroyed */
+       tts_client_set_requested_to_destroy(client, true);
+
        tts_config_mgr_finalize(uid, TTS_CONFIG_CLIENT_TYPE_DEFAULT);
 
        // TODO: move into tts_client
index c5bfa27be3028b2fe12d918db8b9dd3c662e1e2a..1648d3d9a5da1a303431eed272e58fe6c5aa56bc 100644 (file)
@@ -175,6 +175,7 @@ int tts_client_new(tts_h* tts)
        client->pid = getpid();
        client->uid = temp->handle;
        client->current_service_state = TTS_SERVICE_STATE_NONE;
+       client->requested_to_destroy = false;
 
        pthread_mutex_lock(&g_client_list_mutex);
        g_client_list = g_list_append(g_client_list, client);
@@ -498,6 +499,24 @@ bool tts_client_is_service_out(tts_client_s* client)
        return client->service_out;
 }
 
+void tts_client_set_requested_to_destroy(tts_client_s* client, bool is_requested_to_destroy)
+{
+       if (false == tts_client_is_valid_client(client)) {
+               return;
+       }
+
+       client->requested_to_destroy = is_requested_to_destroy;
+}
+
+bool tts_client_is_requested_to_destroy(tts_client_s* client)
+{
+       if (false == tts_client_is_valid_client(client)) {
+               return false;
+       }
+
+       return client->requested_to_destroy;
+}
+
 void tts_client_set_mode(tts_client_s* client, tts_mode_e mode)
 {
        if (false == tts_client_is_valid_client(client)) {
index 9e4c73a342cb4de7d8fec4e231aac4e07769fde3..594581e4d70ccae017f8eb67c1b6dea9ef454139 100644 (file)
@@ -81,6 +81,7 @@ typedef struct {
        bool start_listening;
        bool reprepared;
        bool service_out;
+       bool requested_to_destroy;
 
        /* options */
        char*           credential;
@@ -132,6 +133,9 @@ bool tts_client_is_reprepared(tts_client_s* client);
 void tts_client_set_service_out(tts_client_s* client, bool is_service_out);
 bool tts_client_is_service_out(tts_client_s* client);
 
+void tts_client_set_requested_to_destroy(tts_client_s* client, bool is_requested_to_destroy);
+bool tts_client_is_requested_to_destroy(tts_client_s* client);
+
 void tts_client_set_mode(tts_client_s* client, tts_mode_e mode);
 tts_mode_e tts_client_get_mode(tts_client_s* client);
 
index e43f12624ee3a7268d0158cf9de3aea42bf0a98a..58974cd29c780ebfb45e2348e067abf9d9799b5f 100644 (file)
@@ -609,6 +609,11 @@ static inline bool __is_ipc_retry_needed(tts_client_s* client, tts_error_e ret)
                return false;
        }
 
+       if (tts_client_is_requested_to_destroy(client)) {
+               SLOG(LOG_WARN, TAG_TTSC, "[WARNING] The current client is requested to be destroyed. Do not request reprepare.");
+               return false;
+       }
+
        if (TTS_ERROR_TIMED_OUT == ret) {
                SLOG(LOG_WARN, TAG_TTSC, "[WARNING] IPC is failed. Try again. ret(%d/%s)", ret, get_error_message(ret));
                usleep(10000);
index 31f0be6bf4586a88db569f1e1f7c172e94d7826e..b5501179836caf6c49936e5b32cc772cf7784a97 100644 (file)
@@ -18,6 +18,7 @@
 #include "tts_core.h"
 #include "tts_dlog.h"
 
+
 #define MAXSLEEP 128
 #define MAX_CONNECT_CHECK 100
 
@@ -29,6 +30,7 @@ typedef struct {
        rpc_port_proxy_tts_h rpc_h;
        rpc_port_proxy_tts_notify_cb_h notify_cb_h;
        char* engine_app_id;
+       bool destruction_requesting;
 } tts_tidl_info_s;
 
 static GList* g_tidl_infos = NULL;
@@ -36,6 +38,8 @@ static GSList *g_destruction_scheduled_handles = NULL;
 
 static Ecore_Idler *g_destroy_handles_idler = NULL;
 
+static pthread_mutex_t g_rpc_h_mutex = PTHREAD_MUTEX_INITIALIZER;
+
 
 static tts_tidl_info_s* __get_tidl_info_s(unsigned int uid)
 {
@@ -158,12 +162,14 @@ static void __notify_cb(void *user_data, int pid, int uid, bundle *msg)
 
 static void destroy_scheduled_handle(gpointer data)
 {
+       pthread_mutex_lock(&g_rpc_h_mutex);
        rpc_port_proxy_tts_h rpc_h = (rpc_port_proxy_tts_h)data;
        SLOG(LOG_WARN, TAG_TTSC, "[WARNING] Destroy rpc handle(%p)", rpc_h);
        int ret = rpc_port_proxy_tts_destroy(rpc_h);
        if (RPC_PORT_ERROR_NONE != ret) {
                SLOG(LOG_ERROR, TAG_TTSC, "[ERROR] Fail to destroy handle. ret(%d/%s)", ret, get_error_message(ret));
        }
+       pthread_mutex_unlock(&g_rpc_h_mutex);
 }
 
 static Eina_Bool destroy_scheduled_handles_by_ecore_idler(void *user_data)
@@ -176,13 +182,11 @@ static Eina_Bool destroy_scheduled_handles_by_ecore_idler(void *user_data)
        return EINA_FALSE;
 }
 
-static inline void destroy_rpc_port(tts_tidl_info_s* info)
+static void __initialize_tidl_info(tts_tidl_info_s* info)
 {
-       RETM_IF(NULL == info->rpc_h, "[TIDL] Handle is already destroyed");
-
-       g_destruction_scheduled_handles = g_slist_append(g_destruction_scheduled_handles, info->rpc_h);
-       if (NULL == g_destroy_handles_idler) {
-               g_destroy_handles_idler = ecore_idler_add(destroy_scheduled_handles_by_ecore_idler, NULL);
+       if (NULL == info) {
+               SLOG(LOG_ERROR, TAG_TTSC, "[TIDL] Invalid parameter");
+               return;
        }
 
        info->rpc_h = NULL;
@@ -192,8 +196,32 @@ static inline void destroy_rpc_port(tts_tidl_info_s* info)
        info->engine_app_id = NULL;
 
        info->register_callback_invoked = false;
-       info->connection_requesting = false;
        info->connected = false;
+       info->connection_requesting = false;
+}
+
+static inline void destroy_rpc_port(tts_tidl_info_s* info)
+{
+       pthread_mutex_lock(&g_rpc_h_mutex);
+       if (NULL == info->rpc_h) {
+               SLOG(LOG_WARN, TAG_TTSC, "[TIDL] Handle is already destroyed");
+               pthread_mutex_unlock(&g_rpc_h_mutex);
+               return;
+       }
+       if (true == info->destruction_requesting) {
+               SLOG(LOG_WARN, TAG_TTSC, "[TIDL] Destroying rpc port is already requested");
+               pthread_mutex_unlock(&g_rpc_h_mutex);
+               return;
+       }
+
+       g_destruction_scheduled_handles = g_slist_append(g_destruction_scheduled_handles, info->rpc_h);
+       if (NULL == g_destroy_handles_idler) {
+               g_destroy_handles_idler = ecore_idler_add(destroy_scheduled_handles_by_ecore_idler, NULL);
+       }
+
+       info->destruction_requesting = true;
+
+       pthread_mutex_unlock(&g_rpc_h_mutex);
 }
 
 static void __on_connected(rpc_port_proxy_tts_h h, void *user_data)
@@ -208,6 +236,7 @@ static void __on_connected(rpc_port_proxy_tts_h h, void *user_data)
 
        info->connected = true;
        info->connection_requesting = false;
+       info->destruction_requesting = false;
 
        SLOG(LOG_DEBUG, TAG_TTSC, "Connected to server");
 }
@@ -226,10 +255,11 @@ static void __on_disconnected(rpc_port_proxy_tts_h h, void *user_data)
        info->connection_requesting = false;
        info->register_callback_invoked = false;
 
-       SLOG(LOG_DEBUG, TAG_TTSC, "Disconnected from server");
+       SLOG(LOG_ERROR, TAG_TTSC, "Disconnected from server");
        if (tts_client_is_listening_started(uid)) {
-               SLOG(LOG_DEBUG, TAG_TTSC, "Try to reconnect to server");
+               SLOG(LOG_ERROR, TAG_TTSC, "Try to reconnect to server");
                destroy_rpc_port(info);
+               __initialize_tidl_info(info);
                tts_core_handle_service_reset();
        }
 }
@@ -243,6 +273,7 @@ static void __on_rejected(rpc_port_proxy_tts_h h, void *user_data)
 
        tts_tidl_info_s* info = __get_tidl_info_s(uid);
        RETM_IF(NULL == info, "[ERROR] Fail to get tidl info");
+       info->connected = false;
        info->connection_requesting = false;
        info->register_callback_invoked = false;
 
@@ -280,6 +311,7 @@ int tts_tidl_open_connection(unsigned int uid)
        }
 
        info->uid = uid;
+       info->destruction_requesting = false;
        g_tidl_infos = g_list_append(g_tidl_infos, info);
 
        SLOG(LOG_ERROR, TAG_TTSC, "[TIDL] uid(%u)", uid);
@@ -297,6 +329,7 @@ int tts_tidl_close_connection(unsigned int uid)
        RETVM_IF(NULL == info, TTS_ERROR_INVALID_PARAMETER, "[ERROR] Fail to get tidl info");
 
        destroy_rpc_port(info);
+       __initialize_tidl_info(info);
        if (g_destroy_handles_idler) {
                ecore_idler_del(g_destroy_handles_idler);
                g_destroy_handles_idler = NULL;
@@ -305,9 +338,6 @@ int tts_tidl_close_connection(unsigned int uid)
        g_slist_free_full(g_steal_pointer(&g_destruction_scheduled_handles), destroy_scheduled_handle);
        g_destruction_scheduled_handles = NULL;
 
-       free(info->engine_app_id);
-       info->engine_app_id = NULL;
-
        g_tidl_infos = g_list_remove(g_tidl_infos, info);
        free(info);
 
@@ -378,27 +408,38 @@ static int __invoke_register_callback(int pid, tts_mode_e mode, tts_playing_mode
 
 static inline bool __is_rpc_port_valid(tts_tidl_info_s* info, const char* engine_id)
 {
+       pthread_mutex_lock(&g_rpc_h_mutex);
        if (NULL == info->rpc_h || NULL == info->engine_app_id) {
                SLOG(LOG_ERROR, TAG_TTSC, "[ERROR] Handle is empty");
+               pthread_mutex_unlock(&g_rpc_h_mutex);
                return false;
        }
 
        if (NULL == engine_id) {
                SLOG(LOG_ERROR, TAG_TTSC, "[ERROR] Engine ID is null");
+               pthread_mutex_unlock(&g_rpc_h_mutex);
                return false;
        }
 
        if (0 != strncmp(info->engine_app_id, engine_id, TTS_ENGINE_APPID_LEN)) {
                SLOG(LOG_ERROR, TAG_TTSC, "[ERROR] Engine id is not current engine");
+               pthread_mutex_unlock(&g_rpc_h_mutex);
                return false;
        }
 
+       pthread_mutex_unlock(&g_rpc_h_mutex);
+
        return true;
 }
 
 static inline int __create_rpc_port(tts_tidl_info_s* info, const char* engine_id)
 {
-       RETVM_IF(NULL != info->rpc_h, TTS_ERROR_NONE, "[TIDL] Handle is already created");
+       pthread_mutex_lock(&g_rpc_h_mutex);
+       if (NULL != info->rpc_h) {
+               SLOG(LOG_INFO, TAG_TTSC, "[TIDL] Handle is already created");
+               pthread_mutex_unlock(&g_rpc_h_mutex);
+               return TTS_ERROR_NONE;
+       }
 
        rpc_port_proxy_tts_callback_s rpc_callback = {
                .connected = __on_connected,
@@ -410,11 +451,15 @@ static inline int __create_rpc_port(tts_tidl_info_s* info, const char* engine_id
        uintptr_t ptr_uid = info->uid;
        if (RPC_PORT_ERROR_NONE != rpc_port_proxy_tts_create(engine_id, &rpc_callback, (void*)ptr_uid, &handle) || NULL == handle) {
                SLOG(LOG_ERROR, TAG_TTSC, "[ERROR] Fail to create proxy");
+               pthread_mutex_unlock(&g_rpc_h_mutex);
                return TTS_ERROR_OPERATION_FAILED;
        }
 
        info->rpc_h = handle;
        info->engine_app_id = strdup(engine_id);
+       info->destruction_requesting = false;
+
+       pthread_mutex_unlock(&g_rpc_h_mutex);
 
        return TTS_ERROR_NONE;
 }
@@ -422,6 +467,7 @@ static inline int __create_rpc_port(tts_tidl_info_s* info, const char* engine_id
 static int __reset_rpc_port(tts_tidl_info_s* info, const char* engine_id)
 {
        destroy_rpc_port(info);
+       __initialize_tidl_info(info);
 
        if (TTS_ERROR_NONE != __create_rpc_port(info, engine_id)) {
                SLOG(LOG_ERROR, TAG_TTSC, "[ERROR] Fail to create new rpc port");
@@ -478,12 +524,16 @@ int tts_tidl_request_hello(unsigned int uid, tts_mode_e mode, tts_playing_mode_e
 
 static int __request_tidl_connect_sync(tts_tidl_info_s* info)
 {
+       pthread_mutex_lock(&g_rpc_h_mutex);
        int ret = rpc_port_proxy_tts_connect_sync(info->rpc_h);
        SLOG(LOG_INFO, TAG_TTSC, "[INFO] Request connection to stub. ret(%d)", ret);
        if (RPC_PORT_ERROR_NONE == ret) {
+               pthread_mutex_unlock(&g_rpc_h_mutex);
                return TTS_ERROR_NONE;
        }
 
+       pthread_mutex_unlock(&g_rpc_h_mutex);
+
        return TTS_ERROR_OPERATION_FAILED;
 }
 
@@ -493,6 +543,7 @@ static int __convert_and_handle_tidl_error(int ret, tts_tidl_info_s* info)
        case RPC_PORT_ERROR_IO_ERROR:
                SLOG(LOG_INFO, TAG_TTSC, "[INFO] IO error occurs. Destroy old rpc port");
                destroy_rpc_port(info);
+               __initialize_tidl_info(info);
                return TTS_ERROR_IO_ERROR;
 
        case RPC_PORT_ERROR_OUT_OF_MEMORY:
@@ -626,6 +677,8 @@ int tts_tidl_request_finalize(unsigned int uid)
 
        tts_client_set_start_listening(uid, false);
        destroy_rpc_port(info);
+       __initialize_tidl_info(info);
+
        SLOG(LOG_ERROR, TAG_TTSC, ">>>> Success tts finalize. uid(%u)", uid);
 
        return TTS_ERROR_NONE;
@@ -950,7 +1003,7 @@ int tts_tidl_request_add_pcm(unsigned int uid, int event, const char* data, int
        }
 
        if (TTS_ERROR_NONE != ret) {
-               SLOG(LOG_ERROR, TAG_TTSC, ">>>> Request pause pcm : Fail to invoke message(%d)", ret);
+               SLOG(LOG_ERROR, TAG_TTSC, ">>>> Request to add pcm : Fail to invoke message(%d)", ret);
                return ret;
        }