fix ecore-thread scheduler starvation issue.
author    raster <raster@7cbeb6ba-43b4-40fd-8cce-4c39aea84d33>
Thu, 24 May 2012 09:51:17 +0000 (09:51 +0000)
committer raster <raster@7cbeb6ba-43b4-40fd-8cce-4c39aea84d33>
Thu, 24 May 2012 09:51:17 +0000 (09:51 +0000)
git-svn-id: svn+ssh://svn.enlightenment.org/var/svn/e/trunk/ecore@71404 7cbeb6ba-43b4-40fd-8cce-4c39aea84d33

ChangeLog
src/lib/ecore/Ecore.h
src/lib/ecore/ecore_thread.c

diff --git a/ChangeLog b/ChangeLog
index e997a78..5c99801 100644
--- a/ChangeLog
+++ b/ChangeLog
        accessing already deleted ecore-con clients. use client
         ref/unref to fix it. No backport of this fix as it requires a
         new feature.
-
+        * Fix ecore-thread scheduling issue where a re-scheduled
+        thread would keep a worker busy and never let feedback
+        workers run; scheduling is now fairer.
+        * Allow up to 16 * cpu num worker threads (the default is
+        still cpu num).
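
The gist of the change: previously each worker drained its queue in a while
loop, so a job that set its reschedule flag went straight back onto the queue
the same loop was draining, and jobs on the other queue (notably feedback
workers) could wait forever. After this patch a pass runs at most one short
job and one feedback job before looping. A standalone sketch of that
round-robin idea, in plain C with made-up names (an illustration only, not
the actual EFL code):

/* Each pass pops at most one job from the front of each queue and puts
 * rescheduled jobs back at the tail, so a job that keeps rescheduling
 * itself can no longer starve the other queue. */
#include <stdio.h>

typedef struct { const char *name; int runs_left; } Job;

/* Run the front job of q, if any; reschedule it at the tail. */
static int
run_one(Job **q, int *len)
{
   Job *job;
   int i;

   if (*len == 0) return 0;
   job = q[0];
   for (i = 1; i < *len; i++) q[i - 1] = q[i];
   (*len)--;
   printf("ran %s\n", job->name);
   if (--job->runs_left > 0) q[(*len)++] = job; /* reschedule */
   return 1;
}

int
main(void)
{
   Job s = { "short-job", 3 }, f = { "feedback-job", 3 };
   Job *shortq[4] = { &s }, *fbq[4] = { &f };
   int nshort = 1, nfb = 1;

   /* Bitwise | so both queues get a turn on every pass. */
   while (run_one(shortq, &nshort) | run_one(fbq, &nfb)) ;
   return 0;
}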
diff --git a/src/lib/ecore/Ecore.h b/src/lib/ecore/Ecore.h
index 7aaf6b9..aa98153 100644
--- a/src/lib/ecore/Ecore.h
+++ b/src/lib/ecore/Ecore.h
@@ -2120,7 +2120,7 @@ EAPI int ecore_thread_max_get(void);
  * @param num The new maximum
  *
  * This sets a new value for the maximum number of concurrently running
- * Ecore_Thread's. It @b must an integer between 1 and (2 * @c x), where @c x
+ * Ecore_Thread's. It @b must be an integer between 1 and (16 * @c x), where @c x
 * is the number of CPUs available.
  *
  * @see ecore_thread_max_get()
diff --git a/src/lib/ecore/ecore_thread.c b/src/lib/ecore/ecore_thread.c
index 75fea42..8424ca1 100644
--- a/src/lib/ecore/ecore_thread.c
+++ b/src/lib/ecore/ecore_thread.c
@@ -1,3 +1,4 @@
+
 #ifdef HAVE_CONFIG_H
 # include <config.h>
 #endif
@@ -398,44 +399,40 @@ static void
 _ecore_short_job(PH(thread))
 {
    Ecore_Pthread_Worker *work;
+   int cancel;
 
-   while (_ecore_pending_job_threads)
+   LKL(_ecore_pending_job_threads_mutex);
+   
+   if (!_ecore_pending_job_threads)
      {
-        int cancel;
-
+        LKU(_ecore_pending_job_threads_mutex);
+        return;
+     }
+   
+   work = eina_list_data_get(_ecore_pending_job_threads);
+   _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
+                                                      _ecore_pending_job_threads);
+   
+   LKU(_ecore_pending_job_threads_mutex);
+   
+   LKL(work->cancel_mutex);
+   cancel = work->cancel;
+   LKU(work->cancel_mutex);
+   work->self = thread;
+   if (!cancel)
+     work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);
+   
+   if (work->reschedule)
+     {
+        work->reschedule = EINA_FALSE;
+        
         LKL(_ecore_pending_job_threads_mutex);
-
-        if (!_ecore_pending_job_threads)
-          {
-             LKU(_ecore_pending_job_threads_mutex);
-             break;
-          }
-
-        work = eina_list_data_get(_ecore_pending_job_threads);
-        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
-                                                           _ecore_pending_job_threads);
-
+        _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
         LKU(_ecore_pending_job_threads_mutex);
-
-        LKL(work->cancel_mutex);
-        cancel = work->cancel;
-        LKU(work->cancel_mutex);
-        work->self = thread;
-        if (!cancel)
-          work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);
-
-        if (work->reschedule)
-          {
-             work->reschedule = EINA_FALSE;
-
-             LKL(_ecore_pending_job_threads_mutex);
-             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
-             LKU(_ecore_pending_job_threads_mutex);
-          }
-        else
-          {
-             ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
-          }
+     }
+   else
+     {
+        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
      }
 }
 
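The LKL()/LKU() calls above are Ecore's mutex lock/unlock macros. Note that
the cancel flag is only sampled under its mutex and the job body then runs
unlocked, so a cancel request can skip a job that has not started yet but
never interrupts one already running. A minimal standalone illustration of
that pattern with plain pthreads (hypothetical names, not the EFL wrappers):

#include <pthread.h>
#include <stdio.h>

typedef struct
{
   pthread_mutex_t cancel_mutex;
   int             cancel;
} Work;

static void
run_unless_cancelled(Work *work)
{
   int cancel;

   /* Sample the flag under the lock, then run the body unlocked. */
   pthread_mutex_lock(&work->cancel_mutex);
   cancel = work->cancel;
   pthread_mutex_unlock(&work->cancel_mutex);
   if (!cancel) printf("running job body\n");
}

int
main(void)
{
   Work w = { PTHREAD_MUTEX_INITIALIZER, 0 };

   run_unless_cancelled(&w);
   return 0;
}
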
@@ -443,44 +440,40 @@ static void
 _ecore_feedback_job(PH(thread))
 {
    Ecore_Pthread_Worker *work;
-
-   while (_ecore_pending_job_threads_feedback)
+   int cancel;
+   
+   LKL(_ecore_pending_job_threads_mutex);
+   
+   if (!_ecore_pending_job_threads_feedback)
      {
-        int cancel;
-
+        LKU(_ecore_pending_job_threads_mutex);
+        return;
+     }
+   
+   work = eina_list_data_get(_ecore_pending_job_threads_feedback);
+   _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
+                                                               _ecore_pending_job_threads_feedback);
+   
+   LKU(_ecore_pending_job_threads_mutex);
+   
+   LKL(work->cancel_mutex);
+   cancel = work->cancel;
+   LKU(work->cancel_mutex);
+   work->self = thread;
+   if (!cancel)
+     work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
+   
+   if (work->reschedule)
+     {
+        work->reschedule = EINA_FALSE;
+        
         LKL(_ecore_pending_job_threads_mutex);
-
-        if (!_ecore_pending_job_threads_feedback)
-          {
-             LKU(_ecore_pending_job_threads_mutex);
-             break;
-          }
-
-        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
-        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
-                                                                    _ecore_pending_job_threads_feedback);
-
+        _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
         LKU(_ecore_pending_job_threads_mutex);
-
-        LKL(work->cancel_mutex);
-        cancel = work->cancel;
-        LKU(work->cancel_mutex);
-        work->self = thread;
-        if (!cancel)
-          work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
-
-        if (work->reschedule)
-          {
-             work->reschedule = EINA_FALSE;
-
-             LKL(_ecore_pending_job_threads_mutex);
-             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
-             LKU(_ecore_pending_job_threads_mutex);
-          }
-        else
-          {
-             ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
-          }
+     }
+   else
+     {
+        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
      }
 }
 
@@ -519,8 +512,8 @@ _ecore_thread_worker(void *data __UNUSED__)
    eina_sched_prio_drop();
 
 restart:
-   if (_ecore_pending_job_threads) _ecore_short_job(PHS());
-   if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(PHS());
+   _ecore_short_job(PHS());
+   _ecore_feedback_job(PHS());
 
    /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
 
@@ -536,7 +529,7 @@ restart:
 #ifdef _WIN32
    Sleep(1); /* around 50ms */
 #else
-   usleep(200);
+   usleep(50);
 #endif
 
    LKL(_ecore_pending_job_threads_mutex);
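
Since a pass now handles at most one job per queue, workers cycle back here
much more often, so the idle pause between queue checks drops from 200us to
50us. A self-contained sketch of that pause as its own helper (hypothetical
name; the real code inlines it as above, and Windows Sleep() only has
millisecond granularity, hence the different constants):

#ifdef _WIN32
# include <windows.h>
#else
# include <unistd.h>
#endif

/* Brief yield between queue polls; tuned per platform. */
static void
_worker_idle_pause(void)
{
#ifdef _WIN32
   Sleep(1);   /* Sleep() takes milliseconds */
#else
   usleep(50); /* usleep() takes microseconds */
#endif
}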
@@ -1188,7 +1181,7 @@ ecore_thread_max_set(int num)
    EINA_MAIN_LOOP_CHECK_RETURN;
    if (num < 1) return;
    /* avoid doing something hilarious by blocking dumb users */
-   if (num >= (2 * eina_cpu_count())) return;
+   if (num >= (16 * eina_cpu_count())) num = 16 * eina_cpu_count();
 
    _ecore_thread_count_max = num;
 }
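
With the cap raised, callers can set far more workers than CPUs (useful when
jobs mostly block on I/O), and out-of-range requests are now clamped to
16 * cpus instead of silently ignored. A hedged usage sketch against the
public API shown above:

#include <stdio.h>
#include <Eina.h>
#include <Ecore.h>

int
main(void)
{
   ecore_init();
   /* Mostly-blocking workers benefit from more threads than CPUs. */
   ecore_thread_max_set(eina_cpu_count() * 4);
   /* Values at or past 16 * cpus are clamped rather than rejected. */
   ecore_thread_max_set(eina_cpu_count() * 100);
   printf("max workers: %i\n", ecore_thread_max_get());
   ecore_shutdown();
   return 0;
}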