12 #include "ecore_private.h"
14 #ifdef EFL_HAVE_THREADS
16 # ifdef EFL_HAVE_POSIX_THREADS
20 # include <sys/resource.h>
22 # include <sys/syscall.h>
/* ---- POSIX threading backend --------------------------------------
 * Thin wrappers so the rest of the file is backend-agnostic.
 * PH*  : thread handles (declare/equal/self/create/join/cancel)
 * CD*  : condition variables
 * LK*  : mutexes
 * LRWK*: read-write locks
 * NOTE(review): the CD/LK/LRWK operation macros expand with a trailing
 * ';', so call sites must use them as full statements. */
# define PH(x) pthread_t x
# define PHE(x, y) pthread_equal(x, y)
# define PHS() pthread_self()
/* NOTE(review): the (void*) cast assumes f matches pthread's
 * void *(*)(void *) start-routine signature — confirm at call sites. */
# define PHC(x, f, d) pthread_create(&(x), NULL, (void*) f, d)
# define PHJ(x, p) pthread_join(x, (void**)(&(p)))
# define PHA(x) pthread_cancel(x)

/* Condition variables. */
# define CD(x) pthread_cond_t x
# define CDI(x) pthread_cond_init(&(x), NULL);
# define CDD(x) pthread_cond_destroy(&(x));
# define CDB(x) pthread_cond_broadcast(&(x));
# define CDW(x, y, t) pthread_cond_timedwait(&(x), &(y), t);

/* Mutexes. */
# define LK(x) pthread_mutex_t x
# define LKI(x) pthread_mutex_init(&(x), NULL);
# define LKD(x) pthread_mutex_destroy(&(x));
# define LKL(x) pthread_mutex_lock(&(x));
# define LKU(x) pthread_mutex_unlock(&(x));

/* Read-write locks. */
# define LRWK(x) pthread_rwlock_t x
# define LRWKI(x) pthread_rwlock_init(&(x), NULL);
# define LRWKD(x) pthread_rwlock_destroy(&(x));
# define LRWKWL(x) pthread_rwlock_wrlock(&(x));
# define LRWKRL(x) pthread_rwlock_rdlock(&(x));
# define LRWKU(x) pthread_rwlock_unlock(&(x));
# else /* EFL_HAVE_WIN32_THREADS */

/* ---- Win32 threading backend -------------------------------------- */
# define WIN32_LEAN_AND_MEAN
# undef WIN32_LEAN_AND_MEAN

/* Thread handles are heap-allocated win32_thread wrappers, so PHE is a
 * plain pointer comparison. */
# define PH(x) win32_thread *x
# define PHE(x, y) ((x) == (y))
# define PHS() (HANDLE)GetCurrentThreadId()

/* Allocate a wrapper and spawn a thread; mirrors pthread_create(). */
int _ecore_thread_win32_create(win32_thread **x, LPTHREAD_START_ROUTINE f, void *d)
   t = (win32_thread *)calloc(1, sizeof(win32_thread));

   /* NOTE(review): CreateThread's result should be checked (and t freed
    * on failure) — elided lines may already do this; confirm. */
   (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);

# define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)

/* Wait for the thread, release its handle and report its exit value. */
int _ecore_thread_win32_join(win32_thread *x, void **res)
   WaitForSingleObject(x->thread, INFINITE);
   CloseHandle(x->thread);

   if (res) *res = x->val;

# define PHJ(x, p) _ecore_thread_win32_join(x, (void**)(&(p)))
# define PHA(x) TerminateThread(x->thread, 0)

/* Mutexes map onto Win32 mutex HANDLEs. */
# define LK(x) HANDLE x
# define LKI(x) x = CreateMutex(NULL, FALSE, NULL)
# define LKD(x) CloseHandle(x)
# define LKL(x) WaitForSingleObject(x, INFINITE)
# define LKU(x) ReleaseMutex(x)
/* Win32 condition-variable emulation: a counting semaphore plus a
 * critical section guarding the waiter count (threads_count). */
CRITICAL_SECTION threads_count_lock;

# define CD(x) win32_cond *x

/* CDI/CDD/CDB macro bodies (partially elided): allocate the cond and its
 * semaphore, destroy them, and broadcast by releasing one semaphore slot
 * per currently-registered waiter. */
x = (win32_cond *)calloc(1, sizeof(win32_cond)); \
x->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL); \
InitializeCriticalSection(&x->threads_count_lock); \
CloseHandle(x->semaphore); \
EnterCriticalSection(&x->threads_count_lock); \
if (x->threads_count > 0) \
ReleaseSemaphore(x->semaphore, x->threads_count, NULL); \
LeaveCriticalSection (&x->threads_count_lock); \
int _ecore_thread_win32_cond_timedwait(win32_cond *c, HANDLE *external_mutex, struct timeval *t)
   /* Convert the relative timeval to milliseconds for WaitForSingleObject.
    * NOTE(review): the external_mutex release/re-acquire around the wait
    * is in elided lines — confirm it is present. */
   DWORD val = t->tv_sec * 1000 + (t->tv_usec / 1000);

   EnterCriticalSection (&c->threads_count_lock);
   LeaveCriticalSection (&c->threads_count_lock);

   res = WaitForSingleObject(c->semaphore, val);
   if (res == WAIT_OBJECT_0)
# define CDW(x, y, t) _ecore_thread_win32_cond_timedwait(x, y, t)
/* Win32 read/write lock emulation built from the cond primitives above:
 * readers/writers counters gate two condition objects (cond_read /
 * cond_write). The bodies below are macro fragments (note the '\' line
 * continuations); semantics mirror the POSIX LRWK* macros. */
# define LRWK(x) win32_rwl *x
x = (win32_rwl *)calloc(1, sizeof(win32_rwl)); \
CDI(x->cond_write); \
if (!x->cond_write) \
CDD(x->cond_write); \
if (x->writers || x->readers > 0) \
x->writers_count++; \
while (x->writers || x->readers > 0) \
EnterCriticalSection(&x->cond_write->threads_count_lock); \
x->cond_read->threads_count++; \
LeaveCriticalSection(&x->cond_write->threads_count_lock); \
res = WaitForSingleObject(x->cond_write->semaphore, INFINITE); \
if (res != WAIT_OBJECT_0) break; \
x->writers_count--; \
if (res == 0) x->writers_count = 1; \
x->readers_count++; \
EnterCriticalSection(&x->cond_write->threads_count_lock); \
x->cond_read->threads_count++; \
LeaveCriticalSection(&x->cond_write->threads_count_lock); \
res = WaitForSingleObject(x->cond_write->semaphore, INFINITE); \
if (res != WAIT_OBJECT_0) break; \
x->readers_count--; \
if (x->readers_count == 1) \
EnterCriticalSection(&x->cond_read->threads_count_lock); \
if (x->cond_read->threads_count > 0) \
ReleaseSemaphore(x->cond_read->semaphore, 1, 0); \
LeaveCriticalSection(&x->cond_read->threads_count_lock); \
else if (x->readers_count > 0) \
else if (x->writers_count > 0) \
EnterCriticalSection (&x->cond_write->threads_count_lock); \
if (x->cond_write->threads_count > 0) \
ReleaseSemaphore(x->cond_write->semaphore, 1, 0); \
LeaveCriticalSection (&x->cond_write->threads_count_lock); \
else if (x->readers > 0) \
if (x->readers == 0 && x->writers_count > 0) \
EnterCriticalSection (&x->cond_write->threads_count_lock); \
if (x->cond_write->threads_count > 0) \
ReleaseSemaphore(x->cond_write->semaphore, 1, 0); \
LeaveCriticalSection (&x->cond_write->threads_count_lock); \
typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread Ecore_Pthread;
typedef struct _Ecore_Thread_Data Ecore_Thread_Data;

/* Per-key thread-local data: a value plus an optional free callback
 * (run by _ecore_thread_data_free). */
struct _Ecore_Thread_Data

/* State for one queued/running job; short-run and feedback-run share this
 * struct (union arms partially elided). */
struct _Ecore_Pthread_Worker
   Ecore_Thread_Cb func_blocking;
   Ecore_Thread_Cb func_heavy;
   Ecore_Thread_Notify_Cb func_notify;
   Ecore_Pipe *direct_pipe;      /* pipe for out-of-pool "direct" runs */
   Ecore_Pthread_Worker *direct_worker;
   Ecore_Thread_Cb func_cancel;
   Ecore_Thread_Cb func_end;
#ifdef EFL_HAVE_THREADS
   Eina_Bool cancel : 1;         /* cancellation requested */
   Eina_Bool feedback_run : 1;   /* started via ecore_thread_feedback_run */
   Eina_Bool reschedule : 1;     /* re-queue the job once it returns */

#ifdef EFL_HAVE_THREADS
typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;

/* Per pool-thread bookkeeping. */
struct _Ecore_Pthread_Data
   Ecore_Pthread_Worker *death_job;  /* job that joins this thread at exit */

static void _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte);

static int _ecore_thread_count_max = 0;         /* pool size limit (= cpu count) */
static int ECORE_THREAD_PIPE_DEL = 0;           /* event type for delayed pipe deletion */
static Eina_Array *_ecore_thread_pipe = NULL;   /* cache of reusable Ecore_Pipe */

/* Pop a cached pipe, or create one wired to the end-of-job handler. */
_ecore_thread_pipe_get(void)
   if (eina_array_count_get(_ecore_thread_pipe) > 0)
     return eina_array_pop(_ecore_thread_pipe);

   return ecore_pipe_add(_ecore_thread_handler, NULL);
#ifdef EFL_HAVE_THREADS
static int _ecore_thread_count = 0;                           /* running pool threads */

static Ecore_Event_Handler *del_handler = NULL;
static Eina_List *_ecore_active_job_threads = NULL;           /* live Ecore_Pthread_Data */
static Eina_List *_ecore_pending_job_threads = NULL;          /* queued short-run workers */
static Eina_List *_ecore_pending_job_threads_feedback = NULL; /* queued feedback workers */
static LK(_ecore_pending_job_threads_mutex);                  /* guards the queues + count */

static Eina_Hash *_ecore_thread_global_hash = NULL;
static LRWK(_ecore_thread_global_hash_lock);
static LK(_ecore_thread_global_hash_mutex);
static CD(_ecore_thread_global_hash_cond);

static PH(main_loop_thread);                   /* identity of the main-loop thread */
static Eina_Bool have_main_loop_thread = 0;

static Eina_Trash *_ecore_thread_worker_trash = NULL;  /* recycled worker structs */
static int _ecore_thread_worker_count = 0;
/* Return a worker struct to the trash cache; the elided branch really
 * frees it once the cache exceeds 16 entries per possible thread. */
_ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
   if (_ecore_thread_worker_count > (_ecore_thread_count_max + 1) * 16)

   eina_trash_push(&_ecore_thread_worker_trash, worker);

/* eina_hash free callback: run the user destructor on thread-local data. */
_ecore_thread_data_free(void *data)
   Ecore_Thread_Data *d = data;

   if (d->cb) d->cb(d->data);

/* Event handler: recycle a retired pipe (cache holds at most 50). */
_ecore_thread_pipe_free(void *data __UNUSED__, void *event)
   Ecore_Pipe *p = event;

   if (eina_array_count_get(_ecore_thread_pipe) < 50)
     eina_array_push(_ecore_thread_pipe, p);

   eina_threads_shutdown();

_ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
   /* This is a hack to delay pipe destruction until we are out of its internal loop. */
   return ECORE_CALLBACK_CANCEL;

/* Join a finished pool thread, drop it from the active list and queue its
 * pipe for delayed deletion via the ECORE_THREAD_PIPE_DEL event. */
_ecore_thread_end(Ecore_Pthread_Data *pth, __UNUSED__ Ecore_Thread *work)
   if (PHJ(pth->thread, p) != 0)

   _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);

   ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);

/* Main-loop teardown of a finished worker: fire func_cancel or func_end,
 * release feedback resources and the local-data hash. */
_ecore_thread_kill(Ecore_Pthread_Worker *work)
   if (work->func_cancel)
     work->func_cancel((void *) work->data, (Ecore_Thread *) work);
     work->func_end((void *) work->data, (Ecore_Thread *) work);

   if (work->feedback_run)
        ecore_pipe_del(work->u.feedback_run.notify);

        if (work->u.feedback_run.direct_pipe)
          eina_array_push(_ecore_thread_pipe, work->u.feedback_run.direct_pipe);
        if (work->u.feedback_run.direct_worker)
          _ecore_thread_worker_free(work->u.feedback_run.direct_worker);

   eina_hash_free(work->hash);
/* Main-loop pipe handler fired when a worker finishes. If feedback
 * notifications are still in flight, defer destruction by setting kill;
 * otherwise tear the worker down immediately. */
_ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
   Ecore_Pthread_Worker *work;

   /* The pipe transports exactly one worker pointer per message. */
   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;

   work = *(Ecore_Pthread_Worker **)buffer;

   if (work->feedback_run)
        if (work->u.feedback_run.send != work->u.feedback_run.received)
             work->kill = EINA_TRUE;

   _ecore_thread_kill(work);

/* Main-loop pipe handler for ecore_thread_feedback() payloads: deliver to
 * func_notify, then finish a kill that was deferred until every pending
 * notification had been drained. */
_ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
   Ecore_Pthread_Worker *work = data;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;

   user_data = *(void **)buffer;
   work->u.feedback_run.received++;

   if (work->u.feedback_run.func_notify)
     work->u.feedback_run.func_notify((void *) work->data, (Ecore_Thread *) work, user_data);

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
        _ecore_thread_kill(work);
/* Worker-thread loop: drain the short-run queue, running each blocking
 * callback and reporting completion through end_pipe. The unlocked queue
 * test in the while condition is only an optimization; the locked
 * re-check inside the loop makes the pop safe. */
_ecore_short_job(Ecore_Pipe *end_pipe)
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads)
        LKL(_ecore_pending_job_threads_mutex);

        if (!_ecore_pending_job_threads)
             LKU(_ecore_pending_job_threads_mutex);

        /* Pop the queue head under the lock. */
        work = eina_list_data_get(_ecore_pending_job_threads);
        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                           _ecore_pending_job_threads);

        LKU(_ecore_pending_job_threads_mutex);

        work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);

        if (work->reschedule)
             work->reschedule = EINA_FALSE;

             /* Requeue instead of completing. */
             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
             LKU(_ecore_pending_job_threads_mutex);

        ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Worker-thread loop for the feedback queue: same shape as
 * _ecore_short_job but runs func_heavy and tracks the owning thread. */
_ecore_feedback_job(Ecore_Pipe *end_pipe, PH(thread))
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads_feedback)
        LKL(_ecore_pending_job_threads_mutex);

        if (!_ecore_pending_job_threads_feedback)
             LKU(_ecore_pending_job_threads_mutex);

        /* Pop the queue head under the lock. */
        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                                    _ecore_pending_job_threads_feedback);

        LKU(_ecore_pending_job_threads_mutex);

        work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);

        if (work->reschedule)
             work->reschedule = EINA_FALSE;

             /* Requeue instead of completing. */
             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
             LKU(_ecore_pending_job_threads_mutex);

        ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
603 _ecore_direct_worker(Ecore_Pthread_Worker *work)
605 Ecore_Pthread_Data *pth;
607 #ifdef EFL_POSIX_THREADS
608 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
609 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
612 eina_sched_prio_drop();
614 pth = malloc(sizeof (Ecore_Pthread_Data));
615 if (!pth) return NULL;
617 pth->p = work->u.feedback_run.direct_pipe;
625 work->self = pth->thread;
626 work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
628 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
630 work = work->u.feedback_run.direct_worker;
638 work->u.short_run.func_blocking = NULL;
639 work->func_end = (void *) _ecore_thread_end;
640 work->func_cancel = NULL;
641 work->cancel = EINA_FALSE;
642 work->feedback_run = EINA_FALSE;
643 work->kill = EINA_FALSE;
648 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
654 _ecore_thread_worker(Ecore_Pthread_Data *pth)
656 Ecore_Pthread_Worker *work;
658 #ifdef EFL_POSIX_THREADS
659 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
660 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
663 eina_sched_prio_drop();
665 LKL(_ecore_pending_job_threads_mutex);
666 _ecore_thread_count++;
667 LKU(_ecore_pending_job_threads_mutex);
670 if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
671 if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread);
673 /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
675 LKL(_ecore_pending_job_threads_mutex);
676 if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
678 LKU(_ecore_pending_job_threads_mutex);
681 LKU(_ecore_pending_job_threads_mutex);
683 /* Sleep a little to prevent premature death */
685 Sleep(1); /* around 50ms */
690 LKL(_ecore_pending_job_threads_mutex);
691 if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
693 LKU(_ecore_pending_job_threads_mutex);
696 _ecore_thread_count--;
697 LKU(_ecore_pending_job_threads_mutex);
699 work = pth->death_job;
700 if (!work) return NULL;
703 work->u.short_run.func_blocking = NULL;
704 work->func_end = (void *) _ecore_thread_end;
705 work->func_cancel = NULL;
706 work->cancel = EINA_FALSE;
707 work->feedback_run = EINA_FALSE;
708 work->kill = EINA_FALSE;
713 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Allocate a worker struct, reusing one from the trash cache when threads
 * are enabled; plain malloc otherwise. */
static Ecore_Pthread_Worker *
_ecore_thread_worker_new(void)
   Ecore_Pthread_Worker *result;

#ifdef EFL_HAVE_THREADS
   result = eina_trash_pop(&_ecore_thread_worker_trash);

   if (!result) result = malloc(sizeof (Ecore_Pthread_Worker));
   else _ecore_thread_worker_count--;

   return malloc(sizeof (Ecore_Pthread_Worker));

/* Module init: size the pool to the CPU count, create the pipe cache,
 * register the delayed pipe-delete event and set up the global locks. */
_ecore_thread_init(void)
   _ecore_thread_count_max = eina_cpu_count();
   if (_ecore_thread_count_max <= 0)
     _ecore_thread_count_max = 1;

   ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
   _ecore_thread_pipe = eina_array_new(8);

#ifdef EFL_HAVE_THREADS
   del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
   main_loop_thread = PHS();   /* remember the main-loop thread identity */
   have_main_loop_thread = 1;

   LKI(_ecore_pending_job_threads_mutex);
   LRWKI(_ecore_thread_global_hash_lock);
   LKI(_ecore_thread_global_hash_mutex);
   CDI(_ecore_thread_global_hash_cond);
/* Module shutdown: cancel every queued job, join the live pool threads,
 * and release cached pipes, locks and global state. */
_ecore_thread_shutdown(void)
   /* FIXME: If function are still running in the background, should we kill them ? */
   Eina_Array_Iterator it;

#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work;
   Ecore_Pthread_Data *pth;

   LKL(_ecore_pending_job_threads_mutex);

   /* Flush both queues, firing each job's cancel callback. */
   EINA_LIST_FREE(_ecore_pending_job_threads, work)
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);

   EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);

   LKU(_ecore_pending_job_threads_mutex);

   /* Improve emergency shutdown */
   EINA_LIST_FREE(_ecore_active_job_threads, pth)
        PHJ(pth->thread, ep);

        ecore_pipe_del(pth->p);

   if (_ecore_thread_global_hash)
     eina_hash_free(_ecore_thread_global_hash);
   ecore_event_handler_del(del_handler);
   have_main_loop_thread = 0;

   LKD(_ecore_pending_job_threads_mutex);
   LRWKD(_ecore_thread_global_hash_lock);
   LKD(_ecore_thread_global_hash_mutex);
   CDD(_ecore_thread_global_hash_cond);

   /* Drop the cached pipes. */
   EINA_ARRAY_ITER_NEXT(_ecore_thread_pipe, i, p, it)

   eina_array_free(_ecore_thread_pipe);
   _ecore_thread_pipe = NULL;
819 * @addtogroup Ecore_Group Ecore - Main Loop and Job Functions.
825 * @addtogroup Ecore_Thread_Group Ecore Thread functions
827 * These functions allow for ecore-managed threads which integrate with ecore's main loop.
833 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
834 * @param func_blocking The function that should run in another thread.
835 * @param func_end The function that will be called in the main loop if the thread terminate correctly.
836 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
837 * @param data User context data to pass to all callback.
838 * @return A reference to the newly created thread instance, or NULL if it failed.
840 * ecore_thread_run provide a facility for easily managing blocking task in a
841 * parallel thread. You should provide three function. The first one, func_blocking,
842 * that will do the blocking work in another thread (so you should not use the
843 * EFL in it except Eina if you are careful). The second one, func_end,
844 * that will be called in Ecore main loop when func_blocking is done. So you
845 * can use all the EFL inside this function. The last one, func_cancel, will
846 * be called in the main loop if the thread is cancelled or could not run at all.
848 * Be aware, that you can't make assumption on the result order of func_end
849 * after many call to ecore_thread_run, as we start as much thread as the
850 * host CPU can handle.
853 ecore_thread_run(Ecore_Thread_Cb func_blocking,
854 Ecore_Thread_Cb func_end,
855 Ecore_Thread_Cb func_cancel,
858 Ecore_Pthread_Worker *work;
859 #ifdef EFL_HAVE_THREADS
860 Ecore_Pthread_Data *pth = NULL;
863 if (!func_blocking) return NULL;
865 work = _ecore_thread_worker_new();
869 func_cancel((void *) data, NULL);
873 work->u.short_run.func_blocking = func_blocking;
874 work->func_end = func_end;
875 work->func_cancel = func_cancel;
876 work->cancel = EINA_FALSE;
877 work->feedback_run = EINA_FALSE;
878 work->kill = EINA_FALSE;
879 work->reschedule = EINA_FALSE;
882 #ifdef EFL_HAVE_THREADS
887 LKL(_ecore_pending_job_threads_mutex);
888 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
890 if (_ecore_thread_count == _ecore_thread_count_max)
892 LKU(_ecore_pending_job_threads_mutex);
893 return (Ecore_Thread *) work;
896 LKU(_ecore_pending_job_threads_mutex);
898 /* One more thread could be created. */
899 pth = malloc(sizeof (Ecore_Pthread_Data));
900 if (!pth) goto on_error;
902 pth->p = _ecore_thread_pipe_get();
903 pth->death_job = _ecore_thread_worker_new();
904 if (!pth->p || !pth->death_job) goto on_error;
908 if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
909 return (Ecore_Thread *) work;
911 eina_threads_shutdown();
916 if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
917 if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
921 if (_ecore_thread_count == 0)
923 LKL(_ecore_pending_job_threads_mutex);
924 _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
925 LKU(_ecore_pending_job_threads_mutex);
927 if (work->func_cancel)
928 work->func_cancel((void *) work->data, (Ecore_Thread *) work);
932 return (Ecore_Thread *) work;
935 If no thread and as we don't want to break app that rely on this
936 facility, we will lock the interface until we are done.
939 /* Handle reschedule by forcing it here. That would mean locking the app,
940 * would be better with an idler, but really to complex for a case where
941 * thread should really exist.
943 work->reschedule = EINA_FALSE;
945 func_blocking((void *)data, (Ecore_Thread *) work);
946 if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *) work);
947 else func_end((void *)data, (Ecore_Thread *) work);
949 } while (work->reschedule == EINA_TRUE);
 * @brief Cancel a running thread.
 * @param thread The thread to cancel.
 * @return Will return EINA_TRUE if the thread has been cancelled,
 *         EINA_FALSE if it is pending.
 * ecore_thread_cancel gives you the possibility to cancel a task that is still
 * running. It will return EINA_FALSE if the destruction is delayed, or EINA_TRUE
 * if it is cancelled right after this call.
 * This function works in the main loop and in the thread, but you should not pass
 * the Ecore_Thread variable from the main loop to the worker thread in any structure.
 * You should always use the one passed to the Ecore_Thread_Heavy_Cb.
 * func_end/func_cancel will destroy the handler, so don't use it afterwards.
 * And if ecore_thread_cancel returns EINA_TRUE, you should not use the Ecore_Thread either.
ecore_thread_cancel(Ecore_Thread *thread)
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;

   /* A feedback job with notifications still in flight cannot be removed
    * synchronously; fall through to the delayed path. */
   if (work->feedback_run)
        if (work->u.feedback_run.send != work->u.feedback_run.received)

   LKL(_ecore_pending_job_threads_mutex);

   /* Only the main-loop thread may pull a still-pending job out of a
    * queue and cancel it immediately. */
   if ((have_main_loop_thread) &&
       (PHE(main_loop_thread, PHS())))
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
               if ((void *) work == (void *) thread)
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);

          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
               if ((void *) work == (void *) thread)
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);

   LKU(_ecore_pending_job_threads_mutex);

   /* Delay the destruction */
   ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
 * @brief Tell if a thread was canceled or not.
 * @param thread The thread to test.
 * @return EINA_TRUE if the thread is cancelled,
 *         EINA_FALSE if it is not.
 * You can use this function in the main loop and in the thread.
ecore_thread_check(Ecore_Thread *thread)
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;

   /* A NULL handle is reported as cancelled. */
   if (!worker) return EINA_TRUE;
   return worker->cancel;
1062 * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
1063 * @param func_heavy The function that should run in another thread.
1064 * @param func_notify The function that will receive the data send by func_heavy in the main loop.
1065 * @param func_end The function that will be called in the main loop if the thread terminate correctly.
1066 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
1067 * @param data User context data to pass to all callback.
1068 * @param try_no_queue If you want to run outside of the thread pool.
1069 * @return A reference to the newly created thread instance, or NULL if it failed.
1071 * ecore_thread_feedback_run provide a facility for easily managing heavy task in a
1072 * parallel thread. You should provide four functions. The first one, func_heavy,
1073 * that will do the heavy work in another thread (so you should not use the
1074 * EFL in it except Eina and Eet if you are careful). The second one, func_notify,
1075 * will receive the data send from the thread function (func_heavy) by ecore_thread_feedback
1076 * in the main loop (and so, can use all the EFL). The third, func_end,
1077 * that will be called in Ecore main loop when func_heavy is done. So you
1078 * can use all the EFL inside this function. The last one, func_cancel, will
1079 * be called in the main loop also, if the thread is cancelled or could not run at all.
1081 * Be aware, that you can't make assumption on the result order of func_end
1082 * after many call to ecore_feedback_run, as we start as much thread as the
1083 * host CPU can handle.
1085 * If you set try_no_queue, it will try to run outside of the thread pool, this can bring
1086 * the CPU down, so be careful with that. Of course if it can't start a new thread, it will
1087 * try to use one from the pool.
1089 EAPI Ecore_Thread *ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
1090 Ecore_Thread_Notify_Cb func_notify,
1091 Ecore_Thread_Cb func_end,
1092 Ecore_Thread_Cb func_cancel,
1094 Eina_Bool try_no_queue)
1097 #ifdef EFL_HAVE_THREADS
1098 Ecore_Pthread_Worker *worker;
1099 Ecore_Pthread_Data *pth = NULL;
1101 if (!func_heavy) return NULL;
1103 worker = _ecore_thread_worker_new();
1104 if (!worker) goto on_error;
1106 worker->u.feedback_run.func_heavy = func_heavy;
1107 worker->u.feedback_run.func_notify = func_notify;
1108 worker->hash = NULL;
1111 worker->func_cancel = func_cancel;
1112 worker->func_end = func_end;
1113 worker->data = data;
1114 worker->cancel = EINA_FALSE;
1115 worker->feedback_run = EINA_TRUE;
1116 worker->kill = EINA_FALSE;
1117 worker->reschedule = EINA_FALSE;
1119 worker->u.feedback_run.send = 0;
1120 worker->u.feedback_run.received = 0;
1122 worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
1123 worker->u.feedback_run.direct_pipe = NULL;
1124 worker->u.feedback_run.direct_worker = NULL;
1130 worker->u.feedback_run.direct_pipe = _ecore_thread_pipe_get();
1131 worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
1133 if (PHC(t, _ecore_direct_worker, worker) == 0)
1134 return (Ecore_Thread *) worker;
1137 LKL(_ecore_pending_job_threads_mutex);
1138 _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
1140 if (_ecore_thread_count == _ecore_thread_count_max)
1142 LKU(_ecore_pending_job_threads_mutex);
1143 return (Ecore_Thread *) worker;
1146 LKU(_ecore_pending_job_threads_mutex);
1148 /* One more thread could be created. */
1149 pth = malloc(sizeof (Ecore_Pthread_Data));
1150 if (!pth) goto on_error;
1152 pth->p = _ecore_thread_pipe_get();
1153 pth->death_job = _ecore_thread_worker_new();
1154 if (!pth->p || !pth->death_job) goto on_error;
1156 eina_threads_init();
1158 if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
1159 return (Ecore_Thread *) worker;
1161 eina_threads_shutdown();
1166 if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
1167 if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
1171 if (_ecore_thread_count == 0)
1173 LKL(_ecore_pending_job_threads_mutex);
1174 _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
1176 LKU(_ecore_pending_job_threads_mutex);
1178 if (func_cancel) func_cancel((void *) data, NULL);
1182 ecore_pipe_del(worker->u.feedback_run.notify);
1188 return (Ecore_Thread *) worker;
1190 Ecore_Pthread_Worker worker;
1192 (void) try_no_queue;
1195 If no thread and as we don't want to break app that rely on this
1196 facility, we will lock the interface until we are done.
1198 worker.u.feedback_run.func_heavy = func_heavy;
1199 worker.u.feedback_run.func_notify = func_notify;
1200 worker.u.feedback_run.notify = NULL;
1201 worker.u.feedback_run.send = 0;
1202 worker.u.feedback_run.received = 0;
1203 worker.func_cancel = func_cancel;
1204 worker.func_end = func_end;
1206 worker.cancel = EINA_FALSE;
1207 worker.feedback_run = EINA_TRUE;
1208 worker.kill = EINA_FALSE;
1211 worker.reschedule = EINA_FALSE;
1213 func_heavy((void *)data, (Ecore_Thread *) &worker);
1215 if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *) &worker);
1216 else func_end((void *)data, (Ecore_Thread *) &worker);
1217 } while (worker.reschedule == EINA_FALSE);
 * @brief Send data to the main loop from a worker thread.
 * @param thread The current Ecore_Thread context to send data from
 * @param data Data to be transmitted to the main loop
 * @return EINA_TRUE if data was successfully sent to the main loop,
 *         EINA_FALSE if anything went wrong.
 * After a successful call, the data should be considered owned
 * You should use this function only in the func_heavy call.
ecore_thread_feedback(Ecore_Thread *thread, const void *data)
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;

   if (!worker) return EINA_FALSE;
   if (!worker->feedback_run) return EINA_FALSE;

#ifdef EFL_HAVE_THREADS
   /* Only callable from inside the worker's own thread. */
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   worker->u.feedback_run.send++;
   ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *));

   /* No-thread build: deliver the notification synchronously. */
   worker->u.feedback_run.func_notify((void*) worker->data, thread, (void*) data);
 * @brief Plan to recall the heavy function once it exits.
 * @param thread The current Ecore_Thread context to reschedule
 * @return EINA_TRUE if the reschedule request was registered,
 *         EINA_FALSE if anything went wrong.
 * After a successful call, you can still do what you want in your thread; the
 * job will only be rescheduled once you exit the heavy loop.
 * You should use this function only in the func_heavy call.
ecore_thread_reschedule(Ecore_Thread *thread)
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;

   if (!worker) return EINA_FALSE;

#ifdef EFL_HAVE_THREADS
   /* Only callable from inside the worker's own thread. */
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   worker->reschedule = EINA_TRUE;
 * @brief Get number of active thread jobs
 * @return Number of active threads running jobs
 * This returns the number of threads currently running jobs through the
ecore_thread_active_get(void)
#ifdef EFL_HAVE_THREADS
   return _ecore_thread_count;

 * @brief Get number of pending (short) thread jobs
 * @return Number of pending threads running "short" jobs
 * This returns the number of threads currently running jobs through the
 * ecore_thread_run api call.
ecore_thread_pending_get(void)
#ifdef EFL_HAVE_THREADS
   /* Count under the queue mutex for a consistent snapshot. */
   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads);
   LKU(_ecore_pending_job_threads_mutex);

 * @brief Get number of pending feedback thread jobs
 * @return Number of pending threads running "feedback" jobs
 * This returns the number of threads currently running jobs through the
 * ecore_thread_feedback_run api call.
ecore_thread_pending_feedback_get(void)
#ifdef EFL_HAVE_THREADS
   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads_feedback);
   LKU(_ecore_pending_job_threads_mutex);

 * @brief Get number of pending thread jobs
 * @return Number of pending threads running jobs
 * This returns the number of threads currently running jobs through the
 * ecore_thread_run and ecore_thread_feedback_run api calls combined.
ecore_thread_pending_total_get(void)
#ifdef EFL_HAVE_THREADS
   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
   LKU(_ecore_pending_job_threads_mutex);
 * @brief Get the max number of threads that can run simultaneously
 * @return Max number of threads ecore will run
 * This returns the total number of threads that ecore will attempt to run
ecore_thread_max_get(void)
   return _ecore_thread_count_max;

 * @brief Set the max number of threads that can run simultaneously
 * @param num The new maximum
 * This sets the maximum number of threads that ecore will try to run
 * simultaneously. This number cannot be < 1 or >= 2x the number of active cpus.
ecore_thread_max_set(int num)
   /* Reject out-of-range values rather than clamping them. */
   if (num < 1) return;
   /* avoid doing something hilarious by blocking dumb users */
   if (num >= (2 * eina_cpu_count())) return;

   _ecore_thread_count_max = num;

 * @brief Reset the max number of threads that can run simultaneously
 * This resets the maximum number of threads that ecore will try to run
 * simultaneously to the number of active cpus.
ecore_thread_max_reset(void)
   _ecore_thread_count_max = eina_cpu_count();

 * @brief Get the number of threads which are available to be used
 * @return The number of available threads
 * This returns the number of threads slots that ecore has currently available.
 * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
 * this should be equal to (num_cpus - (active_running + active_feedback_running))
ecore_thread_available_get(void)
#ifdef EFL_HAVE_THREADS
   /* Read under the queue mutex so max and count are consistent. */
   LKL(_ecore_pending_job_threads_mutex);
   ret = _ecore_thread_count_max - _ecore_thread_count;
   LKU(_ecore_pending_job_threads_mutex);
1420 * @brief Add data to the thread for subsequent use
1421 * @param thread The thread context to add to
1422 * @param key The name string to add the data with
1423 * @param value The data to add
1424 * @param cb The callback to free the data with
1425 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1426 * @return EINA_TRUE on success, EINA_FALSE on failure
1427 * This adds data to the thread context, allowing the thread
1428 * to retrieve and use it without complicated mutexing. This function can only be called by a
1429 * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
1430 * All data added to the thread will be freed with its associated callback (if present)
1431 * upon thread termination. If no callback is specified, it is expected that the user will free the
1432 * data, but this is most likely not what you want.
1435 ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1437 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1438 Ecore_Thread_Data *d;
1441 if ((!thread) || (!key) || (!value))
1443 #ifdef EFL_HAVE_THREADS
1444 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1447 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1452 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1459 ret = eina_hash_direct_add(worker->hash, key, d);
1461 ret = eina_hash_add(worker->hash, key, d);
1470 * @brief Modify data in the thread, or add if not found
1471 * @param thread The thread context
1472 * @param key The name string to add the data with
1473 * @param value The data to add
1474 * @param cb The callback to free the data with
1475 * @return The old data associated with @p key on success if modified, NULL if added
1476 * This adds/modifies data in the thread context, adding only if modify fails.
1477 * This function can only be called by a *_run thread INSIDE the thread.
1478 * All data added to the thread pool will be freed with its associated callback (if present)
1479 * upon thread termination. If no callback is specified, it is expected that the user will free the
1480 * data, but this is most likely not what you want.
1483 ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
1485 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1486 Ecore_Thread_Data *d, *r;
1488 if ((!thread) || (!key) || (!value))
1490 #ifdef EFL_HAVE_THREADS
1491 if (!PHE(worker->self, PHS())) return NULL;
1494 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1499 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1505 r = eina_hash_set(worker->hash, key, d);
1516 * @brief Find data in the thread's data
1517 * @param thread The thread context
1518 * @param key The name string the data is associated with
1519 * @return The value, or NULL on error
1520 * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
1521 * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
1522 * in any case but success.
1526 ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
1528 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1529 Ecore_Thread_Data *d;
1531 if ((!thread) || (!key))
1533 #ifdef EFL_HAVE_THREADS
1534 if (!PHE(worker->self, PHS())) return NULL;
1539 d = eina_hash_find(worker->hash, key);
1547 * @brief Delete data from the thread's data
1548 * @param thread The thread context
1549 * @param key The name string the data is associated with
1550 * @return EINA_TRUE on success, EINA_FALSE on failure
1551 * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
1552 * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
1553 * in any case but success. Note that this WILL free the data if a callback was specified.
1556 ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
1558 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1559 Ecore_Thread_Data *d;
1560 if ((!thread) || (!key))
1562 #ifdef EFL_HAVE_THREADS
1563 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1567 if ((d = eina_hash_find(worker->hash, key)))
1568 _ecore_thread_data_free(d);
1569 return eina_hash_del_by_key(worker->hash, key);
1576 * @brief Add data to the global data
1577 * @param key The name string to add the data with
1578 * @param value The data to add
1579 * @param cb The optional callback to free the data with once ecore is shut down
1580 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1581 * @return EINA_TRUE on success, EINA_FALSE on failure
1582 * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
1583 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1584 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1585 * was specified for, you will most likely encounter a segv later on.
1588 ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1591 Ecore_Thread_Data *d;
1593 if ((!key) || (!value))
1595 #ifdef EFL_HAVE_THREADS
1596 LRWKWL(_ecore_thread_global_hash_lock);
1597 if (!_ecore_thread_global_hash)
1598 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1599 LRWKU(_ecore_thread_global_hash_lock);
1601 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1607 if (!_ecore_thread_global_hash)
1609 LRWKWL(_ecore_thread_global_hash_lock);
1611 ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1613 ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1614 LRWKU(_ecore_thread_global_hash_lock);
1615 CDB(_ecore_thread_global_hash_cond);
1623 * @brief Add data to the global data
1624 * @param key The name string to add the data with
1625 * @param value The data to add
1626 * @param cb The optional callback to free the data with once ecore is shut down
1627 * @return An Ecore_Thread_Data on success, NULL on failure
1628 * This adds data to the global thread data and returns NULL, or replaces the previous data
1629 * associated with @p key and returning the previous data if it existed. To see if an error occurred,
1630 * one must use eina_error_get.
1631 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1632 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1633 * was specified for, you will most likely encounter a segv later on.
1636 ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
1638 Ecore_Thread_Data *d, *r;
1641 if ((!key) || (!value))
1643 #ifdef EFL_HAVE_THREADS
1644 LRWKWL(_ecore_thread_global_hash_lock);
1645 if (!_ecore_thread_global_hash)
1646 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1647 LRWKU(_ecore_thread_global_hash_lock);
1649 if (!_ecore_thread_global_hash)
1652 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1658 LRWKWL(_ecore_thread_global_hash_lock);
1659 r = eina_hash_set(_ecore_thread_global_hash, key, d);
1660 LRWKU(_ecore_thread_global_hash_lock);
1661 CDB(_ecore_thread_global_hash_cond);
1672 * @brief Find data in the global data
1673 * @param key The name string the data is associated with
1674 * @return The value, or NULL on error
1675 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1676 * This function will return NULL in any case but success.
1677 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1678 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1679 * was specified for, you will most likely encounter a segv later on.
1680 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1681 * if you will be doing anything with it.
1685 ecore_thread_global_data_find(const char *key)
1687 Ecore_Thread_Data *ret;
1690 #ifdef EFL_HAVE_THREADS
1691 if (!_ecore_thread_global_hash) return NULL;
1693 LRWKRL(_ecore_thread_global_hash_lock);
1694 ret = eina_hash_find(_ecore_thread_global_hash, key);
1695 LRWKU(_ecore_thread_global_hash_lock);
1703 * @brief Delete data from the global data
1704 * @param key The name string the data is associated with
1705 * @return EINA_TRUE on success, EINA_FALSE on failure
1706 * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add
1707 * This function will return EINA_FALSE in any case but success.
1708 * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
1711 ecore_thread_global_data_del(const char *key)
1714 Ecore_Thread_Data *d;
1718 #ifdef EFL_HAVE_THREADS
1719 if (!_ecore_thread_global_hash)
1722 LRWKWL(_ecore_thread_global_hash_lock);
1723 if ((d = eina_hash_find(_ecore_thread_global_hash, key)))
1724 _ecore_thread_data_free(d);
1725 ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1726 LRWKU(_ecore_thread_global_hash_lock);
1734 * @brief Find data in the global data and optionally wait for the data if not found
1735 * @param key The name string the data is associated with
1736 * @param seconds The amount of time in seconds to wait for the data. If 0, the call will be async and not wait for data.
1737 * If < 0 the call will wait indefinitely for the data.
1738 * @return The value, or NULL on failure
1739 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1740 * This function will return NULL in any case but success.
1741 * Use @p seconds to specify the amount of time to wait. Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
1742 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1743 * if you will be doing anything with it.
1746 ecore_thread_global_data_wait(const char *key, double seconds)
1749 Ecore_Thread_Data *ret = NULL;
1753 #ifdef EFL_HAVE_THREADS
1754 if (!_ecore_thread_global_hash)
1757 tm = ecore_time_get() + seconds;
1762 struct timespec t = { 0, 0 };
1764 t.tv_sec = (long int)tm;
1765 t.tv_nsec = (long int)((tm - (double)t.tv_sec) * 1000000000);
1767 struct timeval t = { 0, 0 };
1769 t.tv_sec = (long int)tm;
1770 t.tv_usec = (long int)((tm - (double)t.tv_sec) * 1000000);
1772 LRWKRL(_ecore_thread_global_hash_lock);
1773 ret = eina_hash_find(_ecore_thread_global_hash, key);
1774 LRWKU(_ecore_thread_global_hash_lock);
1775 if ((ret) || (!seconds) || ((seconds > 0) && (tm <= ecore_time_get())))
1777 LKL(_ecore_thread_global_hash_mutex);
1778 CDW(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex, &t);
1779 LKU(_ecore_thread_global_hash_mutex);
1781 if (ret) return ret->data;