/* Ecore thread-pool implementation (excerpt; the embedded numbering shows
 * gaps — lines between the printed numbers are not part of this view). */
17 #include "ecore_private.h"

/* Everything below is compiled only with thread support enabled. */
19 #ifdef EFL_HAVE_THREADS

/* Mutex wrappers over Eina_Lock: declare / init / destroy / lock / unlock. */
21 # define LK(x) Eina_Lock x
22 # define LKI(x) eina_lock_new(&(x))
23 # define LKD(x) eina_lock_free(&(x))
24 # define LKL(x) eina_lock_take(&(x))
25 # define LKU(x) eina_lock_release(&(x))

/* Condition-variable wrappers: declare / init bound to mutex m / destroy /
 * broadcast / timed wait (t is the timeout passed to eina). */
27 # define CD(x) Eina_Condition x
28 # define CDI(x, m) eina_condition_new(&(x), &(m))
29 # define CDD(x) eina_condition_free(&(x))
30 # define CDB(x) eina_condition_broadcast(&(x))
31 # define CDW(x, t) eina_condition_timedwait(&(x), t)

/* Read/write-lock wrappers: declare / init / destroy / write-lock /
 * read-lock / unlock.
 * NOTE(review): these definitions carry a trailing ';', unlike the mutex
 * and condition macros above — a ';' at the use site becomes an empty
 * statement.  Harmless, but stylistically inconsistent. */
33 # define LRWK(x) Eina_RWLock x
34 # define LRWKI(x) eina_rwlock_new(&(x));
35 # define LRWKD(x) eina_rwlock_free(&(x));
36 # define LRWKWL(x) eina_rwlock_take_write(&(x));
37 # define LRWKRL(x) eina_rwlock_take_read(&(x));
38 # define LRWKU(x) eina_rwlock_release(&(x));

/* Platform thread primitives: the POSIX flavour maps straight onto
 * pthreads... */
40 # ifdef EFL_HAVE_POSIX_THREADS
44 # include <sys/resource.h>
46 # include <sys/syscall.h>

/* PH: thread-handle declaration; PHE: handle equality; PHS: current
 * thread; PHC: create thread running f(d); PHJ: join; PHA: cancel. */
50 # define PH(x) pthread_t x
51 # define PHE(x, y) pthread_equal(x, y)
52 # define PHS() pthread_self()
53 # define PHC(x, f, d) pthread_create(&(x), NULL, (void *)f, d)
54 # define PHJ(x) pthread_join(x, NULL)
55 # define PHA(x) pthread_cancel(x)

/* ...while the Win32 flavour needs the small emulation layer below. */
57 # else /* EFL_HAVE_WIN32_THREADS */
59 # define WIN32_LEAN_AND_MEAN
61 # undef WIN32_LEAN_AND_MEAN
/* Win32 emulation: a registry of live win32_thread wrappers, guarded by
 * its own lock, so PHS()/PHE() can be implemented on top of handles. */
69 static Eina_List *_ecore_thread_win32_threads = NULL;
70 static Eina_Lock _ecore_thread_win32_lock;

/* A "thread handle" on Win32 is a pointer to our wrapper struct, so
 * equality is plain pointer comparison. */
72 # define PH(x) win32_thread * x
73 # define PHE(x, y) ((x) == (y))

/* Find the wrapper for the calling thread by scanning the registry under
 * the lock and comparing against GetCurrentThread(). */
76 _ecore_thread_win32_self()
81 LKL(_ecore_thread_win32_lock);
82 EINA_LIST_FOREACH(_ecore_thread_win32_threads, l, t)
83 if (t->thread == GetCurrentThread())
85 LKU(_ecore_thread_win32_lock);
89 LKU(_ecore_thread_win32_lock);
93 # define PHS() _ecore_thread_win32_self()

/* Create a thread: allocate a wrapper, CreateThread() under the registry
 * lock, and append the wrapper to the registry on success. */
96 _ecore_thread_win32_create(win32_thread **x,
97 LPTHREAD_START_ROUTINE f,
102 t = (win32_thread *)calloc(1, sizeof(win32_thread));
106 LKL(_ecore_thread_win32_lock);
107 (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
111 LKU(_ecore_thread_win32_lock);
116 _ecore_thread_win32_threads = eina_list_append(_ecore_thread_win32_threads, t);
117 LKU(_ecore_thread_win32_lock);
122 # define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)

/* Join: wait for the handle, close it, hand back the stored exit value
 * (if requested) and unregister the wrapper. */
125 _ecore_thread_win32_join(win32_thread *x,
130 WaitForSingleObject(x->thread, INFINITE);
131 CloseHandle(x->thread);
133 if (res) *res = x->val;
134 _ecore_thread_win32_threads = eina_list_remove(_ecore_thread_win32_threads, x);
140 # define PHJ(x) _ecore_thread_win32_join(x, NULL)
/* NOTE(review): TerminateThread is a hard kill — unlike pthread_cancel it
 * runs no cleanup on the target thread; confirm this is acceptable here. */
141 # define PHA(x) TerminateThread(x->thread, 0)
147 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
148 typedef struct _Ecore_Pthread Ecore_Pthread;
149 typedef struct _Ecore_Thread_Data Ecore_Thread_Data;

/* One keyed data entry: payload plus an optional free callback (invoked
 * by _ecore_thread_data_free when the entry is evicted). */
151 struct _Ecore_Thread_Data

/* One unit of work.  The visible members suggest a union of three run
 * modes (lines between the printed numbers are not shown):
 *   short_run:    func_blocking only;
 *   feedback_run: func_heavy + func_notify (+ optional direct_worker);
 *   message_run:  func_main + func_notify (+ direct_worker).           */
157 struct _Ecore_Pthread_Worker
162 Ecore_Thread_Cb func_blocking;
166 Ecore_Thread_Cb func_heavy;
167 Ecore_Thread_Notify_Cb func_notify;
169 Ecore_Pthread_Worker *direct_worker;
175 Ecore_Thread_Cb func_main;
176 Ecore_Thread_Notify_Cb func_notify;
179 Ecore_Pthread_Worker *direct_worker;

/* Common completion/cancel callbacks plus the per-worker state flags. */
188 Ecore_Thread_Cb func_cancel;
189 Ecore_Thread_Cb func_end;
190 #ifdef EFL_HAVE_THREADS
201 #ifdef EFL_HAVE_THREADS
205 Eina_Bool message_run : 1;
206 Eina_Bool feedback_run : 1;
208 Eina_Bool reschedule : 1;
209 Eina_Bool no_queue : 1;

/* Payload handed to the main loop for one notification delivery. */
212 #ifdef EFL_HAVE_THREADS
213 typedef struct _Ecore_Pthread_Notify Ecore_Pthread_Notify;
214 struct _Ecore_Pthread_Notify
216 Ecore_Pthread_Worker *work;
217 const void *user_data;

/* Synchronous message callback: its return value travels back to the
 * sending thread (see _ecore_message_notify_handler). */
220 typedef void *(*Ecore_Thread_Sync_Cb)(void* data, Ecore_Thread *thread);

/* Message exchanged with a message_run worker; async/sync callbacks
 * appear to share a union (used as u.async / u.sync by the handler). */
222 typedef struct _Ecore_Pthread_Message Ecore_Pthread_Message;
223 struct _Ecore_Pthread_Message
226 Ecore_Thread_Cb async;
227 Ecore_Thread_Sync_Cb sync;
234 Eina_Bool callback : 1;

/* Upper bound on pool size; set from the CPU count in _ecore_thread_init. */
240 static int _ecore_thread_count_max = 0;

242 #ifdef EFL_HAVE_THREADS
244 static void _ecore_thread_handler(void *data);

/* Live pool-thread count plus the pending/running job queues, guarded by
 * the two mutexes declared just below. */
246 static int _ecore_thread_count = 0;
248 static Eina_List *_ecore_running_job = NULL;
249 static Eina_List *_ecore_pending_job_threads = NULL;
250 static Eina_List *_ecore_pending_job_threads_feedback = NULL;
251 static LK(_ecore_pending_job_threads_mutex);
252 static LK(_ecore_running_job_mutex);

/* Process-wide key/value store shared by all threads: rwlock for access,
 * mutex+condition for ecore_thread_global_data_wait. */
254 static Eina_Hash *_ecore_thread_global_hash = NULL;
255 static LRWK(_ecore_thread_global_hash_lock);
256 static LK(_ecore_thread_global_hash_mutex);
257 static CD(_ecore_thread_global_hash_cond);

259 static Eina_Bool have_main_loop_thread = 0;

/* Recycled worker structs, to avoid a malloc/free per job. */
261 static Eina_Trash *_ecore_thread_worker_trash = NULL;
262 static int _ecore_thread_worker_count = 0;

264 static void *_ecore_thread_worker(void *);
265 static Ecore_Pthread_Worker *_ecore_thread_worker_new(void);

/* Lazily record which thread owns the main loop.  The cache is keyed by
 * pid (compared on line 273), so it refreshes after a fork(). */
267 static PH(get_main_loop_thread) (void)
269 static PH(main_loop_thread);
270 static pid_t main_loop_pid;
271 pid_t pid = getpid();
273 if (pid != main_loop_pid)
276 main_loop_thread = PHS();
277 have_main_loop_thread = 1;
280 return main_loop_thread;
/* Tear down a worker's locks, then either recycle it into the trash cache
 * or really free it once the cache holds more than (max_threads+1)*16. */
284 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
286 LKD(worker->cancel_mutex);
290 if (_ecore_thread_worker_count > ((_ecore_thread_count_max + 1) * 16))
292 _ecore_thread_worker_count--;
297 eina_trash_push(&_ecore_thread_worker_trash, worker)<br></br>;

/* Hash destructor for thread data: run the user's free callback on the
 * stored payload, if any. */
301 _ecore_thread_data_free(void *data)
303 Ecore_Thread_Data *d = data;
305 if (d->cb) d->cb(d->data);

/* Join helper, dispatched to the main loop for a finished thread handle. */
310 _ecore_thread_join(PH(thread))

/* Final disposition of a worker, on the main loop: fire func_cancel or
 * func_end, free any direct worker and the local-data hash, then recycle
 * the worker struct. */
316 _ecore_thread_kill(Ecore_Pthread_Worker *work)
320 if (work->func_cancel)
321 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
326 work->func_end((void *)work->data, (Ecore_Thread *)work);
329 if (work->feedback_run)
331 if (work->u.feedback_run.direct_worker)
332 _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
335 eina_hash_free(work->hash);
336 _ecore_thread_worker_free(work);

/* Main-loop completion handler.  A feedback worker with notifications
 * still in flight (send != received) is only flagged for later killing;
 * otherwise the worker is killed immediately. */
340 _ecore_thread_handler(void *data)
342 Ecore_Pthread_Worker *work = data;
344 if (work->feedback_run)
346 if (work->u.feedback_run.send != work->u.feedback_run.received)
348 work->kill = EINA_TRUE;
353 _ecore_thread_kill(work);

/* Pipe handler that deliberately ignores everything it is given. */
358 _ecore_nothing_handler(void *data __UNUSED__, void *buffer __UNUSED__, unsigned int nbyte __UNUSED__)

/* Deliver one feedback notification on the main loop: count it as
 * received, call func_notify, and finish a pending kill once every sent
 * notification has been consumed. */
364 _ecore_notify_handler(void *data)
366 Ecore_Pthread_Notify *notify = data;
367 Ecore_Pthread_Worker *work = notify->work;
368 void *user_data = (void*) notify->user_data;
370 work->u.feedback_run.received++;
372 if (work->u.feedback_run.func_notify)
373 work->u.feedback_run.func_notify((void *)work->data, (Ecore_Thread *)work, user_data);
375 /* Force reading all notify event before killing the thread */
376 if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
378 _ecore_thread_kill(work);

/* Deliver a message_run notification on the main loop.  Plain messages go
 * to func_notify; callback messages invoke their sync or async callback,
 * a sync result being written back to the worker through its pipe. */
385 _ecore_message_notify_handler(void *data)
387 Ecore_Pthread_Notify *notify = data;
388 Ecore_Pthread_Worker *work = notify->work;
389 Ecore_Pthread_Message *user_data = (void *) notify->user_data;
390 Eina_Bool delete = EINA_TRUE;
392 work->u.message_run.from.received++;
394 if (!user_data->callback)
396 if (work->u.message_run.func_notify)
397 work->u.message_run.func_notify((void *) work->data, (Ecore_Thread *) work, (void *) user_data->data);
/* Sync callback: run it, stash the result in the message, and post the
 * message pointer back to the worker over its reply pipe. */
403 user_data->data = user_data->u.sync((void*) user_data->data, (Ecore_Thread *) work);
404 user_data->callback = EINA_FALSE;
405 user_data->code = INT_MAX;
406 ecore_pipe_write(work->u.message_run.send, &user_data, sizeof (Ecore_Pthread_Message *));
412 user_data->u.async((void*) user_data->data, (Ecore_Thread *) work);
421 /* Force reading all notify event before killing the thread */
422 if (work->kill && work->u.message_run.from.send == work->u.message_run.from.received)
424 _ecore_thread_kill(work);
/* Pop one short (blocking) job off the pending queue and run it on the
 * calling pool thread.  A reschedule request re-queues the job; otherwise
 * the completion handler is posted to the main loop. */
430 _ecore_short_job(PH(thread))
432 Ecore_Pthread_Worker *work;
435 LKL(_ecore_pending_job_threads_mutex);
437 if (!_ecore_pending_job_threads)
439 LKU(_ecore_pending_job_threads_mutex);
/* Dequeue the head job and track it in the running list. */
443 work = eina_list_data_get(_ecore_pending_job_threads);
444 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
445 _ecore_pending_job_threads);
446 LKU(_ecore_pending_job_threads_mutex);
448 LKL(_ecore_running_job_mutex);
449 _ecore_running_job = eina_list_append(_ecore_running_job, work);
450 LKU(_ecore_running_job_mutex);
/* Snapshot the cancel flag under its own mutex before running. */
452 LKL(work->cancel_mutex);
453 cancel = work->cancel;
454 LKU(work->cancel_mutex);
457 work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);
459 LKL(_ecore_running_job_mutex);
460 _ecore_running_job = eina_list_remove(_ecore_running_job, work);
461 LKU(_ecore_running_job_mutex);
463 if (work->reschedule)
465 work->reschedule = EINA_FALSE;
467 LKL(_ecore_pending_job_threads_mutex);
468 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
469 LKU(_ecore_pending_job_threads_mutex);
473 ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);

/* Same flow as _ecore_short_job but for the feedback queue: runs
 * func_heavy instead of func_blocking. */
478 _ecore_feedback_job(PH(thread))
480 Ecore_Pthread_Worker *work;
483 LKL(_ecore_pending_job_threads_mutex);
485 if (!_ecore_pending_job_threads_feedback)
487 LKU(_ecore_pending_job_threads_mutex);
491 work = eina_list_data_get(_ecore_pending_job_threads_feedback);
492 _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
493 _ecore_pending_job_threads_feedback);
494 LKU(_ecore_pending_job_threads_mutex);
495 LKL(_ecore_running_job_mutex);
496 _ecore_running_job = eina_list_append(_ecore_running_job, work);
497 LKU(_ecore_running_job_mutex);
499 LKL(work->cancel_mutex);
500 cancel = work->cancel;
501 LKU(work->cancel_mutex);
504 work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
506 LKL(_ecore_running_job_mutex);
507 _ecore_running_job = eina_list_remove(_ecore_running_job, work);
508 LKU(_ecore_running_job_mutex);
510 if (work->reschedule)
512 work->reschedule = EINA_FALSE;
514 LKL(_ecore_pending_job_threads_mutex);
515 _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
516 LKU(_ecore_pending_job_threads_mutex);
520 ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);

/* Body of a dedicated (no_queue / message_run) thread: enable async
 * cancellation, drop scheduling priority, run the single job, then ask
 * the main loop to run the completion handler and join this thread. */
525 _ecore_direct_worker(Ecore_Pthread_Worker *work)
/* NOTE(review): guard is EFL_POSIX_THREADS here, but the macro used at
 * the top of this file is EFL_HAVE_POSIX_THREADS — confirm which one the
 * build actually defines, or this setup may be silently skipped. */
527 #ifdef EFL_POSIX_THREADS
528 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
529 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
532 eina_sched_prio_drop();
535 if (work->message_run)
536 work->u.message_run.func_main((void *) work->data, (Ecore_Thread *) work);
538 work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
540 ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
542 ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join,

/* Pool worker loop: drain short jobs then feedback jobs, linger briefly
 * for new work (Sleep(1) on the visible Win32 path), and exit once the
 * queues stay empty — decrementing the count and scheduling a join. */
549 _ecore_thread_worker(void *data __UNUSED__)
/* NOTE(review): same EFL_POSIX_THREADS guard question as above. */
551 #ifdef EFL_POSIX_THREADS
552 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
553 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
556 eina_sched_prio_drop();
559 _ecore_short_job(PHS());
560 _ecore_feedback_job(PHS());
562 /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
564 LKL(_ecore_pending_job_threads_mutex);
565 if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
567 LKU(_ecore_pending_job_threads_mutex);
570 LKU(_ecore_pending_job_threads_mutex);
572 /* Sleep a little to prevent premature death */
574 Sleep(1); /* around 50ms */
/* Re-check after the grace period; only die if still nothing queued. */
579 LKL(_ecore_pending_job_threads_mutex);
580 if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
582 LKU(_ecore_pending_job_threads_mutex);
585 _ecore_thread_count--;
587 ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join,
589 LKU(_ecore_pending_job_threads_mutex);
/* Allocate a worker, recycling from the trash cache when possible, and
 * initialise its cancel mutex and condition.  The threadless build falls
 * through to a bare malloc. */
596 static Ecore_Pthread_Worker *
597 _ecore_thread_worker_new(void)
599 #ifdef EFL_HAVE_THREADS
600 Ecore_Pthread_Worker *result;
602 result = eina_trash_pop(&_ecore_thread_worker_trash);
606 result = calloc(1, sizeof(Ecore_Pthread_Worker));
607 _ecore_thread_worker_count++;
610 LKI(result->cancel_mutex);
612 CDI(result->cond, result->mutex);
/* NOTE(review): the threadless path returns uninitialised memory (malloc,
 * not calloc) — confirm every caller clears each field it later reads. */
616 return malloc(sizeof (Ecore_Pthread_Worker));

/* Module init: size the pool from the CPU count (minimum 1) and create
 * the global locks/condition (plus the Win32 registry lock if needed). */
621 _ecore_thread_init(void)
623 _ecore_thread_count_max = eina_cpu_count();
624 if (_ecore_thread_count_max <= 0)
625 _ecore_thread_count_max = 1;
627 #ifdef EFL_HAVE_THREADS
628 # ifdef EFL_HAVE_WIN32_THREADS
629 LKI(_ecore_thread_win32_lock);
631 LKI(_ecore_pending_job_threads_mutex);
632 LRWKI(_ecore_thread_global_hash_lock);
633 LKI(_ecore_thread_global_hash_mutex);
634 LKI(_ecore_running_job_mutex);
635 CDI(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex);

/* Module shutdown: cancel every pending and running job, wait up to ~1s
 * (20 x 50ms polls) for live threads to exit, then drain the worker cache
 * and destroy the global locks. */
640 _ecore_thread_shutdown(void)
642 /* FIXME: If function are still running in the background, should we kill them ? */
643 #ifdef EFL_HAVE_THREADS
644 Ecore_Pthread_Worker *work;
649 LKL(_ecore_pending_job_threads_mutex);
/* Flush both pending queues, giving each job its cancel callback. */
651 EINA_LIST_FREE(_ecore_pending_job_threads, work)
653 if (work->func_cancel)
654 work->func_cancel((void *)work->data, (Ecore_Thread *) work);
658 EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
660 if (work->func_cancel)
661 work->func_cancel((void *)work->data, (Ecore_Thread *) work);
665 LKU(_ecore_pending_job_threads_mutex);
666 LKL(_ecore_running_job_mutex);
668 EINA_LIST_FOREACH(_ecore_running_job, l, work)
669 ecore_thread_cancel((Ecore_Thread*) work);
671 LKU(_ecore_running_job_mutex);
/* Poll for the pool threads to wind down. */
675 LKL(_ecore_pending_job_threads_mutex);
676 if (_ecore_thread_count > 0)
684 LKU(_ecore_pending_job_threads_mutex);
686 if (test) usleep(50000);
688 while (test == EINA_TRUE && iteration < 20);
690 if (iteration == 20 && _ecore_thread_count > 0)
692 ERR("%i of the child thread are still running after 1s. This can lead to a segv. Sorry.", _ecore_thread_count);
695 if (_ecore_thread_global_hash)
696 eina_hash_free(_ecore_thread_global_hash);
697 have_main_loop_thread = 0;
/* Drain the recycled-worker cache. */
699 while ((work = eina_trash_pop(&_ecore_thread_worker_trash)))
704 LKD(_ecore_pending_job_threads_mutex);
705 LRWKD(_ecore_thread_global_hash_lock);
706 LKD(_ecore_thread_global_hash_mutex);
707 LKD(_ecore_running_job_mutex);
708 CDD(_ecore_thread_global_hash_cond);
709 # ifdef EFL_HAVE_WIN32_THREADS
/* NOTE(review): this RELEASES (LKU) the Win32 registry lock while every
 * other lock above is DESTROYED (LKD) at shutdown — looks like LKD was
 * intended here; confirm. */
710 LKU(_ecore_thread_win32_lock);
/* Public API: queue a short (blocking) job.  Fills in a worker, appends
 * it to the pending queue, and spawns one more pool thread if the pool is
 * below its maximum.  Without thread support — or when no pool thread
 * exists and creation fails — the job runs synchronously in the caller,
 * honouring reschedule requests in a loop. */
716 ecore_thread_run(Ecore_Thread_Cb func_blocking,
717 Ecore_Thread_Cb func_end,
718 Ecore_Thread_Cb func_cancel,
721 Ecore_Pthread_Worker *work;
722 Eina_Bool tried = EINA_FALSE;
723 #ifdef EFL_HAVE_THREADS
727 EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
729 if (!func_blocking) return NULL;
731 work = _ecore_thread_worker_new();
/* Allocation failed: report cancellation straight back to the caller. */
735 func_cancel((void *)data, NULL);
739 work->u.short_run.func_blocking = func_blocking;
740 work->func_end = func_end;
741 work->func_cancel = func_cancel;
742 work->cancel = EINA_FALSE;
743 work->feedback_run = EINA_FALSE;
744 work->message_run = EINA_FALSE;
745 work->kill = EINA_FALSE;
746 work->reschedule = EINA_FALSE;
747 work->no_queue = EINA_FALSE;
750 #ifdef EFL_HAVE_THREADS
754 LKL(_ecore_pending_job_threads_mutex);
755 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
/* Pool already at capacity: an existing thread will pick the job up. */
757 if (_ecore_thread_count == _ecore_thread_count_max)
759 LKU(_ecore_pending_job_threads_mutex);
760 return (Ecore_Thread *)work;
763 LKU(_ecore_pending_job_threads_mutex);
765 /* One more thread could be created. */
768 LKL(_ecore_pending_job_threads_mutex);
771 if (PHC(thread, _ecore_thread_worker, NULL) == 0)
773 _ecore_thread_count++;
774 LKU(_ecore_pending_job_threads_mutex);
775 return (Ecore_Thread *)work;
779 _ecore_main_call_flush();
/* Creation failed and no pool thread exists at all: unqueue, give the
 * job its cancel callback, and clean the worker up. */
784 if (_ecore_thread_count == 0)
786 _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
788 if (work->func_cancel)
789 work->func_cancel((void *) work->data, (Ecore_Thread *) work);
793 LKD(work->cancel_mutex);
797 LKU(_ecore_pending_job_threads_mutex);
799 eina_threads_shutdown();
801 return (Ecore_Thread *)work;
804 If no thread and as we don't want to break app that rely on this
805 facility, we will lock the interface until we are done.
808 /* Handle reschedule by forcing it here. That would mean locking the app,
809 * would be better with an idler, but really to complex for a case where
810 * thread should really exist.
812 work->reschedule = EINA_FALSE;
814 func_blocking((void *)data, (Ecore_Thread *)work);
815 if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *)work);
816 else func_cancel((void *)data, (Ecore_Thread *)work);
817 } while (work->reschedule == EINA_TRUE);
/* Public API: cancel a thread.  A feedback worker with unconsumed
 * notifications is left alone here (the drain path finishes it); a job
 * still sitting in a pending queue is removed and cancelled immediately
 * (main-loop thread only); anything else just gets its cancel flag set,
 * with destruction deferred to the worker itself. */
826 ecore_thread_cancel(Ecore_Thread *thread)
828 #ifdef EFL_HAVE_THREADS
829 Ecore_Pthread_Worker *volatile work = (Ecore_Pthread_Worker *)thread;
835 LKL(work->cancel_mutex);
836 cancel = work->cancel;
837 LKU(work->cancel_mutex);
841 if (work->feedback_run)
/* In-flight notifications must drain before this worker may die. */
845 if (work->u.feedback_run.send != work->u.feedback_run.received)
849 LKL(_ecore_pending_job_threads_mutex);
/* Pending-queue removal is only attempted from the main-loop thread. */
851 if ((have_main_loop_thread) &&
852 (PHE(get_main_loop_thread(), PHS())))
854 if (!work->feedback_run)
855 EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
857 if ((void *)work == (void *)thread)
859 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
861 LKU(_ecore_pending_job_threads_mutex);
863 if (work->func_cancel)
864 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
871 EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
873 if ((void *)work == (void *)thread)
875 _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);
877 LKU(_ecore_pending_job_threads_mutex);
879 if (work->func_cancel)
880 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
888 LKU(_ecore_pending_job_threads_mutex);
890 work = (Ecore_Pthread_Worker *)thread;
892 /* Delay the destruction */
894 LKL(work->cancel_mutex);
895 work->cancel = EINA_TRUE;
896 LKU(work->cancel_mutex);

/* Public API: report whether a thread was cancelled (TRUE for NULL too,
 * so callers treat a missing thread as "stop working"). */
906 ecore_thread_check(Ecore_Thread *thread)
908 Ecore_Pthread_Worker *volatile worker = (Ecore_Pthread_Worker *) thread;
911 if (!worker) return EINA_TRUE;
912 #ifdef EFL_HAVE_THREADS
913 LKL(worker->cancel_mutex);
915 cancel = worker->cancel;
916 /* FIXME: there is an insane bug driving me nuts here. I don't know if
917 it's a race condition, some cache issue or some alien attack on our software.
918 But ecore_thread_check will only work correctly with a printf, all the volatile,
919 lock and even usleep don't help here... */
920 /* fprintf(stderr, "wc: %i\n", cancel); */
921 #ifdef EFL_HAVE_THREADS
922 LKU(worker->cancel_mutex);
/* Public API: start a long-running job that reports progress through
 * func_notify.  When try_no_queue is set a dedicated thread is attempted
 * first (bypassing the pool); otherwise — or if that fails — the job goes
 * onto the feedback queue.  With no thread support at all, the whole job
 * runs inline on a stack-allocated worker. */
928 ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
929 Ecore_Thread_Notify_Cb func_notify,
930 Ecore_Thread_Cb func_end,
931 Ecore_Thread_Cb func_cancel,
933 Eina_Bool try_no_queue)
935 #ifdef EFL_HAVE_THREADS
936 Ecore_Pthread_Worker *worker;
937 Eina_Bool tried = EINA_FALSE;
940 EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
942 if (!func_heavy) return NULL;
944 worker = _ecore_thread_worker_new();
945 if (!worker) goto on_error;
947 worker->u.feedback_run.func_heavy = func_heavy;
948 worker->u.feedback_run.func_notify = func_notify;
950 worker->func_cancel = func_cancel;
951 worker->func_end = func_end;
953 worker->cancel = EINA_FALSE;
954 worker->message_run = EINA_FALSE;
955 worker->feedback_run = EINA_TRUE;
956 worker->kill = EINA_FALSE;
957 worker->reschedule = EINA_FALSE;
960 worker->u.feedback_run.send = 0;
961 worker->u.feedback_run.received = 0;
963 worker->u.feedback_run.direct_worker = NULL;
/* try_no_queue path: give the job its own dedicated direct worker. */
969 worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
970 worker->no_queue = EINA_TRUE;
975 if (PHC(t, _ecore_direct_worker, worker) == 0)
976 return (Ecore_Thread *)worker;
979 _ecore_main_call_flush();
/* Dedicated thread failed: discard the direct worker, fall back to the
 * pooled feedback queue. */
984 if (worker->u.feedback_run.direct_worker)
986 _ecore_thread_worker_free(worker->u.feedback_run.direct_worker);
987 worker->u.feedback_run.direct_worker = NULL;
990 eina_threads_shutdown();
993 worker->no_queue = EINA_FALSE;
995 LKL(_ecore_pending_job_threads_mutex);
996 _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
/* Pool full: an existing thread will pick the job up. */
998 if (_ecore_thread_count == _ecore_thread_count_max)
1000 LKU(_ecore_pending_job_threads_mutex);
1001 return (Ecore_Thread *)worker;
1004 LKU(_ecore_pending_job_threads_mutex);
1006 /* One more thread could be created. */
1007 eina_threads_init();
1009 LKL(_ecore_pending_job_threads_mutex);
1011 if (PHC(thread, _ecore_thread_worker, NULL) == 0)
1013 _ecore_thread_count++;
1014 LKU(_ecore_pending_job_threads_mutex);
1015 return (Ecore_Thread *)worker;
1019 _ecore_main_call_flush();
1023 LKU(_ecore_pending_job_threads_mutex);
1025 eina_threads_shutdown();
/* Error path: no pool thread exists, so unqueue and cancel immediately. */
1028 LKL(_ecore_pending_job_threads_mutex);
1029 if (_ecore_thread_count == 0)
1031 _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
1034 if (func_cancel) func_cancel((void *)data, NULL);
1044 LKU(_ecore_pending_job_threads_mutex);
1046 return (Ecore_Thread *)worker;
/* Threadless build: run the whole job synchronously on the stack,
 * honouring reschedule in a loop. */
1048 Ecore_Pthread_Worker worker;
1053 If no thread and as we don't want to break app that rely on this
1054 facility, we will lock the interface until we are done.
1056 worker.u.feedback_run.func_heavy = func_heavy;
1057 worker.u.feedback_run.func_notify = func_notify;
1058 worker.u.feedback_run.send = 0;
1059 worker.u.feedback_run.received = 0;
1060 worker.func_cancel = func_cancel;
1061 worker.func_end = func_end;
1063 worker.cancel = EINA_FALSE;
1064 worker.feedback_run = EINA_TRUE;
1065 worker.message_run = EINA_FALSE;
1066 worker.kill = EINA_FALSE;
1069 worker.reschedule = EINA_FALSE;
1071 func_heavy((void *)data, (Ecore_Thread *)&worker);
1073 if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *)&worker);
1074 else func_end((void *)data, (Ecore_Thread *)&worker);
1075 } while (worker.reschedule == EINA_TRUE);
/* Public API: send a progress notification from a worker to the main
 * loop.  Only valid from the worker's own thread (self check below). */
1082 ecore_thread_feedback(Ecore_Thread *thread,
1085 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1087 if (!worker) return EINA_FALSE;
1089 #ifdef EFL_HAVE_THREADS
1090 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1092 if (worker->feedback_run)
1094 Ecore_Pthread_Notify *notify;
1096 notify = malloc(sizeof (Ecore_Pthread_Notify));
1097 if (!notify) return EINA_FALSE;
1099 notify->user_data = data;
1100 notify->work = worker;
/* Count the send so the kill path can wait for its delivery. */
1101 worker->u.feedback_run.send++;
1103 ecore_main_loop_thread_safe_call_async(_ecore_notify_handler, notify);
/* message_run workers wrap the payload in an Ecore_Pthread_Message. */
1105 else if (worker->message_run)
1107 Ecore_Pthread_Message *msg;
1108 Ecore_Pthread_Notify *notify;
1110 msg = malloc(sizeof (Ecore_Pthread_Message));
1111 if (!msg) return EINA_FALSE;
1113 msg->callback = EINA_FALSE;
1114 msg->sync = EINA_FALSE;
1116 notify = malloc(sizeof (Ecore_Pthread_Notify));
1122 notify->work = worker;
1123 notify->user_data = msg;
1125 worker->u.message_run.from.send++;
1126 ecore_main_loop_thread_safe_call_async(_ecore_message_notify_handler, notify);
/* Fallback (visible on the threadless path): deliver directly. */
1133 worker->u.feedback_run.func_notify((void *)worker->data, thread, (void *)data);

/* Public API: spawn a message-driven thread — func_main runs in its own
 * dedicated thread and exchanges messages with the main loop through an
 * Ecore_Pipe.  This mode requires real thread support. */
1141 ecore_thread_message_run(Ecore_Thread_Cb func_main,
1142 Ecore_Thread_Notify_Cb func_notify,
1143 Ecore_Thread_Cb func_end,
1144 Ecore_Thread_Cb func_cancel,
1147 #ifdef EFL_HAVE_THREADS
1148 Ecore_Pthread_Worker *worker;
1151 if (!func_main) return NULL;
1153 worker = _ecore_thread_worker_new();
1154 if (!worker) return NULL;
1156 worker->u.message_run.func_main = func_main;
1157 worker->u.message_run.func_notify = func_notify;
1158 worker->u.message_run.direct_worker = _ecore_thread_worker_new();
1159 worker->u.message_run.send = ecore_pipe_add(_ecore_nothing_handler, worker);
1160 worker->u.message_run.from.send = 0;
1161 worker->u.message_run.from.received = 0;
1162 worker->u.message_run.to.send = 0;
1163 worker->u.message_run.to.received = 0;
/* Keep the reply pipe frozen until something needs to read from it. */
1165 ecore_pipe_freeze(worker->u.message_run.send);
1167 worker->func_cancel = func_cancel;
1168 worker->func_end = func_end;
1169 worker->hash = NULL;
1170 worker->data = data;
1172 worker->cancel = EINA_FALSE;
1173 worker->message_run = EINA_TRUE;
1174 worker->feedback_run = EINA_FALSE;
1175 worker->kill = EINA_FALSE;
1176 worker->reschedule = EINA_FALSE;
1177 worker->no_queue = EINA_FALSE;
1180 eina_threads_init();
1182 if (PHC(t, _ecore_direct_worker, worker) == 0)
1183 return (Ecore_Thread*) worker;
/* Thread creation failed: roll back everything allocated above. */
1185 eina_threads_shutdown();
1187 if (worker->u.message_run.direct_worker) _ecore_thread_worker_free(worker->u.message_run.direct_worker);
1188 if (worker->u.message_run.send) ecore_pipe_del(worker->u.message_run.send);
1193 /* Note: This type of thread can't and never will work without thread support */
1194 WRN("ecore_thread_message_run called, but threads disable in Ecore, things will go wrong. Starting now !");
1195 # warning "You disabled threads support in ecore, I hope you know what you are doing !"
1198 func_cancel((void *) data, NULL);
/* Public API: ask the currently running job to be re-queued after its
 * function returns.  Only valid from the worker's own thread. */
1205 ecore_thread_reschedule(Ecore_Thread *thread)
1207 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1209 if (!worker) return EINA_FALSE;
1211 #ifdef EFL_HAVE_THREADS
1212 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1215 worker->reschedule = EINA_TRUE;

/* Number of currently running pool threads. */
1220 ecore_thread_active_get(void)
1222 #ifdef EFL_HAVE_THREADS
1223 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1224 return _ecore_thread_count;

/* Number of short jobs waiting in the pending queue. */
1231 ecore_thread_pending_get(void)
1233 #ifdef EFL_HAVE_THREADS
1236 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1237 LKL(_ecore_pending_job_threads_mutex);
1238 ret = eina_list_count(_ecore_pending_job_threads);
1239 LKU(_ecore_pending_job_threads_mutex);

/* Number of feedback jobs waiting in the feedback queue. */
1247 ecore_thread_pending_feedback_get(void)
1249 #ifdef EFL_HAVE_THREADS
1252 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1253 LKL(_ecore_pending_job_threads_mutex);
1254 ret = eina_list_count(_ecore_pending_job_threads_feedback);
1255 LKU(_ecore_pending_job_threads_mutex);

/* Total waiting jobs: short queue plus feedback queue. */
1263 ecore_thread_pending_total_get(void)
1265 #ifdef EFL_HAVE_THREADS
1268 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1269 LKL(_ecore_pending_job_threads_mutex);
1270 ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1271 LKU(_ecore_pending_job_threads_mutex);

/* Configured maximum pool size. */
1279 ecore_thread_max_get(void)
1281 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1282 return _ecore_thread_count_max;

/* Set the maximum pool size; values < 1 are ignored and the cap is
 * clamped to 16 threads per CPU. */
1286 ecore_thread_max_set(int num)
1288 EINA_MAIN_LOOP_CHECK_RETURN;
1289 if (num < 1) return;
1290 /* avoid doing something hilarious by blocking dumb users */
1291 if (num > (16 * eina_cpu_count())) num = 16 * eina_cpu_count();
1293 _ecore_thread_count_max = num;

/* Reset the maximum pool size back to the CPU count. */
1297 ecore_thread_max_reset(void)
1299 EINA_MAIN_LOOP_CHECK_RETURN;
1300 _ecore_thread_count_max = eina_cpu_count();

/* Remaining capacity: configured maximum minus running threads. */
1304 ecore_thread_available_get(void)
1306 #ifdef EFL_HAVE_THREADS
1309 LKL(_ecore_pending_job_threads_mutex);
1310 ret = _ecore_thread_count_max - _ecore_thread_count;
1311 LKU(_ecore_pending_job_threads_mutex);
/* Public API: attach a value to a running thread under a string key in
 * its thread-local hash.  Only callable from that thread.  The 'direct'
 * flag (visible via the two add calls) selects eina_hash_direct_add
 * (key not copied) versus eina_hash_add. */
1319 ecore_thread_local_data_add(Ecore_Thread *thread,
1325 #ifdef EFL_HAVE_THREADS
1326 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1327 Ecore_Thread_Data *d;
1331 if ((!thread) || (!key) || (!value))
1333 #ifdef EFL_HAVE_THREADS
1334 if (!PHE(worker->self, PHS())) return EINA_FALSE;
/* Lazily create the per-thread hash with the data-freeing destructor. */
1337 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1342 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1349 ret = eina_hash_direct_add(worker->hash, key, d);
1351 ret = eina_hash_add(worker->hash, key, d);

/* Public API: replace (or insert) a thread-local value, returning the
 * previous payload via eina_hash_set.  Calling thread only. */
1362 ecore_thread_local_data_set(Ecore_Thread *thread,
1367 #ifdef EFL_HAVE_THREADS
1368 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1369 Ecore_Thread_Data *d, *r;
1373 if ((!thread) || (!key) || (!value))
1375 #ifdef EFL_HAVE_THREADS
1376 if (!PHE(worker->self, PHS())) return NULL;
1379 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1384 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1390 r = eina_hash_set(worker->hash, key, d);

/* Public API: look up a thread-local value by key (calling thread only). */
1402 ecore_thread_local_data_find(Ecore_Thread *thread,
1405 #ifdef EFL_HAVE_THREADS
1406 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1407 Ecore_Thread_Data *d;
1410 if ((!thread) || (!key))
1412 #ifdef EFL_HAVE_THREADS
1413 if (!PHE(worker->self, PHS())) return NULL;
1418 d = eina_hash_find(worker->hash, key);

/* Public API: remove a thread-local value; the hash destructor frees the
 * payload through the entry's callback. */
1428 ecore_thread_local_data_del(Ecore_Thread *thread,
1431 #ifdef EFL_HAVE_THREADS
1432 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1435 if ((!thread) || (!key))
1437 #ifdef EFL_HAVE_THREADS
1438 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1442 return eina_hash_del_by_key(worker->hash, key);

/* Public API: add to the process-wide hash (any thread).  Writers take
 * the rwlock in write mode, then broadcast the condition to wake anyone
 * blocked in ecore_thread_global_data_wait. */
1449 ecore_thread_global_data_add(const char *key,
1454 #ifdef EFL_HAVE_THREADS
1455 Ecore_Thread_Data *d;
1459 if ((!key) || (!value))
1461 #ifdef EFL_HAVE_THREADS
/* Lazily create the global hash under the write lock. */
1462 LRWKWL(_ecore_thread_global_hash_lock);
1463 if (!_ecore_thread_global_hash)
1464 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1465 LRWKU(_ecore_thread_global_hash_lock);
1467 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1473 if (!_ecore_thread_global_hash)
1475 LRWKWL(_ecore_thread_global_hash_lock);
1477 ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1479 ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1480 LRWKU(_ecore_thread_global_hash_lock);
/* Wake waiters blocked in ecore_thread_global_data_wait. */
1481 CDB(_ecore_thread_global_hash_cond);

/* Public API: replace (or insert) a global value, returning the previous
 * payload; same locking and wake-up pattern as the add above. */
1491 ecore_thread_global_data_set(const char *key,
1495 #ifdef EFL_HAVE_THREADS
1496 Ecore_Thread_Data *d, *r;
1500 if ((!key) || (!value))
1502 #ifdef EFL_HAVE_THREADS
1503 LRWKWL(_ecore_thread_global_hash_lock);
1504 if (!_ecore_thread_global_hash)
1505 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1506 LRWKU(_ecore_thread_global_hash_lock);
1508 if (!_ecore_thread_global_hash)
1511 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1517 LRWKWL(_ecore_thread_global_hash_lock);
1518 r = eina_hash_set(_ecore_thread_global_hash, key, d);
1519 LRWKU(_ecore_thread_global_hash_lock);
1520 CDB(_ecore_thread_global_hash_cond);

/* Public API: read-locked lookup in the global hash. */
1532 ecore_thread_global_data_find(const char *key)
1534 #ifdef EFL_HAVE_THREADS
1535 Ecore_Thread_Data *ret;
1540 #ifdef EFL_HAVE_THREADS
1541 if (!_ecore_thread_global_hash) return NULL;
1543 LRWKRL(_ecore_thread_global_hash_lock);
1544 ret = eina_hash_find(_ecore_thread_global_hash, key);
1545 LRWKU(_ecore_thread_global_hash_lock);

/* Public API: write-locked delete from the global hash. */
1555 ecore_thread_global_data_del(const char *key)
1557 #ifdef EFL_HAVE_THREADS
1563 #ifdef EFL_HAVE_THREADS
1564 if (!_ecore_thread_global_hash)
1567 LRWKWL(_ecore_thread_global_hash_lock);
1568 ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1569 LRWKU(_ecore_thread_global_hash_lock);
1577 ecore_thread_global_data_wait(const char *key,
1580 #ifdef EFL_HAVE_THREADS
1582 Ecore_Thread_Data *ret = NULL;
1587 #ifdef EFL_HAVE_THREADS
1588 if (!_ecore_thread_global_hash)
1591 tm = ecore_time_get() + seconds;
1595 LRWKRL(_ecore_thread_global_hash_lock);
1596 ret = eina_hash_find(_ecore_thread_global_hash, key);
1597 LRWKU(_ecore_thread_global_hash_lock);
1598 if ((ret) || (!seconds) || ((seconds > 0) && (tm <= ecore_time_get())))
1600 LKL(_ecore_thread_global_hash_mutex);
1601 CDW(_ecore_thread_global_hash_cond, tm);
1602 LKU(_ecore_thread_global_hash_mutex);
1604 if (ret) return ret->data;