17 #include "ecore_private.h"
19 #ifdef EFL_HAVE_THREADS
/* Thin wrappers over the Eina locking primitives so the rest of this
 * file stays terse: LK* = plain mutex, CD* = condition variable,
 * LRWK* = read/write lock. */
# define LK(x) Eina_Lock x
# define LKI(x) eina_lock_new(&(x))
# define LKD(x) eina_lock_free(&(x))
# define LKL(x) eina_lock_take(&(x))
# define LKU(x) eina_lock_release(&(x))

# define CD(x) Eina_Condition x
# define CDI(x, m) eina_condition_new(&(x), &(m))
# define CDD(x) eina_condition_free(&(x))
# define CDB(x) eina_condition_broadcast(&(x))
# define CDW(x, t) eina_condition_timedwait(&(x), t)

/* Fix: the LRWK* macros used to end with a trailing ';' inside the
 * macro body.  Call sites already supply their own ';' (e.g.
 * "LRWKI(_ecore_thread_global_hash_lock);" in _ecore_thread_init()),
 * so the expansion produced an empty extra statement, which is
 * inconsistent with the LK and CD macros above and silently breaks
 * use inside an un-braced if/else.  The semicolons are dropped. */
# define LRWK(x) Eina_RWLock x
# define LRWKI(x) eina_rwlock_new(&(x))
# define LRWKD(x) eina_rwlock_free(&(x))
# define LRWKWL(x) eina_rwlock_take_write(&(x))
# define LRWKRL(x) eina_rwlock_take_read(&(x))
# define LRWKU(x) eina_rwlock_release(&(x))
/* Portability layer over the native thread API.  The PH* macros map to
 * pthreads on POSIX and to small CreateThread/WaitForSingleObject
 * wrappers on Windows.
 * NOTE(review): this is an elided dump — several lines of the win32
 * helper bodies (error handling, struct fields, returns) are missing
 * from this view; the comments below cover only what is visible. */
40 # ifdef EFL_HAVE_POSIX_THREADS
44 # include <sys/resource.h>
46 # include <sys/syscall.h>
50 # define PH(x) pthread_t x
51 # define PHE(x, y) pthread_equal(x, y)
52 # define PHS() pthread_self()
/* The (void *)f cast below silences the start-routine prototype
 * mismatch; presumably f really has pthread's void *(*)(void *)
 * shape — TODO confirm at call sites. */
53 # define PHC(x, f, d) pthread_create(&(x), NULL, (void *)f, d)
54 # define PHJ(x) pthread_join(x, NULL)
/* PHA = "abort": pthread_cancel on POSIX, TerminateThread on win32.
 * NOTE(review): TerminateThread does not run any thread cleanup. */
55 # define PHA(x) pthread_cancel(x)
57 # else /* EFL_HAVE_WIN32_THREADS */
59 # define WIN32_LEAN_AND_MEAN
61 # undef WIN32_LEAN_AND_MEAN
69 # define PH(x) win32_thread * x
70 # define PHE(x, y) ((x) == (y))
71 # define PHS() (HANDLE)GetCurrentThreadId()
/* win32 equivalent of pthread_create: heap-allocates a win32_thread
 * handle wrapper and spawns the thread. */
74 _ecore_thread_win32_create(win32_thread **x,
75 LPTHREAD_START_ROUTINE f,
79 t = (win32_thread *)calloc(1, sizeof(win32_thread));
83 (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
95 # define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)
/* win32 equivalent of pthread_join: wait, close the handle, and hand
 * back the stored exit value when the caller asked for it. */
98 _ecore_thread_win32_join(win32_thread *x,
103 WaitForSingleObject(x->thread, INFINITE);
104 CloseHandle(x->thread);
106 if (res) *res = x->val;
112 # define PHJ(x) _ecore_thread_win32_join(x, NULL)
113 # define PHA(x) TerminateThread(x->thread, 0)
/* Core types of the thread pool.
 * NOTE(review): elided dump — the union/brace structure of
 * _Ecore_Pthread_Worker is partially missing here.  From the visible
 * members it appears to hold per-mode callback sets (short_run
 * blocking job / feedback_run heavy job / message_run pipe-based job)
 * plus the common end/cancel callbacks and state bitfields — TODO
 * confirm against the full file. */
119 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
120 typedef struct _Ecore_Pthread Ecore_Pthread;
121 typedef struct _Ecore_Thread_Data Ecore_Thread_Data;
/* One key/value entry stored in a thread-local or global hash,
 * with an optional destructor cb (see _ecore_thread_data_free()). */
123 struct _Ecore_Thread_Data
129 struct _Ecore_Pthread_Worker
/* short_run mode: a single blocking function. */
134 Ecore_Thread_Cb func_blocking;
/* feedback_run mode: heavy worker + notify callback marshalled back
 * to the main loop, with its optional dedicated (direct) worker. */
138 Ecore_Thread_Cb func_heavy;
139 Ecore_Thread_Notify_Cb func_notify;
141 Ecore_Pthread_Worker *direct_worker;
/* message_run mode: main function + notify, again with a direct
 * worker. */
147 Ecore_Thread_Cb func_main;
148 Ecore_Thread_Notify_Cb func_notify;
151 Ecore_Pthread_Worker *direct_worker;
/* Common to all modes. */
160 Ecore_Thread_Cb func_cancel;
161 Ecore_Thread_Cb func_end;
162 #ifdef EFL_HAVE_THREADS
173 #ifdef EFL_HAVE_THREADS
/* State bitfields: which mode the worker runs in and its lifecycle
 * flags (reschedule = run again after completion, no_queue = has a
 * dedicated thread instead of the shared pool). */
177 Eina_Bool message_run : 1;
178 Eina_Bool feedback_run : 1;
180 Eina_Bool reschedule : 1;
181 Eina_Bool no_queue : 1;
184 #ifdef EFL_HAVE_THREADS
/* Payload handed to ecore_main_loop_thread_safe_call_async() so the
 * main loop knows which worker a notification belongs to. */
185 typedef struct _Ecore_Pthread_Notify Ecore_Pthread_Notify;
186 struct _Ecore_Pthread_Notify
188 Ecore_Pthread_Worker *work;
189 const void *user_data;
192 typedef void *(*Ecore_Thread_Sync_Cb)(void* data, Ecore_Thread *thread);
/* A message exchanged between a message_run worker and the main loop;
 * either an async callback or a sync callback whose result is piped
 * back (see _ecore_message_notify_handler()). */
194 typedef struct _Ecore_Pthread_Message Ecore_Pthread_Message;
195 struct _Ecore_Pthread_Message
198 Ecore_Thread_Cb async;
199 Ecore_Thread_Sync_Cb sync;
206 Eina_Bool callback : 1;
/* Module-wide pool state.  All of the job lists below are protected by
 * _ecore_pending_job_threads_mutex; the global data hash has its own
 * rwlock plus a mutex/condition pair for ecore_thread_global_data_wait. */
212 static int _ecore_thread_count_max = 0;
214 #ifdef EFL_HAVE_THREADS
216 static void _ecore_thread_handler(void *data);
/* Number of pool threads currently alive. */
218 static int _ecore_thread_count = 0;
/* Jobs currently executing / queued short jobs / queued feedback jobs. */
220 static Eina_List *_ecore_running_job = NULL;
221 static Eina_List *_ecore_pending_job_threads = NULL;
222 static Eina_List *_ecore_pending_job_threads_feedback = NULL;
223 static LK(_ecore_pending_job_threads_mutex);
225 static Eina_Hash *_ecore_thread_global_hash = NULL;
226 static LRWK(_ecore_thread_global_hash_lock);
227 static LK(_ecore_thread_global_hash_mutex);
228 static CD(_ecore_thread_global_hash_cond);
230 static Eina_Bool have_main_loop_thread = 0;
/* Recycled worker structs (see _ecore_thread_worker_new/_free). */
232 static Eina_Trash *_ecore_thread_worker_trash = NULL;
233 static int _ecore_thread_worker_count = 0;
235 static void *_ecore_thread_worker(void *);
236 static Ecore_Pthread_Worker *_ecore_thread_worker_new(void);
/* Return the thread id of the main loop thread, caching it per pid.
 * The pid comparison re-captures the id after a fork(), when the
 * cached value would belong to the parent process.
 * NOTE(review): elided dump — the braces and the main_loop_pid update
 * are missing from this view. */
238 static PH(get_main_loop_thread) (void)
240 static PH(main_loop_thread);
241 static pid_t main_loop_pid;
242 pid_t pid = getpid();
244 if (pid != main_loop_pid)
247 main_loop_thread = PHS();
248 have_main_loop_thread = 1;
251 return main_loop_thread;
/* Release a worker: destroy its locks, then either really free it
 * (when the trash cache already holds ~16 workers per allowed thread)
 * or park it in _ecore_thread_worker_trash for reuse.
 * NOTE(review): elided dump — the free() call and the branch braces
 * are missing from this view. */
255 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
257 LKD(worker->cancel_mutex);
261 if (_ecore_thread_worker_count > ((_ecore_thread_count_max + 1) * 16))
263 _ecore_thread_worker_count--;
268 eina_trash_push(&_ecore_thread_worker_trash, worker);
/* Hash destructor for Ecore_Thread_Data entries: run the user's
 * cleanup callback, then (presumably, in elided lines) free the
 * wrapper itself — TODO confirm. */
272 _ecore_thread_data_free(void *data)
274 Ecore_Thread_Data *d = data;
276 if (d->cb) d->cb(d->data);
/* Join helper scheduled on the main loop once a pool thread exits
 * (body elided from this view). */
281 _ecore_thread_join(PH(thread))
/* Final teardown of a finished/cancelled worker, run on the main loop:
 * invoke func_cancel when the job was cancelled, func_end otherwise,
 * then release the feedback direct worker, the thread-local hash and
 * the worker struct itself.
 * NOTE(review): elided dump — the cancel/end selection logic between
 * lines 292 and 297 is missing from this view. */
287 _ecore_thread_kill(Ecore_Pthread_Worker *work)
291 if (work->func_cancel)
292 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
297 work->func_end((void *)work->data, (Ecore_Thread *)work);
300 if (work->feedback_run)
302 if (work->u.feedback_run.direct_worker)
303 _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
306 eina_hash_free(work->hash);
307 _ecore_thread_worker_free(work);
/* Main-loop completion handler.  For feedback workers whose notify
 * events have not all been consumed yet (send != received), only mark
 * the worker for death; _ecore_notify_handler() performs the actual
 * kill once the last event arrives.  Otherwise kill immediately. */
311 _ecore_thread_handler(void *data)
313 Ecore_Pthread_Worker *work = data;
315 if (work->feedback_run)
317 if (work->u.feedback_run.send != work->u.feedback_run.received)
319 work->kill = EINA_TRUE;
324 _ecore_thread_kill(work);
/* Pipe drain callback for message_run workers that deliberately does
 * nothing with the bytes. */
329 _ecore_nothing_handler(void *data __UNUSED__, void *buffer __UNUSED__, unsigned int nbyte __UNUSED__)
/* Main-loop side of ecore_thread_feedback() for feedback workers:
 * account the received event, forward the payload to func_notify,
 * and perform the delayed kill once every sent event has been read.
 * NOTE(review): elided dump — the free(notify) for the heap-allocated
 * Ecore_Pthread_Notify is not visible here; presumably it sits in the
 * missing lines — TODO confirm. */
335 _ecore_notify_handler(void *data)
337 Ecore_Pthread_Notify *notify = data;
338 Ecore_Pthread_Worker *work = notify->work;
339 void *user_data = (void*) notify->user_data;
341 work->u.feedback_run.received++;
343 if (work->u.feedback_run.func_notify)
344 work->u.feedback_run.func_notify((void *)work->data, (Ecore_Thread *)work, user_data);
346 /* Force reading all notify event before killing the thread */
347 if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
349 _ecore_thread_kill(work);
/* Main-loop side of the message_run protocol.  Three visible paths:
 * plain notification (forward to func_notify), synchronous callback
 * (run u.sync, stamp code = INT_MAX as the reply sentinel and pipe the
 * message back to the worker — not deleted here), and asynchronous
 * callback (run u.async).  Ends with the same delayed-kill rule as
 * _ecore_notify_handler().
 * NOTE(review): elided dump — the branch structure and the use of the
 * 'delete' flag / free() calls are partially missing from this view. */
356 _ecore_message_notify_handler(void *data)
358 Ecore_Pthread_Notify *notify = data;
359 Ecore_Pthread_Worker *work = notify->work;
360 Ecore_Pthread_Message *user_data = (void *) notify->user_data;
361 Eina_Bool delete = EINA_TRUE;
363 work->u.message_run.from.received++;
365 if (!user_data->callback)
367 if (work->u.message_run.func_notify)
368 work->u.message_run.func_notify((void *) work->data, (Ecore_Thread *) work, (void *) user_data->data);
/* Sync callback: the result replaces the message data and the message
 * is piped back to the waiting worker thread. */
374 user_data->data = user_data->u.sync((void*) user_data->data, (Ecore_Thread *) work);
375 user_data->callback = EINA_FALSE;
376 user_data->code = INT_MAX;
377 ecore_pipe_write(work->u.message_run.send, &user_data, sizeof (Ecore_Pthread_Message *));
383 user_data->u.async((void*) user_data->data, (Ecore_Thread *) work);
392 /* Force reading all notify event before killing the thread */
393 if (work->kill && work->u.message_run.from.send == work->u.message_run.from.received)
395 _ecore_thread_kill(work);
/* Pool-thread body for one short (blocking) job: pop the head of the
 * pending queue under the mutex, move it to the running list, execute
 * func_blocking unless the job was cancelled, then either re-queue it
 * (reschedule) or hand completion back to the main loop via
 * _ecore_thread_handler().
 * NOTE(review): elided dump — the early-return after an empty queue
 * and the cancel branch braces are missing from this view. */
401 _ecore_short_job(PH(thread))
403 Ecore_Pthread_Worker *work;
406 LKL(_ecore_pending_job_threads_mutex);
408 if (!_ecore_pending_job_threads)
410 LKU(_ecore_pending_job_threads_mutex);
414 work = eina_list_data_get(_ecore_pending_job_threads);
415 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
416 _ecore_pending_job_threads);
417 _ecore_running_job = eina_list_append(_ecore_running_job, work);
418 LKU(_ecore_pending_job_threads_mutex);
/* Snapshot the cancel flag under its own mutex before running. */
420 LKL(work->cancel_mutex);
421 cancel = work->cancel;
422 LKU(work->cancel_mutex);
425 work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);
427 LKL(_ecore_pending_job_threads_mutex);
428 _ecore_running_job = eina_list_remove(_ecore_running_job, work);
429 LKU(_ecore_pending_job_threads_mutex);
431 if (work->reschedule)
433 work->reschedule = EINA_FALSE;
435 LKL(_ecore_pending_job_threads_mutex);
436 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
437 LKU(_ecore_pending_job_threads_mutex);
441 ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
/* Feedback-queue twin of _ecore_short_job(): identical pop/run/
 * reschedule/notify flow, but draws from
 * _ecore_pending_job_threads_feedback and runs func_heavy.
 * NOTE(review): elided dump — same missing braces/early-return as the
 * short-job variant. */
446 _ecore_feedback_job(PH(thread))
448 Ecore_Pthread_Worker *work;
451 LKL(_ecore_pending_job_threads_mutex);
453 if (!_ecore_pending_job_threads_feedback)
455 LKU(_ecore_pending_job_threads_mutex);
459 work = eina_list_data_get(_ecore_pending_job_threads_feedback);
460 _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
461 _ecore_pending_job_threads_feedback);
462 _ecore_running_job = eina_list_append(_ecore_running_job, work);
463 LKU(_ecore_pending_job_threads_mutex);
465 LKL(work->cancel_mutex);
466 cancel = work->cancel;
467 LKU(work->cancel_mutex);
470 work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
472 LKL(_ecore_pending_job_threads_mutex);
473 _ecore_running_job = eina_list_remove(_ecore_running_job, work);
474 LKU(_ecore_pending_job_threads_mutex);
476 if (work->reschedule)
478 work->reschedule = EINA_FALSE;
480 LKL(_ecore_pending_job_threads_mutex);
481 _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
482 LKU(_ecore_pending_job_threads_mutex);
486 ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
/* Thread entry point for a dedicated (no_queue / message_run) worker:
 * enable async cancellation (POSIX only), drop scheduling priority,
 * run the single job (func_main for message workers, func_heavy for
 * feedback workers), then hand completion and the join of this very
 * thread back to the main loop. */
491 _ecore_direct_worker(Ecore_Pthread_Worker *work)
493 #ifdef EFL_POSIX_THREADS
494 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
495 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
498 eina_sched_prio_drop();
501 if (work->message_run)
502 work->u.message_run.func_main((void *) work->data, (Ecore_Thread *) work);
504 work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
506 ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
/* The argument to the join call is elided in this dump; presumably the
 * worker's own thread id — TODO confirm. */
508 ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join,
/* Main loop of a shared pool thread: repeatedly drain the short and
 * feedback queues, linger briefly when both are empty to avoid
 * thrashing thread creation, and finally decrement the pool count and
 * schedule a join on the main loop before exiting.
 * NOTE(review): elided dump — the outer loop construct, the non-win32
 * sleep and the goto/restart labels are missing from this view. */
515 _ecore_thread_worker(void *data __UNUSED__)
517 #ifdef EFL_POSIX_THREADS
518 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
519 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
522 eina_sched_prio_drop();
525 _ecore_short_job(PHS());
526 _ecore_feedback_job(PHS());
528 /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
530 LKL(_ecore_pending_job_threads_mutex);
531 if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
533 LKU(_ecore_pending_job_threads_mutex);
536 LKU(_ecore_pending_job_threads_mutex);
538 /* Sleep a little to prevent premature death */
540 Sleep(1); /* around 50ms */
/* Last-chance check: if work arrived during the nap, go back to the
 * queues instead of dying. */
545 LKL(_ecore_pending_job_threads_mutex);
546 if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
548 LKU(_ecore_pending_job_threads_mutex);
551 _ecore_thread_count--;
553 ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join,
555 LKU(_ecore_pending_job_threads_mutex);
562 static Ecore_Pthread_Worker *
563 _ecore_thread_worker_new(void)
565 #ifdef EFL_HAVE_THREADS
566 Ecore_Pthread_Worker *result;
568 result = eina_trash_pop(&_ecore_thread_worker_trash);
572 result = calloc(1, sizeof(Ecore_Pthread_Worker));
573 _ecore_thread_worker_count++;
576 LKI(result->cancel_mutex);
578 CDI(result->cond, result->mutex);
582 return malloc(sizeof (Ecore_Pthread_Worker));
/* Module init, called from ecore_init(): size the pool to the CPU
 * count (minimum 1) and create the shared locks, rwlock and
 * condition used by the job queues and the global data hash. */
587 _ecore_thread_init(void)
589 _ecore_thread_count_max = eina_cpu_count();
590 if (_ecore_thread_count_max <= 0)
591 _ecore_thread_count_max = 1;
593 #ifdef EFL_HAVE_THREADS
594 LKI(_ecore_pending_job_threads_mutex);
595 LRWKI(_ecore_thread_global_hash_lock);
596 LKI(_ecore_thread_global_hash_mutex);
597 CDI(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex);
/* Module shutdown: cancel everything still queued (invoking the
 * func_cancel callbacks), ask running jobs to cancel, then poll up to
 * 20 x 50ms for the pool threads to drain before giving up with an
 * error.  Finally free the global hash, the worker trash cache and
 * the module locks.
 * NOTE(review): elided dump — the loop bodies, the 'test'/'iteration'
 * declarations and several free() calls are missing from this view. */
602 _ecore_thread_shutdown(void)
604 /* FIXME: If function are still running in the background, should we kill them ? */
605 #ifdef EFL_HAVE_THREADS
606 Ecore_Pthread_Worker *work;
611 LKL(_ecore_pending_job_threads_mutex);
613 EINA_LIST_FREE(_ecore_pending_job_threads, work)
615 if (work->func_cancel)
616 work->func_cancel((void *)work->data, (Ecore_Thread *) work);
620 EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
622 if (work->func_cancel)
623 work->func_cancel((void *)work->data, (Ecore_Thread *) work);
627 EINA_LIST_FOREACH(_ecore_running_job, l, work)
628 ecore_thread_cancel((Ecore_Thread*) work);
630 LKU(_ecore_pending_job_threads_mutex);
/* Wait (bounded) for live pool threads to exit. */
634 LKL(_ecore_pending_job_threads_mutex);
635 if (_ecore_thread_count > 0)
643 LKU(_ecore_pending_job_threads_mutex);
645 if (test) usleep(50000);
647 while (test == EINA_TRUE && iteration < 20);
649 if (iteration == 20 && _ecore_thread_count > 0)
651 ERR("%i of the child thread are still running after 1s. This can lead to a segv. Sorry.", _ecore_thread_count);
654 if (_ecore_thread_global_hash)
655 eina_hash_free(_ecore_thread_global_hash);
656 have_main_loop_thread = 0;
658 while ((work = eina_trash_pop(&_ecore_thread_worker_trash)))
663 LKD(_ecore_pending_job_threads_mutex);
664 LRWKD(_ecore_thread_global_hash_lock);
665 LKD(_ecore_thread_global_hash_mutex);
666 CDD(_ecore_thread_global_hash_cond);
/* Public API: queue a blocking job on the shared pool.  Appends the
 * worker to the short-job queue, spawns one more pool thread when the
 * pool is below _ecore_thread_count_max, and on thread-creation
 * failure falls back to removing the job and cancelling it (or, when
 * threads are unavailable entirely, to running the job synchronously
 * in the caller — see the do/while at the bottom).
 * NOTE(review): elided dump — several braces, the retry/'tried' logic
 * and the #else boundary are missing from this view. */
671 ecore_thread_run(Ecore_Thread_Cb func_blocking,
672 Ecore_Thread_Cb func_end,
673 Ecore_Thread_Cb func_cancel,
676 Ecore_Pthread_Worker *work;
677 Eina_Bool tried = EINA_FALSE;
678 #ifdef EFL_HAVE_THREADS
682 EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
684 if (!func_blocking) return NULL;
686 work = _ecore_thread_worker_new();
/* Allocation failure path: report cancellation with a NULL thread. */
690 func_cancel((void *)data, NULL);
694 work->u.short_run.func_blocking = func_blocking;
695 work->func_end = func_end;
696 work->func_cancel = func_cancel;
697 work->cancel = EINA_FALSE;
698 work->feedback_run = EINA_FALSE;
699 work->message_run = EINA_FALSE;
700 work->kill = EINA_FALSE;
701 work->reschedule = EINA_FALSE;
702 work->no_queue = EINA_FALSE;
705 #ifdef EFL_HAVE_THREADS
709 LKL(_ecore_pending_job_threads_mutex);
710 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
/* Pool already at capacity: an existing thread will pick the job up. */
712 if (_ecore_thread_count == _ecore_thread_count_max)
714 LKU(_ecore_pending_job_threads_mutex);
715 return (Ecore_Thread *)work;
718 LKU(_ecore_pending_job_threads_mutex);
720 /* One more thread could be created. */
723 LKL(_ecore_pending_job_threads_mutex);
726 if (PHC(thread, _ecore_thread_worker, NULL) == 0)
728 _ecore_thread_count++;
729 LKU(_ecore_pending_job_threads_mutex);
730 return (Ecore_Thread *)work;
734 _ecore_main_call_flush();
/* Thread creation failed and no pool thread exists to serve the job:
 * undo the queueing and cancel the work. */
739 if (_ecore_thread_count == 0)
741 _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
743 if (work->func_cancel)
744 work->func_cancel((void *) work->data, (Ecore_Thread *) work);
748 LKD(work->cancel_mutex);
752 LKU(_ecore_pending_job_threads_mutex);
754 eina_threads_shutdown();
756 return (Ecore_Thread *)work;
759 If no thread and as we don't want to break app that rely on this
760 facility, we will lock the interface until we are done.
763 /* Handle reschedule by forcing it here. That would mean locking the app,
764 * would be better with an idler, but really to complex for a case where
765 * thread should really exist.
767 work->reschedule = EINA_FALSE;
769 func_blocking((void *)data, (Ecore_Thread *)work);
770 if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *)work);
771 else func_cancel((void *)data, (Ecore_Thread *)work);
772 } while (work->reschedule == EINA_TRUE);
/* Public API: request cancellation of a thread job.  If the call comes
 * from the main loop and the job is still sitting in a pending queue,
 * it is removed and cancelled immediately (func_cancel runs, worker is
 * freed in elided lines).  Otherwise only the cancel flag is set and
 * destruction is deferred to the job's own completion path.
 * NOTE(review): elided dump — the early-return for already-cancelled
 * work, the feedback send/received guard's body and several braces are
 * missing from this view. */
781 ecore_thread_cancel(Ecore_Thread *thread)
783 #ifdef EFL_HAVE_THREADS
784 Ecore_Pthread_Worker *volatile work = (Ecore_Pthread_Worker *)thread;
790 LKL(work->cancel_mutex);
791 cancel = work->cancel;
792 LKU(work->cancel_mutex);
796 if (work->feedback_run)
/* A feedback worker with unread notify events cannot be torn down
 * synchronously. */
800 if (work->u.feedback_run.send != work->u.feedback_run.received)
804 LKL(_ecore_pending_job_threads_mutex);
806 if ((have_main_loop_thread) &&
807 (PHE(get_main_loop_thread(), PHS())))
809 if (!work->feedback_run)
810 EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
812 if ((void *)work == (void *)thread)
814 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
816 LKU(_ecore_pending_job_threads_mutex);
818 if (work->func_cancel)
819 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
/* Same removal dance for the feedback queue. */
826 EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
828 if ((void *)work == (void *)thread)
830 _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);
832 LKU(_ecore_pending_job_threads_mutex);
834 if (work->func_cancel)
835 work->func_cancel((void *)work->data, (Ecore_Thread *)work);
843 LKU(_ecore_pending_job_threads_mutex);
845 work = (Ecore_Pthread_Worker *)thread;
847 /* Delay the destruction */
849 LKL(work->cancel_mutex);
850 work->cancel = EINA_TRUE;
851 LKU(work->cancel_mutex);
/* Public API: report whether a job has been cancelled (EINA_TRUE for a
 * NULL handle).  Reads the cancel flag under its mutex.
 * NOTE(review): elided dump — the 'cancel' declaration, the return
 * statement and the #endif lines are missing from this view; the
 * original FIXME below is preserved verbatim. */
861 ecore_thread_check(Ecore_Thread *thread)
863 Ecore_Pthread_Worker *volatile worker = (Ecore_Pthread_Worker *) thread;
866 if (!worker) return EINA_TRUE;
867 #ifdef EFL_HAVE_THREADS
868 LKL(worker->cancel_mutex);
870 cancel = worker->cancel;
871 /* FIXME: there is an insane bug driving me nuts here. I don't know if
872 it's a race condition, some cache issue or some alien attack on our software.
873 But ecore_thread_check will only work correctly with a printf, all the volatile,
874 lock and even usleep don't help here... */
875 /* fprintf(stderr, "wc: %i\n", cancel); */
876 #ifdef EFL_HAVE_THREADS
877 LKU(worker->cancel_mutex);
/* Public API: run a heavy job that reports progress via
 * ecore_thread_feedback().  When try_no_queue is set, first attempt a
 * dedicated thread (_ecore_direct_worker) with its own direct_worker;
 * on failure fall back to the shared feedback queue, growing the pool
 * by one thread if allowed.  Without thread support, runs the job
 * synchronously on a stack-allocated worker (bottom of this block).
 * NOTE(review): elided dump — 'on_error' label, retry logic, several
 * braces and the #else boundary are missing from this view. */
883 ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
884 Ecore_Thread_Notify_Cb func_notify,
885 Ecore_Thread_Cb func_end,
886 Ecore_Thread_Cb func_cancel,
888 Eina_Bool try_no_queue)
890 #ifdef EFL_HAVE_THREADS
891 Ecore_Pthread_Worker *worker;
892 Eina_Bool tried = EINA_FALSE;
895 EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
897 if (!func_heavy) return NULL;
899 worker = _ecore_thread_worker_new();
900 if (!worker) goto on_error;
902 worker->u.feedback_run.func_heavy = func_heavy;
903 worker->u.feedback_run.func_notify = func_notify;
905 worker->func_cancel = func_cancel;
906 worker->func_end = func_end;
908 worker->cancel = EINA_FALSE;
909 worker->message_run = EINA_FALSE;
910 worker->feedback_run = EINA_TRUE;
911 worker->kill = EINA_FALSE;
912 worker->reschedule = EINA_FALSE;
915 worker->u.feedback_run.send = 0;
916 worker->u.feedback_run.received = 0;
918 worker->u.feedback_run.direct_worker = NULL;
/* try_no_queue path: dedicated thread, bypassing the shared pool. */
924 worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
925 worker->no_queue = EINA_TRUE;
930 if (PHC(t, _ecore_direct_worker, worker) == 0)
931 return (Ecore_Thread *)worker;
934 _ecore_main_call_flush();
/* Dedicated thread failed: release the direct worker and fall back to
 * the queued path below. */
939 if (worker->u.feedback_run.direct_worker)
941 _ecore_thread_worker_free(worker->u.feedback_run.direct_worker);
942 worker->u.feedback_run.direct_worker = NULL;
945 eina_threads_shutdown();
948 worker->no_queue = EINA_FALSE;
950 LKL(_ecore_pending_job_threads_mutex);
951 _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
953 if (_ecore_thread_count == _ecore_thread_count_max)
955 LKU(_ecore_pending_job_threads_mutex);
956 return (Ecore_Thread *)worker;
959 LKU(_ecore_pending_job_threads_mutex);
961 /* One more thread could be created. */
964 LKL(_ecore_pending_job_threads_mutex);
966 if (PHC(thread, _ecore_thread_worker, NULL) == 0)
968 _ecore_thread_count++;
969 LKU(_ecore_pending_job_threads_mutex);
970 return (Ecore_Thread *)worker;
974 _ecore_main_call_flush();
978 LKU(_ecore_pending_job_threads_mutex);
980 eina_threads_shutdown();
/* No pool thread at all: undo the queueing and cancel. */
983 LKL(_ecore_pending_job_threads_mutex);
984 if (_ecore_thread_count == 0)
986 _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
989 if (func_cancel) func_cancel((void *)data, NULL);
999 LKU(_ecore_pending_job_threads_mutex);
1001 return (Ecore_Thread *)worker;
/* Thread-less fallback: synchronous execution on a stack worker. */
1003 Ecore_Pthread_Worker worker;
1008 If no thread and as we don't want to break app that rely on this
1009 facility, we will lock the interface until we are done.
1011 worker.u.feedback_run.func_heavy = func_heavy;
1012 worker.u.feedback_run.func_notify = func_notify;
1013 worker.u.feedback_run.send = 0;
1014 worker.u.feedback_run.received = 0;
1015 worker.func_cancel = func_cancel;
1016 worker.func_end = func_end;
1018 worker.cancel = EINA_FALSE;
1019 worker.feedback_run = EINA_TRUE;
1020 worker.message_run = EINA_FALSE;
1021 worker.kill = EINA_FALSE;
1024 worker.reschedule = EINA_FALSE;
1026 func_heavy((void *)data, (Ecore_Thread *)&worker);
1028 if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *)&worker);
1029 else func_end((void *)data, (Ecore_Thread *)&worker);
1030 } while (worker.reschedule == EINA_TRUE);
1037 ecore_thread_feedback(Ecore_Thread *thread,
1040 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1042 if (!worker) return EINA_FALSE;
1044 #ifdef EFL_HAVE_THREADS
1045 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1047 if (worker->feedback_run)
1049 Ecore_Pthread_Notify *notify;
1051 notify = malloc(sizeof (Ecore_Pthread_Notify));
1052 if (!notify) return EINA_FALSE;
1054 notify->user_data = data;
1055 notify->work = worker;
1056 worker->u.feedback_run.send++;
1058 ecore_main_loop_thread_safe_call_async(_ecore_notify_handler, notify);
1060 else if (worker->message_run)
1062 Ecore_Pthread_Message *msg;
1063 Ecore_Pthread_Notify *notify;
1065 msg = malloc(sizeof (Ecore_Pthread_Message*));
1066 if (!msg) return EINA_FALSE;
1068 msg->callback = EINA_FALSE;
1069 msg->sync = EINA_FALSE;
1071 notify = malloc(sizeof (Ecore_Pthread_Notify));
1077 notify->work = worker;
1078 notify->user_data = msg;
1080 worker->u.message_run.from.send++;
1081 ecore_main_loop_thread_safe_call_async(_ecore_message_notify_handler, notify);
1088 worker->u.feedback_run.func_notify((void *)worker->data, thread, (void *)data);
/* Public API: spawn a dedicated message-exchange worker.  Sets up the
 * message_run union (func_main, func_notify, a frozen reply pipe and
 * send/received counters), then launches _ecore_direct_worker on its
 * own thread.  On failure, tears down the direct worker and the pipe.
 * Without thread support this API cannot work at all — it warns and
 * cancels immediately.
 * NOTE(review): elided dump — the error-path ordering (lines between
 * 1138 and 1142) and several braces are missing from this view. */
1096 ecore_thread_message_run(Ecore_Thread_Cb func_main,
1097 Ecore_Thread_Notify_Cb func_notify,
1098 Ecore_Thread_Cb func_end,
1099 Ecore_Thread_Cb func_cancel,
1102 #ifdef EFL_HAVE_THREADS
1103 Ecore_Pthread_Worker *worker;
1106 if (!func_main) return NULL;
1108 worker = _ecore_thread_worker_new();
1109 if (!worker) return NULL;
1111 worker->u.message_run.func_main = func_main;
1112 worker->u.message_run.func_notify = func_notify;
/* NOTE(review): _ecore_thread_worker_new() and ecore_pipe_add() return
 * values are not visibly checked before use below — the checks may be
 * in elided lines; verify against the full file. */
1113 worker->u.message_run.direct_worker = _ecore_thread_worker_new();
1114 worker->u.message_run.send = ecore_pipe_add(_ecore_nothing_handler, worker);
1115 worker->u.message_run.from.send = 0;
1116 worker->u.message_run.from.received = 0;
1117 worker->u.message_run.to.send = 0;
1118 worker->u.message_run.to.received = 0;
1120 ecore_pipe_freeze(worker->u.message_run.send);
1122 worker->func_cancel = func_cancel;
1123 worker->func_end = func_end;
1124 worker->hash = NULL;
1125 worker->data = data;
1127 worker->cancel = EINA_FALSE;
1128 worker->message_run = EINA_TRUE;
1129 worker->feedback_run = EINA_FALSE;
1130 worker->kill = EINA_FALSE;
1131 worker->reschedule = EINA_FALSE;
1132 worker->no_queue = EINA_FALSE;
1135 eina_threads_init();
1137 if (PHC(t, _ecore_direct_worker, worker) == 0)
1138 return (Ecore_Thread*) worker;
/* Failure path: undo everything allocated above. */
1140 eina_threads_shutdown();
1142 if (worker->u.message_run.direct_worker) _ecore_thread_worker_free(worker->u.message_run.direct_worker);
1143 if (worker->u.message_run.send) ecore_pipe_del(worker->u.message_run.send);
1148 /* Note: This type of thread can't and never will work without thread support */
1149 WRN("ecore_thread_message_run called, but threads disable in Ecore, things will go wrong. Starting now !");
1150 # warning "You disabled threads support in ecore, I hope you know what you are doing !"
1153 func_cancel((void *) data, NULL);
/* Public API: mark the current job to be re-queued after it finishes.
 * Only callable from the job's own thread (PHE self-check). */
1160 ecore_thread_reschedule(Ecore_Thread *thread)
1162 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1164 if (!worker) return EINA_FALSE;
1166 #ifdef EFL_HAVE_THREADS
1167 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1170 worker->reschedule = EINA_TRUE;
/* Public counters: active pool threads and pending queue lengths.
 * All are main-loop-only (EINA_MAIN_LOOP_CHECK_RETURN_VAL) and take
 * the queue mutex around the list walks. */
1175 ecore_thread_active_get(void)
1177 #ifdef EFL_HAVE_THREADS
1178 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1179 return _ecore_thread_count;
/* Number of queued short jobs. */
1186 ecore_thread_pending_get(void)
1188 #ifdef EFL_HAVE_THREADS
1191 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1192 LKL(_ecore_pending_job_threads_mutex);
1193 ret = eina_list_count(_ecore_pending_job_threads);
1194 LKU(_ecore_pending_job_threads_mutex);
/* Number of queued feedback jobs. */
1202 ecore_thread_pending_feedback_get(void)
1204 #ifdef EFL_HAVE_THREADS
1207 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1208 LKL(_ecore_pending_job_threads_mutex);
1209 ret = eina_list_count(_ecore_pending_job_threads_feedback);
1210 LKU(_ecore_pending_job_threads_mutex);
/* Sum of both queues. */
1218 ecore_thread_pending_total_get(void)
1220 #ifdef EFL_HAVE_THREADS
1223 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1224 LKL(_ecore_pending_job_threads_mutex);
1225 ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1226 LKU(_ecore_pending_job_threads_mutex);
/* Pool-size configuration: get/set/reset the maximum number of pool
 * threads, clamped to 16 x CPU count; 'available' is max minus the
 * currently running count. */
1234 ecore_thread_max_get(void)
1236 EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1237 return _ecore_thread_count_max;
1241 ecore_thread_max_set(int num)
1243 EINA_MAIN_LOOP_CHECK_RETURN;
1244 if (num < 1) return;
1245 /* avoid doing something hilarious by blocking dumb users */
1246 if (num > (16 * eina_cpu_count())) num = 16 * eina_cpu_count();
1248 _ecore_thread_count_max = num;
1252 ecore_thread_max_reset(void)
1254 EINA_MAIN_LOOP_CHECK_RETURN;
1255 _ecore_thread_count_max = eina_cpu_count();
/* Remaining pool capacity (may be read off the main loop; guarded by
 * the queue mutex). */
1259 ecore_thread_available_get(void)
1261 #ifdef EFL_HAVE_THREADS
1264 LKL(_ecore_pending_job_threads_mutex);
1265 ret = _ecore_thread_count_max - _ecore_thread_count;
1266 LKU(_ecore_pending_job_threads_mutex);
/* Per-thread key/value storage attached to a worker's hash.  All four
 * entry points require being called from the worker's own thread
 * (PHE self-check) and lazily create the hash on first add/set.
 * NOTE(review): elided dump — the Ecore_Thread_Data field assignments,
 * returns and several braces are missing from this view. */
1274 ecore_thread_local_data_add(Ecore_Thread *thread,
1280 #ifdef EFL_HAVE_THREADS
1281 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1282 Ecore_Thread_Data *d;
1286 if ((!thread) || (!key) || (!value))
1288 #ifdef EFL_HAVE_THREADS
1289 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1292 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1297 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
/* 'direct' presumably selects direct vs copied key storage — TODO
 * confirm against the full signature. */
1304 ret = eina_hash_direct_add(worker->hash, key, d);
1306 ret = eina_hash_add(worker->hash, key, d);
/* Set: replace any existing entry, returning the previous value. */
1317 ecore_thread_local_data_set(Ecore_Thread *thread,
1322 #ifdef EFL_HAVE_THREADS
1323 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1324 Ecore_Thread_Data *d, *r;
1328 if ((!thread) || (!key) || (!value))
1330 #ifdef EFL_HAVE_THREADS
1331 if (!PHE(worker->self, PHS())) return NULL;
1334 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1339 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1345 r = eina_hash_set(worker->hash, key, d);
/* Find: plain lookup in the worker's hash. */
1357 ecore_thread_local_data_find(Ecore_Thread *thread,
1360 #ifdef EFL_HAVE_THREADS
1361 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1362 Ecore_Thread_Data *d;
1365 if ((!thread) || (!key))
1367 #ifdef EFL_HAVE_THREADS
1368 if (!PHE(worker->self, PHS())) return NULL;
1373 d = eina_hash_find(worker->hash, key);
/* Del: remove by key; the hash destructor runs the entry's cb. */
1383 ecore_thread_local_data_del(Ecore_Thread *thread,
1386 #ifdef EFL_HAVE_THREADS
1387 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1390 if ((!thread) || (!key))
1392 #ifdef EFL_HAVE_THREADS
1393 if (!PHE(worker->self, PHS())) return EINA_FALSE;
1397 return eina_hash_del_by_key(worker->hash, key);
/* Process-wide key/value storage shared by all Ecore threads,
 * protected by _ecore_thread_global_hash_lock (rwlock).  Writers
 * broadcast _ecore_thread_global_hash_cond so that
 * ecore_thread_global_data_wait() wakes up.
 * NOTE(review): elided dump — field assignments, returns and braces
 * are missing from this view. */
1404 ecore_thread_global_data_add(const char *key,
1409 #ifdef EFL_HAVE_THREADS
1410 Ecore_Thread_Data *d;
1414 if ((!key) || (!value))
1416 #ifdef EFL_HAVE_THREADS
/* Lazily create the hash under the write lock. */
1417 LRWKWL(_ecore_thread_global_hash_lock);
1418 if (!_ecore_thread_global_hash)
1419 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1420 LRWKU(_ecore_thread_global_hash_lock);
1422 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1428 if (!_ecore_thread_global_hash)
1430 LRWKWL(_ecore_thread_global_hash_lock);
1432 ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1434 ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1435 LRWKU(_ecore_thread_global_hash_lock);
/* Wake any ecore_thread_global_data_wait() callers. */
1436 CDB(_ecore_thread_global_hash_cond);
/* Set: replace an existing entry, returning the previous value. */
1446 ecore_thread_global_data_set(const char *key,
1450 #ifdef EFL_HAVE_THREADS
1451 Ecore_Thread_Data *d, *r;
1455 if ((!key) || (!value))
1457 #ifdef EFL_HAVE_THREADS
1458 LRWKWL(_ecore_thread_global_hash_lock);
1459 if (!_ecore_thread_global_hash)
1460 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1461 LRWKU(_ecore_thread_global_hash_lock);
1463 if (!_ecore_thread_global_hash)
1466 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1472 LRWKWL(_ecore_thread_global_hash_lock);
1473 r = eina_hash_set(_ecore_thread_global_hash, key, d);
1474 LRWKU(_ecore_thread_global_hash_lock);
1475 CDB(_ecore_thread_global_hash_cond);
/* Find: lookup under the read lock. */
1487 ecore_thread_global_data_find(const char *key)
1489 #ifdef EFL_HAVE_THREADS
1490 Ecore_Thread_Data *ret;
1495 #ifdef EFL_HAVE_THREADS
1496 if (!_ecore_thread_global_hash) return NULL;
1498 LRWKRL(_ecore_thread_global_hash_lock);
1499 ret = eina_hash_find(_ecore_thread_global_hash, key);
1500 LRWKU(_ecore_thread_global_hash_lock);
/* Del: remove under the write lock; destructor runs the entry's cb. */
1510 ecore_thread_global_data_del(const char *key)
1512 #ifdef EFL_HAVE_THREADS
1518 #ifdef EFL_HAVE_THREADS
1519 if (!_ecore_thread_global_hash)
1522 LRWKWL(_ecore_thread_global_hash_lock);
1523 ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1524 LRWKU(_ecore_thread_global_hash_lock);
1532 ecore_thread_global_data_wait(const char *key,
1535 #ifdef EFL_HAVE_THREADS
1537 Ecore_Thread_Data *ret = NULL;
1542 #ifdef EFL_HAVE_THREADS
1543 if (!_ecore_thread_global_hash)
1546 tm = ecore_time_get() + seconds;
1550 LRWKRL(_ecore_thread_global_hash_lock);
1551 ret = eina_hash_find(_ecore_thread_global_hash, key);
1552 LRWKU(_ecore_thread_global_hash_lock);
1553 if ((ret) || (!seconds) || ((seconds > 0) && (tm <= ecore_time_get())))
1555 LKL(_ecore_thread_global_hash_mutex);
1556 CDW(_ecore_thread_global_hash_cond, tm);
1557 LKU(_ecore_thread_global_hash_mutex);
1559 if (ret) return ret->data;