9 #ifdef EFL_HAVE_PTHREAD
13 # include <sys/time.h>
14 # include <sys/resource.h>
16 # include <sys/syscall.h>
22 #include "ecore_private.h"
24 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
25 typedef struct _Ecore_Pthread Ecore_Pthread;
26 typedef struct _Ecore_Thread_Data Ecore_Thread_Data;
/* One key/value entry of a thread-local or global data hash: the user
 * value plus its optional free callback (see _ecore_thread_data_free). */
28 struct _Ecore_Thread_Data
/* Work item shared by "short" (blocking) and "feedback" (heavy) jobs.
 * NOTE(review): only part of the field list is visible in this chunk. */
34 struct _Ecore_Pthread_Worker
38 Ecore_Thread_Heavy_Cb func_blocking;
41 Ecore_Thread_Heavy_Cb func_heavy;
42 Ecore_Thread_Notify_Cb func_notify;
52 #ifdef EFL_HAVE_PTHREAD
56 pthread_mutex_t mutex;
62 Eina_Bool feedback_run : 1;
66 #ifdef EFL_HAVE_PTHREAD
67 typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;
/* Per-worker-thread bookkeeping: result pipe back to the main loop plus
 * the pthread handle (fields only partially visible here). */
69 struct _Ecore_Pthread_Data
/* Pool size cap; set from eina_cpu_count() in _ecore_thread_init(). */
77 static int _ecore_thread_count_max = 0;
/* Ecore event type used to defer pipe destruction until the main loop
 * has left the pipe's internal dispatch (see _ecore_thread_pipe_del). */
78 static int ECORE_THREAD_PIPE_DEL = 0;
80 #ifdef EFL_HAVE_PTHREAD
/* Number of pool worker threads currently alive; guarded by
 * _ecore_pending_job_threads_mutex. */
81 static int _ecore_thread_count = 0;
83 static Eina_List *_ecore_active_job_threads = NULL;
/* Queues of not-yet-started short jobs and feedback jobs, both
 * protected by _ecore_pending_job_threads_mutex. */
84 static Eina_List *_ecore_pending_job_threads = NULL;
85 static Eina_List *_ecore_pending_job_threads_feedback = NULL;
86 static Ecore_Event_Handler *del_handler = NULL;
87 static pthread_mutex_t _ecore_pending_job_threads_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Process-wide shared data hash: readers/writers take the rwlock; the
 * mutex + cond pair services ecore_thread_global_data_wait(). */
89 static Eina_Hash *_ecore_thread_global_hash = NULL;
90 static pthread_rwlock_t _ecore_thread_global_hash_lock = PTHREAD_RWLOCK_INITIALIZER;
91 static pthread_mutex_t _ecore_thread_global_hash_mutex = PTHREAD_MUTEX_INITIALIZER;
92 static pthread_cond_t _ecore_thread_global_hash_cond = PTHREAD_COND_INITIALIZER;
/* Identity of the thread running the ecore main loop; ecore_thread_cancel()
 * uses it to decide whether the synchronous dequeue path is allowed. */
93 static pthread_t main_loop_thread;
94 static Eina_Bool have_main_loop_thread = 0;
/* Eina_Hash free callback for Ecore_Thread_Data entries: runs the
 * user-supplied free callback on the stored value, if one was given. */
97 _ecore_thread_data_free(void *data)
99 Ecore_Thread_Data *d = data;
101 if (d->cb) d->cb(d->data);
/* ECORE_THREAD_PIPE_DEL event free callback: destroys the worker's
 * result pipe once the main loop is done with it, then drops the eina
 * threads reference taken when the worker was spawned. */
106 _ecore_thread_pipe_free(void *data __UNUSED__, void *event)
108 Ecore_Pipe *p = event;
111 eina_threads_shutdown();
/* One-shot handler for the ECORE_THREAD_PIPE_DEL event; the actual pipe
 * teardown happens in the event's free callback (_ecore_thread_pipe_free). */
115 _ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
117 /* This is a hack to delay pipe destruction until we are out of its internal loop. */
118 return ECORE_CALLBACK_CANCEL;
/* Main-loop cleanup for a finished worker thread: join it, remove it
 * from the active list, and schedule its pipe for deferred deletion via
 * the ECORE_THREAD_PIPE_DEL event. */
122 _ecore_thread_end(Ecore_Pthread_Data *pth)
126 if (pthread_join(pth->thread, (void **) &p) != 0)
129 _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);
131 ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
/* Finalize a worker in the main loop: invoke func_cancel (on
 * cancellation) or func_end (on completion), then release the worker's
 * pipe, sync primitives, and local-data hash. */
136 _ecore_thread_kill(Ecore_Pthread_Worker *work)
140 if (work->func_cancel)
141 work->func_cancel((void *) work->data);
146 work->func_end((void *) work->data);
/* Feedback workers own a notify pipe that dies with them. */
149 if (work->feedback_run)
150 ecore_pipe_del(work->u.feedback_run.notify);
151 pthread_cond_destroy(&work->cond);
152 pthread_mutex_destroy(&work->mutex);
154 eina_hash_free(work->hash);
/* Pipe handler run in the main loop when a worker reports a finished
 * job; the pipe payload is a pointer to the Ecore_Pthread_Worker. */
159 _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
161 Ecore_Pthread_Worker *work;
163 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
165 work = *(Ecore_Pthread_Worker **)buffer;
/* A feedback job with notifications still in flight is only flagged for
 * destruction; _ecore_notify_handler kills it once the queue drains. */
167 if (work->feedback_run)
169 if (work->u.feedback_run.send != work->u.feedback_run.received)
171 work->kill = EINA_TRUE;
176 _ecore_thread_kill(work);
/* Pipe handler for feedback notifications: delivers one message sent by
 * the worker thread to func_notify in the main loop. */
180 _ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
182 Ecore_Pthread_Worker *work = data;
185 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
187 user_data = *(void **)buffer;
188 work->u.feedback_run.received++;
190 if (work->u.feedback_run.func_notify)
191 work->u.feedback_run.func_notify((Ecore_Thread *) work, user_data, (void *) work->data);
193 /* Force reading all notify event before killing the thread */
194 if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
196 _ecore_thread_kill(work);
/* Worker-side drain loop for the short-job queue: pop one pending job
 * under the mutex, run its blocking function with the lock released,
 * then report completion to the main loop through end_pipe. */
201 _ecore_short_job(Ecore_Pipe *end_pipe)
203 Ecore_Pthread_Worker *work;
205 while (_ecore_pending_job_threads)
207 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Re-check under the lock: another worker may have emptied the queue
 * between the unlocked test above and acquiring the mutex. */
209 if (!_ecore_pending_job_threads)
211 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
215 work = eina_list_data_get(_ecore_pending_job_threads);
216 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
217 _ecore_pending_job_threads);
219 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
222 work->u.short_run.func_blocking((Ecore_Thread*) work, (void *) work->data);
224 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Worker-side drain loop for the feedback-job queue; mirrors
 * _ecore_short_job but runs func_heavy for each dequeued job. */
229 _ecore_feedback_job(Ecore_Pipe *end_pipe, pthread_t thread)
231 Ecore_Pthread_Worker *work;
233 while (_ecore_pending_job_threads_feedback)
235 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Re-check under the lock; the queue may have been drained already. */
237 if (!_ecore_pending_job_threads_feedback)
239 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
243 work = eina_list_data_get(_ecore_pending_job_threads_feedback);
244 _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
245 _ecore_pending_job_threads_feedback);
247 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
251 work->u.feedback_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
253 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Thread entry point for a dedicated (out-of-pool) feedback worker,
 * used when ecore_thread_feedback_run() bypasses the queue.  Runs
 * func_heavy once, then posts a synthetic work item whose func_end is
 * _ecore_thread_end so the main loop joins and cleans up this thread. */
258 _ecore_direct_worker(Ecore_Pthread_Worker *work)
260 Ecore_Pthread_Data *pth;
262 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
263 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
264 eina_sched_prio_drop();
266 pth = malloc(sizeof (Ecore_Pthread_Data));
267 if (!pth) return NULL;
269 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
275 pth->thread = pthread_self();
277 work->self = pth->thread;
278 work->u.feedback_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
/* First write: tell the main loop the user's job has finished. */
280 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Build the replacement work item that asks the main loop to join us. */
282 work = malloc(sizeof (Ecore_Pthread_Worker));
285 ecore_pipe_del(pth->p);
291 work->u.short_run.func_blocking = NULL;
292 work->func_end = (void *) _ecore_thread_end;
293 work->func_cancel = NULL;
294 work->cancel = EINA_FALSE;
295 work->feedback_run = EINA_FALSE;
296 work->kill = EINA_FALSE;
298 pthread_cond_init(&work->cond, NULL);
299 pthread_mutex_init(&work->mutex, NULL);
301 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Pool worker thread entry point: registers itself in the live-thread
 * count, alternately drains the short and feedback queues, and on exit
 * decrements the count and posts a synthetic work item whose func_end is
 * _ecore_thread_end so the main loop can join this thread. */
307 _ecore_thread_worker(Ecore_Pthread_Data *pth)
309 Ecore_Pthread_Worker *work;
311 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
312 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
313 eina_sched_prio_drop();
315 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
316 _ecore_thread_count++;
317 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
320 if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
321 if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread);
323 /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
/* If new work arrived while we were finishing, loop back instead of
 * exiting (the loop construct itself is on lines not shown here). */
325 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
326 if (_ecore_pending_job_threads)
328 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
331 if (_ecore_pending_job_threads_feedback)
333 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
336 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
338 /* Sleep a little to prevent premature death */
/* Final check after the grace period before the thread commits to dying. */
341 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
342 if (_ecore_pending_job_threads)
344 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
347 if (_ecore_pending_job_threads_feedback)
349 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
352 _ecore_thread_count--;
353 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
355 work = malloc(sizeof (Ecore_Pthread_Worker));
356 if (!work) return NULL;
359 work->u.short_run.func_blocking = NULL;
360 work->func_end = (void *) _ecore_thread_end;
361 work->func_cancel = NULL;
362 work->cancel = EINA_FALSE;
363 work->feedback_run = EINA_FALSE;
364 work->kill = EINA_FALSE;
366 pthread_cond_init(&work->cond, NULL);
367 pthread_mutex_init(&work->mutex, NULL);
369 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Module init: size the pool from the CPU count (min 1), register the
 * deferred pipe-deletion event, and remember which thread owns the main
 * loop so cancel can detect the main-loop path. */
377 _ecore_thread_init(void)
379 _ecore_thread_count_max = eina_cpu_count();
380 if (_ecore_thread_count_max <= 0)
381 _ecore_thread_count_max = 1;
383 ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
384 #ifdef EFL_HAVE_PTHREAD
385 del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
386 main_loop_thread = pthread_self();
387 have_main_loop_thread = 1;
/* Module shutdown: notify func_cancel for every still-queued job, then
 * cancel + join the live workers and release global state. */
392 _ecore_thread_shutdown(void)
394 /* FIXME: If functions are still running in the background, should we kill them? */
395 #ifdef EFL_HAVE_PTHREAD
396 Ecore_Pthread_Worker *work;
397 Ecore_Pthread_Data *pth;
399 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Drain both pending queues, reporting cancellation to their owners. */
401 EINA_LIST_FREE(_ecore_pending_job_threads, work)
403 if (work->func_cancel)
404 work->func_cancel((void *)work->data);
408 EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
410 if (work->func_cancel)
411 work->func_cancel((void *)work->data);
415 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Forcefully stop the live workers and reclaim their pipes. */
417 EINA_LIST_FREE(_ecore_active_job_threads, pth)
421 pthread_cancel(pth->thread);
422 pthread_join(pth->thread, (void **) &p);
424 ecore_pipe_del(pth->p);
426 if (_ecore_thread_global_hash)
427 eina_hash_free(_ecore_thread_global_hash);
428 ecore_event_handler_del(del_handler);
429 have_main_loop_thread = 0;
435 * @addtogroup Ecore_Group Ecore - Main Loop and Job Functions.
441 * @addtogroup Ecore_Thread_Group Ecore Thread functions
443 * These functions allow for ecore-managed threads which integrate with ecore's main loop.
449 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
450 * @param func_blocking The function that should run in another thread.
451 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
452 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
453 * @param data User context data to pass to all callbacks.
454 * @return A reference to the newly created thread instance, or NULL if it failed.
456 * ecore_thread_run provides a facility for easily managing blocking tasks in a
457 * parallel thread. You should provide three functions. The first one, func_blocking,
458 * that will do the blocking work in another thread (so you should not use the
459 * EFL in it except Eina if you are careful). The second one, func_end,
460 * that will be called in Ecore main loop when func_blocking is done. So you
461 * can use all the EFL inside this function. The last one, func_cancel, will
462 * be called in the main loop if the thread is cancelled or could not run at all.
464 * Be aware, that you can't make assumptions on the result order of func_end
465 * after many calls to ecore_thread_run, as we start as many threads as the
466 * host CPU can handle.
469 ecore_thread_run(Ecore_Thread_Heavy_Cb func_blocking,
471 Ecore_Cb func_cancel,
474 Ecore_Pthread_Worker *work;
475 #ifdef EFL_HAVE_PTHREAD
476 Ecore_Pthread_Data *pth = NULL;
479 if (!func_blocking) return NULL;
481 work = malloc(sizeof (Ecore_Pthread_Worker));
/* Allocation failure: report cancellation to the caller immediately. */
485 func_cancel((void *) data);
489 work->u.short_run.func_blocking = func_blocking;
490 work->func_end = func_end;
491 work->func_cancel = func_cancel;
492 work->cancel = EINA_FALSE;
493 work->feedback_run = EINA_FALSE;
494 work->kill = EINA_FALSE;
497 #ifdef EFL_HAVE_PTHREAD
499 pthread_cond_init(&work->cond, NULL);
500 pthread_mutex_init(&work->mutex, NULL);
502 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
503 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
/* Pool already at capacity: an existing worker will pick the job up. */
505 if (_ecore_thread_count == _ecore_thread_count_max)
507 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
508 return (Ecore_Thread *) work;
511 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
513 /* One more thread could be created. */
514 pth = malloc(sizeof (Ecore_Pthread_Data));
515 if (!pth) goto on_error;
517 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
518 if (!pth->p) goto on_error;
522 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
523 return (Ecore_Thread *) work;
525 eina_threads_shutdown();
/* Error path: undo whatever partial setup succeeded. */
530 if (pth->p) ecore_pipe_del(pth->p);
/* If no worker exists at all the queued job can never run: dequeue it
 * and notify func_cancel.  Otherwise leave it queued for the pool. */
535 if (_ecore_thread_count == 0)
537 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
538 _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
539 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
541 if (work->func_cancel)
542 work->func_cancel((void *) work->data);
546 return (Ecore_Thread *) work;
/* Non-pthread build: run everything synchronously in the main loop. */
549 If no thread and as we don't want to break apps that rely on this
550 facility, we will lock the interface until we are done.
552 func_blocking((Ecore_Thread *) work, (void *)data);
553 func_end((void *)data);
560 * @brief Cancel a running thread.
561 * @param thread The thread to cancel.
562 * @return Will return EINA_TRUE if the thread has been cancelled,
563 * EINA_FALSE if it is pending.
565 * ecore_thread_cancel gives the possibility to cancel a task still running. It
566 * will return EINA_FALSE, if the destruction is delayed or EINA_TRUE if it is
567 * cancelled after this call.
569 * This function works in the main loop and in the thread, but you should not pass
570 * the Ecore_Thread variable from main loop to the worker thread in any structure.
571 * You should always use the one passed to the Ecore_Thread_Heavy_Cb.
573 * func_end, func_cancel will destroy the handler, so don't use it after.
574 * And if ecore_thread_cancel returns EINA_TRUE, you should not use Ecore_Thread also.
577 ecore_thread_cancel(Ecore_Thread *thread)
579 #ifdef EFL_HAVE_PTHREAD
580 Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
/* A feedback job with notifications still in flight cannot be removed
 * synchronously; it must be drained first. */
588 if (work->feedback_run)
592 if (work->u.feedback_run.send != work->u.feedback_run.received)
596 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Only the main-loop thread may walk the pending queues and cancel a
 * job that has not started running yet. */
598 if ((have_main_loop_thread) &&
599 (pthread_equal(main_loop_thread, pthread_self())))
601 if (!work->feedback_run)
602 EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
604 if ((void *) work == (void *) thread)
606 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
608 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
610 if (work->func_cancel)
611 work->func_cancel((void *) work->data);
618 EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
620 if ((void *) work == (void *) thread)
622 _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);
624 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
626 if (work->func_cancel)
627 work->func_cancel((void *) work->data);
635 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
637 /* Delay the destruction */
/* Job already running (or caller not the main loop): just set the flag
 * for the worker to observe via ecore_thread_check(). */
639 ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
647 * @brief Tell if a thread was cancelled or not.
648 * @param thread The thread to test.
649 * @return EINA_TRUE if the thread is cancelled,
650 * EINA_FALSE if it is not.
652 * You can use this function in main loop and in the thread.
655 ecore_thread_check(Ecore_Thread *thread)
657 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
/* A NULL handle is treated as "already cancelled". */
659 if (!worker) return EINA_TRUE;
660 return worker->cancel;
664 * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
665 * @param func_heavy The function that should run in another thread.
666 * @param func_notify The function that will receive the data sent by func_heavy in the main loop.
667 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
668 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
669 * @param data User context data to pass to all callbacks.
670 * @param try_no_queue If you want to run outside of the thread pool.
671 * @return A reference to the newly created thread instance, or NULL if it failed.
673 * ecore_thread_feedback_run provides a facility for easily managing heavy tasks in a
674 * parallel thread. You should provide four functions. The first one, func_heavy,
675 * that will do the heavy work in another thread (so you should not use the
676 * EFL in it except Eina and Eet if you are careful). The second one, func_notify,
677 * will receive the data sent from the thread function (func_heavy) by ecore_thread_notify
678 * in the main loop (and so, can use all the EFL). The third, func_end,
679 * that will be called in Ecore main loop when func_heavy is done. So you
680 * can use all the EFL inside this function. The last one, func_cancel, will
681 * be called in the main loop also, if the thread is cancelled or could not run at all.
683 * Be aware, that you can't make assumptions on the result order of func_end
684 * after many calls to ecore_feedback_run, as we start as many threads as the
685 * host CPU can handle.
687 * If you set try_no_queue, it will try to run outside of the thread pool, this can bring
688 * the CPU down, so be careful with that. Of course if it can't start a new thread, it will
689 * try to use one from the pool.
691 EAPI Ecore_Thread *ecore_thread_feedback_run(Ecore_Thread_Heavy_Cb func_heavy,
692 Ecore_Thread_Notify_Cb func_notify,
694 Ecore_Cb func_cancel,
696 Eina_Bool try_no_queue)
699 #ifdef EFL_HAVE_PTHREAD
700 Ecore_Pthread_Worker *worker;
701 Ecore_Pthread_Data *pth = NULL;
703 if (!func_heavy) return NULL;
705 worker = malloc(sizeof (Ecore_Pthread_Worker));
706 if (!worker) goto on_error;
708 worker->u.feedback_run.func_heavy = func_heavy;
709 worker->u.feedback_run.func_notify = func_notify;
711 pthread_cond_init(&worker->cond, NULL);
712 pthread_mutex_init(&worker->mutex, NULL);
713 worker->func_cancel = func_cancel;
714 worker->func_end = func_end;
716 worker->cancel = EINA_FALSE;
717 worker->feedback_run = EINA_TRUE;
718 worker->kill = EINA_FALSE;
719 worker->u.feedback_run.send = 0;
720 worker->u.feedback_run.received = 0;
722 worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
/* Out-of-pool path: spawn a dedicated thread running _ecore_direct_worker
 * (the try_no_queue guard is on a line not visible in this chunk). */
728 if (pthread_create(&t, NULL, (void *) _ecore_direct_worker, worker) == 0)
729 return (Ecore_Thread *) worker;
/* Pool path: queue the job; mirror of the logic in ecore_thread_run. */
732 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
733 _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
735 if (_ecore_thread_count == _ecore_thread_count_max)
737 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
738 return (Ecore_Thread *) worker;
741 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
743 /* One more thread could be created. */
744 pth = malloc(sizeof (Ecore_Pthread_Data));
745 if (!pth) goto on_error;
747 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
748 if (!pth->p) goto on_error;
752 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
753 return (Ecore_Thread *) worker;
755 eina_threads_shutdown();
/* Error path: release partial setup and possibly notify cancellation. */
760 if (pth->p) ecore_pipe_del(pth->p);
764 if (_ecore_thread_count == 0)
766 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
767 _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
769 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
771 if (func_cancel) func_cancel((void *) data);
775 ecore_pipe_del(worker->u.feedback_run.notify);
781 return (Ecore_Thread *) worker;
/* Non-pthread build: run synchronously with a stack-allocated worker. */
783 Ecore_Pthread_Worker worker;
788 If no thread and as we don't want to break apps that rely on this
789 facility, we will lock the interface until we are done.
791 worker.u.feedback_run.func_heavy = func_heavy;
792 worker.u.feedback_run.func_notify = func_notify;
793 worker.u.feedback_run.notify = NULL;
794 worker.u.feedback_run.send = 0;
795 worker.u.feedback_run.received = 0;
796 worker.func_cancel = func_cancel;
797 worker.func_end = func_end;
799 worker.cancel = EINA_FALSE;
800 worker.feedback_run = EINA_TRUE;
801 worker.kill = EINA_FALSE;
803 func_heavy((Ecore_Thread *) &worker, (void *)data);
/* NOTE(review): this tests worker.func_cancel (whether a cancel callback
 * was supplied), so func_end is skipped whenever func_cancel is non-NULL.
 * Presumably the intent was to test worker.cancel — confirm upstream. */
805 if (worker.func_cancel) func_cancel((void *)data);
806 else func_end((void *)data);
813 * @brief Send data to main loop from worker thread.
814 * @param thread The current Ecore_Thread context to send data from
815 * @param data Data to be transmitted to the main loop
816 * @return EINA_TRUE if data was successfully send to main loop,
817 * EINA_FALSE if anything goes wrong.
819 * After a successful call, the data should be considered owned
822 * You should use this function only in the func_heavy call.
825 ecore_thread_feedback(Ecore_Thread *thread, const void *data)
827 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
829 if (!worker) return EINA_FALSE;
830 if (!worker->feedback_run) return EINA_FALSE;
832 #ifdef EFL_HAVE_PTHREAD
/* Only the worker thread itself may send feedback. */
833 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
/* Count the message before writing so the received/send accounting in
 * _ecore_thread_handler / _ecore_notify_handler stays consistent. */
835 worker->u.feedback_run.send++;
836 ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *));
/* Non-pthread build: deliver the notification synchronously. */
840 worker->u.feedback_run.func_notify(thread, (void*) data, (void*) worker->data);
847 * @brief Get number of active thread jobs
848 * @return Number of active threads running jobs
849 * This returns the number of threads currently running jobs through the
853 ecore_thread_active_get(void)
855 #ifdef EFL_HAVE_PTHREAD
/* Unlocked read of the live-worker counter; a snapshot is acceptable. */
856 return _ecore_thread_count;
863 * @brief Get number of pending (short) thread jobs
864 * @return Number of pending threads running "short" jobs
865 * This returns the number of threads currently running jobs through the
866 * ecore_thread_run api call.
869 ecore_thread_pending_get(void)
872 #ifdef EFL_HAVE_PTHREAD
/* Snapshot the short-job queue length under the shared mutex. */
873 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
874 ret = eina_list_count(_ecore_pending_job_threads);
875 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
883 * @brief Get number of pending feedback thread jobs
884 * @return Number of pending threads running "feedback" jobs
885 * This returns the number of threads currently running jobs through the
886 * ecore_thread_feedback_run api call.
889 ecore_thread_pending_feedback_get(void)
892 #ifdef EFL_HAVE_PTHREAD
/* Snapshot the feedback-job queue length under the shared mutex. */
893 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
894 ret = eina_list_count(_ecore_pending_job_threads_feedback);
895 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
903 * @brief Get number of pending thread jobs
904 * @return Number of pending threads running jobs
905 * This returns the number of threads currently running jobs through the
906 * ecore_thread_run and ecore_thread_feedback_run api calls combined.
909 ecore_thread_pending_total_get(void)
912 #ifdef EFL_HAVE_PTHREAD
/* Count both queues inside one critical section for a coherent total. */
913 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
914 ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
915 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
923 * @brief Get the max number of threads that can run simultaneously
924 * @return Max number of threads ecore will run
925 * This returns the total number of threads that ecore will attempt to run
929 ecore_thread_max_get(void)
931 return _ecore_thread_count_max;
935 * @brief Set the max number of threads that can run simultaneously
936 * @param num The new maximum
937 * This sets the maximum number of threads that ecore will try to run
938 * simultaneously. This number cannot be < 1 or >= 2x the number of active cpus.
941 ecore_thread_max_set(int num)
944 /* avoid doing something hilarious by blocking dumb users */
945 if (num >= (2 * eina_cpu_count())) return;
/* NOTE(review): the doc above says num cannot be < 1, but no lower-bound
 * check is visible here — confirm it exists on a line not shown. */
947 _ecore_thread_count_max = num;
951 * @brief Reset the max number of threads that can run simultaneously
952 * This resets the maximum number of threads that ecore will try to run
953 * simultaneously to the number of active cpus.
956 ecore_thread_max_reset(void)
958 _ecore_thread_count_max = eina_cpu_count();
962 * @brief Get the number of threads which are available to be used
963 * @return The number of available threads
964 * This returns the number of threads slots that ecore has currently available.
965 * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
966 * this should be equal to (num_cpus - (active_running + active_feedback_running))
969 ecore_thread_available_get(void)
972 #ifdef EFL_HAVE_PTHREAD
/* Read max and live counts together under the mutex for consistency. */
973 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
974 ret = _ecore_thread_count_max - _ecore_thread_count;
975 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
983 * @brief Add data to the thread for subsequent use
984 * @param thread The thread context to add to
985 * @param key The name string to add the data with
986 * @param value The data to add
987 * @param cb The callback to free the data with
988 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
989 * @return EINA_TRUE on success, EINA_FALSE on failure
990 * This adds data to the thread context, allowing the thread
991 * to retrieve and use it without complicated mutexing. This function can only be called by a
992 * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
993 * All data added to the thread will be freed with its associated callback (if present)
994 * upon thread termination. If no callback is specified, it is expected that the user will free the
995 * data, but this is most likely not what you want.
998 ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1000 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1001 Ecore_Thread_Data *d;
1004 if ((!thread) || (!key) || (!value))
1006 #ifdef EFL_HAVE_PTHREAD
/* Enforce "inside the thread only": caller must be the worker itself. */
1007 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
/* Lazily create the per-thread hash on first use. */
1010 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1015 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1022 ret = eina_hash_direct_add(worker->hash, key, d);
1024 ret = eina_hash_add(worker->hash, key, d);
/* Wake anything blocked on this worker's condition variable (waiter
 * side not visible in this chunk — presumably a data-wait helper). */
1025 pthread_cond_broadcast(&worker->cond);
1033 * @brief Modify data in the thread, or add if not found
1034 * @param thread The thread context
1035 * @param key The name string to add the data with
1036 * @param value The data to add
1037 * @param cb The callback to free the data with
1038 * @return The old data associated with @p key on success if modified, NULL if added
1039 * This adds/modifies data in the thread context, adding only if modify fails.
1040 * This function can only be called by a *_run thread INSIDE the thread.
1041 * All data added to the thread pool will be freed with its associated callback (if present)
1042 * upon thread termination. If no callback is specified, it is expected that the user will free the
1043 * data, but this is most likely not what you want.
1046 ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
1048 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1049 Ecore_Thread_Data *d, *r;
1051 if ((!thread) || (!key) || (!value))
1053 #ifdef EFL_HAVE_PTHREAD
/* Enforce "inside the thread only": caller must be the worker itself. */
1054 if (!pthread_equal(worker->self, pthread_self())) return NULL;
/* Lazily create the per-thread hash on first use. */
1057 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1062 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
/* eina_hash_set returns the previous entry (or NULL if freshly added). */
1068 r = eina_hash_set(worker->hash, key, d);
1069 pthread_cond_broadcast(&worker->cond);
1079 * @brief Find data in the thread's data
1080 * @param thread The thread context
1081 * @param key The name string the data is associated with
1082 * @return The value, or NULL on error
1083 * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
1084 * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
1085 * in any case but success.
1089 ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
1091 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1092 Ecore_Thread_Data *d;
1094 if ((!thread) || (!key))
1096 #ifdef EFL_HAVE_PTHREAD
/* Enforce "inside the thread only": caller must be the worker itself. */
1097 if (!pthread_equal(worker->self, pthread_self())) return NULL;
1102 d = eina_hash_find(worker->hash, key);
1110 * @brief Delete data from the thread's data
1111 * @param thread The thread context
1112 * @param key The name string the data is associated with
1113 * @return EINA_TRUE on success, EINA_FALSE on failure
1114 * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
1115 * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
1116 * in any case but success. Note that this WILL free the data if a callback was specified.
1119 ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
1121 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1122 Ecore_Thread_Data *d;
1123 if ((!thread) || (!key))
1125 #ifdef EFL_HAVE_PTHREAD
/* Enforce "inside the thread only": caller must be the worker itself. */
1126 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
/* NOTE(review): the hash was created with _ecore_thread_data_free as its
 * free callback, so freeing d here and then deleting the key may run the
 * free path twice — confirm eina_hash_del_by_key's callback behavior. */
1130 if ((d = eina_hash_find(worker->hash, key)))
1131 _ecore_thread_data_free(d);
1132 return eina_hash_del_by_key(worker->hash, key);
1139 * @brief Add data to the global data
1140 * @param key The name string to add the data with
1141 * @param value The data to add
1142 * @param cb The optional callback to free the data with once ecore is shut down
1143 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1144 * @return EINA_TRUE on success, EINA_FALSE on failure
1145 * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
1146 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1147 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1148 * was specified for, you will most likely encounter a segv later on.
1151 ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1154 Ecore_Thread_Data *d;
1156 if ((!key) || (!value))
1158 #ifdef EFL_HAVE_PTHREAD
/* Lazily create the global hash under the write lock. */
1159 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1160 if (!_ecore_thread_global_hash)
1161 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1162 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1164 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1170 if (!_ecore_thread_global_hash)
1172 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1174 ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1176 ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1177 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
/* Wake threads blocked in ecore_thread_global_data_wait(). */
1178 pthread_cond_broadcast(&_ecore_thread_global_hash_cond);
1186 * @brief Add data to the global data
1187 * @param key The name string to add the data with
1188 * @param value The data to add
1189 * @param cb The optional callback to free the data with once ecore is shut down
1190 * @return An Ecore_Thread_Data on success, NULL on failure
1191 * This adds data to the global thread data and returns NULL, or replaces the previous data
1192 * associated with @p key and returning the previous data if it existed. To see if an error occurred,
1193 * one must use eina_error_get.
1194 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1195 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1196 * was specified for, you will most likely encounter a segv later on.
1199 ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
1201 Ecore_Thread_Data *d, *r;
1204 if ((!key) || (!value))
1206 #ifdef EFL_HAVE_PTHREAD
/* Lazily create the global hash under the write lock. */
1207 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1208 if (!_ecore_thread_global_hash)
1209 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1210 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1212 if (!_ecore_thread_global_hash)
1215 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
/* eina_hash_set returns the previous entry (or NULL if freshly added). */
1221 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1222 r = eina_hash_set(_ecore_thread_global_hash, key, d);
1223 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
/* Wake threads blocked in ecore_thread_global_data_wait(). */
1224 pthread_cond_broadcast(&_ecore_thread_global_hash_cond);
1235 * @brief Find data in the global data
1236 * @param key The name string the data is associated with
1237 * @return The value, or NULL on error
1238 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1239 * This function will return NULL in any case but success.
1240 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1241 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1242 * was specified for, you will most likely encounter a segv later on.
1243 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1244 * if you will be doing anything with it.
1248 ecore_thread_global_data_find(const char *key)
1250 Ecore_Thread_Data *ret;
1253 #ifdef EFL_HAVE_PTHREAD
1254 if (!_ecore_thread_global_hash) return NULL;
/* Read-lock is enough: lookup does not mutate the hash. */
1256 pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock);
1257 ret = eina_hash_find(_ecore_thread_global_hash, key);
1258 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1266 * @brief Delete data from the global data
1267 * @param key The name string the data is associated with
1268 * @return EINA_TRUE on success, EINA_FALSE on failure
1269 * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add
1270 * This function will return EINA_FALSE in any case but success.
1271 * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
1274 ecore_thread_global_data_del(const char *key)
1277 Ecore_Thread_Data *d;
1281 #ifdef EFL_HAVE_PTHREAD
1282 if (!_ecore_thread_global_hash)
/* NOTE(review): as with the local variant, the hash's own free callback
 * is _ecore_thread_data_free; freeing d manually here and then deleting
 * the key may run the free path twice — verify eina_hash semantics. */
1285 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1286 if ((d = eina_hash_find(_ecore_thread_global_hash, key)))
1287 _ecore_thread_data_free(d);
1288 ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1289 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1297 * @brief Find data in the global data and optionally wait for the data if not found
1298 * @param key The name string the data is associated with
1299 * @param seconds The amount of time in seconds to wait for the data. If 0, the call will be async and not wait for data.
1300 * If < 0 the call will wait indefinitely for the data.
1301 * @return The value, or NULL on failure
1302 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1303 * This function will return NULL in any case but success.
1304 * Use @p seconds to specify the amount of time to wait. Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
1305 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1306 * if you will be doing anything with it.
1309 ecore_thread_global_data_wait(const char *key, double seconds)
1312 Ecore_Thread_Data *ret = NULL;
1315 #ifdef EFL_HAVE_PTHREAD
1316 if (!_ecore_thread_global_hash)
1319 time = ecore_time_get() + seconds;
1323 struct timespec t = { 0, 0 };
1325 t.tv_sec = (long int)time;
1326 t.tv_nsec = (long int)((time - (double)t.tv_sec) * 1000000000);
1327 pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock);
1328 ret = eina_hash_find(_ecore_thread_global_hash, key);
1329 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1330 if ((ret) || (!seconds) || ((seconds > 0) && (time <= ecore_time_get())))
1332 pthread_mutex_lock(&_ecore_thread_global_hash_mutex);
1333 pthread_cond_timedwait(&_ecore_thread_global_hash_cond, &_ecore_thread_global_hash_mutex, &t);
1334 pthread_mutex_unlock(&_ecore_thread_global_hash_mutex);
1336 if (ret) return ret->data;