9 #ifdef EFL_HAVE_PTHREAD
14 #include "ecore_private.h"
16 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
17 typedef struct _Ecore_Pthread Ecore_Pthread;
18 typedef struct _Ecore_Thread_Data Ecore_Thread_Data;
20 struct _Ecore_Thread_Data
26 struct _Ecore_Pthread_Worker
30 Ecore_Cb func_blocking;
33 Ecore_Thread_Heavy_Cb func_heavy;
34 Ecore_Thread_Notify_Cb func_notify;
41 #ifdef EFL_HAVE_PTHREAD
45 pthread_mutex_t mutex;
51 Eina_Bool long_run : 1;
54 #ifdef EFL_HAVE_PTHREAD
55 typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;
57 struct _Ecore_Pthread_Data
65 static int _ecore_thread_count_max = 0;
66 static int ECORE_THREAD_PIPE_DEL = 0;
68 #ifdef EFL_HAVE_PTHREAD
69 static int _ecore_thread_count = 0;
71 static Eina_List *_ecore_active_job_threads = NULL;
72 static Eina_List *_ecore_pending_job_threads = NULL;
73 static Eina_List *_ecore_pending_job_threads_long = NULL;
74 static Ecore_Event_Handler *del_handler = NULL;
75 static pthread_mutex_t _ecore_pending_job_threads_mutex = PTHREAD_MUTEX_INITIALIZER;
77 static Eina_Hash *_ecore_thread_global_hash = NULL;
78 static pthread_rwlock_t _ecore_thread_global_hash_lock = PTHREAD_RWLOCK_INITIALIZER;
79 static pthread_mutex_t _ecore_thread_global_hash_mutex = PTHREAD_MUTEX_INITIALIZER;
80 static pthread_cond_t _ecore_thread_global_hash_cond = PTHREAD_COND_INITIALIZER;
81 static pthread_t main_loop_thread;
82 static Eina_Bool have_main_loop_thread = 0;
/* Eina_Hash free callback for per-thread data: invokes the user-supplied
 * destructor (d->cb) on the stored value, if one was registered.
 * NOTE(review): the free(d) of the container itself is presumably in an
 * elided line below — confirm against the full source. */
84 _ecore_thread_data_free(void *data)
86 Ecore_Thread_Data *d = data;
88 if (d->cb) d->cb(d->data);
/* Ecore_Event free callback used by ECORE_THREAD_PIPE_DEL events:
 * the event payload is the worker's Ecore_Pipe, destroyed here once the
 * main loop is safely outside the pipe's internal dispatch. */
93 _ecore_thread_pipe_free(void *data __UNUSED__, void *event)
95 Ecore_Pipe *p = event;
/* Event handler for ECORE_THREAD_PIPE_DEL: does nothing and cancels
 * propagation, existing only so pipe destruction is deferred to the
 * event's free callback (see _ecore_thread_pipe_free). */
101 _ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
103 /* This is a hack to delay pipe destruction until we are out of its internal loop. */
104 return ECORE_CALLBACK_CANCEL;
/* Main-loop side teardown of a finished worker thread: joins the pthread,
 * unlinks it from the active list, and schedules its pipe for deferred
 * destruction via an ECORE_THREAD_PIPE_DEL event. */
108 _ecore_thread_end(Ecore_Pthread_Data *pth)
112 if (pthread_join(pth->thread, (void **) &p) != 0)
115 _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);
117 ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
/* Pipe read callback run in the main loop when a worker finishes a job.
 * The pipe payload is a pointer to the completed Ecore_Pthread_Worker;
 * dispatches func_cancel or func_end as appropriate, then destroys the
 * worker's notify pipe, condition variable, mutex and local-data hash. */
122 _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
124 Ecore_Pthread_Worker *work;
/* Ignore malformed writes: payload must be exactly one worker pointer. */
126 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
128 work = *(Ecore_Pthread_Worker **)buffer;
132 if (work->func_cancel)
133 work->func_cancel((void *) work->data);
138 work->func_end((void *) work->data);
142 ecore_pipe_del(work->u.long_run.notify);
143 pthread_cond_destroy(&work->cond);
144 pthread_mutex_destroy(&work->mutex);
146 eina_hash_free(work->hash);
151 _ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
153 Ecore_Pthread_Worker *work = data;
156 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
158 user_data = *(void **)buffer;
160 if (work->u.long_run.func_notify)
161 work->u.long_run.func_notify((Ecore_Thread *) work, user_data, (void *) work->data);
/* Worker-thread loop draining the short-job queue: pops one pending job
 * at a time under _ecore_pending_job_threads_mutex, runs its blocking
 * function outside the lock, then reports completion back to the main
 * loop through end_pipe (read by _ecore_thread_handler). */
165 _ecore_short_job(Ecore_Pipe *end_pipe)
167 Ecore_Pthread_Worker *work;
169 while (_ecore_pending_job_threads)
171 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Re-check under the lock: another worker may have emptied the queue. */
173 if (!_ecore_pending_job_threads)
175 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
179 work = eina_list_data_get(_ecore_pending_job_threads);
180 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, _ecore_pending_job_threads);
182 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Run the user's blocking function with the mutex released. */
185 work->u.short_run.func_blocking((void *) work->data);
187 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Worker-thread loop draining the long-job queue; mirrors _ecore_short_job
 * but runs func_heavy, passing the worker as the Ecore_Thread handle so the
 * job can call ecore_thread_notify()/ecore_thread_check(). */
192 _ecore_long_job(Ecore_Pipe *end_pipe, pthread_t thread)
194 Ecore_Pthread_Worker *work;
196 while (_ecore_pending_job_threads_long)
198 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Re-check under the lock: the queue may have been drained meanwhile. */
200 if (!_ecore_pending_job_threads_long)
202 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
206 work = eina_list_data_get(_ecore_pending_job_threads_long);
207 _ecore_pending_job_threads_long = eina_list_remove_list(_ecore_pending_job_threads_long, _ecore_pending_job_threads_long);
209 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
213 work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
215 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Thread entry point for "try_no_queue" long jobs (see ecore_long_run):
 * runs a single worker's func_heavy directly on a dedicated thread,
 * reports completion through its own pipe, then fabricates a dummy worker
 * whose func_end is _ecore_thread_end so the main loop joins/reaps this
 * thread. Async cancellation is enabled so ecore_thread_cancel can kill it. */
220 _ecore_direct_worker(Ecore_Pthread_Worker *work)
222 Ecore_Pthread_Data *pth;
224 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
225 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
227 pth = malloc(sizeof (Ecore_Pthread_Data));
228 if (!pth) return NULL;
230 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
236 pth->thread = pthread_self();
/* Record our identity so ecore_thread_notify/local_data calls can verify
 * they are made from inside this job's thread. */
238 work->self = pth->thread;
239 work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
241 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Build a synthetic "job done" worker that makes the main loop call
 * _ecore_thread_end(pth) and thus pthread_join this thread. */
243 work = malloc(sizeof (Ecore_Pthread_Worker));
246 ecore_pipe_del(pth->p);
252 work->u.short_run.func_blocking = NULL;
253 work->func_end = (void *) _ecore_thread_end;
254 work->func_cancel = NULL;
255 work->cancel = EINA_FALSE;
256 work->long_run = EINA_FALSE;
258 pthread_cond_init(&work->cond, NULL);
259 pthread_mutex_init(&work->mutex, NULL);
261 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Pool-thread entry point: registers itself in _ecore_thread_count, drains
 * the short- and long-job queues, re-checks for late arrivals under the
 * lock, then deregisters and posts a synthetic worker (func_end ==
 * _ecore_thread_end) so the main loop joins this thread. */
267 _ecore_thread_worker(Ecore_Pthread_Data *pth)
269 Ecore_Pthread_Worker *work;
271 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
272 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
274 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
275 _ecore_thread_count++;
276 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
279 if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
280 if (_ecore_pending_job_threads_long) _ecore_long_job(pth->p, pth->thread);
282 /* FIXME: Check if there is long running task todo, and switch to long run handler. */
/* Avoid a shutdown race: if jobs were queued after the drain above,
 * loop back (elided control flow) rather than exiting the thread. */
284 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
285 if (_ecore_pending_job_threads)
287 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
290 if (_ecore_pending_job_threads_long)
292 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
296 _ecore_thread_count--;
298 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Synthetic "thread finished" worker: main loop will call
 * _ecore_thread_end(pth) from _ecore_thread_handler. */
300 work = malloc(sizeof (Ecore_Pthread_Worker));
301 if (!work) return NULL;
304 work->u.short_run.func_blocking = NULL;
305 work->func_end = (void *) _ecore_thread_end;
306 work->func_cancel = NULL;
307 work->cancel = EINA_FALSE;
308 work->long_run = EINA_FALSE;
310 pthread_cond_init(&work->cond, NULL);
311 pthread_mutex_init(&work->mutex, NULL);
313 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Module init: sizes the pool to the CPU count (minimum 1), allocates the
 * ECORE_THREAD_PIPE_DEL event type, and (with pthreads) records the main
 * loop's thread identity for later pthread_equal() checks. */
321 _ecore_thread_init(void)
323 _ecore_thread_count_max = eina_cpu_count();
324 if (_ecore_thread_count_max <= 0)
325 _ecore_thread_count_max = 1;
327 ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
328 #ifdef EFL_HAVE_PTHREAD
329 del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
330 main_loop_thread = pthread_self();
331 have_main_loop_thread = 1;
/* Module shutdown: cancels every pending short job (invoking func_cancel),
 * then cancels+joins every active worker thread, frees their pipes, the
 * global data hash, and the pipe-del event handler. */
336 _ecore_thread_shutdown(void)
338 /* FIXME: If function are still running in the background, should we kill them ? */
339 #ifdef EFL_HAVE_PTHREAD
340 Ecore_Pthread_Worker *work;
341 Ecore_Pthread_Data *pth;
343 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
345 EINA_LIST_FREE(_ecore_pending_job_threads, work)
347 if (work->func_cancel)
348 work->func_cancel((void *)work->data);
352 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Forcibly terminate still-running workers; async cancel type was set in
 * the worker entry points so pthread_cancel takes effect promptly. */
354 EINA_LIST_FREE(_ecore_active_job_threads, pth)
358 pthread_cancel(pth->thread);
359 pthread_join(pth->thread, (void **) &p);
361 ecore_pipe_del(pth->p);
363 if (_ecore_thread_global_hash)
364 eina_hash_free(_ecore_thread_global_hash);
365 ecore_event_handler_del(del_handler);
366 have_main_loop_thread = 0;
371 * @addtogroup Ecore_Thread Ecore Thread Functions
372 * These functions allow for ecore-managed threads which integrate with ecore's main loop.
377 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
378 * @param func_blocking The function that should run in another thread.
379 * @param func_end The function that will be called in the main loop if the thread terminate correctly.
380 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
381 * @param data User context data to pass to all callback.
382 * @return A reference to the newly created thread instance, or NULL if it failed.
384 * ecore_thread_run provides a facility for easily managing blocking tasks in a
385 * parallel thread. You should provide three functions. The first one, func_blocking,
386 * will do the blocking work in another thread (so you should not use the
387 * EFL in it except Eina, if you are careful). The second one, func_end,
388 * that will be called in Ecore main loop when func_blocking is done. So you
389 * can use all the EFL inside this function. The last one, func_cancel, will
390 * be called in the main loop if the thread is cancelled or could not run at all.
392 * Be aware, that you can't make assumption on the result order of func_end
393 * after many call to ecore_thread_run, as we start as much thread as the
394 * host CPU can handle.
/* Queue a short blocking job on the pool. Appends the new worker to the
 * pending list; if the pool is not yet at _ecore_thread_count_max, spawns
 * one more worker thread (at minimum scheduling priority so background
 * jobs do not starve the UI). Returns the job handle, or NULL on failure.
 * Without pthread support, falls through to running func_blocking and
 * func_end synchronously in the caller.
 * FIX(review): restored "&param" in the two memset/pthread_setschedparam
 * calls — the text had been corrupted to "¶m" ("&para" eaten by an
 * HTML-entity pass), which is not valid C. */
397 ecore_thread_run(Ecore_Cb func_blocking,
399 Ecore_Cb func_cancel,
402 #ifdef EFL_HAVE_PTHREAD
403 Ecore_Pthread_Worker *work;
404 Ecore_Pthread_Data *pth = NULL;
406 if (!func_blocking) return NULL;
408 work = malloc(sizeof (Ecore_Pthread_Worker));
411 func_cancel((void *) data);
415 work->u.short_run.func_blocking = func_blocking;
417 pthread_cond_init(&work->cond, NULL);
418 pthread_mutex_init(&work->mutex, NULL);
419 work->func_end = func_end;
420 work->func_cancel = func_cancel;
421 work->cancel = EINA_FALSE;
422 work->long_run = EINA_FALSE;
425 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
426 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
/* Pool already full: an existing worker will pick the job up. */
428 if (_ecore_thread_count == _ecore_thread_count_max)
430 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
431 return (Ecore_Thread *) work;
434 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
436 /* One more thread could be created. */
437 pth = malloc(sizeof (Ecore_Pthread_Data));
438 if (!pth) goto on_error;
440 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
441 if (!pth->p) goto on_error;
443 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
445 /* lower priority of worker threads so they use up "bg cpu"
446 * as it was really intended to work */
447 struct sched_param param;
449 memset(&param, 0, sizeof(param));
450 param.sched_priority = sched_get_priority_min(SCHED_OTHER);
451 pthread_setschedparam(pth->thread, SCHED_OTHER, &param);
453 return (Ecore_Thread *) work;
459 if (pth->p) ecore_pipe_del(pth->p);
/* No worker could be started and none exist: cancel the job now. */
463 if (_ecore_thread_count == 0)
465 if (work->func_cancel)
466 work->func_cancel((void *) work->data);
470 return (Ecore_Thread *) work;
473 If no thread and as we don't want to break app that rely on this
474 facility, we will lock the interface until we are done.
476 func_blocking((void *)data);
477 func_end((void *)data);
484 * @brief Cancel a running thread.
485 * @param thread The thread to cancel.
486 * @return Will return EINA_TRUE if the thread has been cancelled,
487 * EINA_FALSE if it is pending.
489 * ecore_thread_cancel give the possibility to cancel a task still running. It
490 * will return EINA_FALSE, if the destruction is delayed or EINA_TRUE if it is
491 * cancelled after this call.
493 * You should use this function only in the main loop.
495 * func_end, func_cancel will destroy the handler, so don't use it after.
496 * And if ecore_thread_cancel return EINA_TRUE, you should not use Ecore_Thread also.
/* Cancel a queued or running job. Only from the main loop thread can a
 * still-pending short job be removed immediately (func_cancel is invoked
 * and EINA_TRUE-style immediate cancellation applies); otherwise the
 * worker's cancel flag is raised and destruction is deferred until the
 * job polls ecore_thread_check(). */
499 ecore_thread_cancel(Ecore_Thread *thread)
501 #ifdef EFL_HAVE_PTHREAD
502 Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
508 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Immediate removal is only safe from the main loop thread. */
510 if ((have_main_loop_thread) &&
511 (pthread_equal(main_loop_thread, pthread_self())))
513 EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
515 if ((void *) work == (void *) thread)
517 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
519 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
521 if (work->func_cancel)
522 work->func_cancel((void *) work->data);
530 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
532 /* Delay the destruction */
533 ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
541 * @brief Tell if a thread was canceled or not.
542 * @param thread The thread to test.
543 * @return EINA_TRUE if the thread is cancelled,
544 * EINA_FALSE if it is not.
546 * You can use this function in main loop and in the thread.
/* Return the job's cancel flag; a NULL handle reads as "cancelled" so
 * callers bail out safely. Usable from both the main loop and the job. */
549 ecore_thread_check(Ecore_Thread *thread)
551 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
553 if (!worker) return EINA_TRUE;
554 return worker->cancel;
558 * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
559 * @param func_heavy The function that should run in another thread.
560 * @param func_notify The function that will receive the data send by func_heavy in the main loop.
561 * @param func_end The function that will be called in the main loop if the thread terminate correctly.
562 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
563 * @param data User context data to pass to all callback.
564 * @param try_no_queue If you want to run outside of the thread pool.
565 * @return A reference to the newly created thread instance, or NULL if it failed.
567 * ecore_long_run provides a facility for easily managing heavy tasks in a
568 * parallel thread. You should provide four functions. The first one, func_heavy,
569 * will do the heavy work in another thread (so you should not use the
570 * EFL in it except Eina and Eet, if you are careful). The second one, func_notify,
571 * will receive the data sent from the thread function (func_heavy) by ecore_thread_notify
572 * in the main loop (and so, can use all the EFL). The third, func_end,
573 * that will be called in Ecore main loop when func_heavy is done. So you
574 * can use all the EFL inside this function. The last one, func_cancel, will
575 * be called in the main loop also, if the thread is cancelled or could not run at all.
577 * Be aware, that you can't make assumption on the result order of func_end
578 * after many call to ecore_long_run, as we start as much thread as the
579 * host CPU can handle.
581 * If you set try_no_queue, it will try to run outside of the thread pool, this can bring
582 * the CPU down, so be carefull with that. Of course if it can't start a new thread, it will
583 * try to use one from the pool.
/* Queue a heavy job with a main-loop notification channel. When
 * try_no_queue is set, first attempts a dedicated thread running
 * _ecore_direct_worker; otherwise (or on failure) the job joins the
 * long-run pool queue, growing the pool if below the max. Without
 * pthread support the job runs synchronously on a stack worker.
 * FIX(review): the pipe-failure check after ecore_pipe_add read
 * "if (pth->p) goto on_error;" — inverted versus the identical sequence
 * in ecore_thread_run — corrected to "if (!pth->p)".
 * FIX(review): restored "&param" where the text had the mojibake "¶m". */
585 EAPI Ecore_Thread *ecore_long_run(Ecore_Thread_Heavy_Cb func_heavy,
586 Ecore_Thread_Notify_Cb func_notify,
588 Ecore_Cb func_cancel,
590 Eina_Bool try_no_queue)
593 #ifdef EFL_HAVE_PTHREAD
594 Ecore_Pthread_Worker *worker;
595 Ecore_Pthread_Data *pth = NULL;
597 if (!func_heavy) return NULL;
599 worker = malloc(sizeof (Ecore_Pthread_Worker));
600 if (!worker) goto on_error;
602 worker->u.long_run.func_heavy = func_heavy;
603 worker->u.long_run.func_notify = func_notify;
605 pthread_cond_init(&worker->cond, NULL);
606 pthread_mutex_init(&worker->mutex, NULL);
607 worker->func_cancel = func_cancel;
608 worker->func_end = func_end;
610 worker->cancel = EINA_FALSE;
611 worker->long_run = EINA_TRUE;
/* Pipe carrying ecore_thread_notify() payloads back to the main loop. */
613 worker->u.long_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
/* try_no_queue path: dedicated thread, bypassing the pool. */
619 if (pthread_create(&t, NULL, (void *) _ecore_direct_worker, worker) == 0)
621 /* lower priority of worker threads so they use up "bg cpu"
622 * as it was really intended to work */
623 struct sched_param param;
625 memset(&param, 0, sizeof(param));
626 param.sched_priority = sched_get_priority_min(SCHED_OTHER);
627 pthread_setschedparam(t, SCHED_OTHER, &param);
629 return (Ecore_Thread *) worker;
633 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
634 _ecore_pending_job_threads_long = eina_list_append(_ecore_pending_job_threads_long, worker);
/* Pool already full: an existing worker will pick the job up. */
636 if (_ecore_thread_count == _ecore_thread_count_max)
638 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
639 return (Ecore_Thread *) worker;
642 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
644 /* One more thread could be created. */
645 pth = malloc(sizeof (Ecore_Pthread_Data));
646 if (!pth) goto on_error;
648 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
649 if (!pth->p) goto on_error;
651 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
653 /* lower priority of worker threads so they use up "bg cpu"
654 * as it was really intended to work */
655 struct sched_param param;
657 memset(&param, 0, sizeof(param));
658 param.sched_priority = sched_get_priority_min(SCHED_OTHER);
659 pthread_setschedparam(pth->thread, SCHED_OTHER, &param);
661 return (Ecore_Thread *) worker;
667 if (pth->p) ecore_pipe_del(pth->p);
671 if (_ecore_thread_count == 0)
673 if (func_cancel) func_cancel((void *) data);
677 ecore_pipe_del(worker->u.long_run.notify);
683 return (Ecore_Thread *) worker;
/* Fallback without pthreads: run synchronously on a stack worker. */
685 Ecore_Pthread_Worker worker;
690 If no thread and as we don't want to break app that rely on this
691 facility, we will lock the interface until we are done.
693 worker.u.long_run.func_heavy = func_heavy;
694 worker.u.long_run.func_notify = func_notify;
695 worker.u.long_run.notify = NULL;
696 worker.func_cancel = func_cancel;
697 worker.func_end = func_end;
699 worker.cancel = EINA_FALSE;
700 worker.long_run = EINA_TRUE;
702 func_heavy((Ecore_Thread *) &worker, (void *)data);
/* NOTE(review): func_cancel may be NULL here while func_heavy can set
 * worker.cancel — potential NULL call; confirm against full source. */
704 if (worker.cancel) func_cancel((void *)data);
705 else func_end((void *)data);
712 * @brief Send data to main loop from worker thread.
713 * @param thread The current Ecore_Thread context to send data from
714 * @param data Data to be transmitted to the main loop
715 * @return EINA_TRUE if data was successfully send to main loop,
716 * EINA_FALSE if anything goes wrong.
718 * After a successful call, the data should be considered owned
721 * You should use this function only in the func_heavy call.
/* Send a datum from a long-run job to its func_notify in the main loop.
 * Rejects NULL handles, short-run jobs, and calls made from any thread
 * other than the one executing the job (pthread_equal on worker->self).
 * With pthreads, the pointer is marshalled through the notify pipe;
 * otherwise func_notify is invoked directly. */
724 ecore_thread_notify(Ecore_Thread *thread, const void *data)
726 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
728 if (!worker) return EINA_FALSE;
729 if (!worker->long_run) return EINA_FALSE;
731 #ifdef EFL_HAVE_PTHREAD
732 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
734 ecore_pipe_write(worker->u.long_run.notify, &data, sizeof (void *));
738 worker->u.long_run.func_notify(thread, (void*) data, (void*) worker->data);
745 * @brief Get number of active thread jobs
746 * @return Number of active threads running jobs
747 * This returns the number of threads currently running jobs through the
/* Number of pool threads currently alive (0 without pthread support).
 * NOTE(review): reads _ecore_thread_count without taking the mutex —
 * racy but benign for a monitoring counter. */
751 ecore_thread_active_get(void)
753 #ifdef EFL_HAVE_PTHREAD
754 return _ecore_thread_count;
761 * @brief Get number of pending (short) thread jobs
762 * @return Number of pending threads running "short" jobs
763 * This returns the number of threads currently running jobs through the
764 * ecore_thread_run api call.
/* Count of queued (not yet started) short jobs, read under the queue mutex. */
767 ecore_thread_pending_get(void)
770 #ifdef EFL_HAVE_PTHREAD
771 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
772 ret = eina_list_count(_ecore_pending_job_threads);
773 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
781 * @brief Get number of pending long thread jobs
782 * @return Number of pending threads running "long" jobs
783 * This returns the number of threads currently running jobs through the
784 * ecore_long_run api call.
/* Count of queued (not yet started) long jobs, read under the queue mutex. */
787 ecore_thread_pending_long_get(void)
790 #ifdef EFL_HAVE_PTHREAD
791 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
792 ret = eina_list_count(_ecore_pending_job_threads_long);
793 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
801 * @brief Get number of pending thread jobs
802 * @return Number of pending threads running jobs
803 * This returns the number of threads currently running jobs through the
804 * ecore_thread_run and ecore_long_run api calls combined.
/* Combined count of queued short and long jobs, taken in one lock so the
 * two list lengths are mutually consistent. */
807 ecore_thread_pending_total_get(void)
810 #ifdef EFL_HAVE_PTHREAD
811 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
812 ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_long);
813 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
821 * @brief Get the max number of threads that can run simultaneously
822 * @return Max number of threads ecore will run
823 * This returns the total number of threads that ecore will attempt to run
/* Current cap on the number of simultaneously running pool threads. */
827 ecore_thread_max_get(void)
829 return _ecore_thread_count_max;
833 * @brief Set the max number of threads that can run simultaneously
834 * @param num The new maximum
835 * This sets the maximum number of threads that ecore will try to run
836 * simultaneously. This number cannot be < 1 or >= 2x the number of active cpus.
/* Set the pool-size cap; values >= 2x the CPU count are rejected.
 * NOTE(review): the doc promises num cannot be < 1, but no such guard is
 * visible here — presumably an elided "if (num < 1) return;" line; confirm
 * against the full source. */
839 ecore_thread_max_set(int num)
842 /* avoid doing something hilarious by blocking dumb users */
843 if (num >= (2 * eina_cpu_count())) return;
845 _ecore_thread_count_max = num;
849 * @brief Reset the max number of threads that can run simultaneously
850 * This resets the maximum number of threads that ecore will try to run
851 * simultaneously to the number of active cpus.
/* Restore the pool-size cap to the detected CPU count. */
854 ecore_thread_max_reset(void)
856 _ecore_thread_count_max = eina_cpu_count();
860 * @brief Get the number of threads which are available to be used
861 * @return The number of available threads
862 * This returns the number of threads slots that ecore has currently available.
863 * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
864 * this should be equal to (num_cpus - (active_running + active_long_running))
/* Remaining pool capacity: max cap minus currently running threads,
 * read under the queue mutex for a consistent snapshot. */
867 ecore_thread_available_get(void)
870 #ifdef EFL_HAVE_PTHREAD
871 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
872 ret = _ecore_thread_count_max - _ecore_thread_count;
873 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
881 * @brief Add data to the thread for subsequent use
882 * @param thread The thread context to add to
883 * @param key The name string to add the data with
884 * @param value The data to add
885 * @param cb The callback to free the data with
886 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
887 * @return EINA_TRUE on success, EINA_FALSE on failure
888 * This adds data to the thread context, allowing the thread
889 * to retrieve and use it without complicated mutexing. This function can only be called by a
890 * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
891 * All data added to the thread will be freed with its associated callback (if present)
892 * upon thread termination. If no callback is specified, it is expected that the user will free the
893 * data, but this is most likely not what you want.
/* Attach keyed data to a running job's private hash. Callable only from
 * inside the job's own thread (enforced via pthread_equal); the hash is
 * created lazily with _ecore_thread_data_free as its value destructor.
 * "direct" selects eina_hash_direct_add (key string not copied). The
 * cond broadcast wakes any waiter blocked on this worker's data. */
896 ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
898 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
899 Ecore_Thread_Data *d;
902 if ((!thread) || (!key) || (!value))
904 #ifdef EFL_HAVE_PTHREAD
905 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
908 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
913 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
920 ret = eina_hash_direct_add(worker->hash, key, d);
922 ret = eina_hash_add(worker->hash, key, d);
923 pthread_cond_broadcast(&worker->cond);
931 * @brief Modify data in the thread, or add if not found
932 * @param thread The thread context
933 * @param key The name string to add the data with
934 * @param value The data to add
935 * @param cb The callback to free the data with
936 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
937 * @return The old data associated with @p key on success if modified, NULL if added
938 * This adds/modifies data in the thread context, adding only if modify fails.
939 * This function can only be called by a *_run thread INSIDE the thread.
940 * All data added to the thread pool will be freed with its associated callback (if present)
941 * upon thread termination. If no callback is specified, it is expected that the user will free the
942 * data, but this is most likely not what you want.
/* Add-or-replace keyed data in the job's private hash (eina_hash_set),
 * returning the previous entry when one existed. Same thread-identity
 * restriction and lazy hash creation as ecore_thread_local_data_add. */
945 ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
947 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
948 Ecore_Thread_Data *d, *r;
950 if ((!thread) || (!key) || (!value))
952 #ifdef EFL_HAVE_PTHREAD
953 if (!pthread_equal(worker->self, pthread_self())) return NULL;
956 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
961 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
967 r = eina_hash_set(worker->hash, key, d);
968 pthread_cond_broadcast(&worker->cond);
978 * @brief Find data in the thread's data
979 * @param thread The thread context
980 * @param key The name string the data is associated with
981 * @return The value, or NULL on error
982 * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
983 * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
984 * in any case but success.
/* Look up keyed data in the job's private hash; restricted to the job's
 * own thread, NULL on any failure. */
988 ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
990 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
991 Ecore_Thread_Data *d;
993 if ((!thread) || (!key))
995 #ifdef EFL_HAVE_PTHREAD
996 if (!pthread_equal(worker->self, pthread_self())) return NULL;
1001 d = eina_hash_find(worker->hash, key);
1009 * @brief Delete data from the thread's data
1010 * @param thread The thread context
1011 * @param key The name string the data is associated with
1012 * @return EINA_TRUE on success, EINA_FALSE on failure
1013 * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
1014 * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
1015 * in any case but success. Note that this WILL free the data if a callback was specified.
/* Remove keyed data from the job's private hash, first running the entry's
 * free callback via _ecore_thread_data_free; restricted to the job's own
 * thread.
 * NOTE(review): the hash's own value destructor is also
 * _ecore_thread_data_free, so eina_hash_del_by_key may invoke the user
 * callback a second time — confirm against the full source. */
1018 ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
1020 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1021 Ecore_Thread_Data *d;
1022 if ((!thread) || (!key))
1024 #ifdef EFL_HAVE_PTHREAD
1025 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
1029 if ((d = eina_hash_find(worker->hash, key)))
1030 _ecore_thread_data_free(d);
1031 return eina_hash_del_by_key(worker->hash, key);
1038 * @brief Add data to the global data
1039 * @param key The name string to add the data with
1040 * @param value The data to add
1041 * @param cb The optional callback to free the data with once ecore is shut down
1042 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1043 * @return EINA_TRUE on success, EINA_FALSE on failure
1044 * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
1045 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1046 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1047 * was specified for, you will most likely encounter a segv later on.
/* Insert keyed data into the process-wide thread hash, creating the hash
 * lazily under the writer lock. The cond broadcast wakes any thread
 * blocked in ecore_thread_global_data_wait for this key. */
1050 ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1053 Ecore_Thread_Data *d;
1055 if ((!key) || (!value))
1057 #ifdef EFL_HAVE_PTHREAD
1058 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1059 if (!_ecore_thread_global_hash)
1060 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1061 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1063 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1069 if (!_ecore_thread_global_hash)
/* Re-acquire the writer lock for the actual insertion. */
1071 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1073 ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1075 ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1076 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1077 pthread_cond_broadcast(&_ecore_thread_global_hash_cond);
1085 * @brief Add data to the global data
1086 * @param key The name string to add the data with
1087 * @param value The data to add
1088 * @param cb The optional callback to free the data with once ecore is shut down
1089 * @return An @ref Ecore_Thread_Data on success, NULL on failure
1090 * This adds data to the global thread data and returns NULL, or replaces the previous data
1091 * associated with @p key and returning the previous data if it existed. To see if an error occurred,
1092 * one must use eina_error_get.
1093 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1094 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1095 * was specified for, you will most likely encounter a segv later on.
/* Add-or-replace keyed data in the global hash (eina_hash_set), returning
 * the previous entry when one existed; hash created lazily under the
 * writer lock, waiters woken via the cond broadcast. */
1098 ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
1100 Ecore_Thread_Data *d, *r;
1103 if ((!key) || (!value))
1105 #ifdef EFL_HAVE_PTHREAD
1106 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1107 if (!_ecore_thread_global_hash)
1108 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1109 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1111 if (!_ecore_thread_global_hash)
1114 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1120 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1121 r = eina_hash_set(_ecore_thread_global_hash, key, d);
1122 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1123 pthread_cond_broadcast(&_ecore_thread_global_hash_cond);
1134 * @brief Find data in the global data
1135 * @param key The name string the data is associated with
1136 * @return The value, or NULL on error
1137 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1138 * This function will return NULL in any case but success.
1139 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1140 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1141 * was specified for, you will most likely encounter a segv later on.
1142 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1143 * if you will be doing anything with it.
/* Read-locked lookup in the global hash; NULL when the hash does not yet
 * exist or the key is absent. */
1147 ecore_thread_global_data_find(const char *key)
1149 Ecore_Thread_Data *ret;
1152 #ifdef EFL_HAVE_PTHREAD
1153 if (!_ecore_thread_global_hash) return NULL;
1155 pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock);
1156 ret = eina_hash_find(_ecore_thread_global_hash, key);
1157 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1165 * @brief Delete data from the global data
1166 * @param key The name string the data is associated with
1167 * @return EINA_TRUE on success, EINA_FALSE on failure
1168 * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add
1169 * This function will return EINA_FALSE in any case but success.
1170 * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
/* Write-locked removal from the global hash; the entry's free callback is
 * run via _ecore_thread_data_free before eina_hash_del_by_key.
 * NOTE(review): as with the local variant, the hash destructor is also
 * _ecore_thread_data_free — potential double invocation of the user
 * callback; confirm against the full source. */
1173 ecore_thread_global_data_del(const char *key)
1176 Ecore_Thread_Data *d;
1180 #ifdef EFL_HAVE_PTHREAD
1181 if (!_ecore_thread_global_hash)
1184 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1185 if ((d = eina_hash_find(_ecore_thread_global_hash, key)))
1186 _ecore_thread_data_free(d);
1187 ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1188 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1196 * @brief Find data in the global data and optionally wait for the data if not found
1197 * @param key The name string the data is associated with
1198 * @param seconds The amount of time in seconds to wait for the data. If 0, the call will be async and not wait for data.
1199 * If < 0 the call will wait indefinitely for the data.
1200 * @return The value, or NULL on failure
1201 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1202 * This function will return NULL in any case but success.
1203 * Use @p seconds to specify the amount of time to wait. Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
1204 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1205 * if you will be doing anything with it.
1208 ecore_thread_global_data_wait(const char *key, double seconds)
1211 Ecore_Thread_Data *ret = NULL;
1214 #ifdef EFL_HAVE_PTHREAD
1215 if (!_ecore_thread_global_hash)
1218 time = ecore_time_get() + seconds;
1222 struct timespec t = { 0, 0 };
1224 t.tv_sec = (long int)time;
1225 t.tv_nsec = (long int)((time - (double)t.tv_sec) * 1000000000);
1226 pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock);
1227 ret = eina_hash_find(_ecore_thread_global_hash, key);
1228 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1229 if ((ret) || (!seconds) || ((seconds > 0) && (time <= ecore_time_get())))
1231 pthread_mutex_lock(&_ecore_thread_global_hash_mutex);
1232 pthread_cond_timedwait(&_ecore_thread_global_hash_cond, &_ecore_thread_global_hash_mutex, &t);
1233 pthread_mutex_unlock(&_ecore_thread_global_hash_mutex);
1235 if (ret) return ret->data;