9 #ifdef EFL_HAVE_PTHREAD
13 # define _GNU_SOURCE 1
16 # include <sys/time.h>
17 # include <sys/resource.h>
19 # include <sys/syscall.h>
25 #include "ecore_private.h"
27 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
28 typedef struct _Ecore_Pthread Ecore_Pthread;
29 typedef struct _Ecore_Thread_Data Ecore_Thread_Data;
/* Per-key data entry stored in a thread's hash (or the global hash):
 * user payload plus an optional free callback (see _ecore_thread_data_free). */
31 struct _Ecore_Thread_Data
/* One scheduled unit of work: either a short-run job (func_blocking) or a
 * long-run job (func_heavy + func_notify), plus end/cancel callbacks and
 * the sync primitives used while the job is alive. */
37 struct _Ecore_Pthread_Worker
41 Ecore_Cb func_blocking;
44 Ecore_Thread_Heavy_Cb func_heavy;
45 Ecore_Thread_Notify_Cb func_notify;
52 #ifdef EFL_HAVE_PTHREAD
56 pthread_mutex_t mutex;
/* Set for workers created through ecore_long_run(). */
62 Eina_Bool long_run : 1;
65 #ifdef EFL_HAVE_PTHREAD
66 typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;
/* Per pool-thread bookkeeping: the pthread id and its result pipe back to
 * the main loop. */
68 struct _Ecore_Pthread_Data
/* Pool size limit; set from eina_cpu_count() in _ecore_thread_init(). */
76 static int _ecore_thread_count_max = 0;
/* Ecore event type used to defer pipe destruction (see _ecore_thread_pipe_del). */
77 static int ECORE_THREAD_PIPE_DEL = 0;
79 #ifdef EFL_HAVE_PTHREAD
/* Live worker-thread count; guarded by _ecore_pending_job_threads_mutex. */
80 static int _ecore_thread_count = 0;
82 static Eina_List *_ecore_active_job_threads = NULL;
83 static Eina_List *_ecore_pending_job_threads = NULL;
84 static Eina_List *_ecore_pending_job_threads_long = NULL;
85 static Ecore_Event_Handler *del_handler = NULL;
/* Protects the job lists above and _ecore_thread_count. */
86 static pthread_mutex_t _ecore_pending_job_threads_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Global key/value store shared by all threads, its rwlock, and the
 * mutex/cond pair used by ecore_thread_global_data_wait(). */
88 static Eina_Hash *_ecore_thread_global_hash = NULL;
89 static pthread_rwlock_t _ecore_thread_global_hash_lock = PTHREAD_RWLOCK_INITIALIZER;
90 static pthread_mutex_t _ecore_thread_global_hash_mutex = PTHREAD_MUTEX_INITIALIZER;
91 static pthread_cond_t _ecore_thread_global_hash_cond = PTHREAD_COND_INITIALIZER;
/* Identity of the main-loop thread, captured in _ecore_thread_init() and
 * used by ecore_thread_cancel() to restrict main-loop-only paths. */
92 static pthread_t main_loop_thread;
93 static Eina_Bool have_main_loop_thread = 0;
/* Eina_Hash free callback for Ecore_Thread_Data entries: run the
 * user-supplied free cb (if any) on the stored payload. */
95 _ecore_thread_data_free(void *data)
97 Ecore_Thread_Data *d = data;
/* A NULL cb means the user owns and frees the payload themselves. */
99 if (d->cb) d->cb(d->data);
/* ECORE_THREAD_PIPE_DEL event free callback: the event payload is the
 * Ecore_Pipe itself, destroyed once the event is processed. */
104 _ecore_thread_pipe_free(void *data __UNUSED__, void *event)
106 Ecore_Pipe *p = event;
/* Event handler for ECORE_THREAD_PIPE_DEL: consumes the event exactly once
 * so the pipe is destroyed only after ecore leaves the pipe's handler. */
112 _ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
114 /* This is a hack to delay pipe destruction until we are out of its internal loop. */
115 return ECORE_CALLBACK_CANCEL;
/* Main-loop-side teardown for a finished pool thread: join it, drop it from
 * the active list, and queue its pipe for deferred destruction. */
119 _ecore_thread_end(Ecore_Pthread_Data *pth)
/* The join must succeed before the thread's resources may be reclaimed. */
123 if (pthread_join(pth->thread, (void **) &p) != 0)
126 _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);
/* Pipe destruction is deferred through ECORE_THREAD_PIPE_DEL because this
 * code may still be running inside that very pipe's handler. */
128 ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
/* Pipe read callback, runs in the main loop when a worker reports a job:
 * dispatch func_cancel or func_end and release the worker's resources. */
133 _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
135 Ecore_Pthread_Worker *work;
/* Each message on the pipe is exactly one worker pointer. */
137 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
139 work = *(Ecore_Pthread_Worker **)buffer;
/* Cancelled jobs get func_cancel... */
143 if (work->func_cancel)
144 work->func_cancel((void *) work->data);
/* ...completed jobs get func_end. */
149 work->func_end((void *) work->data);
/* Long-run workers also own a notify pipe which dies with them. */
153 ecore_pipe_del(work->u.long_run.notify);
154 pthread_cond_destroy(&work->cond);
155 pthread_mutex_destroy(&work->mutex);
/* Frees thread-local data added via ecore_thread_local_data_add(); the hash's
 * free callback (_ecore_thread_data_free) runs each entry's user cb. */
157 eina_hash_free(work->hash);
/* Pipe read callback for a long-run worker's notify pipe: forwards the
 * pointer sent by ecore_thread_notify() to func_notify in the main loop. */
162 _ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
164 Ecore_Pthread_Worker *work = data;
/* NOTE(review): the payload written is a void * (see ecore_thread_notify),
 * but the size check compares against sizeof (Ecore_Pthread_Worker *) —
 * identical size in practice, yet sizeof (void *) would state intent. */
167 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
169 user_data = *(void **)buffer;
171 if (work->u.long_run.func_notify)
172 work->u.long_run.func_notify((Ecore_Thread *) work, user_data, (void *) work->data);
/* Worker-thread loop draining the short-job queue: pop one job at a time
 * under the mutex, run it unlocked, and report completion on end_pipe. */
176 _ecore_short_job(Ecore_Pipe *end_pipe)
178 Ecore_Pthread_Worker *work;
180 while (_ecore_pending_job_threads)
182 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Re-check under the lock: another worker may have emptied the queue
 * between the unlocked while-test and lock acquisition. */
184 if (!_ecore_pending_job_threads)
186 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
190 work = eina_list_data_get(_ecore_pending_job_threads);
191 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, _ecore_pending_job_threads);
193 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Run the user job outside the lock. */
196 work->u.short_run.func_blocking((void *) work->data);
/* Hand the finished worker back to the main loop (_ecore_thread_handler). */
198 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Worker-thread loop draining the long-job queue; mirrors _ecore_short_job
 * but runs func_heavy and passes the worker as the Ecore_Thread handle. */
203 _ecore_long_job(Ecore_Pipe *end_pipe, pthread_t thread)
205 Ecore_Pthread_Worker *work;
207 while (_ecore_pending_job_threads_long)
209 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Re-check under the lock; the unlocked while-test may be stale. */
211 if (!_ecore_pending_job_threads_long)
213 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
217 work = eina_list_data_get(_ecore_pending_job_threads_long);
218 _ecore_pending_job_threads_long = eina_list_remove_list(_ecore_pending_job_threads_long, _ecore_pending_job_threads_long);
220 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Run the heavy user job outside the lock. */
224 work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
/* Report completion to the main loop via the thread's end pipe. */
226 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
230 /* Lower priority of current thread.
232 * It's used by worker threads so they use up "bg cpu" as it was really intended
233 * to work. If current thread is running with real-time priority, we decrease
234 * our priority by 5. This is done in a portable way. Otherwise we are
235 * running with SCHED_OTHER policy and there's no portable way to set the nice
236 * level on current thread. In Linux, it does work and it's the only one that is
240 _ecore_thread_pri_drop(void)
242 struct sched_param param;
245 pthread_t pthread_id;
247 pthread_id = pthread_self();
/* NOTE(review): "¶m" below is mojibake for "&param" (encoding damage in
 * this copy of the file) — must be restored before this compiles. */
248 ret = pthread_getschedparam(pthread_id, &pol, ¶m);
251 ERR("Unable to query sched parameters");
/* Real-time policies (SCHED_RR/SCHED_FIFO): adjust sched_priority portably.
 * NOTE(review): for these policies a larger sched_priority is a HIGHER
 * priority, so "+= 5" looks like it raises rather than drops priority,
 * contradicting the header comment — confirm against upstream intent. */
255 if (EINA_UNLIKELY(pol == SCHED_RR || pol == SCHED_FIFO))
257 prio = sched_get_priority_max(pol);
258 param.sched_priority += 5;
/* Clamp to the policy's maximum priority value. */
259 if (prio > 0 && param.sched_priority > prio)
260 param.sched_priority = prio;
262 pthread_setschedparam(pthread_id, pol, ¶m);
/* SCHED_OTHER: fall back to nice levels (non-portable; works on Linux). */
268 prio = getpriority(PRIO_PROCESS, 0);
275 setpriority(PRIO_PROCESS, 0, prio);
/* Thread entry for a long-run job started outside the pool (ecore_long_run
 * with try_no_queue): run one func_heavy to completion, report the result,
 * then send a synthetic worker so the main loop joins and reclaims us. */
282 _ecore_direct_worker(Ecore_Pthread_Worker *work)
284 Ecore_Pthread_Data *pth;
/* Allow asynchronous cancellation so ecore_thread_cancel/shutdown can stop us. */
286 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
287 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
288 _ecore_thread_pri_drop();
290 pth = malloc(sizeof (Ecore_Pthread_Data));
291 if (!pth) return NULL;
/* Private result pipe back to the main loop for this one thread. */
293 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
299 pth->thread = pthread_self();
/* Record our identity so ecore_thread_notify()/local_data_* can verify
 * they are called from inside this thread. */
301 work->self = pth->thread;
302 work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
/* First message: deliver the finished job to _ecore_thread_handler. */
304 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Second message: a synthetic worker whose func_end is _ecore_thread_end,
 * prompting the main loop to pthread_join() this thread and free pth. */
306 work = malloc(sizeof (Ecore_Pthread_Worker));
309 ecore_pipe_del(pth->p);
315 work->u.short_run.func_blocking = NULL;
316 work->func_end = (void *) _ecore_thread_end;
317 work->func_cancel = NULL;
318 work->cancel = EINA_FALSE;
319 work->long_run = EINA_FALSE;
321 pthread_cond_init(&work->cond, NULL);
322 pthread_mutex_init(&work->mutex, NULL);
324 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Pool-thread entry point: register in the live count, drain the short and
 * long job queues, and when both are empty announce our own termination to
 * the main loop via a synthetic _ecore_thread_end worker. */
330 _ecore_thread_worker(Ecore_Pthread_Data *pth)
332 Ecore_Pthread_Worker *work;
/* Allow asynchronous cancellation (used by shutdown/cancel paths). */
334 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
335 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
336 _ecore_thread_pri_drop();
338 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
339 _ecore_thread_count++;
340 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Drain whichever queues have work; the helpers loop internally. */
343 if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
344 if (_ecore_pending_job_threads_long) _ecore_long_job(pth->p, pth->thread);
346 /* FIXME: Check if there is long running task todo, and switch to long run handler. */
/* Re-check both queues under the lock before deciding to exit: new jobs
 * may have been queued while we finished the last one. */
348 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
349 if (_ecore_pending_job_threads)
351 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
354 if (_ecore_pending_job_threads_long)
356 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Nothing left: deregister this thread... */
360 _ecore_thread_count--;
362 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* ...and send a synthetic worker whose func_end is _ecore_thread_end so
 * the main loop joins us and reclaims pth. */
364 work = malloc(sizeof (Ecore_Pthread_Worker));
365 if (!work) return NULL;
368 work->u.short_run.func_blocking = NULL;
369 work->func_end = (void *) _ecore_thread_end;
370 work->func_cancel = NULL;
371 work->cancel = EINA_FALSE;
372 work->long_run = EINA_FALSE;
374 pthread_cond_init(&work->cond, NULL);
375 pthread_mutex_init(&work->mutex, NULL);
377 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Module init: size the pool from the CPU count, allocate the pipe-delete
 * event type, and record which thread owns the main loop. */
385 _ecore_thread_init(void)
387 _ecore_thread_count_max = eina_cpu_count();
/* Guarantee at least one worker even if the CPU count is unknown. */
388 if (_ecore_thread_count_max <= 0)
389 _ecore_thread_count_max = 1;
391 ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
392 #ifdef EFL_HAVE_PTHREAD
393 del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
/* Captured so ecore_thread_cancel() can detect main-loop-only callers. */
394 main_loop_thread = pthread_self();
395 have_main_loop_thread = 1;
/* Module shutdown: cancel-notify all still-pending short jobs, forcibly
 * cancel and join every active pool thread, then free the global hash and
 * the pipe-delete event handler. */
400 _ecore_thread_shutdown(void)
402 /* FIXME: If function are still running in the background, should we kill them ? */
403 #ifdef EFL_HAVE_PTHREAD
404 Ecore_Pthread_Worker *work;
405 Ecore_Pthread_Data *pth;
407 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Jobs that never started only get their cancel callback. */
409 EINA_LIST_FREE(_ecore_pending_job_threads, work)
411 if (work->func_cancel)
412 work->func_cancel((void *)work->data);
416 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
/* Running threads are cancelled and joined before their pipes are freed. */
418 EINA_LIST_FREE(_ecore_active_job_threads, pth)
422 pthread_cancel(pth->thread);
423 pthread_join(pth->thread, (void **) &p);
425 ecore_pipe_del(pth->p);
/* Global data entries are released through _ecore_thread_data_free. */
427 if (_ecore_thread_global_hash)
428 eina_hash_free(_ecore_thread_global_hash);
429 ecore_event_handler_del(del_handler);
430 have_main_loop_thread = 0;
435 * @addtogroup Ecore_Thread Ecore Thread Functions
436 * These functions allow for ecore-managed threads which integrate with ecore's main loop.
441 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
442 * @param func_blocking The function that should run in another thread.
443 * @param func_end The function that will be called in the main loop if the thread terminate correctly.
444 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
445 * @param data User context data to pass to all callback.
446 * @return A reference to the newly created thread instance, or NULL if it failed.
448 * ecore_thread_run provide a facility for easily managing blocking task in a
449 * parallel thread. You should provide three function. The first one, func_blocking,
450 * that will do the blocking work in another thread (so you should not use the
451 * EFL in it except Eina if you are careful). The second one, func_end,
452 * that will be called in Ecore main loop when func_blocking is done. So you
453 * can use all the EFL inside this function. The last one, func_cancel, will
454 * be called in the main loop if the thread is cancelled or could not run at all.
456 * Be aware, that you can't make assumption on the result order of func_end
457 * after many call to ecore_thread_run, as we start as much thread as the
458 * host CPU can handle.
/* Public API: queue func_blocking for execution on a pool thread, spawning a
 * new worker if the pool is below _ecore_thread_count_max; func_end or
 * func_cancel is later invoked in the main loop (see the Doxygen block). */
461 ecore_thread_run(Ecore_Cb func_blocking,
463 Ecore_Cb func_cancel,
466 #ifdef EFL_HAVE_PTHREAD
467 Ecore_Pthread_Worker *work;
468 Ecore_Pthread_Data *pth = NULL;
470 if (!func_blocking) return NULL;
/* On allocation failure the caller is still informed via func_cancel. */
472 work = malloc(sizeof (Ecore_Pthread_Worker));
475 func_cancel((void *) data);
479 work->u.short_run.func_blocking = func_blocking;
481 pthread_cond_init(&work->cond, NULL);
482 pthread_mutex_init(&work->mutex, NULL);
483 work->func_end = func_end;
484 work->func_cancel = func_cancel;
485 work->cancel = EINA_FALSE;
486 work->long_run = EINA_FALSE;
/* Queue the job; an existing idle worker will pick it up. */
489 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
490 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
/* Pool already at capacity: leave the job queued and return. */
492 if (_ecore_thread_count == _ecore_thread_count_max)
494 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
495 return (Ecore_Thread *) work;
498 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
500 /* One more thread could be created. */
501 pth = malloc(sizeof (Ecore_Pthread_Data));
502 if (!pth) goto on_error;
504 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
505 if (!pth->p) goto on_error;
507 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
508 return (Ecore_Thread *) work;
/* Error path: release partially created resources; if at least one worker
 * already exists the queued job can still be consumed by it. */
513 if (pth->p) ecore_pipe_del(pth->p);
/* No worker at all: the job can never run, so cancel it now. */
517 if (_ecore_thread_count == 0)
519 if (work->func_cancel)
520 work->func_cancel((void *) work->data);
524 return (Ecore_Thread *) work;
527 If no thread and as we don't want to break app that rely on this
528 facility, we will lock the interface until we are done.
/* Non-pthread build: degrade to a synchronous call in the main loop. */
530 func_blocking((void *)data);
531 func_end((void *)data);
538 * @brief Cancel a running thread.
539 * @param thread The thread to cancel.
540 * @return Will return EINA_TRUE if the thread has been cancelled,
541 * EINA_FALSE if it is pending.
543 * ecore_thread_cancel give the possibility to cancel a task still running. It
544 * will return EINA_FALSE, if the destruction is delayed or EINA_TRUE if it is
545 * cancelled after this call.
547 * You should use this function only in the main loop.
549 * func_end, func_cancel will destroy the handler, so don't use it after.
550 * And if ecore_thread_cancel return EINA_TRUE, you should not use Ecore_Thread also.
/* Public API: cancel a queued or running job. Returns EINA_TRUE only when
 * the job was still pending and could be removed outright; a running job is
 * merely flagged and must poll ecore_thread_check(). */
553 ecore_thread_cancel(Ecore_Thread *thread)
555 #ifdef EFL_HAVE_PTHREAD
556 Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
562 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
/* Only the main-loop thread may unqueue a still-pending job directly. */
564 if ((have_main_loop_thread) &&
565 (pthread_equal(main_loop_thread, pthread_self())))
567 EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
569 if ((void *) work == (void *) thread)
/* Found while still queued: remove, notify func_cancel, report success. */
571 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
573 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
575 if (work->func_cancel)
576 work->func_cancel((void *) work->data);
584 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
586 /* Delay the destruction */
/* Already running (or caller not in main loop): just set the flag. */
587 ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
595 * @brief Tell if a thread was canceled or not.
596 * @param thread The thread to test.
597 * @return EINA_TRUE if the thread is cancelled,
598 * EINA_FALSE if it is not.
600 * You can use this function in main loop and in the thread.
/* Public API: report whether a job has been flagged as cancelled.
 * Safe to call from the main loop or from inside the worker thread. */
603 ecore_thread_check(Ecore_Thread *thread)
605 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
/* A NULL handle is treated as already cancelled. */
607 if (!worker) return EINA_TRUE;
608 return worker->cancel;
612 * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
613 * @param func_heavy The function that should run in another thread.
614 * @param func_notify The function that will receive the data send by func_heavy in the main loop.
615 * @param func_end The function that will be called in the main loop if the thread terminate correctly.
616 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
617 * @param data User context data to pass to all callback.
618 * @param try_no_queue If you want to run outside of the thread pool.
619 * @return A reference to the newly created thread instance, or NULL if it failed.
621 * ecore_long_run provide a facility for easily managing heavy task in a
622 * parallel thread. You should provide four functions. The first one, func_heavy,
623 * that will do the heavy work in another thread (so you should not use the
624 * EFL in it except Eina and Eet if you are careful). The second one, func_notify,
625 * will receive the data send from the thread function (func_heavy) by ecore_thread_notify
626 * in the main loop (and so, can use all the EFL). The third, func_end,
627 * that will be called in Ecore main loop when func_heavy is done. So you
628 * can use all the EFL inside this function. The last one, func_cancel, will
629 * be called in the main loop also, if the thread is cancelled or could not run at all.
631 * Be aware, that you can't make assumption on the result order of func_end
632 * after many call to ecore_long_run, as we start as much thread as the
633 * host CPU can handle.
635 * If you set try_no_queue, it will try to run outside of the thread pool, this can bring
636 * the CPU down, so be careful with that. Of course if it can't start a new thread, it will
637 * try to use one from the pool.
/* Public API: schedule func_heavy on a thread with a notify channel back to
 * the main loop. With try_no_queue set, first try a dedicated thread
 * (_ecore_direct_worker); otherwise (or on failure) fall back to the pool. */
639 EAPI Ecore_Thread *ecore_long_run(Ecore_Thread_Heavy_Cb func_heavy,
640 Ecore_Thread_Notify_Cb func_notify,
642 Ecore_Cb func_cancel,
644 Eina_Bool try_no_queue)
647 #ifdef EFL_HAVE_PTHREAD
648 Ecore_Pthread_Worker *worker;
649 Ecore_Pthread_Data *pth = NULL;
651 if (!func_heavy) return NULL;
653 worker = malloc(sizeof (Ecore_Pthread_Worker));
654 if (!worker) goto on_error;
656 worker->u.long_run.func_heavy = func_heavy;
657 worker->u.long_run.func_notify = func_notify;
659 pthread_cond_init(&worker->cond, NULL);
660 pthread_mutex_init(&worker->mutex, NULL);
661 worker->func_cancel = func_cancel;
662 worker->func_end = func_end;
664 worker->cancel = EINA_FALSE;
665 worker->long_run = EINA_TRUE;
/* Dedicated pipe carrying ecore_thread_notify() payloads to func_notify. */
667 worker->u.long_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
/* try_no_queue path: spawn a dedicated thread outside the pool. */
673 if (pthread_create(&t, NULL, (void *) _ecore_direct_worker, worker) == 0)
674 return (Ecore_Thread *) worker;
/* Pool path: queue on the long-job list. */
677 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
678 _ecore_pending_job_threads_long = eina_list_append(_ecore_pending_job_threads_long, worker);
/* Pool already at capacity: leave the job queued and return. */
680 if (_ecore_thread_count == _ecore_thread_count_max)
682 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
683 return (Ecore_Thread *) worker;
686 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
688 /* One more thread could be created. */
689 pth = malloc(sizeof (Ecore_Pthread_Data));
690 if (!pth) goto on_error;
692 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
693 if (!pth->p) goto on_error;
695 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
696 return (Ecore_Thread *) worker;
/* Error path: release partial resources; cancel outright only when no
 * worker exists to ever consume the queued job. */
701 if (pth->p) ecore_pipe_del(pth->p);
705 if (_ecore_thread_count == 0)
707 if (func_cancel) func_cancel((void *) data);
711 ecore_pipe_del(worker->u.long_run.notify);
717 return (Ecore_Thread *) worker;
/* Non-pthread build: run synchronously with a stack-allocated worker. */
719 Ecore_Pthread_Worker worker;
724 If no thread and as we don't want to break app that rely on this
725 facility, we will lock the interface until we are done.
727 worker.u.long_run.func_heavy = func_heavy;
728 worker.u.long_run.func_notify = func_notify;
729 worker.u.long_run.notify = NULL;
730 worker.func_cancel = func_cancel;
731 worker.func_end = func_end;
733 worker.cancel = EINA_FALSE;
734 worker.long_run = EINA_TRUE;
736 func_heavy((Ecore_Thread *) &worker, (void *)data);
/* func_heavy may have flagged cancellation via ecore_thread_cancel(). */
738 if (worker.cancel) func_cancel((void *)data);
739 else func_end((void *)data);
746 * @brief Send data to main loop from worker thread.
747 * @param thread The current Ecore_Thread context to send data from
748 * @param data Data to be transmitted to the main loop
749 * @return EINA_TRUE if data was successfully send to main loop,
750 * EINA_FALSE if anything goes wrong.
752 * After a successful call, the data should be considered owned
755 * You should use this function only in the func_heavy call.
/* Public API: send a pointer from inside func_heavy to func_notify in the
 * main loop; ownership of data passes to the main loop on success. */
758 ecore_thread_notify(Ecore_Thread *thread, const void *data)
760 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
762 if (!worker) return EINA_FALSE;
/* Only long-run workers own a notify pipe. */
763 if (!worker->long_run) return EINA_FALSE;
765 #ifdef EFL_HAVE_PTHREAD
/* Must be called from inside the worker's own thread (i.e. func_heavy). */
766 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
768 ecore_pipe_write(worker->u.long_run.notify, &data, sizeof (void *));
/* Non-pthread build: everything is synchronous, deliver directly. */
772 worker->u.long_run.func_notify(thread, (void*) data, (void*) worker->data);
779 * @brief Get number of active thread jobs
780 * @return Number of active threads running jobs
781 * This returns the number of threads currently running jobs through the
/* Public API: number of worker threads currently alive (0 without pthread).
 * NOTE(review): read without taking _ecore_pending_job_threads_mutex. */
785 ecore_thread_active_get(void)
787 #ifdef EFL_HAVE_PTHREAD
788 return _ecore_thread_count;
795 * @brief Get number of pending (short) thread jobs
796 * @return Number of pending threads running "short" jobs
797 * This returns the number of threads currently running jobs through the
798 * ecore_thread_run api call.
/* Public API: count of short jobs still queued via ecore_thread_run(). */
801 ecore_thread_pending_get(void)
804 #ifdef EFL_HAVE_PTHREAD
/* Lock so the list cannot be mutated while being counted. */
805 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
806 ret = eina_list_count(_ecore_pending_job_threads);
807 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
815 * @brief Get number of pending long thread jobs
816 * @return Number of pending threads running "long" jobs
817 * This returns the number of threads currently running jobs through the
818 * ecore_long_run api call.
/* Public API: count of long jobs still queued via ecore_long_run(). */
821 ecore_thread_pending_long_get(void)
824 #ifdef EFL_HAVE_PTHREAD
/* Lock so the list cannot be mutated while being counted. */
825 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
826 ret = eina_list_count(_ecore_pending_job_threads_long);
827 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
835 * @brief Get number of pending thread jobs
836 * @return Number of pending threads running jobs
837 * This returns the number of threads currently running jobs through the
838 * ecore_thread_run and ecore_long_run api calls combined.
/* Public API: combined count of queued short and long jobs. */
841 ecore_thread_pending_total_get(void)
844 #ifdef EFL_HAVE_PTHREAD
/* Single lock covers both lists so the sum is consistent. */
845 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
846 ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_long);
847 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
855 * @brief Get the max number of threads that can run simultaneously
856 * @return Max number of threads ecore will run
857 * This returns the total number of threads that ecore will attempt to run
/* Public API: current pool size limit. */
861 ecore_thread_max_get(void)
863 return _ecore_thread_count_max;
867 * @brief Set the max number of threads that can run simultaneously
868 * @param num The new maximum
869 * This sets the maximum number of threads that ecore will try to run
870 * simultaneously. This number cannot be < 1 or >= 2x the number of active cpus.
/* Public API: set the pool size limit. Rejects >= 2x the CPU count here;
 * the documented lower bound (num < 1) is checked on an elided line. */
873 ecore_thread_max_set(int num)
876 /* avoid doing something hilarious by blocking dumb users */
877 if (num >= (2 * eina_cpu_count())) return;
879 _ecore_thread_count_max = num;
883 * @brief Reset the max number of threads that can run simultaneously
884 * This resets the maximum number of threads that ecore will try to run
885 * simultaneously to the number of active cpus.
/* Public API: restore the pool size limit to the active CPU count. */
888 ecore_thread_max_reset(void)
890 _ecore_thread_count_max = eina_cpu_count();
894 * @brief Get the number of threads which are available to be used
895 * @return The number of available threads
896 * This returns the number of threads slots that ecore has currently available.
897 * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
898 * this should be equal to (num_cpus - (active_running + active_long_running))
/* Public API: remaining worker slots (max minus currently alive threads). */
901 ecore_thread_available_get(void)
904 #ifdef EFL_HAVE_PTHREAD
/* Lock so count and max are read consistently against worker start/exit. */
905 pthread_mutex_lock(&_ecore_pending_job_threads_mutex);
906 ret = _ecore_thread_count_max - _ecore_thread_count;
907 pthread_mutex_unlock(&_ecore_pending_job_threads_mutex);
915 * @brief Add data to the thread for subsequent use
916 * @param thread The thread context to add to
917 * @param key The name string to add the data with
918 * @param value The data to add
919 * @param cb The callback to free the data with
920 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
921 * @return EINA_TRUE on success, EINA_FALSE on failure
922 * This adds data to the thread context, allowing the thread
923 * to retrieve and use it without complicated mutexing. This function can only be called by a
924 * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
925 * All data added to the thread will be freed with its associated callback (if present)
926 * upon thread termination. If no callback is specified, it is expected that the user will free the
927 * data, but this is most likely not what you want.
/* Public API: attach keyed data to the worker's private hash. Only callable
 * from inside the worker's own thread; entries are freed (via their cb) by
 * _ecore_thread_data_free when the hash is destroyed at job end. */
930 ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
932 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
933 Ecore_Thread_Data *d;
936 if ((!thread) || (!key) || (!value))
938 #ifdef EFL_HAVE_PTHREAD
/* Enforce the inside-the-thread-only contract. */
939 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
/* Lazily create the per-worker hash on first use. */
942 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
947 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
/* direct skips copying the key string (eina_hash_direct_add semantics). */
954 ret = eina_hash_direct_add(worker->hash, key, d);
956 ret = eina_hash_add(worker->hash, key, d);
/* Wake anyone blocked on this worker's cond waiting for data to appear
 * — presumably a data-wait path; confirm against the full source. */
957 pthread_cond_broadcast(&worker->cond);
965 * @brief Modify data in the thread, or add if not found
966 * @param thread The thread context
967 * @param key The name string to add the data with
968 * @param value The data to add
969 * @param cb The callback to free the data with
970 * @return The old data associated with @p key on success if modified, NULL if added
971 * This adds/modifies data in the thread context, adding only if modify fails.
972 * This function can only be called by a *_run thread INSIDE the thread.
973 * All data added to the thread pool will be freed with its associated callback (if present)
974 * upon thread termination. If no callback is specified, it is expected that the user will free the
975 * data, but this is most likely not what you want.
/* Public API: set (add or replace) keyed data in the worker's private hash;
 * returns the previously stored payload if one was replaced. Only callable
 * from inside the worker's own thread. */
978 ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
980 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
981 Ecore_Thread_Data *d, *r;
983 if ((!thread) || (!key) || (!value))
985 #ifdef EFL_HAVE_PTHREAD
/* Enforce the inside-the-thread-only contract. */
986 if (!pthread_equal(worker->self, pthread_self())) return NULL;
/* Lazily create the per-worker hash on first use. */
989 worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
994 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
/* eina_hash_set returns the entry previously stored under key (or NULL). */
1000 r = eina_hash_set(worker->hash, key, d);
/* Wake potential waiters on this worker's cond. */
1001 pthread_cond_broadcast(&worker->cond);
1011 * @brief Find data in the thread's data
1012 * @param thread The thread context
1013 * @param key The name string the data is associated with
1014 * @return The value, or NULL on error
1015 * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
1016 * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
1017 * in any case but success.
/* Public API: look up keyed data in the worker's private hash. Only
 * callable from inside the worker's own thread; returns NULL otherwise. */
1021 ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
1023 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1024 Ecore_Thread_Data *d;
1026 if ((!thread) || (!key))
1028 #ifdef EFL_HAVE_PTHREAD
/* Enforce the inside-the-thread-only contract. */
1029 if (!pthread_equal(worker->self, pthread_self())) return NULL;
1034 d = eina_hash_find(worker->hash, key);
1042 * @brief Delete data from the thread's data
1043 * @param thread The thread context
1044 * @param key The name string the data is associated with
1045 * @return EINA_TRUE on success, EINA_FALSE on failure
1046 * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
1047 * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
1048 * in any case but success. Note that this WILL free the data if a callback was specified.
/* Public API: remove keyed data from the worker's private hash, running the
 * entry's free callback. Only callable from inside the worker's thread. */
1051 ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
1053 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1054 Ecore_Thread_Data *d;
1055 if ((!thread) || (!key))
1057 #ifdef EFL_HAVE_PTHREAD
/* Enforce the inside-the-thread-only contract. */
1058 if (!pthread_equal(worker->self, pthread_self())) return EINA_FALSE;
/* NOTE(review): _ecore_thread_data_free is run here AND is the hash's own
 * free callback invoked by del_by_key — the user cb may fire twice; confirm
 * against the elided lines / upstream. */
1062 if ((d = eina_hash_find(worker->hash, key)))
1063 _ecore_thread_data_free(d);
1064 return eina_hash_del_by_key(worker->hash, key);
1071 * @brief Add data to the global data
1072 * @param key The name string to add the data with
1073 * @param value The data to add
1074 * @param cb The optional callback to free the data with once ecore is shut down
1075 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1076 * @return EINA_TRUE on success, EINA_FALSE on failure
1077 * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
1078 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1079 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1080 * was specified for, you will most likely encounter a segv later on.
/* Public API: add keyed data to the process-wide hash shared by all
 * threads, then wake any threads blocked in ecore_thread_global_data_wait(). */
1083 ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1086 Ecore_Thread_Data *d;
1088 if ((!key) || (!value))
1090 #ifdef EFL_HAVE_PTHREAD
/* Lazily create the shared hash under the write lock. */
1091 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1092 if (!_ecore_thread_global_hash)
1093 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1094 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1096 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1102 if (!_ecore_thread_global_hash)
/* Re-acquire the write lock for the insert itself. */
1104 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
/* direct skips copying the key string (eina_hash_direct_add semantics). */
1106 ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1108 ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1109 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
/* Signal ecore_thread_global_data_wait() that new data may be available. */
1110 pthread_cond_broadcast(&_ecore_thread_global_hash_cond);
1118 * @brief Add data to the global data
1119 * @param key The name string to add the data with
1120 * @param value The data to add
1121 * @param cb The optional callback to free the data with once ecore is shut down
1122 * @return An Ecore_Thread_Data on success, NULL on failure
1123 * This adds data to the global thread data and returns NULL, or replaces the previous data
1124 * associated with @p key and returning the previous data if it existed. To see if an error occurred,
1125 * one must use eina_error_get.
1126 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1127 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1128 * was specified for, you will most likely encounter a segv later on.
/* Public API: set (add or replace) keyed data in the process-wide hash;
 * returns the previously stored payload if one was replaced, then wakes
 * threads blocked in ecore_thread_global_data_wait(). */
1131 ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
1133 Ecore_Thread_Data *d, *r;
1136 if ((!key) || (!value))
1138 #ifdef EFL_HAVE_PTHREAD
/* Lazily create the shared hash under the write lock. */
1139 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1140 if (!_ecore_thread_global_hash)
1141 _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1142 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
/* Creation may have failed; bail out of the pthread path. */
1144 if (!_ecore_thread_global_hash)
1147 if (!(d = malloc(sizeof(Ecore_Thread_Data))))
/* eina_hash_set returns the entry previously stored under key (or NULL). */
1153 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
1154 r = eina_hash_set(_ecore_thread_global_hash, key, d);
1155 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
/* Signal ecore_thread_global_data_wait() that new data may be available. */
1156 pthread_cond_broadcast(&_ecore_thread_global_hash_cond);
1167 * @brief Find data in the global data
1168 * @param key The name string the data is associated with
1169 * @return The value, or NULL on error
1170 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1171 * This function will return NULL in any case but success.
1172 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1173 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
1174 * was specified for, you will most likely encounter a segv later on.
1175 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1176 * if you will be doing anything with it.
/* Public API: look up keyed data in the process-wide hash (read lock only). */
1180 ecore_thread_global_data_find(const char *key)
1182 Ecore_Thread_Data *ret;
1185 #ifdef EFL_HAVE_PTHREAD
/* No hash yet means nothing was ever added. */
1186 if (!_ecore_thread_global_hash) return NULL;
/* A read lock suffices for lookup. */
1188 pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock);
1189 ret = eina_hash_find(_ecore_thread_global_hash, key);
1190 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1198 * @brief Delete data from the global data
1199 * @param key The name string the data is associated with
1200 * @return EINA_TRUE on success, EINA_FALSE on failure
1201 * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add
1202 * This function will return EINA_FALSE in any case but success.
1203 * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
/* Public API: remove keyed data from the process-wide hash, running the
 * entry's free callback, under the write lock. */
1206 ecore_thread_global_data_del(const char *key)
1209 Ecore_Thread_Data *d;
1213 #ifdef EFL_HAVE_PTHREAD
1214 if (!_ecore_thread_global_hash)
1217 pthread_rwlock_wrlock(&_ecore_thread_global_hash_lock);
/* NOTE(review): _ecore_thread_data_free is run here AND is the hash's own
 * free callback invoked by del_by_key — the user cb may fire twice; confirm
 * against the elided lines / upstream. */
1218 if ((d = eina_hash_find(_ecore_thread_global_hash, key)))
1219 _ecore_thread_data_free(d);
1220 ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1221 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1229 * @brief Find data in the global data and optionally wait for the data if not found
1230 * @param key The name string the data is associated with
1231 * @param seconds The amount of time in seconds to wait for the data. If 0, the call will be async and not wait for data.
1232 * If < 0 the call will wait indefinitely for the data.
1233 * @return The value, or NULL on failure
1234 * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1235 * This function will return NULL in any case but success.
1236 * Use @p seconds to specify the amount of time to wait. Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
1237 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1238 * if you will be doing anything with it.
1241 ecore_thread_global_data_wait(const char *key, double seconds)
1244 Ecore_Thread_Data *ret = NULL;
1247 #ifdef EFL_HAVE_PTHREAD
1248 if (!_ecore_thread_global_hash)
1251 time = ecore_time_get() + seconds;
1255 struct timespec t = { 0, 0 };
1257 t.tv_sec = (long int)time;
1258 t.tv_nsec = (long int)((time - (double)t.tv_sec) * 1000000000);
1259 pthread_rwlock_rdlock(&_ecore_thread_global_hash_lock);
1260 ret = eina_hash_find(_ecore_thread_global_hash, key);
1261 pthread_rwlock_unlock(&_ecore_thread_global_hash_lock);
1262 if ((ret) || (!seconds) || ((seconds > 0) && (time <= ecore_time_get())))
1264 pthread_mutex_lock(&_ecore_thread_global_hash_mutex);
1265 pthread_cond_timedwait(&_ecore_thread_global_hash_cond, &_ecore_thread_global_hash_mutex, &t);
1266 pthread_mutex_unlock(&_ecore_thread_global_hash_mutex);
1268 if (ret) return ret->data;