9 #ifdef EFL_HAVE_PTHREAD
14 #include "ecore_private.h"
16 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
17 typedef struct _Ecore_Pthread Ecore_Pthread;
/* Worker descriptor shared between the main loop and the job threads.
 * NOTE(review): this view of the file is missing lines (struct braces,
 * the data pointer and the short/long union wrapper are elided) — do not
 * assume the layout shown here is complete. */
19 struct _Ecore_Pthread_Worker
/* Short-run job: a plain blocking function executed off the main loop. */
25 void (*func_blocking)(void *data);
/* Long-run job: heavy function plus a notify callback whose messages are
 * fed back to the main loop through a pipe. */
29 void (*func_heavy)(Ecore_Thread *thread, void *data);
30 void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data);
34 #ifdef EFL_HAVE_PTHREAD
/* Main-loop callbacks for cancellation and normal completion. */
40 void (*func_cancel)(void *data);
41 void (*func_end)(void *data);
/* EINA_TRUE when the worker was scheduled via ecore_long_run(). */
46 Eina_Bool long_run : 1;
49 #ifdef EFL_HAVE_PTHREAD
/* Per-thread bookkeeping: the pthread handle and its result pipe. */
50 typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;
52 struct _Ecore_Pthread_Data
/* Upper bound on pool worker threads (initialised from the CPU count). */
60 static int _ecore_thread_count_max = 0;
/* Ecore event type used to defer pipe destruction to the main loop. */
61 static int ECORE_THREAD_PIPE_DEL = 0;
63 #ifdef EFL_HAVE_PTHREAD
/* Number of currently running pool threads; guarded by _mutex. */
64 static int _ecore_thread_count = 0;
65 static Eina_Hash *_ecore_thread_global_hash = NULL;
/* Threads waiting to be joined, plus the two pending-job queues (short
 * jobs from ecore_thread_run, long jobs from ecore_long_run); all list
 * mutations happen under _mutex. */
66 static Eina_List *_ecore_active_job_threads = NULL;
67 static Eina_List *_ecore_pending_job_threads = NULL;
68 static Eina_List *_ecore_pending_job_threads_long = NULL;
69 static Ecore_Event_Handler *del_handler = NULL;
71 static pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER;
/* Free callback for ECORE_THREAD_PIPE_DEL events: destroys the pipe the
 * event carried once the main loop has finished dispatching it. */
74 _ecore_thread_pipe_free(void *data __UNUSED__, void *event)
76 Ecore_Pipe *p = event;
/* No-op handler for ECORE_THREAD_PIPE_DEL; the actual pipe destruction
 * happens in the event's free callback (_ecore_thread_pipe_free). */
82 _ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
84 /* This is a hack to delay pipe destruction until we are out of its internal loop. */
85 return ECORE_CALLBACK_CANCEL;
/* Main-loop finaliser for a worker thread: join it, drop it from the
 * active list, and queue deferred destruction of its result pipe via an
 * ECORE_THREAD_PIPE_DEL event. */
89 _ecore_thread_end(Ecore_Pthread_Data *pth)
93 if (pthread_join(pth->thread, (void **) &p) != 0)
96 _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);
98 ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
/* Main-loop side of a worker's result pipe: receives one worker pointer
 * per message and dispatches func_cancel or func_end, then tears down
 * long-run resources (notify pipe, per-thread hash). */
102 _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
104 Ecore_Pthread_Worker *work;
/* Each message is exactly one pointer; ignore malformed writes. */
106 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
108 work = *(Ecore_Pthread_Worker **)buffer;
/* Cancelled work gets func_cancel, finished work gets func_end.
 * NOTE(review): the branch structure between these calls (and the NULL
 * checks around func_end) sits on lines elided from this view. */
112 if (work->func_cancel)
113 work->func_cancel((void *) work->data);
118 work->func_end((void *) work->data);
/* Long-run workers additionally own a notify pipe and a data hash. */
123 ecore_pipe_del(work->u.long_run.notify);
124 eina_hash_free(work->u.long_run.hash);
/* Main-loop side of a long-run worker's notify pipe: unpacks the pointer
 * written by ecore_thread_notify and forwards it to func_notify. */
130 _ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
132 Ecore_Pthread_Worker *work = data;
/* Each message is exactly one pointer; ignore malformed writes. */
135 if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
137 user_data = *(void **)buffer;
139 if (work->u.long_run.func_notify)
140 work->u.long_run.func_notify((Ecore_Thread *) work, user_data, (void *) work->data);
/* Worker-thread loop for short jobs: pop one pending job at a time under
 * _mutex, run its blocking function outside the lock, and report
 * completion to the main loop through the end pipe. */
144 _ecore_short_job(Ecore_Pipe *end_pipe)
146 Ecore_Pthread_Worker *work;
148 while (_ecore_pending_job_threads)
150 pthread_mutex_lock(&_mutex);
/* Re-check under the lock: another worker may have drained the queue
 * between the unlocked while-test and here. */
152 if (!_ecore_pending_job_threads)
154 pthread_mutex_unlock(&_mutex);
158 work = eina_list_data_get(_ecore_pending_job_threads);
159 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, _ecore_pending_job_threads);
161 pthread_mutex_unlock(&_mutex);
/* Run the job with the lock released. */
163 work->u.short_run.func_blocking((void *) work->data);
165 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Worker-thread loop for long jobs: pop one queued long-run worker under
 * _mutex, record this thread as its owner (so in-thread-only APIs such as
 * ecore_thread_notify can verify the caller), run func_heavy, then report
 * completion through the end pipe. */
170 _ecore_long_job(Ecore_Pipe *end_pipe, pthread_t thread)
172 Ecore_Pthread_Worker *work;
174 while (_ecore_pending_job_threads_long)
176 pthread_mutex_lock(&_mutex);
/* Re-check under the lock: another worker may have drained the queue. */
178 if (!_ecore_pending_job_threads_long)
180 pthread_mutex_unlock(&_mutex);
184 work = eina_list_data_get(_ecore_pending_job_threads_long);
185 _ecore_pending_job_threads_long = eina_list_remove_list(_ecore_pending_job_threads_long, _ecore_pending_job_threads_long);
187 pthread_mutex_unlock(&_mutex);
/* Mark the owning thread before running the job. */
189 work->u.long_run.self = thread;
190 work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
192 ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
/* Thread entry used by ecore_long_run(..., try_no_queue): runs a single
 * long job on a dedicated thread outside the pool, then hands the main
 * loop a synthetic "thread finished" worker whose func_end is
 * _ecore_thread_end so this thread gets joined. */
197 _ecore_direct_worker(Ecore_Pthread_Worker *work)
199 Ecore_Pthread_Data *pth;
201 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
202 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
204 pth = malloc(sizeof (Ecore_Pthread_Data));
205 if (!pth) return NULL;
207 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
213 pth->thread = pthread_self();
215 work->u.long_run.self = pth->thread;
216 work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
/* Report the finished job to the main loop. */
218 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Build the synthetic join-me worker. NOTE(review): the allocation-failure
 * branch between this malloc and the pipe_del below is elided here. */
220 work = malloc(sizeof (Ecore_Pthread_Worker));
223 ecore_pipe_del(pth->p);
229 work->u.short_run.func_blocking = NULL;
230 work->func_end = (void *) _ecore_thread_end;
231 work->func_cancel = NULL;
232 work->cancel = EINA_FALSE;
233 work->long_run = EINA_FALSE;
235 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* Pool worker thread entry: bump the live-thread count, drain both job
 * queues, and once both are empty decrement the count and send the main
 * loop a synthetic worker whose func_end is _ecore_thread_end so this
 * thread gets joined.
 * NOTE(review): restart gotos/braces are elided in this view — the two
 * re-check branches below appear to loop back to draining the queues. */
241 _ecore_thread_worker(Ecore_Pthread_Data *pth)
243 Ecore_Pthread_Worker *work;
245 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
246 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
248 pthread_mutex_lock(&_mutex);
249 _ecore_thread_count++;
250 pthread_mutex_unlock(&_mutex);
253 if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
254 if (_ecore_pending_job_threads_long) _ecore_long_job(pth->p, pth->thread);
256 /* FIXME: Check if there is long running task todo, and switch to long run handler. */
258 pthread_mutex_lock(&_mutex);
259 if (_ecore_pending_job_threads)
261 pthread_mutex_unlock(&_mutex);
264 if (_ecore_pending_job_threads_long)
266 pthread_mutex_unlock(&_mutex);
270 _ecore_thread_count--;
272 pthread_mutex_unlock(&_mutex);
274 work = malloc(sizeof (Ecore_Pthread_Worker));
275 if (!work) return NULL;
/* Synthetic "thread finished" job: func_end joins us from the main loop. */
278 work->u.short_run.func_blocking = NULL;
279 work->func_end = (void *) _ecore_thread_end;
280 work->func_cancel = NULL;
281 work->cancel = EINA_FALSE;
282 work->long_run = EINA_FALSE;
284 ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
/* One-time module setup: size the pool to the CPU count (minimum 1) and
 * register the event type/handler used to defer pipe destruction. */
292 _ecore_thread_init(void)
294 _ecore_thread_count_max = eina_cpu_count();
295 if (_ecore_thread_count_max <= 0)
296 _ecore_thread_count_max = 1;
298 ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
299 #ifdef EFL_HAVE_PTHREAD
300 del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
/* Module teardown: cancel every still-queued short job (invoking its
 * func_cancel), then forcibly cancel and join every active worker. */
305 _ecore_thread_shutdown(void)
307 /* FIXME: If function are still running in the background, should we kill them ? */
308 #ifdef EFL_HAVE_PTHREAD
309 Ecore_Pthread_Worker *work;
310 Ecore_Pthread_Data *pth;
312 pthread_mutex_lock(&_mutex);
314 EINA_LIST_FREE(_ecore_pending_job_threads, work)
316 if (work->func_cancel)
317 work->func_cancel((void *)work->data);
321 pthread_mutex_unlock(&_mutex);
/* NOTE(review): pthread_cancel on threads running with ASYNCHRONOUS
 * cancel type can kill them mid-malloc/mid-lock; tolerable only because
 * this runs at process shutdown. */
323 EINA_LIST_FREE(_ecore_active_job_threads, pth)
327 pthread_cancel(pth->thread);
328 pthread_join(pth->thread, (void **) &p);
330 ecore_pipe_del(pth->p);
333 ecore_event_handler_del(del_handler);
338 * @defgroup Ecore_Thread Ecore Thread Functions
339 * These functions allow for ecore-managed threads which integrate with ecore's main loop.
343 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
344 * @param func_blocking The function that should run in another thread.
345 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
346 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
347 * @param data User context data to pass to all callbacks.
348 * @return A reference to the newly created thread instance, or NULL if it failed.
350 * ecore_thread_run provides a facility for easily managing blocking tasks in a
351 * parallel thread. You should provide three functions. The first one, func_blocking,
352 * will do the blocking work in another thread (so you should not use the
353 * EFL in it except Eina if you are careful). The second one, func_end,
354 * will be called in the Ecore main loop when func_blocking is done. So you
355 * can use all the EFL inside this function. The last one, func_cancel, will
356 * be called in the main loop if the thread is cancelled or could not run at all.
358 * Be aware that you can't make assumptions about the completion order of func_end
359 * after many calls to ecore_thread_run, as we start as many threads as the
360 * host CPU can handle.
/* Queue a short blocking job on the pool, spawning one more worker thread
 * if the pool is not yet full. Without pthread support the call degrades
 * to running func_blocking and func_end synchronously in the caller. */
363 ecore_thread_run(void (*func_blocking)(void *data),
364 void (*func_end)(void *data),
365 void (*func_cancel)(void *data),
368 #ifdef EFL_HAVE_PTHREAD
369 Ecore_Pthread_Worker *work;
370 Ecore_Pthread_Data *pth = NULL;
372 if (!func_blocking) return NULL;
374 work = malloc(sizeof (Ecore_Pthread_Worker));
/* NOTE(review): the allocation-failure branch guarding this func_cancel
 * call sits on lines elided from this view. */
377 func_cancel((void *) data);
381 work->u.short_run.func_blocking = func_blocking;
382 work->func_end = func_end;
383 work->func_cancel = func_cancel;
384 work->cancel = EINA_FALSE;
385 work->long_run = EINA_FALSE;
388 pthread_mutex_lock(&_mutex);
389 _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
/* Pool already full: an existing worker will pick the job up. */
391 if (_ecore_thread_count == _ecore_thread_count_max)
393 pthread_mutex_unlock(&_mutex);
394 return (Ecore_Thread *) work;
397 pthread_mutex_unlock(&_mutex);
399 /* One more thread could be created. */
400 pth = malloc(sizeof (Ecore_Pthread_Data));
401 if (!pth) goto on_error;
403 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
404 if (!pth->p) goto on_error;
406 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
407 return (Ecore_Thread *) work;
/* Spawn failed: clean up; if no worker is alive the queued job would
 * never run, so cancel and drop it. */
412 if (pth->p) ecore_pipe_del(pth->p);
416 if (_ecore_thread_count == 0)
418 if (work->func_cancel)
419 work->func_cancel((void *) work->data);
423 return (Ecore_Thread *) work;
426 If no thread and as we don't want to break app that rely on this
427 facility, we will lock the interface until we are done.
429 func_blocking((void *)data);
430 func_end((void *)data);
437 * @brief Cancel a running thread.
438 * @param thread The thread to cancel.
439 * @return Will return EINA_TRUE if the thread has been cancelled,
440 * EINA_FALSE if it is pending.
442 * ecore_thread_cancel gives the possibility to cancel a task still running. It
443 * will return EINA_FALSE if the destruction is delayed, or EINA_TRUE if it is
444 * cancelled after this call.
446 * You should use this function only in the main loop.
448 * func_end, func_cancel will destroy the handler, so don't use it after.
449 * And if ecore_thread_cancel return EINA_TRUE, you should not use Ecore_Thread also.
/* Cancel a job: if it is still queued, remove it, invoke func_cancel and
 * report EINA_TRUE; if it already runs, only set its cancel flag (the job
 * must poll ecore_thread_check) and report EINA_FALSE.
 * NOTE(review): only the short-job queue is searched — a still-queued
 * long-run job is never removed here; confirm whether that is intended. */
452 ecore_thread_cancel(Ecore_Thread *thread)
454 #ifdef EFL_HAVE_PTHREAD
455 Ecore_Pthread_Worker *work;
458 pthread_mutex_lock(&_mutex);
460 EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
461 if ((void *) work == (void *) thread)
463 _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
465 pthread_mutex_unlock(&_mutex);
467 if (work->func_cancel)
468 work->func_cancel((void *) work->data);
474 pthread_mutex_unlock(&_mutex);
476 /* Delay the destruction */
477 ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
485 * @brief Tell if a thread was canceled or not.
486 * @param thread The thread to test.
487 * @return EINA_TRUE if the thread is cancelled,
488 * EINA_FALSE if it is not.
490 * You can use this function in main loop and in the thread.
/* Return EINA_TRUE when the job was cancelled (a NULL handle counts as
 * cancelled). Safe from the main loop or from inside the worker. */
493 ecore_thread_check(Ecore_Thread *thread)
495 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
497 if (!worker) return EINA_TRUE;
498 return worker->cancel;
502 * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
503 * @param func_heavy The function that should run in another thread.
504 * @param func_notify The function that will receive the data sent by func_heavy in the main loop.
505 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
506 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
507 * @param data User context data to pass to all callbacks.
508 * @param try_no_queue If you want to run outside of the thread pool.
509 * @return A reference to the newly created thread instance, or NULL if it failed.
511 * ecore_long_run provides a facility for easily managing heavy tasks in a
512 * parallel thread. You should provide four functions. The first one, func_heavy,
513 * will do the heavy work in another thread (so you should not use the
514 * EFL in it except Eina and Eet if you are careful). The second one, func_notify,
515 * will receive the data sent from the thread function (func_heavy) by ecore_thread_notify
516 * in the main loop (and so, can use all the EFL). The third, func_end,
517 * will be called in the Ecore main loop when func_heavy is done. So you
518 * can use all the EFL inside this function. The last one, func_cancel, will
519 * be called in the main loop also, if the thread is cancelled or could not run at all.
521 * Be aware that you can't make assumptions about the completion order of func_end
522 * after many calls to ecore_long_run, as we start as many threads as the
523 * host CPU can handle.
525 * If you set try_no_queue, it will try to run outside of the thread pool; this can bring
526 * the CPU down, so be careful with that. Of course if it can't start a new thread, it will
527 * try to use one from the pool.
530 ecore_long_run(void (*func_heavy)(Ecore_Thread *thread, void *data),
531 void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data),
532 void (*func_end)(void *data),
533 void (*func_cancel)(void *data),
535 Eina_Bool try_no_queue)
538 #ifdef EFL_HAVE_PTHREAD
539 Ecore_Pthread_Worker *worker;
540 Ecore_Pthread_Data *pth = NULL;
542 if (!func_heavy) return NULL;
544 worker = malloc(sizeof (Ecore_Pthread_Worker));
545 if (!worker) goto on_error;
547 worker->u.long_run.func_heavy = func_heavy;
548 worker->u.long_run.func_notify = func_notify;
549 worker->u.long_run.hash = NULL;
550 worker->func_cancel = func_cancel;
551 worker->func_end = func_end;
553 worker->cancel = EINA_FALSE;
554 worker->long_run = EINA_TRUE;
556 worker->u.long_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
562 if (pthread_create(&t, NULL, (void *) _ecore_direct_worker, worker) == 0)
563 return (Ecore_Thread *) worker;
566 pthread_mutex_lock(&_mutex);
567 _ecore_pending_job_threads_long = eina_list_append(_ecore_pending_job_threads_long, worker);
569 if (_ecore_thread_count == _ecore_thread_count_max)
571 pthread_mutex_unlock(&_mutex);
572 return (Ecore_Thread *) worker;
575 pthread_mutex_unlock(&_mutex);
577 /* One more thread could be created. */
578 pth = malloc(sizeof (Ecore_Pthread_Data));
579 if (!pth) goto on_error;
581 pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
582 if (pth->p) goto on_error;
584 if (pthread_create(&pth->thread, NULL, (void *) _ecore_thread_worker, pth) == 0)
585 return (Ecore_Thread *) worker;
590 if (pth->p) ecore_pipe_del(pth->p);
594 if (_ecore_thread_count == 0)
596 if (func_cancel) func_cancel((void *) data);
600 ecore_pipe_del(worker->u.long_run.notify);
606 return (Ecore_Thread *) worker;
608 Ecore_Pthread_Worker worker;
613 If no thread and as we don't want to break app that rely on this
614 facility, we will lock the interface until we are done.
616 worker.u.long_run.func_heavy = func_heavy;
617 worker.u.long_run.func_notify = func_notify;
618 worker->u.long_run.hash = NULL;
619 worker.u.long_run.notify = NULL;
620 worker.func_cancel = func_cancel;
621 worker.func_end = func_end;
623 worker.cancel = EINA_FALSE;
624 worker.long_run = EINA_TRUE;
626 func_heavy((Ecore_Thread *) &worker, (void *)data);
628 if (worker.cancel) func_cancel((void *)data);
629 else func_end((void *)data);
636 * @brief Send data to main loop from worker thread.
637 * @param thread The current Ecore_Thread context to send data from
638 * @param data Data to be transmitted to the main loop
639 * @return EINA_TRUE if data was successfully sent to the main loop,
640 * EINA_FALSE if anything goes wrong.
642 * After a successful call, the data should be considered owned
645 * You should use this function only in the func_heavy call.
/* Send a pointer from inside a long-run job to func_notify in the main
 * loop via the worker's notify pipe. Without pthread support the notify
 * callback is invoked directly. */
648 ecore_thread_notify(Ecore_Thread *thread, const void *data)
650 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
652 if (!worker) return EINA_FALSE;
653 if (!worker->long_run) return EINA_FALSE;
655 #ifdef EFL_HAVE_PTHREAD
/* Only the thread running this job may notify through its pipe.
 * NOTE(review): comparing pthread_t with != is not portable; POSIX says
 * to use pthread_equal(). */
656 if (worker->u.long_run.self != pthread_self()) return EINA_FALSE;
658 ecore_pipe_write(worker->u.long_run.notify, &data, sizeof (void *));
662 worker->u.long_run.func_notify(thread, (void*) data, (void*) worker->data);
669 * @brief Get number of active thread jobs
670 * @return Number of active threads running jobs
671 * This returns the number of threads currently running jobs through the
/* Number of pool threads currently running jobs, read under _mutex
 * (the non-pthread return path is on lines elided from this view). */
675 ecore_thread_active_get(void)
678 #ifdef EFL_HAVE_PTHREAD
679 pthread_mutex_lock(&_mutex);
680 ret = _ecore_thread_count;
681 pthread_mutex_unlock(&_mutex);
689 * @brief Get number of pending (short) thread jobs
690 * @return Number of pending threads running "short" jobs
691 * This returns the number of threads currently running jobs through the
692 * ecore_thread_run api call.
/* Number of queued short jobs (ecore_thread_run), read under _mutex. */
695 ecore_thread_pending_get(void)
698 #ifdef EFL_HAVE_PTHREAD
699 pthread_mutex_lock(&_mutex);
700 ret = eina_list_count(_ecore_pending_job_threads);
701 pthread_mutex_unlock(&_mutex);
709 * @brief Get number of pending long thread jobs
710 * @return Number of pending threads running "long" jobs
711 * This returns the number of threads currently running jobs through the
712 * ecore_long_run api call.
/* Number of queued long jobs (ecore_long_run), read under _mutex. */
715 ecore_thread_pending_long_get(void)
718 #ifdef EFL_HAVE_PTHREAD
719 pthread_mutex_lock(&_mutex);
720 ret = eina_list_count(_ecore_pending_job_threads_long);
721 pthread_mutex_unlock(&_mutex);
729 * @brief Get number of pending thread jobs
730 * @return Number of pending threads running jobs
731 * This returns the number of threads currently running jobs through the
732 * ecore_thread_run and ecore_long_run api calls combined.
/* Total queued jobs (short + long), both counted under the same lock. */
735 ecore_thread_pending_total_get(void)
738 #ifdef EFL_HAVE_PTHREAD
739 pthread_mutex_lock(&_mutex);
740 ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_long);
741 pthread_mutex_unlock(&_mutex);
749 * @brief Get the max number of threads that can run simultaneously
750 * @return Max number of threads ecore will run
751 * This returns the total number of threads that ecore will attempt to run
755 ecore_thread_max_get(void)
758 pthread_mutex_lock(&_mutex);
759 ret = _ecore_thread_count_max;
760 pthread_mutex_unlock(&_mutex);
762 return _ecore_thread_count_max;
766 * @brief Set the max number of threads that can run simultaneously
767 * @param num The new maximum
768 * This sets the maximum number of threads that ecore will try to run
769 * simultaneously. This number cannot be < 1 or >= 2x the number of active cpus.
/* Set the pool size under _mutex, rejecting absurd values.
 * NOTE(review): the documented lower-bound check (num < 1) is not visible
 * in this view — confirm it exists on the elided lines. */
772 ecore_thread_max_set(int num)
775 /* avoid doing something hilarious by blocking dumb users */
776 if (num >= (2 * eina_cpu_count())) return;
778 pthread_mutex_lock(&_mutex);
779 _ecore_thread_count_max = num;
780 pthread_mutex_unlock(&_mutex);
784 * @brief Reset the max number of threads that can run simultaneously
785 * This resets the maximum number of threads that ecore will try to run
786 * simultaneously to the number of active cpus.
/* Reset the pool size to the machine's CPU count, under _mutex. */
789 ecore_thread_max_reset(void)
791 pthread_mutex_lock(&_mutex);
792 _ecore_thread_count_max = eina_cpu_count();
793 pthread_mutex_unlock(&_mutex);
797 * @brief Get the number of threads which are available to be used
798 * @return The number of available threads
799 * This returns the number of threads slots that ecore has currently available.
800 * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
801 * this should be equal to (num_cpus - (active_running + active_long_running))
/* Free pool slots: max minus currently running, read under _mutex. */
804 ecore_thread_available_get(void)
807 #ifdef EFL_HAVE_PTHREAD
808 pthread_mutex_lock(&_mutex);
809 ret = _ecore_thread_count_max - _ecore_thread_count;
810 pthread_mutex_unlock(&_mutex);
818 * @brief Add data to the pool for subsequent uses
819 * @param thread The thread context to add to
820 * @param key The name string to add the data with
821 * @param value The data to add
822 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
823 * @return EINA_TRUE on success, EINA_FALSE on failure
824 * This adds data to the thread context, allowing for subsequent users of the thread's pool
825 * to retrieve and use it without complicated mutexing. This function can only be called by a
826 * heavy_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
827 * All data added to the thread pool must be freed in the thread's func_end/func_cancel
828 * functions to avoid leaks.
/* Attach named data to a long-run thread's private hash, creating the
 * hash lazily. In-thread only: callers from other threads are rejected.
 * NOTE(review): the `if (direct)` branch selecting between the two return
 * statements below is on an elided line; also, pthread_t compared with !=
 * is not portable — POSIX prescribes pthread_equal(). */
831 ecore_thread_pool_data_add(Ecore_Thread *thread, const char *key, const void *value, Eina_Bool direct)
833 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
834 if ((!thread) || (!key) || (!value))
836 #ifdef EFL_HAVE_PTHREAD
837 if (worker->u.long_run.self != pthread_self()) return EINA_FALSE;
839 if (!worker->u.long_run.hash)
840 worker->u.long_run.hash = eina_hash_string_small_new(NULL);
842 if (!worker->u.long_run.hash)
845 return eina_hash_direct_add(worker->u.long_run.hash, key, value);
846 return eina_hash_add(worker->u.long_run.hash, key, value);
853 * @brief Modify data in the pool, or add if not found
854 * @param thread The thread context
855 * @param key The name string to add the data with
856 * @param value The data to add
857 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
858 * @return The old data associated with @p key on success if modified, NULL if added
859 * This adds/modifies data in the thread context, adding only if modify fails.
860 * This function can only be called by a heavy_run thread INSIDE the thread.
861 * All data added to the thread pool must be freed in the thread's func_end/func_cancel
862 * functions to avoid leaks.
/* Replace (or add) named data in the thread's private hash, creating the
 * hash lazily. In-thread only; returns the old value when modified. */
865 ecore_thread_pool_data_modify_or_add(Ecore_Thread *thread, const char *key, const void *value)
867 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
868 if ((!thread) || (!key) || (!value))
870 #ifdef EFL_HAVE_PTHREAD
871 if (worker->u.long_run.self != pthread_self()) return NULL;
873 if (!worker->u.long_run.hash)
874 worker->u.long_run.hash = eina_hash_string_small_new(NULL);
876 if (!worker->u.long_run.hash)
879 return eina_hash_modify_or_add(worker->u.long_run.hash, key, value);
886 * @brief Find data in the pool's data
887 * @param thread The thread context
888 * @param key The name string the data is associated with
889 * @return The value, or NULL on error
890 * This finds data in the thread context that has been previously added with @ref ecore_thread_pool_data_add
891 * This function can only be called by a heavy_run thread INSIDE the thread, and will return NULL
892 * in any case but success.
/* Look up named data in the thread's private hash. In-thread only;
 * returns NULL when the hash was never created or the key is absent. */
896 ecore_thread_pool_data_find(Ecore_Thread *thread, const char *key)
898 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
899 if ((!thread) || (!key))
901 #ifdef EFL_HAVE_PTHREAD
902 if (worker->u.long_run.self != pthread_self()) return NULL;
904 if (!worker->u.long_run.hash)
907 return eina_hash_find(worker->u.long_run.hash, key);
914 * @brief Delete data from the pool's data
915 * @param thread The thread context
916 * @param key The name string the data is associated with
917 * @return EINA_TRUE on success, EINA_FALSE on failure
918 * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_pool_data_add
919 * This function can only be called by a heavy_run thread INSIDE the thread, and will return EINA_FALSE
920 * in any case but success. Note that this WILL NOT free the data, it merely removes it from the thread pool.
/* Remove named data from the thread's private hash. In-thread only.
 * Does NOT free the stored value — ownership stays with the caller. */
923 ecore_thread_pool_data_del(Ecore_Thread *thread, const char *key)
925 Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
926 if ((!thread) || (!key))
928 #ifdef EFL_HAVE_PTHREAD
929 if (worker->u.long_run.self != pthread_self()) return EINA_FALSE;
931 if (!worker->u.long_run.hash)
934 return eina_hash_del_by_key(worker->u.long_run.hash, key);