1
2 #ifdef HAVE_CONFIG_H
3 # include <config.h>
4 #endif
5
6 #include <stdlib.h>
7 #include <sys/time.h>
8 #include <assert.h>
9 #include <sys/types.h>
10 #include <unistd.h>
11
12 #ifdef HAVE_EVIL
13 # include <Evil.h>
14 #endif
15
16 #include "Ecore.h"
17 #include "ecore_private.h"
18
19 #ifdef EFL_HAVE_THREADS
20
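/* Shorthand wrappers around the Eina threading primitives: LK* for plain
 * mutexes, CD* for condition variables and LRWK* for read/write locks.
 * They keep the locking code in this file terse and make the POSIX/Win32
 * split below easier to follow. */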
21 # define LK(x) Eina_Lock x
22 # define LKI(x) eina_lock_new(&(x))
23 # define LKD(x) eina_lock_free(&(x))
24 # define LKL(x) eina_lock_take(&(x))
25 # define LKU(x) eina_lock_release(&(x))
26
27 # define CD(x) Eina_Condition x
28 # define CDI(x, m) eina_condition_new(&(x), &(m))
29 # define CDD(x) eina_condition_free(&(x))
30 # define CDB(x) eina_condition_broadcast(&(x))
31 # define CDW(x, t) eina_condition_timedwait(&(x), t)
32
33 # define LRWK(x) Eina_RWLock x
34 # define LRWKI(x) eina_rwlock_new(&(x));
35 # define LRWKD(x) eina_rwlock_free(&(x));
36 # define LRWKWL(x) eina_rwlock_take_write(&(x));
37 # define LRWKRL(x) eina_rwlock_take_read(&(x));
38 # define LRWKU(x) eina_rwlock_release(&(x));
39
40 # ifdef EFL_HAVE_POSIX_THREADS
41 #  include <pthread.h>
42 #  ifdef __linux__
43 #   include <sched.h>
44 #   include <sys/resource.h>
45 #   include <unistd.h>
46 #   include <sys/syscall.h>
47 #   include <errno.h>
48 #  endif
49
50 #  define PH(x)        pthread_t x
51 #  define PHE(x, y)    pthread_equal(x, y)
52 #  define PHS()        pthread_self()
53 #  define PHC(x, f, d) pthread_create(&(x), NULL, (void *)f, d)
54 #  define PHJ(x)       pthread_join(x, NULL)
55 #  define PHA(x)       pthread_cancel(x)
56
57 # else /* EFL_HAVE_WIN32_THREADS */
58
59 #  define WIN32_LEAN_AND_MEAN
60 #  include <windows.h>
61 #  undef WIN32_LEAN_AND_MEAN
62
63 typedef struct
64 {
65    HANDLE thread;
66    void  *val;
67 } win32_thread;
68
69 #  define PH(x)     win32_thread * x
70 #  define PHE(x, y) ((x) == (y))
71 #  define PHS()     (HANDLE)GetCurrentThreadId()
72
73 int
74 _ecore_thread_win32_create(win32_thread         **x,
75                            LPTHREAD_START_ROUTINE f,
76                            void                  *d)
77 {
78    win32_thread *t;
79    t = (win32_thread *)calloc(1, sizeof(win32_thread));
80    if (!t)
81      return -1;
82
83    (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
84    if (!t->thread)
85      {
86         free(t);
87         return -1;
88      }
89    t->val = d;
90    *x = t;
91
92    return 0;
93 }
94
95 #  define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)
96
97 int
98 _ecore_thread_win32_join(win32_thread *x,
99                          void        **res)
100 {
101    if (!PHE(x, PHS()))
102      {
103         WaitForSingleObject(x->thread, INFINITE);
104         CloseHandle(x->thread);
105      }
106    if (res) *res = x->val;
107    free(x);
108
109    return 0;
110 }
111
112 #  define PHJ(x) _ecore_thread_win32_join(x, NULL)
113 #  define PHA(x)    TerminateThread(x->thread, 0)
114
115 # endif
116
117 #endif
118
119 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
120 typedef struct _Ecore_Pthread        Ecore_Pthread;
121 typedef struct _Ecore_Thread_Data    Ecore_Thread_Data;
122
123 struct _Ecore_Thread_Data
124 {
125    void        *data;
126    Eina_Free_Cb cb;
127 };
128
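/* One queued or running job. The union holds the mode-specific state:
 * short_run for plain ecore_thread_run() jobs, feedback_run for jobs that
 * report back to the main loop via ecore_thread_feedback(), and message_run
 * for the (currently #if 0'd) bidirectional message variant. */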
129 struct _Ecore_Pthread_Worker
130 {
131    union {
132       struct
133       {
134          Ecore_Thread_Cb func_blocking;
135       } short_run;
136       struct
137       {
138          Ecore_Thread_Cb        func_heavy;
139          Ecore_Thread_Notify_Cb func_notify;
140
141          Ecore_Pthread_Worker  *direct_worker;
142
143          int                    send;
144          int                    received;
145       } feedback_run;
146       struct {
147          Ecore_Thread_Cb func_main;
148          Ecore_Thread_Notify_Cb func_notify;
149
150          Ecore_Pipe            *send;
151          Ecore_Pthread_Worker  *direct_worker;
152
153          struct {
154             int send;
155             int received;
156          } from, to;
157       } message_run;
158    } u;
159
160    Ecore_Thread_Cb func_cancel;
161    Ecore_Thread_Cb func_end;
162 #ifdef EFL_HAVE_THREADS
163                    PH(self);
164    Eina_Hash      *hash;
165                    CD(cond);
166                    LK(mutex);
167 #endif
168
169    const void     *data;
170
171    int cancel;
172
173 #ifdef EFL_HAVE_THREADS
174    LK(cancel_mutex);
175 #endif
176
177    Eina_Bool message_run : 1;
178    Eina_Bool feedback_run : 1;
179    Eina_Bool kill : 1;
180    Eina_Bool reschedule : 1;
181    Eina_Bool no_queue : 1;
182 };
183
184 #ifdef EFL_HAVE_THREADS
185 typedef struct _Ecore_Pthread_Notify Ecore_Pthread_Notify;
186 struct _Ecore_Pthread_Notify
187 {
188    Ecore_Pthread_Worker *work;
189    const void *user_data;
190 };
191
192 typedef void *(*Ecore_Thread_Sync_Cb)(void* data, Ecore_Thread *thread);
193
194 typedef struct _Ecore_Pthread_Message Ecore_Pthread_Message;
195 struct _Ecore_Pthread_Message
196 {
197    union {
198       Ecore_Thread_Cb async;
199       Ecore_Thread_Sync_Cb sync;
200    } u;
201
202    const void *data;
203
204    int code;
205
206    Eina_Bool callback : 1;
207    Eina_Bool sync : 1;
208 };
209
210 #endif
211
212 static int _ecore_thread_count_max = 0;
213
214 #ifdef EFL_HAVE_THREADS
215
216 static void _ecore_thread_handler(void *data);
217
218 static int _ecore_thread_count = 0;
219
220 static Eina_List *_ecore_running_job = NULL;
221 static Eina_List *_ecore_pending_job_threads = NULL;
222 static Eina_List *_ecore_pending_job_threads_feedback = NULL;
223 static LK(_ecore_pending_job_threads_mutex);
224
225 static Eina_Hash *_ecore_thread_global_hash = NULL;
226 static LRWK(_ecore_thread_global_hash_lock);
227 static LK(_ecore_thread_global_hash_mutex);
228 static CD(_ecore_thread_global_hash_cond);
229
230 static Eina_Bool have_main_loop_thread = 0;
231
232 static Eina_Trash *_ecore_thread_worker_trash = NULL;
233 static int _ecore_thread_worker_count = 0;
234
235 static void                 *_ecore_thread_worker(void *);
236 static Ecore_Pthread_Worker *_ecore_thread_worker_new(void);
237
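/* Cache the main loop thread id. The cache is keyed on the process id so
 * that the value is refreshed after a fork(). */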
238 static PH(get_main_loop_thread) (void)
239 {
240    static PH(main_loop_thread);
241    static pid_t main_loop_pid;
242    pid_t pid = getpid();
243
244    if (pid != main_loop_pid)
245      {
246         main_loop_pid = pid;
247         main_loop_thread = PHS();
248         have_main_loop_thread = 1;
249      }
250
251    return main_loop_thread;
252 }
253
254 static void
255 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
256 {
257    LKD(worker->cancel_mutex);
258    CDD(worker->cond);
259    LKD(worker->mutex);
260
261    if (_ecore_thread_worker_count > ((_ecore_thread_count_max + 1) * 16))
262      {
263         _ecore_thread_worker_count--;
264         free(worker);
265         return;
266      }
267
268    eina_trash_push(&_ecore_thread_worker_trash, worker);
269 }
270
271 static void
272 _ecore_thread_data_free(void *data)
273 {
274    Ecore_Thread_Data *d = data;
275
276    if (d->cb) d->cb(d->data);
277    free(d);
278 }
279
280 static void
281 _ecore_thread_join(PH(thread))
282 {
283    PHJ(thread);
284 }
285
286 static void
287 _ecore_thread_kill(Ecore_Pthread_Worker *work)
288 {
289    if (work->cancel)
290      {
291         if (work->func_cancel)
292           work->func_cancel((void *)work->data, (Ecore_Thread *)work);
293      }
294    else
295      {
296         if (work->func_end)
297           work->func_end((void *)work->data, (Ecore_Thread *)work);
298      }
299
300    if (work->feedback_run)
301      {
302         if (work->u.feedback_run.direct_worker)
303           _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
304      }
305    if (work->hash)
306      eina_hash_free(work->hash);
307    _ecore_thread_worker_free(work);
308 }
309
310 static void
311 _ecore_thread_handler(void *data)
312 {
313    Ecore_Pthread_Worker *work = data;
314
315    if (work->feedback_run)
316      {
317         if (work->u.feedback_run.send != work->u.feedback_run.received)
318           {
319              work->kill = EINA_TRUE;
320              return;
321           }
322      }
323
324    _ecore_thread_kill(work);
325 }
326
327 #if 0
328 static void
329 _ecore_nothing_handler(void *data __UNUSED__, void *buffer __UNUSED__, unsigned int nbyte __UNUSED__)
330 {
331 }
332 #endif
333
334 static void
335 _ecore_notify_handler(void *data)
336 {
337    Ecore_Pthread_Notify *notify = data;
338    Ecore_Pthread_Worker *work = notify->work;
339    void *user_data = (void*) notify->user_data;
340
341    work->u.feedback_run.received++;
342
343    if (work->u.feedback_run.func_notify)
344      work->u.feedback_run.func_notify((void *)work->data, (Ecore_Thread *)work, user_data);
345
346    /* Force reading all notify events before killing the thread */
347    if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
348      {
349         _ecore_thread_kill(work);
350      }
351
352    free(notify);
353 }
354
355 static void
356 _ecore_message_notify_handler(void *data)
357 {
358    Ecore_Pthread_Notify *notify = data;
359    Ecore_Pthread_Worker *work = notify->work;
360    Ecore_Pthread_Message *user_data = (void *) notify->user_data;
361    Eina_Bool delete = EINA_TRUE;
362
363    work->u.message_run.from.received++;
364
365    if (!user_data->callback)
366      {
367         if (work->u.message_run.func_notify)
368           work->u.message_run.func_notify((void *) work->data, (Ecore_Thread *) work, (void *) user_data->data);
369      }
370    else
371      {
372         if (user_data->sync)
373           {
374              user_data->data = user_data->u.sync((void*) user_data->data, (Ecore_Thread *) work);
375              user_data->callback = EINA_FALSE;
376              user_data->code = INT_MAX;
377              ecore_pipe_write(work->u.message_run.send, &user_data, sizeof (Ecore_Pthread_Message *));
378
379              delete = EINA_FALSE;
380           }
381         else
382           {
383              user_data->u.async((void*) user_data->data, (Ecore_Thread *) work);
384           }
385      }
386
387    if (delete)
388      {
389         free(user_data);
390      }
391
392    /* Force reading all notify events before killing the thread */
393    if (work->kill && work->u.message_run.from.send == work->u.message_run.from.received)
394      {
395         _ecore_thread_kill(work);
396      }
397    free(notify);
398 }
399
400 static void
401 _ecore_short_job(PH(thread))
402 {
403    Ecore_Pthread_Worker *work;
404    int cancel;
405
406    LKL(_ecore_pending_job_threads_mutex);
407    
408    if (!_ecore_pending_job_threads)
409      {
410         LKU(_ecore_pending_job_threads_mutex);
411         return;
412      }
413    
414    work = eina_list_data_get(_ecore_pending_job_threads);
415    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
416                                                       _ecore_pending_job_threads);
417    _ecore_running_job = eina_list_append(_ecore_running_job, work);
418    LKU(_ecore_pending_job_threads_mutex);
419    
420    LKL(work->cancel_mutex);
421    cancel = work->cancel;
422    LKU(work->cancel_mutex);
423    work->self = thread;
424    if (!cancel)
425      work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);
426
427    LKL(_ecore_pending_job_threads_mutex);
428    _ecore_running_job = eina_list_remove(_ecore_running_job, work);
429    LKU(_ecore_pending_job_threads_mutex);
430    
431    if (work->reschedule)
432      {
433         work->reschedule = EINA_FALSE;
434         
435         LKL(_ecore_pending_job_threads_mutex);
436         _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
437         LKU(_ecore_pending_job_threads_mutex);
438      }
439    else
440      {
441         ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
442      }
443 }
444
445 static void
446 _ecore_feedback_job(PH(thread))
447 {
448    Ecore_Pthread_Worker *work;
449    int cancel;
450    
451    LKL(_ecore_pending_job_threads_mutex);
452    
453    if (!_ecore_pending_job_threads_feedback)
454      {
455         LKU(_ecore_pending_job_threads_mutex);
456         return;
457      }
458    
459    work = eina_list_data_get(_ecore_pending_job_threads_feedback);
460    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
461                                                                _ecore_pending_job_threads_feedback);
462    _ecore_running_job = eina_list_append(_ecore_running_job, work);
463    LKU(_ecore_pending_job_threads_mutex);
464    
465    LKL(work->cancel_mutex);
466    cancel = work->cancel;
467    LKU(work->cancel_mutex);
468    work->self = thread;
469    if (!cancel)
470      work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
471
472    LKL(_ecore_pending_job_threads_mutex);
473    _ecore_running_job = eina_list_remove(_ecore_running_job, work);
474    LKU(_ecore_pending_job_threads_mutex);
475
476    if (work->reschedule)
477      {
478         work->reschedule = EINA_FALSE;
479         
480         LKL(_ecore_pending_job_threads_mutex);
481         _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
482         LKU(_ecore_pending_job_threads_mutex);
483      }
484    else
485      {
486         ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
487      }
488 }
489
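/* Entry point of a dedicated (non-pool) thread, used by the "no queue"
 * feedback path and by the message variant: run a single job, then ask the
 * main loop to fire the end/cancel handler and to join this thread. */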
490 static void *
491 _ecore_direct_worker(Ecore_Pthread_Worker *work)
492 {
493 #ifdef EFL_HAVE_POSIX_THREADS
494    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
495    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
496 #endif
497
498    eina_sched_prio_drop();
499
500    work->self = PHS();
501    if (work->message_run)
502      work->u.message_run.func_main((void *) work->data, (Ecore_Thread *) work);
503    else
504      work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
505
506    ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
507
508    ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join, 
509                                           (void*) PHS());
510
511    return NULL;
512 }
513
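/* Entry point of a pool thread: drain the short and feedback job queues,
 * linger briefly in case new jobs show up, then decrement the thread count
 * and ask the main loop to join this thread. */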
514 static void *
515 _ecore_thread_worker(void *data __UNUSED__)
516 {
517 #ifdef EFL_HAVE_POSIX_THREADS
518    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
519    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
520 #endif
521
522    eina_sched_prio_drop();
523
524 restart:
525    _ecore_short_job(PHS());
526    _ecore_feedback_job(PHS());
527
528    /* FIXME: Check whether there are pending feedback tasks and, if so, switch to the feedback run handler. */
529
530    LKL(_ecore_pending_job_threads_mutex);
531    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
532      {
533         LKU(_ecore_pending_job_threads_mutex);
534         goto restart;
535      }
536    LKU(_ecore_pending_job_threads_mutex);
537
538    /* Sleep a little to prevent premature death */
539 #ifdef _WIN32
540    Sleep(1); /* at least 1ms, often more due to the Windows timer granularity */
541 #else
542    usleep(50);
543 #endif
544
545    LKL(_ecore_pending_job_threads_mutex);
546    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
547      {
548         LKU(_ecore_pending_job_threads_mutex);
549         goto restart;
550      }
551    _ecore_thread_count--;
552
553    ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join,
554                                           (void*) PHS());
555    LKU(_ecore_pending_job_threads_mutex);
556
557    return NULL;
558 }
559
560 #endif
561
562 static Ecore_Pthread_Worker *
563 _ecore_thread_worker_new(void)
564 {
565 #ifdef EFL_HAVE_THREADS
566    Ecore_Pthread_Worker *result;
567
568    result = eina_trash_pop(&_ecore_thread_worker_trash);
569
570    if (!result) 
571      {
572        result = calloc(1, sizeof(Ecore_Pthread_Worker));
573        _ecore_thread_worker_count++;
574      }
575
576    LKI(result->cancel_mutex);
577    LKI(result->mutex);
578    CDI(result->cond, result->mutex);
579
580    return result;
581 #else
582    return malloc(sizeof (Ecore_Pthread_Worker));
583 #endif
584 }
585
586 void
587 _ecore_thread_init(void)
588 {
589    _ecore_thread_count_max = eina_cpu_count();
590    if (_ecore_thread_count_max <= 0)
591      _ecore_thread_count_max = 1;
592
593 #ifdef EFL_HAVE_THREADS
594    LKI(_ecore_pending_job_threads_mutex);
595    LRWKI(_ecore_thread_global_hash_lock);
596    LKI(_ecore_thread_global_hash_mutex);
597    CDI(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex);
598 #endif
599 }
600
601 void
602 _ecore_thread_shutdown(void)
603 {
604    /* FIXME: If functions are still running in the background, should we kill them? */
605 #ifdef EFL_HAVE_THREADS
606     Ecore_Pthread_Worker *work;
607     Eina_List *l;
608     Eina_Bool test;
609     int iteration = 0;
610
611     LKL(_ecore_pending_job_threads_mutex);
612
613     EINA_LIST_FREE(_ecore_pending_job_threads, work)
614       {
615          if (work->func_cancel)
616            work->func_cancel((void *)work->data, (Ecore_Thread *) work);
617          free(work);
618       }
619
620     EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
621       {
622          if (work->func_cancel)
623            work->func_cancel((void *)work->data, (Ecore_Thread *) work);
624          free(work);
625       }
626
627     EINA_LIST_FOREACH(_ecore_running_job, l, work)
628       ecore_thread_cancel((Ecore_Thread*) work);
629
630     LKU(_ecore_pending_job_threads_mutex);
631
632     do
633       {
634          LKL(_ecore_pending_job_threads_mutex);
635          if (_ecore_thread_count > 0)
636            {
637               test = EINA_TRUE;
638            }
639          else
640            {
641               test = EINA_FALSE;
642            }
643          LKU(_ecore_pending_job_threads_mutex);
644          iteration++;
645          if (test) usleep(50000);
646       }
647     while (test == EINA_TRUE && iteration < 20);
648
649     if (iteration == 20 && _ecore_thread_count > 0)
650       {
651          ERR("%i child threads are still running after 1s. This can lead to a segfault. Sorry.", _ecore_thread_count);
652       }
653
654     if (_ecore_thread_global_hash)
655       eina_hash_free(_ecore_thread_global_hash);
656     have_main_loop_thread = 0;
657
658     while ((work = eina_trash_pop(&_ecore_thread_worker_trash)))
659       {
660          free(work);
661       }
662
663     LKD(_ecore_pending_job_threads_mutex);
664     LRWKD(_ecore_thread_global_hash_lock);
665     LKD(_ecore_thread_global_hash_mutex);
666     CDD(_ecore_thread_global_hash_cond);
667 #endif
668 }
669
670 EAPI Ecore_Thread *
671 ecore_thread_run(Ecore_Thread_Cb func_blocking,
672                  Ecore_Thread_Cb func_end,
673                  Ecore_Thread_Cb func_cancel,
674                  const void     *data)
675 {
676    Ecore_Pthread_Worker *work;
677    Eina_Bool tried = EINA_FALSE;
678 #ifdef EFL_HAVE_THREADS
679    PH(thread);
680 #endif
681
682    EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
683    
684    if (!func_blocking) return NULL;
685
686    work = _ecore_thread_worker_new();
687    if (!work)
688      {
689         if (func_cancel)
690           func_cancel((void *)data, NULL);
691         return NULL;
692      }
693
694    work->u.short_run.func_blocking = func_blocking;
695    work->func_end = func_end;
696    work->func_cancel = func_cancel;
697    work->cancel = EINA_FALSE;
698    work->feedback_run = EINA_FALSE;
699    work->message_run = EINA_FALSE;
700    work->kill = EINA_FALSE;
701    work->reschedule = EINA_FALSE;
702    work->no_queue = EINA_FALSE;
703    work->data = data;
704
705 #ifdef EFL_HAVE_THREADS
706    work->self = 0;
707    work->hash = NULL;
708
709    LKL(_ecore_pending_job_threads_mutex);
710    _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
711
712    if (_ecore_thread_count == _ecore_thread_count_max)
713      {
714         LKU(_ecore_pending_job_threads_mutex);
715         return (Ecore_Thread *)work;
716      }
717
718    LKU(_ecore_pending_job_threads_mutex);
719
720    /* One more thread could be created. */
721    eina_threads_init();
722
723    LKL(_ecore_pending_job_threads_mutex);
724
725  retry:
726    if (PHC(thread, _ecore_thread_worker, NULL) == 0)
727      {
728         _ecore_thread_count++;
729         LKU(_ecore_pending_job_threads_mutex);
730         return (Ecore_Thread *)work;
731      }
732    if (!tried)
733      {
734        _ecore_main_call_flush();
735        tried = EINA_TRUE;
736        goto retry;
737      }
738
739    if (_ecore_thread_count == 0)
740      {
741         _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
742
743         if (work->func_cancel)
744           work->func_cancel((void *) work->data, (Ecore_Thread *) work);
745
746         CDD(work->cond);
747         LKD(work->mutex);
748         LKD(work->cancel_mutex);
749         free(work);
750         work = NULL;
751      }
752    LKU(_ecore_pending_job_threads_mutex);
753
754    eina_threads_shutdown();
755
756    return (Ecore_Thread *)work;
757 #else
758    /*
759       If threads are not available, and since we don't want to break apps
760       that rely on this facility, we block the caller until the work is done.
761     */
762    do {
763         /* Handle reschedule by forcing it here. This blocks the app; an idler
764          * would be nicer, but that is too complex for a case where threads
765          * should really be available.
766          */
767           work->reschedule = EINA_FALSE;
768
769           func_blocking((void *)data, (Ecore_Thread *)work);
770           if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *)work);
771           else func_cancel((void *)data, (Ecore_Thread *)work);
772      } while (work->reschedule == EINA_TRUE);
773
774    free(work);
775
776    return NULL;
777 #endif
778 }
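/* Usage sketch for ecore_thread_run() (illustrative only; the callback and
 * variable names below are hypothetical, not part of this file):
 *
 *   static void _blocking(void *data, Ecore_Thread *th)
 *   {  // runs in a pool thread
 *      // heavy work here; poll ecore_thread_check(th) to honor cancellation
 *   }
 *   static void _end(void *data, Ecore_Thread *th)    {  } // main loop, success
 *   static void _cancel(void *data, Ecore_Thread *th) {  } // main loop, cancelled
 *
 *   Ecore_Thread *th = ecore_thread_run(_blocking, _end, _cancel, my_data);
 */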
779
780 EAPI Eina_Bool
781 ecore_thread_cancel(Ecore_Thread *thread)
782 {
783 #ifdef EFL_HAVE_THREADS
784    Ecore_Pthread_Worker *volatile work = (Ecore_Pthread_Worker *)thread;
785    Eina_List *l;
786    int cancel;
787
788    if (!work)
789      return EINA_TRUE;
790    LKL(work->cancel_mutex);
791    cancel = work->cancel;
792    LKU(work->cancel_mutex);
793    if (cancel)
794      return EINA_FALSE;
795
796    if (work->feedback_run)
797      {
798         if (work->kill)
799           return EINA_TRUE;
800         if (work->u.feedback_run.send != work->u.feedback_run.received)
801           goto on_exit;
802      }
803
804    LKL(_ecore_pending_job_threads_mutex);
805
806    if ((have_main_loop_thread) &&
807        (PHE(get_main_loop_thread(), PHS())))
808      {
809         if (!work->feedback_run)
810           EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
811             {
812                if ((void *)work == (void *)thread)
813                  {
814                     _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
815
816                     LKU(_ecore_pending_job_threads_mutex);
817
818                     if (work->func_cancel)
819                       work->func_cancel((void *)work->data, (Ecore_Thread *)work);
820                     free(work);
821
822                     return EINA_TRUE;
823                  }
824             }
825         else
826           EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
827             {
828                if ((void *)work == (void *)thread)
829                  {
830                     _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);
831
832                     LKU(_ecore_pending_job_threads_mutex);
833
834                     if (work->func_cancel)
835                       work->func_cancel((void *)work->data, (Ecore_Thread *)work);
836                     free(work);
837
838                     return EINA_TRUE;
839                  }
840             }
841      }
842
843    LKU(_ecore_pending_job_threads_mutex);
844
845    work = (Ecore_Pthread_Worker *)thread;
846
847    /* Delay the destruction */
848  on_exit:
849    LKL(work->cancel_mutex);
850    work->cancel = EINA_TRUE;
851    LKU(work->cancel_mutex);
852
853    return EINA_FALSE;
854 #else
855    (void) thread;
856    return EINA_TRUE;
857 #endif
858 }
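/* Note on ecore_thread_cancel(): cancellation is cooperative. When called
 * from the main loop on a job that is still queued, the job is removed and
 * its func_cancel fired immediately; otherwise only the cancel flag is set
 * and the running callback must poll ecore_thread_check() itself (see the
 * sketch after ecore_thread_check() below). */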
859
860 EAPI Eina_Bool
861 ecore_thread_check(Ecore_Thread *thread)
862 {
863    Ecore_Pthread_Worker *volatile worker = (Ecore_Pthread_Worker *) thread;
864    int cancel;
865
866    if (!worker) return EINA_TRUE;
867 #ifdef EFL_HAVE_THREADS
868    LKL(worker->cancel_mutex);
869 #endif
870    cancel = worker->cancel;
871    /* FIXME: there is an insane bug driving me nuts here. I don't know if
872     it's a race condition, a cache issue, or an alien attack on our software,
873     but ecore_thread_check only works correctly with a printf; all the volatile,
874     the lock and even usleep don't help here... */
875    /* fprintf(stderr, "wc: %i\n", cancel); */
876 #ifdef EFL_HAVE_THREADS
877    LKU(worker->cancel_mutex);
878 #endif
879    return cancel;
880 }
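/* Sketch of a cancellation-aware worker callback (illustrative; _blocking
 * and its work loop are hypothetical):
 *
 *   static void _blocking(void *data, Ecore_Thread *th)
 *   {
 *      int i;
 *      for (i = 0; i < 1000; i++)
 *        {
 *           if (ecore_thread_check(th)) return; // cancel requested, bail out
 *           // ... one chunk of heavy work ...
 *        }
 *   }
 */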
881
882 EAPI Ecore_Thread *
883 ecore_thread_feedback_run(Ecore_Thread_Cb        func_heavy,
884                           Ecore_Thread_Notify_Cb func_notify,
885                           Ecore_Thread_Cb        func_end,
886                           Ecore_Thread_Cb        func_cancel,
887                           const void            *data,
888                           Eina_Bool              try_no_queue)
889 {
890 #ifdef EFL_HAVE_THREADS
891    Ecore_Pthread_Worker *worker;
892    Eina_Bool tried = EINA_FALSE;
893    PH(thread);
894
895    EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
896    
897    if (!func_heavy) return NULL;
898
899    worker = _ecore_thread_worker_new();
900    if (!worker) goto on_error;
901
902    worker->u.feedback_run.func_heavy = func_heavy;
903    worker->u.feedback_run.func_notify = func_notify;
904    worker->hash = NULL;
905    worker->func_cancel = func_cancel;
906    worker->func_end = func_end;
907    worker->data = data;
908    worker->cancel = EINA_FALSE;
909    worker->message_run = EINA_FALSE;
910    worker->feedback_run = EINA_TRUE;
911    worker->kill = EINA_FALSE;
912    worker->reschedule = EINA_FALSE;
913    worker->self = 0;
914
915    worker->u.feedback_run.send = 0;
916    worker->u.feedback_run.received = 0;
917
918    worker->u.feedback_run.direct_worker = NULL;
919
920    if (try_no_queue)
921      {
922         PH(t);
923
924         worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
925         worker->no_queue = EINA_TRUE;
926
927         eina_threads_init();
928
929      retry_direct:
930         if (PHC(t, _ecore_direct_worker, worker) == 0)
931           return (Ecore_Thread *)worker;
932         if (!tried)
933           {
934              _ecore_main_call_flush();
935              tried = EINA_TRUE;
936              goto retry_direct;
937           }
938
939         if (worker->u.feedback_run.direct_worker)
940           {
941              _ecore_thread_worker_free(worker->u.feedback_run.direct_worker);
942              worker->u.feedback_run.direct_worker = NULL;
943           }
944
945         eina_threads_shutdown();
946      }
947
948    worker->no_queue = EINA_FALSE;
949
950    LKL(_ecore_pending_job_threads_mutex);
951    _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
952
953    if (_ecore_thread_count == _ecore_thread_count_max)
954      {
955         LKU(_ecore_pending_job_threads_mutex);
956         return (Ecore_Thread *)worker;
957      }
958
959    LKU(_ecore_pending_job_threads_mutex);
960
961    /* One more thread could be created. */
962    eina_threads_init();
963
964    LKL(_ecore_pending_job_threads_mutex);
965  retry:
966    if (PHC(thread, _ecore_thread_worker, NULL) == 0)
967      {
968         _ecore_thread_count++;
969         LKU(_ecore_pending_job_threads_mutex);
970         return (Ecore_Thread *)worker;
971      }
972    if (!tried)
973      {
974         _ecore_main_call_flush();
975         tried = EINA_TRUE;
976         goto retry;
977      }
978    LKU(_ecore_pending_job_threads_mutex);
979
980    eina_threads_shutdown();
981
982 on_error:
983    LKL(_ecore_pending_job_threads_mutex);
984    if (_ecore_thread_count == 0)
985      {
986         _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
987                                                                worker);
988
989         if (func_cancel) func_cancel((void *)data, NULL);
990
991         if (worker)
992           {
993              CDD(worker->cond);
994              LKD(worker->mutex);
995              free(worker);
996              worker = NULL;
997           }
998      }
999    LKU(_ecore_pending_job_threads_mutex);
1000
1001    return (Ecore_Thread *)worker;
1002 #else
1003    Ecore_Pthread_Worker worker;
1004
1005    (void)try_no_queue;
1006
1007    /*
1008       If threads are not available, and since we don't want to break apps
1009       that rely on this facility, we block the caller until the work is done.
1010     */
1011    worker.u.feedback_run.func_heavy = func_heavy;
1012    worker.u.feedback_run.func_notify = func_notify;
1013    worker.u.feedback_run.send = 0;
1014    worker.u.feedback_run.received = 0;
1015    worker.func_cancel = func_cancel;
1016    worker.func_end = func_end;
1017    worker.data = data;
1018    worker.cancel = EINA_FALSE;
1019    worker.feedback_run = EINA_TRUE;
1020    worker.message_run = EINA_FALSE;
1021    worker.kill = EINA_FALSE;
1022
1023    do {
1024         worker.reschedule = EINA_FALSE;
1025
1026         func_heavy((void *)data, (Ecore_Thread *)&worker);
1027
1028         if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *)&worker);
1029         else func_end((void *)data, (Ecore_Thread *)&worker);
1030      } while (worker.reschedule == EINA_TRUE);
1031
1032    return NULL;
1033 #endif
1034 }
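/* Usage sketch for ecore_thread_feedback_run() (illustrative; callback names
 * are hypothetical, _end/_cancel as in the ecore_thread_run() sketch above):
 *
 *   static void _heavy(void *data, Ecore_Thread *th)
 *   {
 *      // runs in a worker thread; report progress back to the main loop
 *      int *progress = malloc(sizeof (int));
 *      *progress = 50;
 *      ecore_thread_feedback(th, progress);
 *   }
 *   static void _notify(void *data, Ecore_Thread *th, void *msg)
 *   {
 *      // runs in the main loop once per ecore_thread_feedback() call
 *      free(msg);
 *   }
 *
 *   ecore_thread_feedback_run(_heavy, _notify, _end, _cancel, my_data,
 *                             EINA_FALSE); // EINA_TRUE tries a dedicated thread
 */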
1035
1036 EAPI Eina_Bool
1037 ecore_thread_feedback(Ecore_Thread *thread,
1038                       const void   *data)
1039 {
1040    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1041
1042    if (!worker) return EINA_FALSE;
1043
1044 #ifdef EFL_HAVE_THREADS
1045    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1046
1047    if (worker->feedback_run)
1048      {
1049         Ecore_Pthread_Notify *notify;
1050
1051         notify = malloc(sizeof (Ecore_Pthread_Notify));
1052         if (!notify) return EINA_FALSE;
1053
1054         notify->user_data = data;
1055         notify->work = worker;
1056         worker->u.feedback_run.send++;
1057
1058         ecore_main_loop_thread_safe_call_async(_ecore_notify_handler, notify);
1059      }
1060    else if (worker->message_run)
1061      {
1062         Ecore_Pthread_Message *msg;
1063         Ecore_Pthread_Notify *notify;
1064
1065         msg = malloc(sizeof (Ecore_Pthread_Message));
1066         if (!msg) return EINA_FALSE;
1067         msg->data = data;
1068         msg->callback = EINA_FALSE;
1069         msg->sync = EINA_FALSE;
1070
1071         notify = malloc(sizeof (Ecore_Pthread_Notify));
1072         if (!notify)
1073           {
1074              free(msg);
1075              return EINA_FALSE;
1076           }
1077         notify->work = worker;
1078         notify->user_data = msg;
1079
1080         worker->u.message_run.from.send++;
1081         ecore_main_loop_thread_safe_call_async(_ecore_message_notify_handler, notify);
1082      }
1083    else
1084      return EINA_FALSE;
1085
1086    return EINA_TRUE;
1087 #else
1088    worker->u.feedback_run.func_notify((void *)worker->data, thread, (void *)data);
1089
1090    return EINA_TRUE;
1091 #endif
1092 }
1093
1094 #if 0
1095 EAPI Ecore_Thread *
1096 ecore_thread_message_run(Ecore_Thread_Cb func_main,
1097                          Ecore_Thread_Notify_Cb func_notify,
1098                          Ecore_Thread_Cb func_end,
1099                          Ecore_Thread_Cb func_cancel,
1100                          const void *data)
1101 {
1102 #ifdef EFL_HAVE_THREADS
1103   Ecore_Pthread_Worker *worker;
1104   PH(t);
1105
1106   if (!func_main) return NULL;
1107
1108   worker = _ecore_thread_worker_new();
1109   if (!worker) return NULL;
1110
1111   worker->u.message_run.func_main = func_main;
1112   worker->u.message_run.func_notify = func_notify;
1113   worker->u.message_run.direct_worker = _ecore_thread_worker_new();
1114   worker->u.message_run.send = ecore_pipe_add(_ecore_nothing_handler, worker);
1115   worker->u.message_run.from.send = 0;
1116   worker->u.message_run.from.received = 0;
1117   worker->u.message_run.to.send = 0;
1118   worker->u.message_run.to.received = 0;
1119
1120   ecore_pipe_freeze(worker->u.message_run.send);
1121
1122   worker->func_cancel = func_cancel;
1123   worker->func_end = func_end;
1124   worker->hash = NULL;
1125   worker->data = data;
1126
1127   worker->cancel = EINA_FALSE;
1128   worker->message_run = EINA_TRUE;
1129   worker->feedback_run = EINA_FALSE;
1130   worker->kill = EINA_FALSE;
1131   worker->reschedule = EINA_FALSE;
1132   worker->no_queue = EINA_FALSE;
1133   worker->self = 0;
1134
1135   eina_threads_init();
1136
1137   if (PHC(t, _ecore_direct_worker, worker) == 0)
1138     return (Ecore_Thread*) worker;
1139
1140   eina_threads_shutdown();
1141
1142   if (worker->u.message_run.direct_worker) _ecore_thread_worker_free(worker->u.message_run.direct_worker);
1143   if (worker->u.message_run.send) ecore_pipe_del(worker->u.message_run.send);
1144
1145   CDD(worker->cond);
1146   LKD(worker->mutex);
1147 #else
1148   /* Note: This type of thread can't and never will work without thread support */
1149   WRN("ecore_thread_message_run called, but threads are disabled in Ecore; things will go wrong. Starting now!");
1150 # warning "You disabled thread support in Ecore. I hope you know what you are doing!"
1151 #endif
1152
1153   func_cancel((void *) data, NULL);
1154
1155   return NULL;
1156 }
1157 #endif
1158
1159 EAPI Eina_Bool
1160 ecore_thread_reschedule(Ecore_Thread *thread)
1161 {
1162    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1163
1164    if (!worker) return EINA_FALSE;
1165
1166 #ifdef EFL_HAVE_THREADS
1167    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1168 #endif
1169
1170    worker->reschedule = EINA_TRUE;
1171    return EINA_TRUE;
1172 }
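/* Usage sketch for ecore_thread_reschedule(): calling it from inside the
 * worker callback requeues the same job instead of ending it (illustrative;
 * _blocking and more_work_left() are hypothetical):
 *
 *   static void _blocking(void *data, Ecore_Thread *th)
 *   {
 *      // ... process one batch ...
 *      if (more_work_left(data))
 *        ecore_thread_reschedule(th); // run this callback again later
 *   }
 */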
1173
1174 EAPI int
1175 ecore_thread_active_get(void)
1176 {
1177 #ifdef EFL_HAVE_THREADS
1178    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1179    return _ecore_thread_count;
1180 #else
1181    return 0;
1182 #endif
1183 }
1184
1185 EAPI int
1186 ecore_thread_pending_get(void)
1187 {
1188 #ifdef EFL_HAVE_THREADS
1189    int ret;
1190
1191    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1192    LKL(_ecore_pending_job_threads_mutex);
1193    ret = eina_list_count(_ecore_pending_job_threads);
1194    LKU(_ecore_pending_job_threads_mutex);
1195    return ret;
1196 #else
1197    return 0;
1198 #endif
1199 }
1200
1201 EAPI int
1202 ecore_thread_pending_feedback_get(void)
1203 {
1204 #ifdef EFL_HAVE_THREADS
1205    int ret;
1206
1207    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1208    LKL(_ecore_pending_job_threads_mutex);
1209    ret = eina_list_count(_ecore_pending_job_threads_feedback);
1210    LKU(_ecore_pending_job_threads_mutex);
1211    return ret;
1212 #else
1213    return 0;
1214 #endif
1215 }
1216
1217 EAPI int
1218 ecore_thread_pending_total_get(void)
1219 {
1220 #ifdef EFL_HAVE_THREADS
1221    int ret;
1222
1223    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1224    LKL(_ecore_pending_job_threads_mutex);
1225    ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1226    LKU(_ecore_pending_job_threads_mutex);
1227    return ret;
1228 #else
1229    return 0;
1230 #endif
1231 }
1232
1233 EAPI int
1234 ecore_thread_max_get(void)
1235 {
1236    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1237    return _ecore_thread_count_max;
1238 }
1239
1240 EAPI void
1241 ecore_thread_max_set(int num)
1242 {
1243    EINA_MAIN_LOOP_CHECK_RETURN;
1244    if (num < 1) return;
1245    /* avoid doing something hilarious by blocking dumb users */
1246    if (num > (16 * eina_cpu_count())) num = 16 * eina_cpu_count();
1247
1248    _ecore_thread_count_max = num;
1249 }
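/* Example: allow twice as many workers as there are CPUs. Values below 1 are
 * ignored and the pool is capped at 16 * eina_cpu_count():
 *
 *   ecore_thread_max_set(2 * eina_cpu_count());
 */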
1250
1251 EAPI void
1252 ecore_thread_max_reset(void)
1253 {
1254    EINA_MAIN_LOOP_CHECK_RETURN;
1255    _ecore_thread_count_max = eina_cpu_count();
1256 }
1257
1258 EAPI int
1259 ecore_thread_available_get(void)
1260 {
1261 #ifdef EFL_HAVE_THREADS
1262    int ret;
1263
1264    LKL(_ecore_pending_job_threads_mutex);
1265    ret = _ecore_thread_count_max - _ecore_thread_count;
1266    LKU(_ecore_pending_job_threads_mutex);
1267    return ret;
1268 #else
1269    return 0;
1270 #endif
1271 }
1272
1273 EAPI Eina_Bool
1274 ecore_thread_local_data_add(Ecore_Thread *thread,
1275                             const char   *key,
1276                             void         *value,
1277                             Eina_Free_Cb  cb,
1278                             Eina_Bool     direct)
1279 {
1280 #ifdef EFL_HAVE_THREADS
1281    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1282    Ecore_Thread_Data *d;
1283    Eina_Bool ret;
1284 #endif
1285
1286    if ((!thread) || (!key) || (!value))
1287      return EINA_FALSE;
1288 #ifdef EFL_HAVE_THREADS
1289    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1290
1291    if (!worker->hash)
1292      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1293
1294    if (!worker->hash)
1295      return EINA_FALSE;
1296
1297    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1298      return EINA_FALSE;
1299
1300    d->data = value;
1301    d->cb = cb;
1302
1303    if (direct)
1304      ret = eina_hash_direct_add(worker->hash, key, d);
1305    else
1306      ret = eina_hash_add(worker->hash, key, d);
1307    CDB(worker->cond);
1308    return ret;
1309 #else
1310    (void) cb;
1311    (void) direct;
1312    return EINA_FALSE;
1313 #endif
1314 }
1315
1316 EAPI void *
1317 ecore_thread_local_data_set(Ecore_Thread *thread,
1318                             const char   *key,
1319                             void         *value,
1320                             Eina_Free_Cb  cb)
1321 {
1322 #ifdef EFL_HAVE_THREADS
1323    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1324    Ecore_Thread_Data *d, *r;
1325    void *ret;
1326 #endif
1327
1328    if ((!thread) || (!key) || (!value))
1329      return NULL;
1330 #ifdef EFL_HAVE_THREADS
1331    if (!PHE(worker->self, PHS())) return NULL;
1332
1333    if (!worker->hash)
1334      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1335
1336    if (!worker->hash)
1337      return NULL;
1338
1339    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1340      return NULL;
1341
1342    d->data = value;
1343    d->cb = cb;
1344
1345    r = eina_hash_set(worker->hash, key, d);
1346    CDB(worker->cond);
1347    ret = r ? r->data : NULL;
1348    free(r);
1349    return ret;
1350 #else
1351    (void) cb;
1352    return NULL;
1353 #endif
1354 }
1355
1356 EAPI void *
1357 ecore_thread_local_data_find(Ecore_Thread *thread,
1358                              const char   *key)
1359 {
1360 #ifdef EFL_HAVE_THREADS
1361    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1362    Ecore_Thread_Data *d;
1363 #endif
1364
1365    if ((!thread) || (!key))
1366      return NULL;
1367 #ifdef EFL_HAVE_THREADS
1368    if (!PHE(worker->self, PHS())) return NULL;
1369
1370    if (!worker->hash)
1371      return NULL;
1372
1373    d = eina_hash_find(worker->hash, key);
1374    if (d)
1375      return d->data;
1376    return NULL;
1377 #else
1378    return NULL;
1379 #endif
1380 }
1381
1382 EAPI Eina_Bool
1383 ecore_thread_local_data_del(Ecore_Thread *thread,
1384                             const char   *key)
1385 {
1386 #ifdef EFL_HAVE_THREADS
1387    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1388 #endif
1389
1390    if ((!thread) || (!key))
1391      return EINA_FALSE;
1392 #ifdef EFL_HAVE_THREADS
1393    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1394
1395    if (!worker->hash)
1396      return EINA_FALSE;
1397    return eina_hash_del_by_key(worker->hash, key);
1398 #else
1399    return EINA_TRUE;
1400 #endif
1401 }
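/* Usage sketch for the thread-local data API: these calls only succeed when
 * made from the worker callback of the given thread (illustrative; the key
 * and values are hypothetical):
 *
 *   static void _heavy(void *data, Ecore_Thread *th)
 *   {
 *      char *buf = strdup("scratch");
 *      ecore_thread_local_data_add(th, "scratch", buf, free, EINA_FALSE);
 *      // ... later in the same callback ...
 *      char *again = ecore_thread_local_data_find(th, "scratch");
 *      ecore_thread_local_data_del(th, "scratch"); // invokes free(buf)
 *   }
 */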
1402
1403 EAPI Eina_Bool
1404 ecore_thread_global_data_add(const char  *key,
1405                              void        *value,
1406                              Eina_Free_Cb cb,
1407                              Eina_Bool    direct)
1408 {
1409 #ifdef EFL_HAVE_THREADS
1410    Ecore_Thread_Data *d;
1411    Eina_Bool ret;
1412 #endif
1413
1414    if ((!key) || (!value))
1415      return EINA_FALSE;
1416 #ifdef EFL_HAVE_THREADS
1417    LRWKWL(_ecore_thread_global_hash_lock);
1418    if (!_ecore_thread_global_hash)
1419      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1420    LRWKU(_ecore_thread_global_hash_lock);
1421
1422    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1423      return EINA_FALSE;
1424
1425    d->data = value;
1426    d->cb = cb;
1427
1428    if (!_ecore_thread_global_hash)
1429      return EINA_FALSE;
1430    LRWKWL(_ecore_thread_global_hash_lock);
1431    if (direct)
1432      ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1433    else
1434      ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1435    LRWKU(_ecore_thread_global_hash_lock);
1436    CDB(_ecore_thread_global_hash_cond);
1437    return ret;
1438 #else
1439    (void) cb;
1440    (void) direct;
1441    return EINA_TRUE;
1442 #endif
1443 }
1444
1445 EAPI void *
1446 ecore_thread_global_data_set(const char  *key,
1447                              void        *value,
1448                              Eina_Free_Cb cb)
1449 {
1450 #ifdef EFL_HAVE_THREADS
1451    Ecore_Thread_Data *d, *r;
1452    void *ret;
1453 #endif
1454
1455    if ((!key) || (!value))
1456      return NULL;
1457 #ifdef EFL_HAVE_THREADS
1458    LRWKWL(_ecore_thread_global_hash_lock);
1459    if (!_ecore_thread_global_hash)
1460      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1461    LRWKU(_ecore_thread_global_hash_lock);
1462
1463    if (!_ecore_thread_global_hash)
1464      return NULL;
1465
1466    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1467      return NULL;
1468
1469    d->data = value;
1470    d->cb = cb;
1471
1472    LRWKWL(_ecore_thread_global_hash_lock);
1473    r = eina_hash_set(_ecore_thread_global_hash, key, d);
1474    LRWKU(_ecore_thread_global_hash_lock);
1475    CDB(_ecore_thread_global_hash_cond);
1476
1477    ret = r ? r->data : NULL;
1478    free(r);
1479    return ret;
1480 #else
1481    (void) cb;
1482    return NULL;
1483 #endif
1484 }
1485
1486 EAPI void *
1487 ecore_thread_global_data_find(const char *key)
1488 {
1489 #ifdef EFL_HAVE_THREADS
1490    Ecore_Thread_Data *ret;
1491 #endif
1492
1493    if (!key)
1494      return NULL;
1495 #ifdef EFL_HAVE_THREADS
1496    if (!_ecore_thread_global_hash) return NULL;
1497
1498    LRWKRL(_ecore_thread_global_hash_lock);
1499    ret = eina_hash_find(_ecore_thread_global_hash, key);
1500    LRWKU(_ecore_thread_global_hash_lock);
1501    if (ret)
1502      return ret->data;
1503    return NULL;
1504 #else
1505    return NULL;
1506 #endif
1507 }
1508
1509 EAPI Eina_Bool
1510 ecore_thread_global_data_del(const char *key)
1511 {
1512 #ifdef EFL_HAVE_THREADS
1513    Eina_Bool ret;
1514 #endif
1515
1516    if (!key)
1517      return EINA_FALSE;
1518 #ifdef EFL_HAVE_THREADS
1519    if (!_ecore_thread_global_hash)
1520      return EINA_FALSE;
1521
1522    LRWKWL(_ecore_thread_global_hash_lock);
1523    ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1524    LRWKU(_ecore_thread_global_hash_lock);
1525    return ret;
1526 #else
1527    return EINA_TRUE;
1528 #endif
1529 }
1530
1531 EAPI void *
1532 ecore_thread_global_data_wait(const char *key,
1533                               double      seconds)
1534 {
1535 #ifdef EFL_HAVE_THREADS
1536    double tm = 0;
1537    Ecore_Thread_Data *ret = NULL;
1538 #endif
1539
1540    if (!key)
1541      return NULL;
1542 #ifdef EFL_HAVE_THREADS
1543    if (!_ecore_thread_global_hash)
1544      return NULL;
1545    if (seconds > 0)
1546      tm = ecore_time_get() + seconds;
1547
1548    while (1)
1549      {
1550         LRWKRL(_ecore_thread_global_hash_lock);
1551         ret = eina_hash_find(_ecore_thread_global_hash, key);
1552         LRWKU(_ecore_thread_global_hash_lock);
1553         if ((ret) || (!seconds) || ((seconds > 0) && (tm <= ecore_time_get())))
1554           break;
1555         LKL(_ecore_thread_global_hash_mutex);
1556         CDW(_ecore_thread_global_hash_cond, tm);
1557         LKU(_ecore_thread_global_hash_mutex);
1558      }
1559    if (ret) return ret->data;
1560    return NULL;
1561 #else
1562    (void) seconds;
1563    return NULL;
1564 #endif
1565 }
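/* Usage sketch for the global data API: one thread publishes a value, another
 * blocks until it appears or a timeout expires (illustrative; the key and the
 * My_Config type are hypothetical):
 *
 *   // in some worker thread (cfg is a My_Config * built by that worker):
 *   ecore_thread_global_data_add("config", cfg, NULL, EINA_FALSE);
 *
 *   // elsewhere, wait up to 2 seconds for it to show up:
 *   My_Config *conf = ecore_thread_global_data_wait("config", 2.0);
 *   if (!conf)
 *     ; // timed out
 */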
1566