Tizen 2.1 base
[framework/uifw/ecore.git] / src / lib / ecore / ecore_thread.c
1
2 #ifdef HAVE_CONFIG_H
3 # include <config.h>
4 #endif
5
6 #include <stdlib.h>
7 #include <sys/time.h>
8 #include <assert.h>
9 #include <sys/types.h>
10 #include <unistd.h>
11
12 #ifdef HAVE_EVIL
13 # include <Evil.h>
14 #endif
15
16 #include "Ecore.h"
17 #include "ecore_private.h"
18
19 #ifdef EFL_HAVE_THREADS
20
/* Mutex helpers: thin wrappers over the Eina_Lock API. */
# define LK(x) Eina_Lock x
# define LKI(x) eina_lock_new(&(x))
# define LKD(x) eina_lock_free(&(x))
# define LKL(x) eina_lock_take(&(x))
# define LKU(x) eina_lock_release(&(x))

/* Condition-variable helpers: thin wrappers over Eina_Condition. */
# define CD(x) Eina_Condition x
# define CDI(x, m) eina_condition_new(&(x), &(m))
# define CDD(x) eina_condition_free(&(x))
# define CDB(x) eina_condition_broadcast(&(x))
# define CDW(x, t) eina_condition_timedwait(&(x), t)

/* Read-write lock helpers: thin wrappers over Eina_RWLock. */
# define LRWK(x) Eina_RWLock x
# define LRWKI(x) eina_rwlock_new(&(x));
# define LRWKD(x) eina_rwlock_free(&(x));
# define LRWKWL(x) eina_rwlock_take_write(&(x));
# define LRWKRL(x) eina_rwlock_take_read(&(x));
# define LRWKU(x) eina_rwlock_release(&(x));
39
40 # ifdef EFL_HAVE_POSIX_THREADS
41 #  include <pthread.h>
42 #  ifdef __linux__
43 #   include <sched.h>
44 #   include <sys/resource.h>
45 #   include <unistd.h>
46 #   include <sys/syscall.h>
47 #   include <errno.h>
48 #  endif
49
/* Thread primitive wrappers, POSIX flavour: declare, compare, self,
 * create, join and cancel a thread handle. */
#  define PH(x)        pthread_t x
#  define PHE(x, y)    pthread_equal(x, y)
#  define PHS()        pthread_self()
#  define PHC(x, f, d) pthread_create(&(x), NULL, (void *)f, d)
#  define PHJ(x)       pthread_join(x, NULL)
#  define PHA(x)       pthread_cancel(x)
56
57 # else /* EFL_HAVE_WIN32_THREADS */
58
59 #  define WIN32_LEAN_AND_MEAN
60 #  include <windows.h>
61 #  undef WIN32_LEAN_AND_MEAN
62
/* Win32 has no pthread_t equivalent, so wrap the thread HANDLE
 * together with the startup argument and keep every live thread in a
 * list so PHS()/_ecore_thread_win32_self() can find "self". */
typedef struct
{
   HANDLE thread; /* native thread handle from CreateThread() */
   void  *val;    /* argument the thread was started with */
} win32_thread;

/* All live win32_thread records; guarded by _ecore_thread_win32_lock. */
static Eina_List    *_ecore_thread_win32_threads = NULL;
static Eina_Lock     _ecore_thread_win32_lock;
71
72 #  define PH(x)     win32_thread * x
73 #  define PHE(x, y) ((x) == (y))
74
75 static win32_thread *
76 _ecore_thread_win32_self()
77 {
78    win32_thread *t;
79    Eina_List *l;
80
81    LKL(_ecore_thread_win32_lock);
82    EINA_LIST_FOREACH(_ecore_thread_win32_threads, l, t)
83      if (t->thread == GetCurrentThread())
84        {
85           LKU(_ecore_thread_win32_lock);
86           return t;
87        }
88
89    LKU(_ecore_thread_win32_lock);
90    return NULL;
91 }
92
93 #  define PHS()     _ecore_thread_win32_self()
94
95 static int
96 _ecore_thread_win32_create(win32_thread         **x,
97                            LPTHREAD_START_ROUTINE f,
98                            void                  *d)
99 {
100    win32_thread *t;
101
102    t = (win32_thread *)calloc(1, sizeof(win32_thread));
103    if (!t)
104      return -1;
105
106    LKL(_ecore_thread_win32_lock);
107    (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
108    if (!t->thread)
109      {
110         free(t);
111         LKU(_ecore_thread_win32_lock);
112         return -1;
113      }
114    t->val = d;
115    *x = t;
116    _ecore_thread_win32_threads = eina_list_append(_ecore_thread_win32_threads, t);
117    LKU(_ecore_thread_win32_lock);
118
119    return 0;
120 }
121
122 #  define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)
123
124 static int
125 _ecore_thread_win32_join(win32_thread *x,
126                          void        **res)
127 {
128    if (!PHE(x, PHS()))
129      {
130         WaitForSingleObject(x->thread, INFINITE);
131         CloseHandle(x->thread);
132      }
133    if (res) *res = x->val;
134    _ecore_thread_win32_threads = eina_list_remove(_ecore_thread_win32_threads, x);
135    free(x);
136
137    return 0;
138 }
139
140 #  define PHJ(x) _ecore_thread_win32_join(x, NULL)
141 #  define PHA(x)    TerminateThread(x->thread, 0)
142
143 # endif
144
145 #endif
146
typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread        Ecore_Pthread;
typedef struct _Ecore_Thread_Data    Ecore_Thread_Data;

/* One value stored in a thread's data hash: the payload plus an
 * optional destructor run by _ecore_thread_data_free(). */
struct _Ecore_Thread_Data
{
   void        *data; /* user payload */
   Eina_Free_Cb cb;   /* optional destructor for data */
};
156
/* Bookkeeping for one submitted thread job.  The same structure backs
 * all three job flavours; the active union member is selected by the
 * message_run/feedback_run flags below (neither set = short_run). */
struct _Ecore_Pthread_Worker
{
   union {
      /* ecore_thread_run(): a single blocking function, no feedback. */
      struct
      {
         Ecore_Thread_Cb func_blocking;
      } short_run;
      /* ecore_thread_feedback_run(): heavy function plus main-loop
       * notifications. */
      struct
      {
         Ecore_Thread_Cb        func_heavy;
         Ecore_Thread_Notify_Cb func_notify;

         Ecore_Pthread_Worker  *direct_worker; /* set for no_queue jobs */

         /* Notifications emitted by the worker vs. consumed by the
          * main loop; destruction waits until they are equal. */
         int                    send;
         int                    received;
      } feedback_run;
      /* message-run jobs: bidirectional messaging with the loop. */
      struct {
         Ecore_Thread_Cb func_main;
         Ecore_Thread_Notify_Cb func_notify;

         Ecore_Pipe            *send; /* pipe carrying sync replies back */
         Ecore_Pthread_Worker  *direct_worker;

         /* Per-direction sent/consumed message counters. */
         struct {
            int send;
            int received;
         } from, to;
      } message_run;
   } u;

   Ecore_Thread_Cb func_cancel; /* run on the main loop when cancelled */
   Ecore_Thread_Cb func_end;    /* run on the main loop on completion */
#ifdef EFL_HAVE_THREADS
                   PH(self);    /* thread currently executing the job */
   Eina_Hash      *hash;        /* per-job data hash, freed on kill */
                   CD(cond);
                   LK(mutex);
#endif

   const void     *data;        /* user data passed to every callback */

   int cancel;                  /* cancellation flag; see cancel_mutex */

#ifdef EFL_HAVE_THREADS
   LK(cancel_mutex);
#endif

   Eina_Bool message_run : 1;
   Eina_Bool feedback_run : 1;
   Eina_Bool kill : 1;       /* destroy once pending notifications drain */
   Eina_Bool reschedule : 1; /* re-queue the job after its function returns */
   Eina_Bool no_queue : 1;   /* runs on a dedicated thread, not the pool */
};
211
212 #ifdef EFL_HAVE_THREADS
typedef struct _Ecore_Pthread_Notify Ecore_Pthread_Notify;
/* Payload handed to the main loop for one worker notification. */
struct _Ecore_Pthread_Notify
{
   Ecore_Pthread_Worker *work;
   const void *user_data; /* user feedback, or an Ecore_Pthread_Message */
};

/* Callback flavour whose return value is sent back to the worker. */
typedef void *(*Ecore_Thread_Sync_Cb)(void* data, Ecore_Thread *thread);

typedef struct _Ecore_Pthread_Message Ecore_Pthread_Message;
/* One message exchanged between a message-run worker and the loop. */
struct _Ecore_Pthread_Message
{
   union {
      Ecore_Thread_Cb async;
      Ecore_Thread_Sync_Cb sync;
   } u;

   const void *data;

   int code; /* set to INT_MAX when a sync reply has been processed */

   Eina_Bool callback : 1; /* u holds an internal callback, not user data */
   Eina_Bool sync : 1;     /* reply is written back over the worker's pipe */
};
237
238 #endif
239
/* Upper bound on pooled worker threads; set from the CPU count in
 * _ecore_thread_init(). */
static int _ecore_thread_count_max = 0;

#ifdef EFL_HAVE_THREADS

static void _ecore_thread_handler(void *data);

/* Number of pool threads currently alive. */
static int _ecore_thread_count = 0;

/* Jobs currently executing, plus the two pending queues (short jobs
 * and feedback jobs), each guarded by its mutex below. */
static Eina_List *_ecore_running_job = NULL;
static Eina_List *_ecore_pending_job_threads = NULL;
static Eina_List *_ecore_pending_job_threads_feedback = NULL;
static LK(_ecore_pending_job_threads_mutex);
static LK(_ecore_running_job_mutex);

/* Cross-thread shared data hash and its synchronization primitives. */
static Eina_Hash *_ecore_thread_global_hash = NULL;
static LRWK(_ecore_thread_global_hash_lock);
static LK(_ecore_thread_global_hash_mutex);
static CD(_ecore_thread_global_hash_cond);

static Eina_Bool have_main_loop_thread = 0;

/* Recycled worker structures, to limit malloc/free churn. */
static Eina_Trash *_ecore_thread_worker_trash = NULL;
static int _ecore_thread_worker_count = 0;

static void                 *_ecore_thread_worker(void *);
static Ecore_Pthread_Worker *_ecore_thread_worker_new(void);
266
267 static PH(get_main_loop_thread) (void)
268 {
269    static PH(main_loop_thread);
270    static pid_t main_loop_pid;
271    pid_t pid = getpid();
272
273    if (pid != main_loop_pid)
274      {
275         main_loop_pid = pid;
276         main_loop_thread = PHS();
277         have_main_loop_thread = 1;
278      }
279
280    return main_loop_thread;
281 }
282
283 static void
284 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
285 {
286    LKD(worker->cancel_mutex);
287    CDD(worker->cond);
288    LKD(worker->mutex);
289
290    if (_ecore_thread_worker_count > ((_ecore_thread_count_max + 1) * 16))
291      {
292         _ecore_thread_worker_count--;
293         free(worker);
294         return;
295      }
296
297    eina_trash_push(&_ecore_thread_worker_trash, worker);
298 }
299
300 static void
301 _ecore_thread_data_free(void *data)
302 {
303    Ecore_Thread_Data *d = data;
304
305    if (d->cb) d->cb(d->data);
306    free(d);
307 }
308
/* Join (reap) a finished worker thread.  Dispatched to the main loop
 * via ecore_main_loop_thread_safe_call_async() so a thread is never
 * joined from itself. */
static void
_ecore_thread_join(PH(thread))
{
   PHJ(thread);
}
314
315 static void
316 _ecore_thread_kill(Ecore_Pthread_Worker *work)
317 {
318    if (work->cancel)
319      {
320         if (work->func_cancel)
321           work->func_cancel((void *)work->data, (Ecore_Thread *)work);
322      }
323    else
324      {
325         if (work->func_end)
326           work->func_end((void *)work->data, (Ecore_Thread *)work);
327      }
328
329    if (work->feedback_run)
330      {
331         if (work->u.feedback_run.direct_worker)
332           _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
333      }
334    if (work->hash)
335      eina_hash_free(work->hash);
336    _ecore_thread_worker_free(work);
337 }
338
339 static void
340 _ecore_thread_handler(void *data)
341 {
342    Ecore_Pthread_Worker *work = data;
343
344    if (work->feedback_run)
345      {
346         if (work->u.feedback_run.send != work->u.feedback_run.received)
347           {
348              work->kill = EINA_TRUE;
349              return;
350           }
351      }
352
353    _ecore_thread_kill(work);
354 }
355
356 #if 0
357 static void
358 _ecore_nothing_handler(void *data __UNUSED__, void *buffer __UNUSED__, unsigned int nbyte __UNUSED__)
359 {
360 }
361 #endif
362
363 static void
364 _ecore_notify_handler(void *data)
365 {
366    Ecore_Pthread_Notify *notify = data;
367    Ecore_Pthread_Worker *work = notify->work;
368    void *user_data = (void*) notify->user_data;
369
370    work->u.feedback_run.received++;
371
372    if (work->u.feedback_run.func_notify)
373      work->u.feedback_run.func_notify((void *)work->data, (Ecore_Thread *)work, user_data);
374
375    /* Force reading all notify event before killing the thread */
376    if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
377      {
378         _ecore_thread_kill(work);
379      }
380
381    free(notify);
382 }
383
/* Main-loop handler for a notification from a message-run worker.
 * Regular messages go to the user's notify callback; messages flagged
 * `callback` carry an internal function instead, and `sync` ones
 * additionally write the result back to the worker over its pipe. */
static void
_ecore_message_notify_handler(void *data)
{
   Ecore_Pthread_Notify *notify = data;
   Ecore_Pthread_Worker *work = notify->work;
   Ecore_Pthread_Message *user_data = (void *) notify->user_data;
   Eina_Bool delete = EINA_TRUE;

   work->u.message_run.from.received++;

   if (!user_data->callback)
     {
        if (work->u.message_run.func_notify)
          work->u.message_run.func_notify((void *) work->data, (Ecore_Thread *) work, (void *) user_data->data);
     }
   else
     {
        if (user_data->sync)
          {
             /* Run the synchronous callback and ship its result back
              * to the worker; the message travels on through the pipe
              * and therefore must not be freed here. */
             user_data->data = user_data->u.sync((void*) user_data->data, (Ecore_Thread *) work);
             user_data->callback = EINA_FALSE;
             user_data->code = INT_MAX; /* marks a completed sync reply */
             ecore_pipe_write(work->u.message_run.send, &user_data, sizeof (Ecore_Pthread_Message *));

             delete = EINA_FALSE;
          }
        else
          {
             user_data->u.async((void*) user_data->data, (Ecore_Thread *) work);
          }
     }

   if (delete)
     {
        free(user_data);
     }

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.message_run.from.send == work->u.message_run.from.received)
     {
        _ecore_thread_kill(work);
     }
   free(notify);
}
428
/* Pop one queued short job and run it on the calling pool thread.
 * While its callback executes, the job is tracked in
 * _ecore_running_job so shutdown/cancel can find it. */
static void
_ecore_short_job(PH(thread))
{
   Ecore_Pthread_Worker *work;
   int cancel;

   LKL(_ecore_pending_job_threads_mutex);

   if (!_ecore_pending_job_threads)
     {
        LKU(_ecore_pending_job_threads_mutex);
        return;
     }

   /* Dequeue the head job before releasing the queue lock. */
   work = eina_list_data_get(_ecore_pending_job_threads);
   _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                      _ecore_pending_job_threads);
   LKU(_ecore_pending_job_threads_mutex);

   LKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_append(_ecore_running_job, work);
   LKU(_ecore_running_job_mutex);

   /* Snapshot the cancel flag: a job cancelled while still queued
    * skips its blocking function entirely. */
   LKL(work->cancel_mutex);
   cancel = work->cancel;
   LKU(work->cancel_mutex);
   work->self = thread;
   if (!cancel)
     work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);

   LKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_remove(_ecore_running_job, work);
   LKU(_ecore_running_job_mutex);

   if (work->reschedule)
     {
        /* The job asked to be re-queued instead of completing. */
        work->reschedule = EINA_FALSE;

        LKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
        LKU(_ecore_pending_job_threads_mutex);
     }
   else
     {
        /* Completion callbacks always run on the main loop. */
        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
     }
}
476
/* Pop one queued feedback job and run it on the calling pool thread;
 * mirrors _ecore_short_job() but drains the feedback queue and calls
 * func_heavy instead of func_blocking. */
static void
_ecore_feedback_job(PH(thread))
{
   Ecore_Pthread_Worker *work;
   int cancel;

   LKL(_ecore_pending_job_threads_mutex);

   if (!_ecore_pending_job_threads_feedback)
     {
        LKU(_ecore_pending_job_threads_mutex);
        return;
     }

   /* Dequeue the head job before releasing the queue lock. */
   work = eina_list_data_get(_ecore_pending_job_threads_feedback);
   _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                               _ecore_pending_job_threads_feedback);
   LKU(_ecore_pending_job_threads_mutex);
   LKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_append(_ecore_running_job, work);
   LKU(_ecore_running_job_mutex);

   /* Snapshot the cancel flag: a job cancelled while still queued
    * skips its heavy function entirely. */
   LKL(work->cancel_mutex);
   cancel = work->cancel;
   LKU(work->cancel_mutex);
   work->self = thread;
   if (!cancel)
     work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);

   LKL(_ecore_running_job_mutex);
   _ecore_running_job = eina_list_remove(_ecore_running_job, work);
   LKU(_ecore_running_job_mutex);

   if (work->reschedule)
     {
        /* The job asked to be re-queued instead of completing. */
        work->reschedule = EINA_FALSE;

        LKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
        LKU(_ecore_pending_job_threads_mutex);
     }
   else
     {
        /* Completion callbacks always run on the main loop. */
        ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
     }
}
523
524 static void *
525 _ecore_direct_worker(Ecore_Pthread_Worker *work)
526 {
527 #ifdef EFL_POSIX_THREADS
528    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
529    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
530 #endif
531
532    eina_sched_prio_drop();
533
534    work->self = PHS();
535    if (work->message_run)
536      work->u.message_run.func_main((void *) work->data, (Ecore_Thread *) work);
537    else
538      work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
539
540    ecore_main_loop_thread_safe_call_async(_ecore_thread_handler, work);
541
542    ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join, 
543                                           (void*) PHS());
544
545    return NULL;
546 }
547
548 static void *
549 _ecore_thread_worker(void *data __UNUSED__)
550 {
551 #ifdef EFL_POSIX_THREADS
552    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
553    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
554 #endif
555
556    eina_sched_prio_drop();
557
558 restart:
559    _ecore_short_job(PHS());
560    _ecore_feedback_job(PHS());
561
562    /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
563
564    LKL(_ecore_pending_job_threads_mutex);
565    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
566      {
567         LKU(_ecore_pending_job_threads_mutex);
568         goto restart;
569      }
570    LKU(_ecore_pending_job_threads_mutex);
571
572    /* Sleep a little to prevent premature death */
573 #ifdef _WIN32
574    Sleep(1); /* around 50ms */
575 #else
576    usleep(50);
577 #endif
578
579    LKL(_ecore_pending_job_threads_mutex);
580    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
581      {
582         LKU(_ecore_pending_job_threads_mutex);
583         goto restart;
584      }
585    _ecore_thread_count--;
586
587    ecore_main_loop_thread_safe_call_async((Ecore_Cb) _ecore_thread_join,
588                                           (void*) PHS());
589    LKU(_ecore_pending_job_threads_mutex);
590
591    return NULL;
592 }
593
594 #endif
595
596 static Ecore_Pthread_Worker *
597 _ecore_thread_worker_new(void)
598 {
599 #ifdef EFL_HAVE_THREADS
600    Ecore_Pthread_Worker *result;
601
602    result = eina_trash_pop(&_ecore_thread_worker_trash);
603
604    if (!result) 
605      {
606        result = calloc(1, sizeof(Ecore_Pthread_Worker));
607        _ecore_thread_worker_count++;
608      }
609
610    LKI(result->cancel_mutex);
611    LKI(result->mutex);
612    CDI(result->cond, result->mutex);
613
614    return result;
615 #else
616    return malloc(sizeof (Ecore_Pthread_Worker));
617 #endif
618 }
619
620 void
621 _ecore_thread_init(void)
622 {
623    _ecore_thread_count_max = eina_cpu_count();
624    if (_ecore_thread_count_max <= 0)
625      _ecore_thread_count_max = 1;
626
627 #ifdef EFL_HAVE_THREADS
628 # ifdef EFL_HAVE_WIN32_THREADS
629    LKI(_ecore_thread_win32_lock);
630 # endif
631    LKI(_ecore_pending_job_threads_mutex);
632    LRWKI(_ecore_thread_global_hash_lock);
633    LKI(_ecore_thread_global_hash_mutex);
634    LKI(_ecore_running_job_mutex);
635    CDI(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex);
636 #endif
637 }
638
/* Module shutdown: cancel queued and running jobs, wait up to ~1s for
 * pool threads to drain, then destroy all module-wide resources. */
void
_ecore_thread_shutdown(void)
{
   /* FIXME: If function are still running in the background, should we kill them ? */
#ifdef EFL_HAVE_THREADS
    Ecore_Pthread_Worker *work;
    Eina_List *l;
    Eina_Bool test;
    int iteration = 0;

    LKL(_ecore_pending_job_threads_mutex);

    /* Cancel every job that never got a chance to run. */
    EINA_LIST_FREE(_ecore_pending_job_threads, work)
      {
         if (work->func_cancel)
           work->func_cancel((void *)work->data, (Ecore_Thread *) work);
         free(work);
      }

    EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
      {
         if (work->func_cancel)
           work->func_cancel((void *)work->data, (Ecore_Thread *) work);
         free(work);
      }

    LKU(_ecore_pending_job_threads_mutex);
    LKL(_ecore_running_job_mutex);

    /* Flag currently-executing jobs as cancelled. */
    EINA_LIST_FOREACH(_ecore_running_job, l, work)
      ecore_thread_cancel((Ecore_Thread*) work);

    LKU(_ecore_running_job_mutex);

    /* Wait up to 20 * 50ms for the pool threads to retire. */
    do
      {
         LKL(_ecore_pending_job_threads_mutex);
         test = (_ecore_thread_count > 0) ? EINA_TRUE : EINA_FALSE;
         LKU(_ecore_pending_job_threads_mutex);
         iteration++;
         if (test) usleep(50000);
      }
    while (test == EINA_TRUE && iteration < 20);

    if (iteration == 20 && _ecore_thread_count > 0)
      {
         ERR("%i of the child thread are still running after 1s. This can lead to a segv. Sorry.", _ecore_thread_count);
      }

    if (_ecore_thread_global_hash)
      eina_hash_free(_ecore_thread_global_hash);
    have_main_loop_thread = 0;

    /* Drain the worker recycling cache. */
    while ((work = eina_trash_pop(&_ecore_thread_worker_trash)))
      {
         free(work);
      }

    LKD(_ecore_pending_job_threads_mutex);
    LRWKD(_ecore_thread_global_hash_lock);
    LKD(_ecore_thread_global_hash_mutex);
    LKD(_ecore_running_job_mutex);
    CDD(_ecore_thread_global_hash_cond);
# ifdef EFL_HAVE_WIN32_THREADS
    /* FIX: this lock was released (LKU) instead of destroyed (LKD),
     * leaking the lock created in _ecore_thread_init(). */
    LKD(_ecore_thread_win32_lock);
# endif
#endif
}
714
715 EAPI Ecore_Thread *
716 ecore_thread_run(Ecore_Thread_Cb func_blocking,
717                  Ecore_Thread_Cb func_end,
718                  Ecore_Thread_Cb func_cancel,
719                  const void     *data)
720 {
721    Ecore_Pthread_Worker *work;
722    Eina_Bool tried = EINA_FALSE;
723 #ifdef EFL_HAVE_THREADS
724    PH(thread);
725 #endif
726
727    EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
728    
729    if (!func_blocking) return NULL;
730
731    work = _ecore_thread_worker_new();
732    if (!work)
733      {
734         if (func_cancel)
735           func_cancel((void *)data, NULL);
736         return NULL;
737      }
738
739    work->u.short_run.func_blocking = func_blocking;
740    work->func_end = func_end;
741    work->func_cancel = func_cancel;
742    work->cancel = EINA_FALSE;
743    work->feedback_run = EINA_FALSE;
744    work->message_run = EINA_FALSE;
745    work->kill = EINA_FALSE;
746    work->reschedule = EINA_FALSE;
747    work->no_queue = EINA_FALSE;
748    work->data = data;
749
750 #ifdef EFL_HAVE_THREADS
751    work->self = 0;
752    work->hash = NULL;
753
754    LKL(_ecore_pending_job_threads_mutex);
755    _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
756
757    if (_ecore_thread_count == _ecore_thread_count_max)
758      {
759         LKU(_ecore_pending_job_threads_mutex);
760         return (Ecore_Thread *)work;
761      }
762
763    LKU(_ecore_pending_job_threads_mutex);
764
765    /* One more thread could be created. */
766    eina_threads_init();
767
768    LKL(_ecore_pending_job_threads_mutex);
769
770  retry:
771    if (PHC(thread, _ecore_thread_worker, NULL) == 0)
772      {
773         _ecore_thread_count++;
774         LKU(_ecore_pending_job_threads_mutex);
775         return (Ecore_Thread *)work;
776      }
777    if (!tried)
778      {
779        _ecore_main_call_flush();
780        tried = EINA_TRUE;
781        goto retry;
782      }
783
784    if (_ecore_thread_count == 0)
785      {
786         _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
787
788         if (work->func_cancel)
789           work->func_cancel((void *) work->data, (Ecore_Thread *) work);
790
791         CDD(work->cond);
792         LKD(work->mutex);
793         LKD(work->cancel_mutex);
794         free(work);
795         work = NULL;
796      }
797    LKU(_ecore_pending_job_threads_mutex);
798
799    eina_threads_shutdown();
800
801    return (Ecore_Thread *)work;
802 #else
803    /*
804       If no thread and as we don't want to break app that rely on this
805       facility, we will lock the interface until we are done.
806     */
807    do {
808         /* Handle reschedule by forcing it here. That would mean locking the app,
809          * would be better with an idler, but really to complex for a case where
810          * thread should really exist.
811          */
812           work->reschedule = EINA_FALSE;
813
814           func_blocking((void *)data, (Ecore_Thread *)work);
815           if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *)work);
816           else func_cancel((void *)data, (Ecore_Thread *)work);
817      } while (work->reschedule == EINA_TRUE);
818
819    free(work);
820
821    return NULL;
822 #endif
823 }
824
/* Request cancellation of @thread.  Returns EINA_TRUE when the job is
 * already gone (or was removed from the queue and cancelled on the
 * spot), EINA_FALSE when cancellation was merely flagged and the job
 * will be destroyed later. */
EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *volatile work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;
   int cancel;

   if (!work)
     return EINA_TRUE;
   LKL(work->cancel_mutex);
   cancel = work->cancel;
   LKU(work->cancel_mutex);
   if (cancel)
     return EINA_FALSE; /* already flagged; destruction pending */

   if (work->feedback_run)
     {
        if (work->kill)
          return EINA_TRUE; /* already marked for destruction */
        /* Notifications still in flight: only flag the cancel. */
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   LKL(_ecore_pending_job_threads_mutex);

   /* From the main-loop thread a job that is still queued can be
    * removed and cancelled immediately.  NOTE: the list walks below
    * reuse `work` as the iterator variable; it is re-fetched from
    * `thread` afterwards. */
   if ((have_main_loop_thread) &&
       (PHE(get_main_loop_thread(), PHS())))
     {
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *)work == (void *)thread)
                 {
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *)work->data, (Ecore_Thread *)work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Restore `work` after the iterator reuse above. */
   work = (Ecore_Pthread_Worker *)thread;

   /* Delay the destruction */
 on_exit:
   LKL(work->cancel_mutex);
   work->cancel = EINA_TRUE;
   LKU(work->cancel_mutex);

   return EINA_FALSE;
#else
   (void) thread;
   return EINA_TRUE;
#endif
}
904
/* Return whether @thread has been marked cancelled; a NULL handle
 * also reports EINA_TRUE.  The flag is read under cancel_mutex. */
EAPI Eina_Bool
ecore_thread_check(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *volatile worker = (Ecore_Pthread_Worker *) thread;
   int cancel;

   if (!worker) return EINA_TRUE;
#ifdef EFL_HAVE_THREADS
   LKL(worker->cancel_mutex);
#endif
   cancel = worker->cancel;
   /* FIXME: there is an insane bug driving me nuts here. I don't know if
    it's a race condition, some cache issue or some alien attack on our software.
    But ecore_thread_check will only work correctly with a printf, all the volatile,
    lock and even usleep don't help here... */
   /* fprintf(stderr, "wc: %i\n", cancel); */
#ifdef EFL_HAVE_THREADS
   LKU(worker->cancel_mutex);
#endif
   return cancel;
}
926
927 EAPI Ecore_Thread *
928 ecore_thread_feedback_run(Ecore_Thread_Cb        func_heavy,
929                           Ecore_Thread_Notify_Cb func_notify,
930                           Ecore_Thread_Cb        func_end,
931                           Ecore_Thread_Cb        func_cancel,
932                           const void            *data,
933                           Eina_Bool              try_no_queue)
934 {
935 #ifdef EFL_HAVE_THREADS
936    Ecore_Pthread_Worker *worker;
937    Eina_Bool tried = EINA_FALSE;
938    PH(thread);
939
940    EINA_MAIN_LOOP_CHECK_RETURN_VAL(NULL);
941    
942    if (!func_heavy) return NULL;
943
944    worker = _ecore_thread_worker_new();
945    if (!worker) goto on_error;
946
947    worker->u.feedback_run.func_heavy = func_heavy;
948    worker->u.feedback_run.func_notify = func_notify;
949    worker->hash = NULL;
950    worker->func_cancel = func_cancel;
951    worker->func_end = func_end;
952    worker->data = data;
953    worker->cancel = EINA_FALSE;
954    worker->message_run = EINA_FALSE;
955    worker->feedback_run = EINA_TRUE;
956    worker->kill = EINA_FALSE;
957    worker->reschedule = EINA_FALSE;
958    worker->self = 0;
959
960    worker->u.feedback_run.send = 0;
961    worker->u.feedback_run.received = 0;
962
963    worker->u.feedback_run.direct_worker = NULL;
964
965    if (try_no_queue)
966      {
967         PH(t);
968
969         worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
970         worker->no_queue = EINA_TRUE;
971
972         eina_threads_init();
973
974      retry_direct:
975         if (PHC(t, _ecore_direct_worker, worker) == 0)
976           return (Ecore_Thread *)worker;
977         if (!tried)
978           {
979              _ecore_main_call_flush();
980              tried = EINA_TRUE;
981              goto retry_direct;
982           }
983
984         if (worker->u.feedback_run.direct_worker)
985           {
986              _ecore_thread_worker_free(worker->u.feedback_run.direct_worker);
987              worker->u.feedback_run.direct_worker = NULL;
988           }
989
990         eina_threads_shutdown();
991      }
992
993    worker->no_queue = EINA_FALSE;
994
995    LKL(_ecore_pending_job_threads_mutex);
996    _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
997
998    if (_ecore_thread_count == _ecore_thread_count_max)
999      {
1000         LKU(_ecore_pending_job_threads_mutex);
1001         return (Ecore_Thread *)worker;
1002      }
1003
1004    LKU(_ecore_pending_job_threads_mutex);
1005
1006    /* One more thread could be created. */
1007    eina_threads_init();
1008
1009    LKL(_ecore_pending_job_threads_mutex);
1010  retry:
1011    if (PHC(thread, _ecore_thread_worker, NULL) == 0)
1012      {
1013         _ecore_thread_count++;
1014         LKU(_ecore_pending_job_threads_mutex);
1015         return (Ecore_Thread *)worker;
1016      }
1017    if (!tried)
1018      {
1019         _ecore_main_call_flush();
1020         tried = EINA_TRUE;
1021         goto retry;
1022      }
1023    LKU(_ecore_pending_job_threads_mutex);
1024
1025    eina_threads_shutdown();
1026
1027 on_error:
1028    LKL(_ecore_pending_job_threads_mutex);
1029    if (_ecore_thread_count == 0)
1030      {
1031         _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
1032                                                                worker);
1033
1034         if (func_cancel) func_cancel((void *)data, NULL);
1035
1036         if (worker)
1037           {
1038              CDD(worker->cond);
1039              LKD(worker->mutex);
1040              free(worker);
1041              worker = NULL;
1042           }
1043      }
1044    LKU(_ecore_pending_job_threads_mutex);
1045
1046    return (Ecore_Thread *)worker;
1047 #else
1048    Ecore_Pthread_Worker worker;
1049
1050    (void)try_no_queue;
1051
1052    /*
1053       If no thread and as we don't want to break app that rely on this
1054       facility, we will lock the interface until we are done.
1055     */
1056    worker.u.feedback_run.func_heavy = func_heavy;
1057    worker.u.feedback_run.func_notify = func_notify;
1058    worker.u.feedback_run.send = 0;
1059    worker.u.feedback_run.received = 0;
1060    worker.func_cancel = func_cancel;
1061    worker.func_end = func_end;
1062    worker.data = data;
1063    worker.cancel = EINA_FALSE;
1064    worker.feedback_run = EINA_TRUE;
1065    worker.message_run = EINA_FALSE;
1066    worker.kill = EINA_FALSE;
1067
1068    do {
1069         worker.reschedule = EINA_FALSE;
1070
1071         func_heavy((void *)data, (Ecore_Thread *)&worker);
1072
1073         if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *)&worker);
1074         else func_end((void *)data, (Ecore_Thread *)&worker);
1075      } while (worker.reschedule == EINA_TRUE);
1076
1077    return NULL;
1078 #endif
1079 }
1080
1081 EAPI Eina_Bool
1082 ecore_thread_feedback(Ecore_Thread *thread,
1083                       const void   *data)
1084 {
1085    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1086
1087    if (!worker) return EINA_FALSE;
1088
1089 #ifdef EFL_HAVE_THREADS
1090    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1091
1092    if (worker->feedback_run)
1093      {
1094         Ecore_Pthread_Notify *notify;
1095
1096         notify = malloc(sizeof (Ecore_Pthread_Notify));
1097         if (!notify) return EINA_FALSE;
1098
1099         notify->user_data = data;
1100         notify->work = worker;
1101         worker->u.feedback_run.send++;
1102
1103         ecore_main_loop_thread_safe_call_async(_ecore_notify_handler, notify);
1104      }
1105    else if (worker->message_run)
1106      {
1107         Ecore_Pthread_Message *msg;
1108         Ecore_Pthread_Notify *notify;
1109
1110         msg = malloc(sizeof (Ecore_Pthread_Message));
1111         if (!msg) return EINA_FALSE;
1112         msg->data = data;
1113         msg->callback = EINA_FALSE;
1114         msg->sync = EINA_FALSE;
1115
1116         notify = malloc(sizeof (Ecore_Pthread_Notify));
1117         if (!notify)
1118           {
1119              free(msg);
1120              return EINA_FALSE;
1121           }
1122         notify->work = worker;
1123         notify->user_data = msg;
1124
1125         worker->u.message_run.from.send++;
1126         ecore_main_loop_thread_safe_call_async(_ecore_message_notify_handler, notify);
1127      }
1128    else
1129      return EINA_FALSE;
1130
1131    return EINA_TRUE;
1132 #else
1133    worker->u.feedback_run.func_notify((void *)worker->data, thread, (void *)data);
1134
1135    return EINA_TRUE;
1136 #endif
1137 }
1138
#if 0
/* Disabled message-based worker API (compiled out with #if 0).
 * NOTE(review): if this is ever re-enabled, the PHC() failure path below
 * frees direct_worker, the pipe, the condition and the mutex, but the
 * 'worker' allocation itself is never freed -- presumably a leak to fix
 * before enabling. */
EAPI Ecore_Thread *
ecore_thread_message_run(Ecore_Thread_Cb func_main,
                         Ecore_Thread_Notify_Cb func_notify,
                         Ecore_Thread_Cb func_end,
                         Ecore_Thread_Cb func_cancel,
                         const void *data)
{
#ifdef EFL_HAVE_THREADS
  Ecore_Pthread_Worker *worker;
  PH(t);

  if (!func_main) return NULL;

  worker = _ecore_thread_worker_new();
  if (!worker) return NULL;

  worker->u.message_run.func_main = func_main;
  worker->u.message_run.func_notify = func_notify;
  worker->u.message_run.direct_worker = _ecore_thread_worker_new();
  worker->u.message_run.send = ecore_pipe_add(_ecore_nothing_handler, worker);
  worker->u.message_run.from.send = 0;
  worker->u.message_run.from.received = 0;
  worker->u.message_run.to.send = 0;
  worker->u.message_run.to.received = 0;

  /* Pipe stays frozen until the worker actually starts consuming it. */
  ecore_pipe_freeze(worker->u.message_run.send);

  worker->func_cancel = func_cancel;
  worker->func_end = func_end;
  worker->hash = NULL;
  worker->data = data;

  worker->cancel = EINA_FALSE;
  worker->message_run = EINA_TRUE;
  worker->feedback_run = EINA_FALSE;
  worker->kill = EINA_FALSE;
  worker->reschedule = EINA_FALSE;
  worker->no_queue = EINA_FALSE;
  worker->self = 0;

  eina_threads_init();

  /* Thread successfully created: ownership passes to the worker. */
  if (PHC(t, _ecore_direct_worker, worker) == 0)
    return (Ecore_Thread*) worker;

  /* Thread creation failed: undo the setup done above. */
  eina_threads_shutdown();

  if (worker->u.message_run.direct_worker) _ecore_thread_worker_free(worker->u.message_run.direct_worker);
  if (worker->u.message_run.send) ecore_pipe_del(worker->u.message_run.send);

  CDD(worker->cond);
  LKD(worker->mutex);
#else
  /* Note: This type of thread can't and never will work without thread support */
  WRN("ecore_thread_message_run called, but threads disable in Ecore, things will go wrong. Starting now !");
# warning "You disabled threads support in ecore, I hope you know what you are doing !"
#endif

  func_cancel((void *) data, NULL);

  return NULL;
}
#endif
1203
1204 EAPI Eina_Bool
1205 ecore_thread_reschedule(Ecore_Thread *thread)
1206 {
1207    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1208
1209    if (!worker) return EINA_FALSE;
1210
1211 #ifdef EFL_HAVE_THREADS
1212    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1213 #endif
1214
1215    worker->reschedule = EINA_TRUE;
1216    return EINA_TRUE;
1217 }
1218
/* Number of currently running worker threads (main loop only).
 * Always 0 when Ecore was built without thread support. */
EAPI int
ecore_thread_active_get(void)
{
#ifdef EFL_HAVE_THREADS
   EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
   return _ecore_thread_count;
#else
   return 0;
#endif
}
1229
1230 EAPI int
1231 ecore_thread_pending_get(void)
1232 {
1233 #ifdef EFL_HAVE_THREADS
1234    int ret;
1235
1236    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1237    LKL(_ecore_pending_job_threads_mutex);
1238    ret = eina_list_count(_ecore_pending_job_threads);
1239    LKU(_ecore_pending_job_threads_mutex);
1240    return ret;
1241 #else
1242    return 0;
1243 #endif
1244 }
1245
1246 EAPI int
1247 ecore_thread_pending_feedback_get(void)
1248 {
1249 #ifdef EFL_HAVE_THREADS
1250    int ret;
1251
1252    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1253    LKL(_ecore_pending_job_threads_mutex);
1254    ret = eina_list_count(_ecore_pending_job_threads_feedback);
1255    LKU(_ecore_pending_job_threads_mutex);
1256    return ret;
1257 #else
1258    return 0;
1259 #endif
1260 }
1261
1262 EAPI int
1263 ecore_thread_pending_total_get(void)
1264 {
1265 #ifdef EFL_HAVE_THREADS
1266    int ret;
1267
1268    EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
1269    LKL(_ecore_pending_job_threads_mutex);
1270    ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1271    LKU(_ecore_pending_job_threads_mutex);
1272    return ret;
1273 #else
1274    return 0;
1275 #endif
1276 }
1277
/* Current upper bound on the number of concurrent worker threads. */
EAPI int
ecore_thread_max_get(void)
{
   EINA_MAIN_LOOP_CHECK_RETURN_VAL(0);
   return _ecore_thread_count_max;
}
1284
1285 EAPI void
1286 ecore_thread_max_set(int num)
1287 {
1288    EINA_MAIN_LOOP_CHECK_RETURN;
1289    if (num < 1) return;
1290    /* avoid doing something hilarious by blocking dumb users */
1291    if (num > (16 * eina_cpu_count())) num = 16 * eina_cpu_count();
1292
1293    _ecore_thread_count_max = num;
1294 }
1295
/* Reset the worker-thread limit to its default: one per CPU. */
EAPI void
ecore_thread_max_reset(void)
{
   EINA_MAIN_LOOP_CHECK_RETURN;
   _ecore_thread_count_max = eina_cpu_count();
}
1302
1303 EAPI int
1304 ecore_thread_available_get(void)
1305 {
1306 #ifdef EFL_HAVE_THREADS
1307    int ret;
1308
1309    LKL(_ecore_pending_job_threads_mutex);
1310    ret = _ecore_thread_count_max - _ecore_thread_count;
1311    LKU(_ecore_pending_job_threads_mutex);
1312    return ret;
1313 #else
1314    return 0;
1315 #endif
1316 }
1317
1318 EAPI Eina_Bool
1319 ecore_thread_local_data_add(Ecore_Thread *thread,
1320                             const char   *key,
1321                             void         *value,
1322                             Eina_Free_Cb  cb,
1323                             Eina_Bool     direct)
1324 {
1325 #ifdef EFL_HAVE_THREADS
1326    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1327    Ecore_Thread_Data *d;
1328    Eina_Bool ret;
1329 #endif
1330
1331    if ((!thread) || (!key) || (!value))
1332      return EINA_FALSE;
1333 #ifdef EFL_HAVE_THREADS
1334    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1335
1336    if (!worker->hash)
1337      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1338
1339    if (!worker->hash)
1340      return EINA_FALSE;
1341
1342    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1343      return EINA_FALSE;
1344
1345    d->data = value;
1346    d->cb = cb;
1347
1348    if (direct)
1349      ret = eina_hash_direct_add(worker->hash, key, d);
1350    else
1351      ret = eina_hash_add(worker->hash, key, d);
1352    CDB(worker->cond);
1353    return ret;
1354 #else
1355    (void) cb;
1356    (void) direct;
1357    return EINA_FALSE;
1358 #endif
1359 }
1360
1361 EAPI void *
1362 ecore_thread_local_data_set(Ecore_Thread *thread,
1363                             const char   *key,
1364                             void         *value,
1365                             Eina_Free_Cb  cb)
1366 {
1367 #ifdef EFL_HAVE_THREADS
1368    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1369    Ecore_Thread_Data *d, *r;
1370    void *ret;
1371 #endif
1372
1373    if ((!thread) || (!key) || (!value))
1374      return NULL;
1375 #ifdef EFL_HAVE_THREADS
1376    if (!PHE(worker->self, PHS())) return NULL;
1377
1378    if (!worker->hash)
1379      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1380
1381    if (!worker->hash)
1382      return NULL;
1383
1384    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1385      return NULL;
1386
1387    d->data = value;
1388    d->cb = cb;
1389
1390    r = eina_hash_set(worker->hash, key, d);
1391    CDB(worker->cond);
1392    ret = r->data;
1393    free(r);
1394    return ret;
1395 #else
1396    (void) cb;
1397    return NULL;
1398 #endif
1399 }
1400
1401 EAPI void *
1402 ecore_thread_local_data_find(Ecore_Thread *thread,
1403                              const char   *key)
1404 {
1405 #ifdef EFL_HAVE_THREADS
1406    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1407    Ecore_Thread_Data *d;
1408 #endif
1409
1410    if ((!thread) || (!key))
1411      return NULL;
1412 #ifdef EFL_HAVE_THREADS
1413    if (!PHE(worker->self, PHS())) return NULL;
1414
1415    if (!worker->hash)
1416      return NULL;
1417
1418    d = eina_hash_find(worker->hash, key);
1419    if (d)
1420      return d->data;
1421    return NULL;
1422 #else
1423    return NULL;
1424 #endif
1425 }
1426
1427 EAPI Eina_Bool
1428 ecore_thread_local_data_del(Ecore_Thread *thread,
1429                             const char   *key)
1430 {
1431 #ifdef EFL_HAVE_THREADS
1432    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *)thread;
1433 #endif
1434
1435    if ((!thread) || (!key))
1436      return EINA_FALSE;
1437 #ifdef EFL_HAVE_THREADS
1438    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1439
1440    if (!worker->hash)
1441      return EINA_FALSE;
1442    return eina_hash_del_by_key(worker->hash, key);
1443 #else
1444    return EINA_TRUE;
1445 #endif
1446 }
1447
1448 EAPI Eina_Bool
1449 ecore_thread_global_data_add(const char  *key,
1450                              void        *value,
1451                              Eina_Free_Cb cb,
1452                              Eina_Bool    direct)
1453 {
1454 #ifdef EFL_HAVE_THREADS
1455    Ecore_Thread_Data *d;
1456    Eina_Bool ret;
1457 #endif
1458
1459    if ((!key) || (!value))
1460      return EINA_FALSE;
1461 #ifdef EFL_HAVE_THREADS
1462    LRWKWL(_ecore_thread_global_hash_lock);
1463    if (!_ecore_thread_global_hash)
1464      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1465    LRWKU(_ecore_thread_global_hash_lock);
1466
1467    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1468      return EINA_FALSE;
1469
1470    d->data = value;
1471    d->cb = cb;
1472
1473    if (!_ecore_thread_global_hash)
1474      return EINA_FALSE;
1475    LRWKWL(_ecore_thread_global_hash_lock);
1476    if (direct)
1477      ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1478    else
1479      ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1480    LRWKU(_ecore_thread_global_hash_lock);
1481    CDB(_ecore_thread_global_hash_cond);
1482    return ret;
1483 #else
1484    (void) cb;
1485    (void) direct;
1486    return EINA_TRUE;
1487 #endif
1488 }
1489
1490 EAPI void *
1491 ecore_thread_global_data_set(const char  *key,
1492                              void        *value,
1493                              Eina_Free_Cb cb)
1494 {
1495 #ifdef EFL_HAVE_THREADS
1496    Ecore_Thread_Data *d, *r;
1497    void *ret;
1498 #endif
1499
1500    if ((!key) || (!value))
1501      return NULL;
1502 #ifdef EFL_HAVE_THREADS
1503    LRWKWL(_ecore_thread_global_hash_lock);
1504    if (!_ecore_thread_global_hash)
1505      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1506    LRWKU(_ecore_thread_global_hash_lock);
1507
1508    if (!_ecore_thread_global_hash)
1509      return NULL;
1510
1511    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1512      return NULL;
1513
1514    d->data = value;
1515    d->cb = cb;
1516
1517    LRWKWL(_ecore_thread_global_hash_lock);
1518    r = eina_hash_set(_ecore_thread_global_hash, key, d);
1519    LRWKU(_ecore_thread_global_hash_lock);
1520    CDB(_ecore_thread_global_hash_cond);
1521
1522    ret = r->data;
1523    free(r);
1524    return ret;
1525 #else
1526    (void) cb;
1527    return NULL;
1528 #endif
1529 }
1530
1531 EAPI void *
1532 ecore_thread_global_data_find(const char *key)
1533 {
1534 #ifdef EFL_HAVE_THREADS
1535    Ecore_Thread_Data *ret;
1536 #endif
1537
1538    if (!key)
1539      return NULL;
1540 #ifdef EFL_HAVE_THREADS
1541    if (!_ecore_thread_global_hash) return NULL;
1542
1543    LRWKRL(_ecore_thread_global_hash_lock);
1544    ret = eina_hash_find(_ecore_thread_global_hash, key);
1545    LRWKU(_ecore_thread_global_hash_lock);
1546    if (ret)
1547      return ret->data;
1548    return NULL;
1549 #else
1550    return NULL;
1551 #endif
1552 }
1553
1554 EAPI Eina_Bool
1555 ecore_thread_global_data_del(const char *key)
1556 {
1557 #ifdef EFL_HAVE_THREADS
1558    Eina_Bool ret;
1559 #endif
1560
1561    if (!key)
1562      return EINA_FALSE;
1563 #ifdef EFL_HAVE_THREADS
1564    if (!_ecore_thread_global_hash)
1565      return EINA_FALSE;
1566
1567    LRWKWL(_ecore_thread_global_hash_lock);
1568    ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1569    LRWKU(_ecore_thread_global_hash_lock);
1570    return ret;
1571 #else
1572    return EINA_TRUE;
1573 #endif
1574 }
1575
/* Block until a named value appears in the global thread-data hash.
 * seconds > 0: wait at most that long; seconds == 0: poll once and return;
 * NOTE(review): for seconds < 0 'tm' stays 0, so CDW() is invoked with a
 * zero absolute deadline -- presumably intended as "wait forever", but the
 * actual behavior depends on eina_condition_timedwait semantics; confirm. */
EAPI void *
ecore_thread_global_data_wait(const char *key,
                              double      seconds)
{
#ifdef EFL_HAVE_THREADS
   double tm = 0;
   Ecore_Thread_Data *ret = NULL;
#endif

   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash)
     return NULL;
   /* Compute the absolute deadline once, up front. */
   if (seconds > 0)
     tm = ecore_time_get() + seconds;

   while (1)
     {
        /* Probe the hash under the read lock. */
        LRWKRL(_ecore_thread_global_hash_lock);
        ret = eina_hash_find(_ecore_thread_global_hash, key);
        LRWKU(_ecore_thread_global_hash_lock);
        /* Stop on: found, zero timeout (single poll), or deadline passed. */
        if ((ret) || (!seconds) || ((seconds > 0) && (tm <= ecore_time_get())))
          break;
        /* Sleep until a data_add/data_set broadcasts the condition.
         * NOTE(review): the mutex is taken only around the wait, not around
         * the hash probe above, so a broadcast between probe and wait can be
         * missed until the next timeout wakeup -- verify this is acceptable. */
        LKL(_ecore_thread_global_hash_mutex);
        CDW(_ecore_thread_global_hash_cond, tm);
        LKU(_ecore_thread_global_hash_mutex);
     }
   if (ret) return ret->data;
   return NULL;
#else
   (void) seconds;
   return NULL;
#endif
}
1611