Ecore: Fix shadow declaration of variable 'time'.
[profile/ivi/ecore.git] / src / lib / ecore / ecore_thread.c
1 #ifdef HAVE_CONFIG_H
2 # include <config.h>
3 #endif
4
5 #include <sys/time.h>
6
7 #ifdef HAVE_EVIL
8 # include <Evil.h>
9 #endif
10
11 #include "Ecore.h"
12 #include "ecore_private.h"
13
14 #ifdef EFL_HAVE_THREADS
15
16 # ifdef EFL_HAVE_POSIX_THREADS
17 #  include <pthread.h>
18 #  ifdef __linux__
19 #   include <sched.h>
20 #   include <sys/resource.h>
21 #   include <unistd.h>
22 #   include <sys/syscall.h>
23 #   include <errno.h>
24 #  endif
25
26 #  define PH(x)        pthread_t x
27 #  define PHE(x, y)    pthread_equal(x, y)
28 #  define PHS()        pthread_self()
29 #  define PHC(x, f, d) pthread_create(&(x), NULL, (void*) f, d)
30 #  define PHJ(x, p)    pthread_join(x, (void**)(&(p)))
31 #  define PHA(x)       pthread_cancel(x)
32
33 #  define CD(x)  pthread_cond_t x
34 #  define CDI(x) pthread_cond_init(&(x), NULL);
35 #  define CDD(x) pthread_cond_destroy(&(x));
36 #  define CDB(x) pthread_cond_broadcast(&(x));
37 #  define CDW(x, y, t) pthread_cond_timedwait(&(x), &(y), t);
38
39 #  define LK(x)  pthread_mutex_t x
40 #  define LKI(x) pthread_mutex_init(&(x), NULL);
41 #  define LKD(x) pthread_mutex_destroy(&(x));
42 #  define LKL(x) pthread_mutex_lock(&(x));
43 #  define LKU(x) pthread_mutex_unlock(&(x));
44
45 #  define LRWK(x)   pthread_rwlock_t x
46 #  define LRWKI(x)  pthread_rwlock_init(&(x), NULL);
47 #  define LRWKD(x)  pthread_rwlock_destroy(&(x));
48 #  define LRWKWL(x) pthread_rwlock_wrlock(&(x));
49 #  define LRWKRL(x) pthread_rwlock_rdlock(&(x));
50 #  define LRWKU(x)  pthread_rwlock_unlock(&(x));
51
52 # else /* EFL_HAVE_WIN32_THREADS */
53
54 #  define WIN32_LEAN_AND_MEAN
55 #  include <windows.h>
56 #  undef WIN32_LEAN_AND_MEAN
57
/* Win32 thread handle plus the start argument, kept so a pthread-style
 * "join" can hand a value back (native Win32 threads return no pointer). */
typedef struct
{
  HANDLE thread;
  void *val;   /* returned through _ecore_thread_win32_join() */
} win32_thread;
63
64 #  define PH(x)        win32_thread *x
65 #  define PHE(x, y)    ((x) == (y))
66 #  define PHS()        (HANDLE)GetCurrentThreadId()
67
/* pthread_create() replacement for Win32: allocate the wrapper, spawn
 * the thread and remember the start argument as the future join value.
 * Returns 0 on success, -1 on allocation or CreateThread failure. */
int _ecore_thread_win32_create(win32_thread **x, LPTHREAD_START_ROUTINE f, void *d)
{
  win32_thread *t;
  t = (win32_thread *)calloc(1, sizeof(win32_thread));
  if (!t)
    return -1;

  (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
  if (!t->thread)
    {
      free(t);
      return -1;
    }
  t->val = d;
  *x = t;

  return 0;
}
86 #  define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)
87
/* pthread_join() replacement for Win32: wait for the thread to finish
 * (unless joining ourselves) and hand back the value stored at creation.
 * NOTE(review): the win32_thread wrapper allocated in
 * _ecore_thread_win32_create() is never freed here — looks like a leak;
 * confirm against the callers. */
int _ecore_thread_win32_join(win32_thread *x, void **res)
{
  if (!PHE(x, PHS()))
    {
      WaitForSingleObject(x->thread, INFINITE);
      CloseHandle(x->thread);
    }
  if (res) *res = x->val;

  return 0;
}
99
100 #  define PHJ(x, p) _ecore_thread_win32_join(x, (void**)(&(p)))
101 #  define PHA(x) TerminateThread(x->thread, 0)
102
103 #  define LK(x)  HANDLE x
104 #  define LKI(x) x = CreateMutex(NULL, FALSE, NULL)
105 #  define LKD(x) CloseHandle(x)
106 #  define LKL(x) WaitForSingleObject(x, INFINITE)
107 #  define LKU(x) ReleaseMutex(x)
108
/* Semaphore-based condition-variable emulation for Win32:
 * threads_count tracks how many waiters a broadcast must release,
 * guarded by threads_count_lock. */
typedef struct
{
  HANDLE semaphore;
  LONG threads_count;
  CRITICAL_SECTION threads_count_lock;
} win32_cond;
115
116 #  define CD(x)  win32_cond *x
117
118 #  define CDI(x)                                                     \
119    do {                                                              \
120      x = (win32_cond *)calloc(1, sizeof(win32_cond));                \
121      if (x)                                                          \
122         {                                                            \
123           x->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL); \
124           if (x->semaphore)                                          \
125             InitializeCriticalSection(&x->threads_count_lock);     \
126           else                                                       \
127             {                                                        \
128               free(x);                                               \
129               x = NULL;                                              \
130             }                                                        \
131         }                                                            \
132    } while (0)
133
134 #  define CDD(x)               \
135   do {                         \
136     CloseHandle(x->semaphore); \
137     free(x);                   \
138     x = NULL;                  \
139    } while (0)
140
141 #  define CDB(x)                                            \
142 do {                                                        \
143   EnterCriticalSection(&x->threads_count_lock);             \
144   if (x->threads_count > 0)                                 \
145     ReleaseSemaphore(x->semaphore, x->threads_count, NULL); \
146   LeaveCriticalSection (&x->threads_count_lock);            \
147  } while (0)
148
/* pthread_cond_timedwait() replacement for Win32: register as a waiter,
 * drop the external mutex and block on the semaphore.  Returns 0 when
 * signalled, -1 on timeout/error.
 * NOTE(review): 't' is used as a RELATIVE timeout here, while
 * pthread_cond_timedwait takes an ABSOLUTE deadline — confirm callers
 * pass a relative timeval on this code path.  Also, unlike a real
 * condvar, the mutex is NOT re-acquired before returning. */
int _ecore_thread_win32_cond_timedwait(win32_cond *c, HANDLE *external_mutex, struct timeval *t)
{
  DWORD res;
  DWORD val = t->tv_sec * 1000 + (t->tv_usec / 1000);
  LKL(external_mutex);
  EnterCriticalSection (&c->threads_count_lock);
  c->threads_count++;
  LeaveCriticalSection (&c->threads_count_lock);
  LKU(external_mutex);
  res = WaitForSingleObject(c->semaphore, val);
  if (res == WAIT_OBJECT_0)
    return 0;
  else
    return -1;
}
164 #  define CDW(x, y, t) _ecore_thread_win32_cond_timedwait(x, y, t)
165
166 typedef struct
167 {
168   LONG readers_count;
169   LONG writers_count;
170   int readers;
171   int writers;
172   LK(mutex);
173   CD(cond_read);
174   CD(cond_write);
175 } win32_rwl;
176
177 #  define LRWK(x)   win32_rwl *x
178 #  define LRWKI(x)                                 \
179   do {                                             \
180     x = (win32_rwl *)calloc(1, sizeof(win32_rwl)); \
181     if (x)                                         \
182       {                                            \
183         LKI(x->mutex);                             \
184         if (x->mutex)                              \
185           {                                        \
186             CDI(x->cond_read);                     \
187             if (x->cond_read)                      \
188               {                                    \
189                 CDI(x->cond_write);                \
190                 if (!x->cond_write)                \
191                   {                                \
192                     CDD(x->cond_read);             \
193                     LKD(x->mutex);                 \
194                     free(x);                       \
195                     x = NULL;                      \
196                   }                                \
197               }                                    \
198             else                                   \
199               {                                    \
200                 LKD(x->mutex);                     \
201                 free(x);                           \
202                 x = NULL;                          \
203               }                                    \
204           }                                        \
205         else                                       \
206           {                                        \
207             free(x);                               \
208             x = NULL;                              \
209           }                                        \
210       }                                            \
211   } while (0)
212
213 #  define LRWKD(x)                   \
214   do {                               \
215     LKU(x->mutex);                   \
216     LKD(x->mutex);                   \
217     CDD(x->cond_write);              \
218     CDD(x->cond_read);               \
219     free(x);                         \
220   } while (0)
/* Write-lock for the Win32 rwlock emulation.
 * Fixes vs. the previous version:
 *  - entry did LKU (unlock) instead of LKL (lock), so the body ran
 *    unprotected and the final LKU released a mutex we never held
 *    (compare LRWKRL, which locks on entry);
 *  - 'res' was read uninitialized when the lock was uncontended;
 *  - the waiter count of cond_read was bumped although we block on
 *    cond_write's semaphore (LRWKU releases cond_write based on
 *    cond_write->threads_count);
 *  - on success the active-writer flag is x->writers (tested by the
 *    wait loops and by LRWKU), not x->writers_count, which counts
 *    queued writers. */
#  define LRWKWL(x)                                                       \
  do {                                                                    \
    DWORD res = WAIT_OBJECT_0;                                            \
    LKL(x->mutex);                                                        \
    if (x->writers || x->readers > 0)                                     \
      {                                                                   \
        x->writers_count++;                                               \
        while (x->writers || x->readers > 0)                              \
          {                                                               \
            EnterCriticalSection(&x->cond_write->threads_count_lock);     \
            x->cond_write->threads_count++;                               \
            LeaveCriticalSection(&x->cond_write->threads_count_lock);     \
            res = WaitForSingleObject(x->cond_write->semaphore, INFINITE); \
            if (res != WAIT_OBJECT_0) break;                              \
          }                                                               \
        x->writers_count--;                                               \
      }                                                                   \
    if (res == WAIT_OBJECT_0) x->writers = 1;                             \
    LKU(x->mutex);                                                        \
  } while (0)
/* Read-lock for the Win32 rwlock emulation.
 * Fixes vs. the previous version:
 *  - 'res' was read uninitialized when no writer was active;
 *  - readers blocked on cond_write's semaphore while registering in
 *    cond_read->threads_count; LRWKU wakes readers through cond_read
 *    (ReleaseSemaphore/CDB on x->cond_read), so that is the semaphore
 *    a reader must wait on. */
#  define LRWKRL(x)                                                       \
  do {                                                                    \
    DWORD res = WAIT_OBJECT_0;                                            \
    LKL(x->mutex);                                                        \
    if (x->writers)                                                       \
      {                                                                   \
        x->readers_count++;                                               \
        while (x->writers)                                                \
          {                                                               \
            EnterCriticalSection(&x->cond_read->threads_count_lock);      \
            x->cond_read->threads_count++;                                \
            LeaveCriticalSection(&x->cond_read->threads_count_lock);      \
            res = WaitForSingleObject(x->cond_read->semaphore, INFINITE); \
            if (res != WAIT_OBJECT_0) break;                              \
          }                                                               \
        x->readers_count--;                                               \
      }                                                                   \
    if (res == WAIT_OBJECT_0)                                             \
      x->readers++;                                                       \
    LKU(x->mutex);                                                        \
  } while (0)
262 #  define LRWKU(x)                                                     \
263   do {                                                                 \
264     LKL(x->mutex);                                                     \
265     if (x->writers)                                                    \
266       {                                                                \
267         x->writers = 0;                                                \
268         if (x->readers_count == 1)                                     \
269           {                                                            \
270             EnterCriticalSection(&x->cond_read->threads_count_lock);   \
271             if (x->cond_read->threads_count > 0)                       \
272               ReleaseSemaphore(x->cond_read->semaphore, 1, 0);         \
273             LeaveCriticalSection(&x->cond_read->threads_count_lock);   \
274           }                                                            \
275         else if (x->readers_count > 0)                                 \
276           CDB(x->cond_read);                                           \
277         else if (x->writers_count > 0)                                 \
278           {                                                            \
279             EnterCriticalSection (&x->cond_write->threads_count_lock); \
280             if (x->cond_write->threads_count > 0)                      \
281               ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
282             LeaveCriticalSection (&x->cond_write->threads_count_lock); \
283           }                                                            \
284       }                                                                \
285     else if (x->readers > 0)                                           \
286       {                                                                \
287         x->readers--;                                                  \
288         if (x->readers == 0 && x->writers_count > 0)                   \
289           {                                                            \
290             EnterCriticalSection (&x->cond_write->threads_count_lock); \
291             if (x->cond_write->threads_count > 0)                      \
292               ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
293             LeaveCriticalSection (&x->cond_write->threads_count_lock); \
294           }                                                            \
295       }                                                                \
296     LKU(x->mutex);                                                     \
297   } while (0)
298
299 # endif
300
301 #endif
302
typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread Ecore_Pthread;
typedef struct _Ecore_Thread_Data  Ecore_Thread_Data;

/* Value stored in a thread-data hash: the user's pointer plus the
 * destructor to run when the entry is removed (see
 * _ecore_thread_data_free). */
struct _Ecore_Thread_Data
{
   void *data;        /* user-supplied value */
   Eina_Free_Cb cb;   /* invoked on 'data' when the entry dies */
};
312
/* One scheduled job.  'feedback_run' selects which arm of the union is
 * live: short_run for plain blocking jobs, feedback_run for jobs that
 * stream notifications back to the main loop. */
struct _Ecore_Pthread_Worker
{
   union {
      struct {
         Ecore_Thread_Cb func_blocking;       /* runs in the worker thread */
      } short_run;
      struct {
         Ecore_Thread_Cb func_heavy;          /* runs in the worker thread */
         Ecore_Thread_Notify_Cb func_notify;  /* runs in the main loop per message */
         Ecore_Pipe *notify;                  /* worker -> main-loop message pipe */

         Ecore_Pipe *direct_pipe;             /* end pipe of a dedicated thread */
         Ecore_Pthread_Worker *direct_worker; /* pre-allocated death job for it */

         int send;      /* notify messages produced by the worker */
         int received;  /* notify messages consumed by the main loop */
      } feedback_run;
   } u;

   Ecore_Thread_Cb func_cancel;   /* main loop, when the job is cancelled */
   Ecore_Thread_Cb func_end;      /* main loop, on normal completion */
#ifdef EFL_HAVE_THREADS
   PH(self);          /* thread currently executing this job */
   Eina_Hash *hash;   /* per-thread data entries, freed in _ecore_thread_kill */
   CD(cond);
   LK(mutex);
#endif

   const void *data;  /* user context handed to every callback */

   Eina_Bool cancel : 1;        /* cancellation was requested */
   Eina_Bool feedback_run : 1;  /* selects the union arm above */
   Eina_Bool kill : 1;          /* destroy once send == received */
   Eina_Bool reschedule : 1;    /* re-queue instead of finishing */
};
348
349 #ifdef EFL_HAVE_THREADS
typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;

/* Bookkeeping for one pool thread; handed to _ecore_thread_end() via
 * the pre-allocated death_job when the thread exits. */
struct _Ecore_Pthread_Data
{
   Ecore_Pthread_Worker *death_job;  /* job that reports this thread's exit */
   Ecore_Pipe *p;                    /* end pipe back to the main loop */
   void *data;
   PH(thread);
};
359 #endif
360
361 static void _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte);
362
363 static int _ecore_thread_count_max = 0;
364 static int ECORE_THREAD_PIPE_DEL = 0;
365 static Eina_Array *_ecore_thread_pipe = NULL;
366
367 static Ecore_Pipe*
368 _ecore_thread_pipe_get(void)
369 {
370    if (eina_array_count_get(_ecore_thread_pipe) > 0)
371      return eina_array_pop(_ecore_thread_pipe);
372
373    return ecore_pipe_add(_ecore_thread_handler, NULL);
374 }
375
376 #ifdef EFL_HAVE_THREADS
377 static int _ecore_thread_count = 0;
378
379 static Ecore_Event_Handler *del_handler = NULL;
380 static Eina_List *_ecore_active_job_threads = NULL;
381 static Eina_List *_ecore_pending_job_threads = NULL;
382 static Eina_List *_ecore_pending_job_threads_feedback = NULL;
383 static LK(_ecore_pending_job_threads_mutex);
384
385 static Eina_Hash *_ecore_thread_global_hash = NULL;
386 static LRWK(_ecore_thread_global_hash_lock);
387 static LK(_ecore_thread_global_hash_mutex);
388 static CD(_ecore_thread_global_hash_cond);
389
390 static PH(main_loop_thread);
391 static Eina_Bool have_main_loop_thread = 0;
392
393 static Eina_Trash *_ecore_thread_worker_trash = NULL;
394 static int _ecore_thread_worker_count = 0;
395
396 static void
397 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
398 {
399    if (_ecore_thread_worker_count > (_ecore_thread_count_max + 1) * 16)
400      {
401         free(worker);
402         return ;
403      }
404
405    eina_trash_push(&_ecore_thread_worker_trash, worker);
406 }
407
408 static void
409 _ecore_thread_data_free(void *data)
410 {
411    Ecore_Thread_Data *d = data;
412
413    if (d->cb) d->cb(d->data);
414    free(d);
415 }
416
417 static void
418 _ecore_thread_pipe_free(void *data __UNUSED__, void *event)
419 {
420    Ecore_Pipe *p = event;
421
422    eina_array_push(_ecore_thread_pipe, p);
423    eina_threads_shutdown();
424 }
425
426 static Eina_Bool
427 _ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
428 {
429    /* This is a hack to delay pipe destruction until we are out of its internal loop. */
430    return ECORE_CALLBACK_CANCEL;
431 }
432
/* Reap a finished pool thread: join it, unlink its bookkeeping entry
 * and queue its pipe for delayed destruction. */
static void
_ecore_thread_end(Ecore_Pthread_Data *pth, __UNUSED__ Ecore_Thread *work)
{
   Ecore_Pipe *p;

   /* The join must succeed before we may touch or free pth. */
   if (PHJ(pth->thread, p) != 0)
     return ;

   _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);

   /* Destroy the pipe via an event so we are outside its internal loop
    * (see _ecore_thread_pipe_del / _ecore_thread_pipe_free). */
   ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
   free(pth);
}
446
/* Final, main-loop-side destruction of a worker: fire the end or
 * cancel callback, then release everything the worker owns (notify
 * pipe, recycled direct pipe/worker, cond, mutex, thread-data hash). */
static void
_ecore_thread_kill(Ecore_Pthread_Worker *work)
{
   if (work->cancel)
     {
        if (work->func_cancel)
          work->func_cancel((void *) work->data, (Ecore_Thread *) work);
     }
   else
     {
        if (work->func_end)
          work->func_end((void *) work->data, (Ecore_Thread *) work);
     }

   if (work->feedback_run)
     {
        ecore_pipe_del(work->u.feedback_run.notify);

        /* Recycle the dedicated pipe/worker instead of destroying them. */
        if (work->u.feedback_run.direct_pipe)
          eina_array_push(_ecore_thread_pipe, work->u.feedback_run.direct_pipe);
        if (work->u.feedback_run.direct_worker)
          _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
     }
   CDD(work->cond);
   LKD(work->mutex);
   if (work->hash)
     eina_hash_free(work->hash);
   free(work);
}
476
477 static void
478 _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
479 {
480    Ecore_Pthread_Worker *work;
481
482    if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
483
484    work = *(Ecore_Pthread_Worker **)buffer;
485
486    if (work->feedback_run)
487      {
488         if (work->u.feedback_run.send != work->u.feedback_run.received)
489           {
490              work->kill = EINA_TRUE;
491              return ;
492           }
493      }
494
495    _ecore_thread_kill(work);
496 }
497
/* Main-loop side notify handler for feedback workers: deliver one
 * message sent from the worker thread to the user's func_notify, and
 * finish a deferred kill once every message has been consumed. */
static void
_ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
{
   Ecore_Pthread_Worker *work = data;
   void *user_data;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;

   user_data = *(void **)buffer;
   work->u.feedback_run.received++;

   if (work->u.feedback_run.func_notify)
     work->u.feedback_run.func_notify((void *) work->data, (Ecore_Thread *) work, user_data);

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
     {
        _ecore_thread_kill(work);
     }
}
518
/* Worker-thread loop for plain (short) jobs: pop jobs off the shared
 * pending list until it drains, run them, and report each finished
 * worker back to the main loop through end_pipe. */
static void
_ecore_short_job(Ecore_Pipe *end_pipe)
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads)
     {
        LKL(_ecore_pending_job_threads_mutex);

        /* Re-check under the lock: another worker may have drained the
         * list between the unlocked test above and here. */
        if (!_ecore_pending_job_threads)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads);
        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                           _ecore_pending_job_threads);

        LKU(_ecore_pending_job_threads_mutex);

        /* Cancelled jobs skip the user callback but are still reported. */
        if (!work->cancel)
          work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);

        if (work->reschedule)
          {
             /* The job asked to run again: re-queue it instead of ending. */
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}
557
/* Worker-thread loop for feedback jobs: pop jobs off the feedback
 * pending list, record the executing thread in work->self, run
 * func_heavy, and report completion through end_pipe. */
static void
_ecore_feedback_job(Ecore_Pipe *end_pipe, PH(thread))
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads_feedback)
     {
        LKL(_ecore_pending_job_threads_mutex);

        /* Re-check under the lock: the list may have been drained. */
        if (!_ecore_pending_job_threads_feedback)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                                    _ecore_pending_job_threads_feedback);

        LKU(_ecore_pending_job_threads_mutex);

        work->self = thread;
        if (!work->cancel)
          work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);

        if (work->reschedule)
          {
             /* The job asked to run again: re-queue it instead of ending. */
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}
597
598 static void *
599 _ecore_direct_worker(Ecore_Pthread_Worker *work)
600 {
601    Ecore_Pthread_Data *pth;
602
603 #ifdef EFL_POSIX_THREADS
604    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
605    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
606 #endif
607
608    eina_sched_prio_drop();
609
610    pth = malloc(sizeof (Ecore_Pthread_Data));
611    if (!pth) return NULL;
612
613    pth->p = work->u.feedback_run.direct_pipe;
614    if (!pth->p)
615      {
616         free(pth);
617         return NULL;
618      }
619    pth->thread = PHS();
620
621    work->self = pth->thread;
622    work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
623
624    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
625
626    work = work->u.feedback_run.direct_worker;
627    if (!work)
628      {
629         free(pth);
630         return NULL;
631      }
632
633    work->data = pth;
634    work->u.short_run.func_blocking = NULL;
635    work->func_end = (void *) _ecore_thread_end;
636    work->func_cancel = NULL;
637    work->cancel = EINA_FALSE;
638    work->feedback_run = EINA_FALSE;
639    work->kill = EINA_FALSE;
640    work->hash = NULL;
641    CDI(work->cond);
642    LKI(work->mutex);
643
644    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
645
646    return pth->p;
647 }
648
649 static void *
650 _ecore_thread_worker(Ecore_Pthread_Data *pth)
651 {
652    Ecore_Pthread_Worker *work;
653
654 #ifdef EFL_POSIX_THREADS
655    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
656    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
657 #endif
658
659    eina_sched_prio_drop();
660
661    LKL(_ecore_pending_job_threads_mutex);
662    _ecore_thread_count++;
663    LKU(_ecore_pending_job_threads_mutex);
664
665  restart:
666    if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
667    if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread);
668
669    /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
670
671    LKL(_ecore_pending_job_threads_mutex);
672    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
673      {
674         LKU(_ecore_pending_job_threads_mutex);
675         goto restart;
676      }
677    LKU(_ecore_pending_job_threads_mutex);
678
679    /* Sleep a little to prevent premature death */
680 #ifdef _WIN32
681    Sleep(1); /* around 50ms */
682 #else
683    usleep(200);
684 #endif
685
686    LKL(_ecore_pending_job_threads_mutex);
687    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
688      {
689         LKU(_ecore_pending_job_threads_mutex);
690         goto restart;
691      }
692    _ecore_thread_count--;
693    LKU(_ecore_pending_job_threads_mutex);
694
695    work = pth->death_job;
696    if (!work) return NULL;
697
698    work->data = pth;
699    work->u.short_run.func_blocking = NULL;
700    work->func_end = (void *) _ecore_thread_end;
701    work->func_cancel = NULL;
702    work->cancel = EINA_FALSE;
703    work->feedback_run = EINA_FALSE;
704    work->kill = EINA_FALSE;
705    work->hash = NULL;
706    CDI(work->cond);
707    LKI(work->mutex);
708
709    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
710
711    return pth->p;
712 }
713
714 #endif
715
716 static Ecore_Pthread_Worker *
717 _ecore_thread_worker_new(void)
718 {
719    Ecore_Pthread_Worker *result;
720
721 #ifdef EFL_HAVE_THREADS
722    result = eina_trash_pop(&_ecore_thread_worker_trash);
723
724    if (!result) result = malloc(sizeof (Ecore_Pthread_Worker));
725    else _ecore_thread_worker_count--;
726
727    return result;
728 #else
729    return malloc(sizeof (Ecore_Pthread_Worker));
730 #endif
731 }
732
/* Module initialisation: size the worker pool from the CPU count and
 * set up the recycling array, event type, and global locks/hash state. */
void
_ecore_thread_init(void)
{
   _ecore_thread_count_max = eina_cpu_count();
   if (_ecore_thread_count_max <= 0)
     _ecore_thread_count_max = 1;

   ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
   _ecore_thread_pipe = eina_array_new(8);

#ifdef EFL_HAVE_THREADS
   del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
   main_loop_thread = PHS();
   have_main_loop_thread = 1;

   LKI(_ecore_pending_job_threads_mutex);
   LRWKI(_ecore_thread_global_hash_lock);
   LKI(_ecore_thread_global_hash_mutex);
   CDI(_ecore_thread_global_hash_cond);
#endif
}
754
/* Module teardown: cancel every pending job, forcefully terminate the
 * still-active pool threads, and release all global resources. */
void
_ecore_thread_shutdown(void)
{
   /* FIXME: If function are still running in the background, should we kill them ? */
   Ecore_Pipe *p;
   Eina_Array_Iterator it;
   unsigned int i;

#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work;
   Ecore_Pthread_Data *pth;

   LKL(_ecore_pending_job_threads_mutex);

   /* Jobs that never started: report cancellation and drop them. */
   EINA_LIST_FREE(_ecore_pending_job_threads, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);
        free(work);
     }

   EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);
        free(work);
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Improve emergency shutdown */
   EINA_LIST_FREE(_ecore_active_job_threads, pth)
     {
        Ecore_Pipe *ep;

        /* Cancel, then join the thread before destroying its pipe.
         * NOTE(review): pth itself is not freed here — looks like a
         * small leak on shutdown; confirm. */
        PHA(pth->thread);
        PHJ(pth->thread, ep);

        ecore_pipe_del(pth->p);
     }
   if (_ecore_thread_global_hash)
     eina_hash_free(_ecore_thread_global_hash);
   ecore_event_handler_del(del_handler);
   have_main_loop_thread = 0;
   del_handler = NULL;

   LKD(_ecore_pending_job_threads_mutex);
   LRWKD(_ecore_thread_global_hash_lock);
   LKD(_ecore_thread_global_hash_mutex);
   CDD(_ecore_thread_global_hash_cond);
#endif

   /* Destroy the recycled pipes last. */
   EINA_ARRAY_ITER_NEXT(_ecore_thread_pipe, i, p, it)
     ecore_pipe_del(p);

   eina_array_free(_ecore_thread_pipe);
   _ecore_thread_pipe = NULL;
}
813
814 /**
815  * @addtogroup Ecore_Group Ecore - Main Loop and Job Functions.
816  *
817  * @{
818  */
819
820 /**
821  * @addtogroup Ecore_Thread_Group Ecore Thread functions
822  *
823  * These functions allow for ecore-managed threads which integrate with ecore's main loop.
824  *
825  * @{
826  */
827
/**
 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
 * @param func_blocking The function that should run in another thread.
 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
 * @param data User context data to pass to all callbacks.
 * @return A reference to the newly created thread instance, or NULL if it failed.
 *
 * ecore_thread_run provides a facility for easily managing blocking tasks in a
 * parallel thread. You should provide three functions. The first one, func_blocking,
 * will do the blocking work in another thread (so you should not use the
 * EFL in it, except Eina if you are careful). The second one, func_end,
 * will be called in the Ecore main loop when func_blocking is done, so you
 * can use all the EFL inside this function. The last one, func_cancel, will
 * be called in the main loop if the thread is cancelled or could not run at all.
 *
 * Be aware that you cannot make assumptions about the order in which func_end
 * callbacks run after many calls to ecore_thread_run, as we start as many threads as the
 * host CPU can handle.
 */
848 EAPI Ecore_Thread *
849 ecore_thread_run(Ecore_Thread_Cb func_blocking,
850                  Ecore_Thread_Cb func_end,
851                  Ecore_Thread_Cb func_cancel,
852                  const void *data)
853 {
854    Ecore_Pthread_Worker *work;
855 #ifdef EFL_HAVE_THREADS
856    Ecore_Pthread_Data *pth = NULL;
857 #endif
858
859    if (!func_blocking) return NULL;
860
861    work = _ecore_thread_worker_new();
862    if (!work)
863      {
864         if (func_cancel)
865           func_cancel((void *) data, NULL);
866         return NULL;
867      }
868
869    work->u.short_run.func_blocking = func_blocking;
870    work->func_end = func_end;
871    work->func_cancel = func_cancel;
872    work->cancel = EINA_FALSE;
873    work->feedback_run = EINA_FALSE;
874    work->kill = EINA_FALSE;
875    work->reschedule = EINA_FALSE;
876    work->data = data;
877
878 #ifdef EFL_HAVE_THREADS
879    work->hash = NULL;
880    CDI(work->cond);
881    LKI(work->mutex);
882
883    LKL(_ecore_pending_job_threads_mutex);
884    _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
885
886    if (_ecore_thread_count == _ecore_thread_count_max)
887      {
888         LKU(_ecore_pending_job_threads_mutex);
889         return (Ecore_Thread *) work;
890      }
891
892    LKU(_ecore_pending_job_threads_mutex);
893
894    /* One more thread could be created. */
895    pth = malloc(sizeof (Ecore_Pthread_Data));
896    if (!pth) goto on_error;
897
898    pth->p = _ecore_thread_pipe_get();
899    pth->death_job = _ecore_thread_worker_new();
900    if (!pth->p || !pth->death_job) goto on_error;
901
902    eina_threads_init();
903
904    if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
905       return (Ecore_Thread *) work;
906
907    eina_threads_shutdown();
908
909  on_error:
910    if (pth)
911      {
912         if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
913         if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
914         free(pth);
915      }
916
917    if (_ecore_thread_count == 0)
918      {
919         LKL(_ecore_pending_job_threads_mutex);
920         _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
921         LKU(_ecore_pending_job_threads_mutex);
922
923         if (work->func_cancel)
924           work->func_cancel((void *) work->data, (Ecore_Thread *) work);
925         free(work);
926         work = NULL;
927      }
928    return (Ecore_Thread *) work;
929 #else
930    /*
931      If no thread and as we don't want to break app that rely on this
932      facility, we will lock the interface until we are done.
933     */
934    do {
935       /* Handle reschedule by forcing it here. That would mean locking the app,
936        * would be better with an idler, but really to complex for a case where
937        * thread should really exist.
938        */
939       work->reschedule = EINA_FALSE;
940
941       func_blocking((void *)data, (Ecore_Thread *) work);
942       if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *) work);
943       else func_end((void *)data, (Ecore_Thread *) work);
944
945    } while (work->reschedule == EINA_TRUE);
946
947    free(work);
948
949    return NULL;
950 #endif
951 }
952
953 /**
954  * @brief Cancel a running thread.
955  * @param thread The thread to cancel.
956  * @return Will return EINA_TRUE if the thread has been cancelled,
957  *         EINA_FALSE if it is pending.
958  *
959  * ecore_thread_cancel give the possibility to cancel a task still running. It
960  * will return EINA_FALSE, if the destruction is delayed or EINA_TRUE if it is
961  * cancelled after this call.
962  *
963  * This function work in the main loop and in the thread, but you should not pass
964  * the Ecore_Thread variable from main loop to the worker thread in any structure.
965  * You should always use the one passed to the Ecore_Thread_Heavy_Cb.
966  *
967  * func_end, func_cancel will destroy the handler, so don't use it after.
968  * And if ecore_thread_cancel return EINA_TRUE, you should not use Ecore_Thread also.
969  */
EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;

   /* No job at all: report it as already cancelled. */
   if (!work)
     return EINA_TRUE;
   /* Already flagged: destruction is delayed, not immediate. */
   if (work->cancel)
     return EINA_FALSE;

   if (work->feedback_run)
     {
        /* Feedback job is already terminating on its own. */
        if (work->kill)
          return EINA_TRUE;
        /* Messages still in flight to the main loop: only delay-cancel
         * so pending notifications are not lost. */
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   LKL(_ecore_pending_job_threads_mutex);

   /* Immediate destruction is only safe from the main loop, and only
    * while the job is still sitting in a pending queue (no worker has
    * picked it up yet). */
   if ((have_main_loop_thread) &&
       (PHE(main_loop_thread, PHS())))
     {
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    /* Found it still queued: unlink, notify, free. */
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    /* Same as above, but for the feedback-job queue. */
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Delay the destruction */
 on_exit:
   /* The running job must poll ecore_thread_check() and stop itself. */
   ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
   return EINA_FALSE;
#else
   /* Without threads jobs run synchronously; nothing left to cancel. */
   return EINA_TRUE;
#endif
}
1039
1040 /**
1041  * @brief Tell if a thread was canceled or not.
1042  * @param thread The thread to test.
1043  * @return EINA_TRUE if the thread is cancelled,
1044  *         EINA_FALSE if it is not.
1045  *
1046  * You can use this function in main loop and in the thread.
1047  */
1048 EAPI Eina_Bool
1049 ecore_thread_check(Ecore_Thread *thread)
1050 {
1051    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1052
1053    if (!worker) return EINA_TRUE;
1054    return worker->cancel;
1055 }
1056
1057 /**
1058  * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
1059  * @param func_heavy The function that should run in another thread.
1060  * @param func_notify The function that will receive the data send by func_heavy in the main loop.
1061  * @param func_end The function that will be called in the main loop if the thread terminate correctly.
1062  * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
1063  * @param data User context data to pass to all callback.
1064  * @param try_no_queue If you want to run outside of the thread pool.
1065  * @return A reference to the newly created thread instance, or NULL if it failed.
1066  *
1067  * ecore_thread_feedback_run provide a facility for easily managing heavy task in a
1068  * parallel thread. You should provide four functions. The first one, func_heavy,
1069  * that will do the heavy work in another thread (so you should not use the
1070  * EFL in it except Eina and Eet if you are careful). The second one, func_notify,
1071  * will receive the data send from the thread function (func_heavy) by ecore_thread_feedback
1072  * in the main loop (and so, can use all the EFL). The third, func_end,
1073  * that will be called in Ecore main loop when func_heavy is done. So you
1074  * can use all the EFL inside this function. The last one, func_cancel, will
1075  * be called in the main loop also, if the thread is cancelled or could not run at all.
1076  *
1077  * Be aware, that you can't make assumption on the result order of func_end
1078  * after many call to ecore_feedback_run, as we start as much thread as the
1079  * host CPU can handle.
1080  *
1081  * If you set try_no_queue, it will try to run outside of the thread pool, this can bring
1082  * the CPU down, so be careful with that. Of course if it can't start a new thread, it will
1083  * try to use one from the pool.
1084  */
1085 EAPI Ecore_Thread *ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
1086                                              Ecore_Thread_Notify_Cb func_notify,
1087                                              Ecore_Thread_Cb func_end,
1088                                              Ecore_Thread_Cb func_cancel,
1089                                              const void *data,
1090                                              Eina_Bool try_no_queue)
1091 {
1092
1093 #ifdef EFL_HAVE_THREADS
1094    Ecore_Pthread_Worker *worker;
1095    Ecore_Pthread_Data *pth = NULL;
1096
1097    if (!func_heavy) return NULL;
1098
1099    worker = _ecore_thread_worker_new();
1100    if (!worker) goto on_error;
1101
1102    worker->u.feedback_run.func_heavy = func_heavy;
1103    worker->u.feedback_run.func_notify = func_notify;
1104    worker->hash = NULL;
1105    CDI(worker->cond);
1106    LKI(worker->mutex);
1107    worker->func_cancel = func_cancel;
1108    worker->func_end = func_end;
1109    worker->data = data;
1110    worker->cancel = EINA_FALSE;
1111    worker->feedback_run = EINA_TRUE;
1112    worker->kill = EINA_FALSE;
1113    worker->reschedule = EINA_FALSE;
1114
1115    worker->u.feedback_run.send = 0;
1116    worker->u.feedback_run.received = 0;
1117
1118    worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
1119    worker->u.feedback_run.direct_pipe = NULL;
1120    worker->u.feedback_run.direct_worker = NULL;
1121
1122    if (!try_no_queue)
1123      {
1124         PH(t);
1125
1126         worker->u.feedback_run.direct_pipe = _ecore_thread_pipe_get();
1127         worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
1128
1129         if (PHC(t, _ecore_direct_worker, worker) == 0)
1130            return (Ecore_Thread *) worker;
1131      }
1132
1133    LKL(_ecore_pending_job_threads_mutex);
1134    _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
1135
1136    if (_ecore_thread_count == _ecore_thread_count_max)
1137      {
1138         LKU(_ecore_pending_job_threads_mutex);
1139         return (Ecore_Thread *) worker;
1140      }
1141
1142    LKU(_ecore_pending_job_threads_mutex);
1143
1144    /* One more thread could be created. */
1145    pth = malloc(sizeof (Ecore_Pthread_Data));
1146    if (!pth) goto on_error;
1147
1148    pth->p = _ecore_thread_pipe_get();
1149    pth->death_job = _ecore_thread_worker_new();
1150    if (!pth->p || !pth->death_job) goto on_error;
1151
1152    eina_threads_init();
1153
1154    if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
1155       return (Ecore_Thread *) worker;
1156
1157    eina_threads_shutdown();
1158
1159  on_error:
1160    if (pth)
1161      {
1162         if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
1163         if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
1164         free(pth);
1165      }
1166
1167    if (_ecore_thread_count == 0)
1168      {
1169         LKL(_ecore_pending_job_threads_mutex);
1170         _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
1171                                                                worker);
1172         LKU(_ecore_pending_job_threads_mutex);
1173
1174         if (func_cancel) func_cancel((void *) data, NULL);
1175
1176         if (worker)
1177           {
1178              ecore_pipe_del(worker->u.feedback_run.notify);
1179              free(worker);
1180              worker = NULL;
1181           }
1182      }
1183
1184    return (Ecore_Thread *) worker;
1185 #else
1186    Ecore_Pthread_Worker worker;
1187
1188    (void) try_no_queue;
1189
1190    /*
1191      If no thread and as we don't want to break app that rely on this
1192      facility, we will lock the interface until we are done.
1193     */
1194    worker.u.feedback_run.func_heavy = func_heavy;
1195    worker.u.feedback_run.func_notify = func_notify;
1196    worker.u.feedback_run.notify = NULL;
1197    worker.u.feedback_run.send = 0;
1198    worker.u.feedback_run.received = 0;
1199    worker.func_cancel = func_cancel;
1200    worker.func_end = func_end;
1201    worker.data = data;
1202    worker.cancel = EINA_FALSE;
1203    worker.feedback_run = EINA_TRUE;
1204    worker.kill = EINA_FALSE;
1205
1206    do {
1207       worker.reschedule = EINA_FALSE;
1208
1209       func_heavy((void *)data, (Ecore_Thread *) &worker);
1210
1211       if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *) &worker);
1212       else func_end((void *)data, (Ecore_Thread *) &worker);
1213    } while (worker.reschedule == EINA_FALSE);
1214
1215    return NULL;
1216 #endif
1217 }
1218
1219 /**
1220  * @brief Send data to main loop from worker thread.
1221  * @param thread The current Ecore_Thread context to send data from
1222  * @param data Data to be transmitted to the main loop
 * @return EINA_TRUE if the data was successfully sent to the main loop,
 *         EINA_FALSE if anything goes wrong.
1225  *
 * After a successful call, the data should be considered owned
 * by the main loop.
1228  *
1229  * You should use this function only in the func_heavy call.
1230  */
1231 EAPI Eina_Bool
1232 ecore_thread_feedback(Ecore_Thread *thread, const void *data)
1233 {
1234    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1235
1236    if (!worker) return EINA_FALSE;
1237    if (!worker->feedback_run) return EINA_FALSE;
1238
1239 #ifdef EFL_HAVE_THREADS
1240    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1241
1242    worker->u.feedback_run.send++;
1243    ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *));
1244
1245    return EINA_TRUE;
1246 #else
1247    worker->u.feedback_run.func_notify((void*) worker->data, thread, (void*) data);
1248
1249    return EINA_TRUE;
1250 #endif
1251 }
1252
1253 /**
 * @brief Plan to run the heavy function again once it exits.
1255  * @param thread The current Ecore_Thread context to reschedule
 * @return EINA_TRUE if the thread was successfully flagged for
 *         rescheduling, EINA_FALSE if anything goes wrong.
1258  *
 * After a successful call, you can still do what you want in your thread; it
 * will only be rescheduled once you exit the heavy loop.
1261  *
1262  * You should use this function only in the func_heavy call.
1263  */
1264 EAPI Eina_Bool
1265 ecore_thread_reschedule(Ecore_Thread *thread)
1266 {
1267    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1268
1269    if (!worker) return EINA_FALSE;
1270
1271 #ifdef EFL_HAVE_THREADS
1272    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1273 #endif
1274
1275    worker->reschedule = EINA_TRUE;
1276    return EINA_TRUE;
1277 }
1278
1279 /**
1280  * @brief Get number of active thread jobs
1281  * @return Number of active threads running jobs
1282  * This returns the number of threads currently running jobs through the
1283  * ecore_thread api.
1284  */
EAPI int
ecore_thread_active_get(void)
{
#ifdef EFL_HAVE_THREADS
   /* Number of worker threads currently running jobs. */
   return _ecore_thread_count;
#else
   /* Without thread support nothing ever runs in the background. */
   return 0;
#endif
}
1294
1295 /**
1296  * @brief Get number of pending (short) thread jobs
1297  * @return Number of pending threads running "short" jobs
1298  * This returns the number of threads currently running jobs through the
1299  * ecore_thread_run api call.
1300  */
1301 EAPI int
1302 ecore_thread_pending_get(void)
1303 {
1304    int ret;
1305 #ifdef EFL_HAVE_THREADS
1306    LKL(_ecore_pending_job_threads_mutex);
1307    ret = eina_list_count(_ecore_pending_job_threads);
1308    LKU(_ecore_pending_job_threads_mutex);
1309    return ret;
1310 #else
1311    return 0;
1312 #endif
1313 }
1314
1315 /**
1316  * @brief Get number of pending feedback thread jobs
1317  * @return Number of pending threads running "feedback" jobs
1318  * This returns the number of threads currently running jobs through the
1319  * ecore_thread_feedback_run api call.
1320  */
1321 EAPI int
1322 ecore_thread_pending_feedback_get(void)
1323 {
1324    int ret;
1325 #ifdef EFL_HAVE_THREADS
1326    LKL(_ecore_pending_job_threads_mutex);
1327    ret = eina_list_count(_ecore_pending_job_threads_feedback);
1328    LKU(_ecore_pending_job_threads_mutex);
1329    return ret;
1330 #else
1331    return 0;
1332 #endif
1333 }
1334
1335 /**
1336  * @brief Get number of pending thread jobs
1337  * @return Number of pending threads running jobs
1338  * This returns the number of threads currently running jobs through the
1339  * ecore_thread_run and ecore_thread_feedback_run api calls combined.
1340  */
1341 EAPI int
1342 ecore_thread_pending_total_get(void)
1343 {
1344    int ret;
1345 #ifdef EFL_HAVE_THREADS
1346    LKL(_ecore_pending_job_threads_mutex);
1347    ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1348    LKU(_ecore_pending_job_threads_mutex);
1349    return ret;
1350 #else
1351    return 0;
1352 #endif
1353 }
1354
1355 /**
1356  * @brief Get the max number of threads that can run simultaneously
1357  * @return Max number of threads ecore will run
1358  * This returns the total number of threads that ecore will attempt to run
1359  * simultaneously.
1360  */
EAPI int
ecore_thread_max_get(void)
{
   /* Upper bound on simultaneously running worker threads. */
   return _ecore_thread_count_max;
}
1366
1367 /**
1368  * @brief Set the max number of threads that can run simultaneously
1369  * @param num The new maximum
1370  * This sets the maximum number of threads that ecore will try to run
1371  * simultaneously.  This number cannot be < 1 or >= 2x the number of active cpus.
1372  */
1373 EAPI void
1374 ecore_thread_max_set(int num)
1375 {
1376    if (num < 1) return;
1377    /* avoid doing something hilarious by blocking dumb users */
1378    if (num >= (2 * eina_cpu_count())) return;
1379
1380    _ecore_thread_count_max = num;
1381 }
1382
1383 /**
1384  * @brief Reset the max number of threads that can run simultaneously
1385  * This resets the maximum number of threads that ecore will try to run
1386  * simultaneously to the number of active cpus.
1387  */
EAPI void
ecore_thread_max_reset(void)
{
   /* Default cap: one worker per active CPU. */
   _ecore_thread_count_max = eina_cpu_count();
}
1393
1394 /**
1395  * @brief Get the number of threads which are available to be used
1396  * @return The number of available threads
1397  * This returns the number of threads slots that ecore has currently available.
1398  * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
1399  * this should be equal to (num_cpus - (active_running + active_feedback_running))
1400  */
1401 EAPI int
1402 ecore_thread_available_get(void)
1403 {
1404    int ret;
1405 #ifdef EFL_HAVE_THREADS
1406    LKL(_ecore_pending_job_threads_mutex);
1407    ret = _ecore_thread_count_max - _ecore_thread_count;
1408    LKU(_ecore_pending_job_threads_mutex);
1409    return ret;
1410 #else
1411    return 0;
1412 #endif
1413 }
1414
1415 /**
1416  * @brief Add data to the thread for subsequent use
1417  * @param thread The thread context to add to
1418  * @param key The name string to add the data with
1419  * @param value The data to add
1420  * @param cb The callback to free the data with
1421  * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1422  * @return EINA_TRUE on success, EINA_FALSE on failure
1423  * This adds data to the thread context, allowing the thread
1424  * to retrieve and use it without complicated mutexing.  This function can only be called by a
1425  * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
1426  * All data added to the thread will be freed with its associated callback (if present)
1427  * upon thread termination.  If no callback is specified, it is expected that the user will free the
1428  * data, but this is most likely not what you want.
1429  */
1430 EAPI Eina_Bool
1431 ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1432 {
1433    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1434    Ecore_Thread_Data *d;
1435    Eina_Bool ret;
1436
1437    if ((!thread) || (!key) || (!value))
1438      return EINA_FALSE;
1439 #ifdef EFL_HAVE_THREADS
1440    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1441
1442    if (!worker->hash)
1443      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1444
1445    if (!worker->hash)
1446      return EINA_FALSE;
1447
1448    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1449      return EINA_FALSE;
1450
1451    d->data = value;
1452    d->cb = cb;
1453
1454    if (direct)
1455      ret = eina_hash_direct_add(worker->hash, key, d);
1456    else
1457      ret = eina_hash_add(worker->hash, key, d);
1458    CDB(worker->cond);
1459    return ret;
1460 #else
1461    return EINA_TRUE;
1462 #endif
1463 }
1464
1465 /**
1466  * @brief Modify data in the thread, or add if not found
1467  * @param thread The thread context
1468  * @param key The name string to add the data with
1469  * @param value The data to add
1470  * @param cb The callback to free the data with
1471  * @return The old data associated with @p key on success if modified, NULL if added
1472  * This adds/modifies data in the thread context, adding only if modify fails.
1473  * This function can only be called by a *_run thread INSIDE the thread.
1474  * All data added to the thread pool will be freed with its associated callback (if present)
1475  * upon thread termination.  If no callback is specified, it is expected that the user will free the
1476  * data, but this is most likely not what you want.
1477  */
1478 EAPI void *
1479 ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
1480 {
1481    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1482    Ecore_Thread_Data *d, *r;
1483    void *ret;
1484    if ((!thread) || (!key) || (!value))
1485      return NULL;
1486 #ifdef EFL_HAVE_THREADS
1487    if (!PHE(worker->self, PHS())) return NULL;
1488
1489    if (!worker->hash)
1490      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1491
1492    if (!worker->hash)
1493      return NULL;
1494
1495    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1496      return NULL;
1497
1498    d->data = value;
1499    d->cb = cb;
1500
1501    r = eina_hash_set(worker->hash, key, d);
1502    CDB(worker->cond);
1503    ret = r->data;
1504    free(r);
1505    return ret;
1506 #else
1507    return NULL;
1508 #endif
1509 }
1510
1511 /**
1512  * @brief Find data in the thread's data
1513  * @param thread The thread context
1514  * @param key The name string the data is associated with
1515  * @return The value, or NULL on error
1516  * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
1517  * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
1518  * in any case but success.
1519  */
1520
1521 EAPI void *
1522 ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
1523 {
1524    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1525    Ecore_Thread_Data *d;
1526
1527    if ((!thread) || (!key))
1528      return NULL;
1529 #ifdef EFL_HAVE_THREADS
1530    if (!PHE(worker->self, PHS())) return NULL;
1531
1532    if (!worker->hash)
1533      return NULL;
1534
1535    d = eina_hash_find(worker->hash, key);
1536    return d->data;
1537 #else
1538    return NULL;
1539 #endif
1540 }
1541
1542 /**
1543  * @brief Delete data from the thread's data
1544  * @param thread The thread context
1545  * @param key The name string the data is associated with
1546  * @return EINA_TRUE on success, EINA_FALSE on failure
1547  * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
1548  * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
1549  * in any case but success.  Note that this WILL free the data if a callback was specified.
1550  */
1551 EAPI Eina_Bool
1552 ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
1553 {
1554    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1555    Ecore_Thread_Data *d;
1556    if ((!thread) || (!key))
1557      return EINA_FALSE;
1558 #ifdef EFL_HAVE_THREADS
1559    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1560
1561    if (!worker->hash)
1562      return EINA_FALSE;
1563    if ((d = eina_hash_find(worker->hash, key)))
1564      _ecore_thread_data_free(d);
1565    return eina_hash_del_by_key(worker->hash, key);
1566 #else
1567    return EINA_TRUE;
1568 #endif
1569 }
1570
1571 /**
1572  * @brief Add data to the global data
1573  * @param key The name string to add the data with
1574  * @param value The data to add
1575  * @param cb The optional callback to free the data with once ecore is shut down
1576  * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1577  * @return EINA_TRUE on success, EINA_FALSE on failure
1578  * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
1579  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1580  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1581  * was specified for, you will most likely encounter a segv later on.
1582  */
1583 EAPI Eina_Bool
1584 ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1585 {
1586    Eina_Bool ret;
1587    Ecore_Thread_Data *d;
1588
1589    if ((!key) || (!value))
1590      return EINA_FALSE;
1591 #ifdef EFL_HAVE_THREADS
1592    LRWKWL(_ecore_thread_global_hash_lock);
1593    if (!_ecore_thread_global_hash)
1594      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1595    LRWKU(_ecore_thread_global_hash_lock);
1596
1597    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1598      return EINA_FALSE;
1599
1600    d->data = value;
1601    d->cb = cb;
1602
1603    if (!_ecore_thread_global_hash)
1604      return EINA_FALSE;
1605    LRWKWL(_ecore_thread_global_hash_lock);
1606    if (direct)
1607      ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1608    else
1609      ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1610    LRWKU(_ecore_thread_global_hash_lock);
1611    CDB(_ecore_thread_global_hash_cond);
1612    return ret;
1613 #else
1614    return EINA_TRUE;
1615 #endif
1616 }
1617
1618 /**
1619  * @brief Add data to the global data
1620  * @param key The name string to add the data with
1621  * @param value The data to add
1622  * @param cb The optional callback to free the data with once ecore is shut down
1623  * @return An Ecore_Thread_Data on success, NULL on failure
1624  * This adds data to the global thread data and returns NULL, or replaces the previous data
1625  * associated with @p key and returning the previous data if it existed.  To see if an error occurred,
1626  * one must use eina_error_get.
1627  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1628  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1629  * was specified for, you will most likely encounter a segv later on.
1630  */
1631 EAPI void *
1632 ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
1633 {
1634    Ecore_Thread_Data *d, *r;
1635    void *ret;
1636
1637    if ((!key) || (!value))
1638      return NULL;
1639 #ifdef EFL_HAVE_THREADS
1640    LRWKWL(_ecore_thread_global_hash_lock);
1641    if (!_ecore_thread_global_hash)
1642      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1643    LRWKU(_ecore_thread_global_hash_lock);
1644
1645    if (!_ecore_thread_global_hash)
1646      return NULL;
1647
1648    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1649      return NULL;
1650
1651    d->data = value;
1652    d->cb = cb;
1653
1654    LRWKWL(_ecore_thread_global_hash_lock);
1655    r = eina_hash_set(_ecore_thread_global_hash, key, d);
1656    LRWKU(_ecore_thread_global_hash_lock);
1657    CDB(_ecore_thread_global_hash_cond);
1658
1659    ret = r->data;
1660    free(r);
1661    return ret;
1662 #else
1663    return NULL;
1664 #endif
1665 }
1666
1667 /**
1668  * @brief Find data in the global data
1669  * @param key The name string the data is associated with
1670  * @return The value, or NULL on error
1671  * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1672  * This function will return NULL in any case but success.
1673  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1674  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1675  * was specified for, you will most likely encounter a segv later on.
1676  * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1677  * if you will be doing anything with it.
1678  */
1679
1680 EAPI void *
1681 ecore_thread_global_data_find(const char *key)
1682 {
1683    Ecore_Thread_Data *ret;
1684    if (!key)
1685      return NULL;
1686 #ifdef EFL_HAVE_THREADS
1687    if (!_ecore_thread_global_hash) return NULL;
1688
1689    LRWKRL(_ecore_thread_global_hash_lock);
1690    ret = eina_hash_find(_ecore_thread_global_hash, key);
1691    LRWKU(_ecore_thread_global_hash_lock);
1692    return ret->data;
1693 #else
1694    return NULL;
1695 #endif
1696 }
1697
1698 /**
1699  * @brief Delete data from the global data
1700  * @param key The name string the data is associated with
1701  * @return EINA_TRUE on success, EINA_FALSE on failure
1702  * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add
1703  * This function will return EINA_FALSE in any case but success.
1704  * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
1705  */
1706 EAPI Eina_Bool
1707 ecore_thread_global_data_del(const char *key)
1708 {
1709    Eina_Bool ret;
1710    Ecore_Thread_Data *d;
1711
1712    if (!key)
1713      return EINA_FALSE;
1714 #ifdef EFL_HAVE_THREADS
1715    if (!_ecore_thread_global_hash)
1716      return EINA_FALSE;
1717
1718    LRWKWL(_ecore_thread_global_hash_lock);
1719    if ((d = eina_hash_find(_ecore_thread_global_hash, key)))
1720      _ecore_thread_data_free(d);
1721    ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1722    LRWKU(_ecore_thread_global_hash_lock);
1723    return ret;
1724 #else
1725    return EINA_TRUE;
1726 #endif
1727 }
1728
1729 /**
1730  * @brief Find data in the global data and optionally wait for the data if not found
1731  * @param key The name string the data is associated with
1732  * @param seconds The amount of time in seconds to wait for the data.  If 0, the call will be async and not wait for data.
1733  * If < 0 the call will wait indefinitely for the data.
1734  * @return The value, or NULL on failure
1735  * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1736  * This function will return NULL in any case but success.
1737  * Use @p seconds to specify the amount of time to wait.  Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
1738  * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1739  * if you will be doing anything with it.
1740  */
EAPI void *
ecore_thread_global_data_wait(const char *key, double seconds)
{
   double tm = 0;
   Ecore_Thread_Data *ret = NULL;

   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   /* NOTE(review): if the hash does not exist yet we give up immediately,
    * even though data could be added later within the timeout — confirm
    * this is intended. */
   if (!_ecore_thread_global_hash)
     return NULL;
   /* Absolute deadline; only computed for a positive timeout. */
   if (seconds > 0)
     tm = ecore_time_get() + seconds;

   while (1)
     {
        /* Build the absolute timeout for the condition wait.
         * NOTE(review): for seconds <= 0, tm stays 0, so the deadline is
         * in the past and CDW likely returns immediately — the
         * "wait indefinitely" case then busy-loops; verify. */
#ifndef _WIN32
        struct timespec t = { 0, 0 };

        t.tv_sec = (long int)tm;
        t.tv_nsec = (long int)((tm - (double)t.tv_sec) * 1000000000);
#else
        struct timeval t = { 0, 0 };

        t.tv_sec = (long int)tm;
        t.tv_usec = (long int)((tm - (double)t.tv_sec) * 1000000);
#endif
        /* Lookup under the read lock. */
        LRWKRL(_ecore_thread_global_hash_lock);
        ret = eina_hash_find(_ecore_thread_global_hash, key);
        LRWKU(_ecore_thread_global_hash_lock);
        /* Stop when found, when the caller asked not to wait (0), or
         * when a positive timeout has expired. */
        if ((ret) || (!seconds) || ((seconds > 0) && (tm <= ecore_time_get())))
          break;
        /* Sleep until *_data_add/set broadcasts the condition or the
         * absolute deadline passes.
         * NOTE(review): pthread_cond_timedwait interprets t as absolute
         * CLOCK_REALTIME time; presumably ecore_time_get() matches that
         * clock — verify. */
        LKL(_ecore_thread_global_hash_mutex);
        CDW(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex, &t);
        LKU(_ecore_thread_global_hash_mutex);
     }
   if (ret) return ret->data;
   return NULL;
#else
   return NULL;
#endif
}
1783
1784 /**
1785  * @}
1786  */
1787
1788 /**
1789  * @}
1790  */