Ecore: ecore_thread : free resources, spotted by Ulrich Eckhardt
[framework/uifw/ecore.git] / src / lib / ecore / ecore_thread.c
1 #ifdef HAVE_CONFIG_H
2 # include <config.h>
3 #endif
4
5 #include <sys/time.h>
6
7 #ifdef HAVE_EVIL
8 # include <Evil.h>
9 #endif
10
11 #include "Ecore.h"
12 #include "ecore_private.h"
13
14 #ifdef EFL_HAVE_THREADS
15
16 # ifdef EFL_HAVE_POSIX_THREADS
17 #  include <pthread.h>
18 #  ifdef __linux__
19 #   include <sched.h>
20 #   include <sys/resource.h>
21 #   include <unistd.h>
22 #   include <sys/syscall.h>
23 #   include <errno.h>
24 #  endif
25
/* Thin wrappers over the POSIX threading primitives so the rest of the
 * file can be written against one neutral API (the Win32 branch below
 * implements the same macro set on top of the Win32 API). */
#  define PH(x)        pthread_t x
#  define PHE(x, y)    pthread_equal(x, y)
#  define PHS()        pthread_self()
#  define PHC(x, f, d) pthread_create(&(x), NULL, (void*) f, d)
#  define PHJ(x, p)    pthread_join(x, (void**)(&(p)))
#  define PHA(x)       pthread_cancel(x)

/* Condition variables. */
#  define CD(x)  pthread_cond_t x
#  define CDI(x) pthread_cond_init(&(x), NULL);
#  define CDD(x) pthread_cond_destroy(&(x));
#  define CDB(x) pthread_cond_broadcast(&(x));
#  define CDW(x, y, t) pthread_cond_timedwait(&(x), &(y), t);

/* Mutexes. */
#  define LK(x)  pthread_mutex_t x
#  define LKI(x) pthread_mutex_init(&(x), NULL);
#  define LKD(x) pthread_mutex_destroy(&(x));
#  define LKL(x) pthread_mutex_lock(&(x));
#  define LKU(x) pthread_mutex_unlock(&(x));

/* Read-write locks. */
#  define LRWK(x)   pthread_rwlock_t x
#  define LRWKI(x)  pthread_rwlock_init(&(x), NULL);
#  define LRWKD(x)  pthread_rwlock_destroy(&(x));
#  define LRWKWL(x) pthread_rwlock_wrlock(&(x));
#  define LRWKRL(x) pthread_rwlock_rdlock(&(x));
#  define LRWKU(x)  pthread_rwlock_unlock(&(x));
51
52 # else /* EFL_HAVE_WIN32_THREADS */
53
54 #  define WIN32_LEAN_AND_MEAN
55 #  include <windows.h>
56 #  undef WIN32_LEAN_AND_MEAN
57
/* Win32 "thread handle": keeps the HANDLE plus the start argument so
 * the join wrapper can hand a value back to the joiner (Win32 threads
 * have no pthread-style join result). */
typedef struct
{
  HANDLE thread;
  void *val;
} win32_thread;

#  define PH(x)        win32_thread *x
#  define PHE(x, y)    ((x) == (y))
/* NOTE(review): PHS() yields the current thread *id* cast to HANDLE,
 * while PH() variables hold win32_thread pointers, so PHE(x, PHS())
 * only matches values produced the same way — confirm intended. */
#  define PHS()        (HANDLE)GetCurrentThreadId()
67
68 int _ecore_thread_win32_create(win32_thread **x, LPTHREAD_START_ROUTINE f, void *d)
69 {
70   win32_thread *t;
71   t = (win32_thread *)calloc(1, sizeof(win32_thread));
72   if (!t)
73     return -1;
74
75   (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
76   if (!t->thread)
77     {
78       free(t);
79       return -1;
80     }
81   t->val = d;
82   *x = t;
83
84   return 0;
85 }
86 #  define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)
87
/* Join a thread created through PHC() and recover the value it was
 * started with.  Always frees the wrapper, so x must not be reused. */
int _ecore_thread_win32_join(win32_thread *x, void **res)
{
  if (!PHE(x, PHS()))
    {
      WaitForSingleObject(x->thread, INFINITE);
      CloseHandle(x->thread);
    }
  /* NOTE(review): when joining from the thread itself the HANDLE is
   * never closed — looks like a handle leak; confirm. */
  if (res) *res = x->val;
  free(x);

  return 0;
}

#  define PHJ(x, p) _ecore_thread_win32_join(x, (void**)(&(p)))
/* Abrupt kill: the victim thread gets no chance to clean up. */
#  define PHA(x) TerminateThread(x->thread, 0)
103
/* Mutexes implemented with Win32 mutex HANDLEs (recursive for the
 * owning thread, unlike default pthread mutexes). */
#  define LK(x)  HANDLE x
#  define LKI(x) x = CreateMutex(NULL, FALSE, NULL)
#  define LKD(x) CloseHandle(x)
#  define LKL(x) WaitForSingleObject(x, INFINITE)
#  define LKU(x) ReleaseMutex(x)

/* Condition-variable emulation: a counting semaphore plus a count of
 * registered waiters, the count guarded by its own critical section. */
typedef struct
{
  HANDLE semaphore;
  LONG threads_count;
  CRITICAL_SECTION threads_count_lock;
} win32_cond;

#  define CD(x)  win32_cond *x
118
/* Allocate and initialize a win32_cond; on any failure x ends up NULL. */
#  define CDI(x)                                                     \
   do {                                                              \
     x = (win32_cond *)calloc(1, sizeof(win32_cond));                \
     if (x)                                                          \
        {                                                            \
          x->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL); \
          if (x->semaphore)                                          \
            InitializeCriticalSection(&x->threads_count_lock);     \
          else                                                       \
            {                                                        \
              free(x);                                               \
              x = NULL;                                              \
            }                                                        \
        }                                                            \
   } while (0)

/* Destroy a win32_cond.
 * NOTE(review): threads_count_lock is never DeleteCriticalSection()ed
 * here — confirm whether that leaks. */
#  define CDD(x)               \
  do {                         \
    CloseHandle(x->semaphore); \
    free(x);                   \
    x = NULL;                  \
   } while (0)

/* Broadcast: release the semaphore once per registered waiter. */
#  define CDB(x)                                            \
do {                                                        \
  EnterCriticalSection(&x->threads_count_lock);             \
  if (x->threads_count > 0)                                 \
    ReleaseSemaphore(x->semaphore, x->threads_count, NULL); \
  LeaveCriticalSection (&x->threads_count_lock);            \
 } while (0)
149
150 int _ecore_thread_win32_cond_timedwait(win32_cond *c, HANDLE *external_mutex, struct timeval *t)
151 {
152   DWORD res;
153   DWORD val = t->tv_sec * 1000 + (t->tv_usec / 1000);
154   LKL(external_mutex);
155   EnterCriticalSection (&c->threads_count_lock);
156   c->threads_count++;
157   LeaveCriticalSection (&c->threads_count_lock);
158   LKU(external_mutex);
159   res = WaitForSingleObject(c->semaphore, val);
160   if (res == WAIT_OBJECT_0)
161     return 0;
162   else
163     return -1;
164 }
165 #  define CDW(x, y, t) _ecore_thread_win32_cond_timedwait(x, y, t)
166
/* Read-write lock emulation.  readers_count/writers_count track
 * threads *waiting* for the lock (see LRWKRL/LRWKWL); readers/writers
 * track current holders (see LRWKU). */
typedef struct
{
  LONG readers_count;  /* waiting readers */
  LONG writers_count;  /* waiting writers */
  int readers;         /* readers currently holding the lock */
  int writers;         /* non-zero while a writer holds the lock */
  LK(mutex);
  CD(cond_read);
  CD(cond_write);
} win32_rwl;

#  define LRWK(x)   win32_rwl *x
/* Allocate and initialize a rwlock (mutex + one cond per side); on any
 * failure every partially created resource is released and x ends up
 * NULL. */
#  define LRWKI(x)                                 \
  do {                                             \
    x = (win32_rwl *)calloc(1, sizeof(win32_rwl)); \
    if (x)                                         \
      {                                            \
        LKI(x->mutex);                             \
        if (x->mutex)                              \
          {                                        \
            CDI(x->cond_read);                     \
            if (x->cond_read)                      \
              {                                    \
                CDI(x->cond_write);                \
                if (!x->cond_write)                \
                  {                                \
                    CDD(x->cond_read);             \
                    LKD(x->mutex);                 \
                    free(x);                       \
                    x = NULL;                      \
                  }                                \
              }                                    \
            else                                   \
              {                                    \
                LKD(x->mutex);                     \
                free(x);                           \
                x = NULL;                          \
              }                                    \
          }                                        \
        else                                       \
          {                                        \
            free(x);                               \
            x = NULL;                              \
          }                                        \
      }                                            \
  } while (0)
213
/* Destroy a rwlock.  The initial LKU() releases the mutex in case the
 * caller still holds it (ReleaseMutex on an unowned mutex merely
 * fails, so this is safe either way). */
#  define LRWKD(x)                   \
  do {                               \
    LKU(x->mutex);                   \
    LKD(x->mutex);                   \
    CDD(x->cond_write);              \
    CDD(x->cond_read);               \
    free(x);                         \
  } while (0)
/* Acquire the write lock.  Fixes over the previous version: the mutex
 * is now actually taken (was LKU, i.e. an unlock, on entry), `res` is
 * initialized so the uncontended path doesn't read garbage, the waiter
 * registers on cond_write (whose semaphore it waits on and which
 * LRWKU releases) instead of cond_read, the mutex is dropped around
 * the semaphore wait so LRWKU can ever run, and success marks
 * `writers` (the holder flag LRWKU tests) instead of `writers_count`. */
#  define LRWKWL(x)                                                        \
  do {                                                                     \
    DWORD res = 0;                                                         \
    LKL(x->mutex);                                                         \
    if (x->writers || x->readers > 0)                                      \
      {                                                                    \
        x->writers_count++;                                                \
        while (x->writers || x->readers > 0)                               \
          {                                                                \
            EnterCriticalSection(&x->cond_write->threads_count_lock);      \
            x->cond_write->threads_count++;                                \
            LeaveCriticalSection(&x->cond_write->threads_count_lock);      \
            LKU(x->mutex);                                                 \
            res = WaitForSingleObject(x->cond_write->semaphore, INFINITE); \
            LKL(x->mutex);                                                 \
            if (res != WAIT_OBJECT_0) break;                               \
          }                                                                \
        x->writers_count--;                                                \
      }                                                                    \
    if (res == 0) x->writers = 1;                                          \
    LKU(x->mutex);                                                         \
  } while (0)
/* Acquire a read lock.  Fixes over the previous version: `res` is
 * initialized so the uncontended path doesn't read garbage, waiting
 * readers register on and wait for cond_read — the cond that LRWKU
 * broadcasts to wake readers (they previously waited on cond_write's
 * semaphore while bumping cond_read's count, so readers could never be
 * woken correctly) — and the mutex is dropped around the semaphore
 * wait so the unlocker can make progress. */
#  define LRWKRL(x)                                                       \
  do {                                                                    \
    DWORD res = 0;                                                        \
    LKL(x->mutex);                                                        \
    if (x->writers)                                                       \
      {                                                                   \
        x->readers_count++;                                               \
        while (x->writers)                                                \
          {                                                               \
            EnterCriticalSection(&x->cond_read->threads_count_lock);      \
            x->cond_read->threads_count++;                                \
            LeaveCriticalSection(&x->cond_read->threads_count_lock);      \
            LKU(x->mutex);                                                \
            res = WaitForSingleObject(x->cond_read->semaphore, INFINITE); \
            LKL(x->mutex);                                                \
            if (res != WAIT_OBJECT_0) break;                              \
          }                                                               \
        x->readers_count--;                                               \
      }                                                                   \
    if (res == 0)                                                         \
      x->readers++;                                                       \
    LKU(x->mutex);                                                        \
  } while (0)
/* Release the rwlock.  A writer unlocking wakes either exactly one
 * waiting reader, all waiting readers, or one waiting writer; the last
 * reader out wakes one waiting writer. */
#  define LRWKU(x)                                                     \
  do {                                                                 \
    LKL(x->mutex);                                                     \
    if (x->writers)                                                    \
      {                                                                \
        x->writers = 0;                                                \
        if (x->readers_count == 1)                                     \
          {                                                            \
            EnterCriticalSection(&x->cond_read->threads_count_lock);   \
            if (x->cond_read->threads_count > 0)                       \
              ReleaseSemaphore(x->cond_read->semaphore, 1, 0);         \
            LeaveCriticalSection(&x->cond_read->threads_count_lock);   \
          }                                                            \
        else if (x->readers_count > 0)                                 \
          CDB(x->cond_read);                                           \
        else if (x->writers_count > 0)                                 \
          {                                                            \
            EnterCriticalSection (&x->cond_write->threads_count_lock); \
            if (x->cond_write->threads_count > 0)                      \
              ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
            LeaveCriticalSection (&x->cond_write->threads_count_lock); \
          }                                                            \
      }                                                                \
    else if (x->readers > 0)                                           \
      {                                                                \
        x->readers--;                                                  \
        if (x->readers == 0 && x->writers_count > 0)                   \
          {                                                            \
            EnterCriticalSection (&x->cond_write->threads_count_lock); \
            if (x->cond_write->threads_count > 0)                      \
              ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
            LeaveCriticalSection (&x->cond_write->threads_count_lock); \
          }                                                            \
      }                                                                \
    LKU(x->mutex);                                                     \
  } while (0)
299
300 # endif
301
302 #endif
303
typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread Ecore_Pthread;
typedef struct _Ecore_Thread_Data  Ecore_Thread_Data;

/* One entry of a thread's local-data hash: the payload plus the
 * destructor run when the entry is evicted (see _ecore_thread_data_free). */
struct _Ecore_Thread_Data
{
   void *data;
   Eina_Free_Cb cb;
};
313
/* One scheduled job — this is what an Ecore_Thread handle points to.
 * The union holds the variant-specific state: short_run for plain
 * blocking jobs, feedback_run for jobs that stream notifications back
 * to the main loop.  `feedback_run` (the bitfield) selects the variant. */
struct _Ecore_Pthread_Worker
{
   union {
      struct {
         Ecore_Thread_Cb func_blocking;
      } short_run;
      struct {
         Ecore_Thread_Cb func_heavy;
         Ecore_Thread_Notify_Cb func_notify;
         Ecore_Pipe *notify;

         Ecore_Pipe *direct_pipe;
         Ecore_Pthread_Worker *direct_worker;

         /* Notifications written vs. delivered: the worker is only
          * destroyed once both counters match (see _ecore_notify_handler). */
         int send;
         int received;
      } feedback_run;
   } u;

   Ecore_Thread_Cb func_cancel;
   Ecore_Thread_Cb func_end;
#ifdef EFL_HAVE_THREADS
   PH(self);        /* thread currently running this job */
   Eina_Hash *hash; /* per-thread local data */
   CD(cond);
   LK(mutex);
#endif

   const void *data; /* user context handed to every callback */

   Eina_Bool cancel : 1;       /* cancellation was requested */
   Eina_Bool feedback_run : 1; /* selects the union variant */
   Eina_Bool kill : 1;         /* flagged for deferred destruction */
   Eina_Bool reschedule : 1;   /* re-queue instead of finishing */
};
349
#ifdef EFL_HAVE_THREADS
typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;

/* Per-pool-thread bookkeeping: the pipe back to the main loop, a
 * pre-allocated worker used to report this thread's own death, and an
 * opaque payload. */
struct _Ecore_Pthread_Data
{
   Ecore_Pthread_Worker *death_job;
   Ecore_Pipe *p;
   void *data;
   PH(thread);
};
#endif
361
static void _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte);

static int _ecore_thread_count_max = 0;       /* pool size cap, set from the CPU count */
static int ECORE_THREAD_PIPE_DEL = 0;         /* event type for delayed pipe destruction */
static Eina_Array *_ecore_thread_pipe = NULL; /* cache of idle Ecore_Pipe objects */
367
368 static Ecore_Pipe*
369 _ecore_thread_pipe_get(void)
370 {
371    if (eina_array_count_get(_ecore_thread_pipe) > 0)
372      return eina_array_pop(_ecore_thread_pipe);
373
374    return ecore_pipe_add(_ecore_thread_handler, NULL);
375 }
376
#ifdef EFL_HAVE_THREADS
static int _ecore_thread_count = 0; /* pool threads currently alive */

static Ecore_Event_Handler *del_handler = NULL;
static Eina_List *_ecore_active_job_threads = NULL;           /* running pool threads */
static Eina_List *_ecore_pending_job_threads = NULL;          /* queued short-run jobs */
static Eina_List *_ecore_pending_job_threads_feedback = NULL; /* queued feedback jobs */
static LK(_ecore_pending_job_threads_mutex); /* guards both queues and _ecore_thread_count */

static Eina_Hash *_ecore_thread_global_hash = NULL;
static LRWK(_ecore_thread_global_hash_lock);
static LK(_ecore_thread_global_hash_mutex);
static CD(_ecore_thread_global_hash_cond);

static PH(main_loop_thread);
static Eina_Bool have_main_loop_thread = 0;

/* Recycled worker allocations — see _ecore_thread_worker_new/_free. */
static Eina_Trash *_ecore_thread_worker_trash = NULL;
static int _ecore_thread_worker_count = 0;
396
397 static void
398 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
399 {
400    if (_ecore_thread_worker_count > (_ecore_thread_count_max + 1) * 16)
401      {
402         free(worker);
403         return ;
404      }
405
406    eina_trash_push(&_ecore_thread_worker_trash, worker);
407 }
408
409 static void
410 _ecore_thread_data_free(void *data)
411 {
412    Ecore_Thread_Data *d = data;
413
414    if (d->cb) d->cb(d->data);
415    free(d);
416 }
417
/* Free callback of the ECORE_THREAD_PIPE_DEL event: recycle the pipe
 * into the cache and drop the eina_threads reference the worker held. */
static void
_ecore_thread_pipe_free(void *data __UNUSED__, void *event)
{
   Ecore_Pipe *p = event;

   eina_array_push(_ecore_thread_pipe, p);
   eina_threads_shutdown();
}
426
/* One-shot handler for ECORE_THREAD_PIPE_DEL; the real work happens in
 * the event's free callback (_ecore_thread_pipe_free). */
static Eina_Bool
_ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
{
   /* This is a hack to delay pipe destruction until we are out of its internal loop. */
   return ECORE_CALLBACK_CANCEL;
}
433
/* func_end of a "death job": join the finished pool thread, drop it from
 * the active list, and schedule its pipe for destruction once we are out
 * of the pipe's own dispatch loop (via the ECORE_THREAD_PIPE_DEL event). */
static void
_ecore_thread_end(Ecore_Pthread_Data *pth, __UNUSED__ Ecore_Thread *work)
{
   Ecore_Pipe *p;

   if (PHJ(pth->thread, p) != 0)
     return ;

   _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);

   ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
   free(pth);
}
447
/* Final destruction of a worker, run in the main loop: fire the
 * end/cancel callback, release the feedback resources, then the worker's
 * own primitives and storage. */
static void
_ecore_thread_kill(Ecore_Pthread_Worker *work)
{
   if (work->cancel)
     {
        if (work->func_cancel)
          work->func_cancel((void *) work->data, (Ecore_Thread *) work);
     }
   else
     {
        if (work->func_end)
          work->func_end((void *) work->data, (Ecore_Thread *) work);
     }

   if (work->feedback_run)
     {
        ecore_pipe_del(work->u.feedback_run.notify);

        /* Recycle the direct pipe/worker instead of destroying them. */
        if (work->u.feedback_run.direct_pipe)
          eina_array_push(_ecore_thread_pipe, work->u.feedback_run.direct_pipe);
        if (work->u.feedback_run.direct_worker)
          _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
     }
   CDD(work->cond);
   LKD(work->mutex);
   if (work->hash)
     eina_hash_free(work->hash);
   /* NOTE(review): freed directly rather than recycled through
    * _ecore_thread_worker_free() — confirm intentional. */
   free(work);
}
477
478 static void
479 _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
480 {
481    Ecore_Pthread_Worker *work;
482
483    if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
484
485    work = *(Ecore_Pthread_Worker **)buffer;
486
487    if (work->feedback_run)
488      {
489         if (work->u.feedback_run.send != work->u.feedback_run.received)
490           {
491              work->kill = EINA_TRUE;
492              return ;
493           }
494      }
495
496    _ecore_thread_kill(work);
497 }
498
/* Main-loop side of the feedback (notify) pipe: deliver one message to
 * the user callback and, if the worker was already flagged for death,
 * destroy it once every in-flight message has been read. */
static void
_ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
{
   Ecore_Pthread_Worker *work = data;
   void *user_data;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;

   user_data = *(void **)buffer;
   work->u.feedback_run.received++;

   if (work->u.feedback_run.func_notify)
     work->u.feedback_run.func_notify((void *) work->data, (Ecore_Thread *) work, user_data);

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
     {
        _ecore_thread_kill(work);
     }
}
519
/* Pool-thread loop for short (plain blocking) jobs: pop one pending
 * worker at a time under the queue mutex, run it, then either re-queue
 * it (reschedule) or report completion to the main loop via end_pipe. */
static void
_ecore_short_job(Ecore_Pipe *end_pipe)
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads)
     {
        LKL(_ecore_pending_job_threads_mutex);

        /* Re-check under the lock: another pool thread may have drained
         * the queue since the unlocked test above. */
        if (!_ecore_pending_job_threads)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads);
        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                           _ecore_pending_job_threads);

        LKU(_ecore_pending_job_threads_mutex);

        if (!work->cancel)
          work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             /* Hand the finished worker back to the main loop. */
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}
558
/* Pool-thread loop for feedback jobs: pop one pending feedback worker
 * under the queue mutex, record the running thread in work->self, run
 * the heavy callback, then either re-queue the worker (reschedule) or
 * report completion to the main loop via end_pipe. */
static void
_ecore_feedback_job(Ecore_Pipe *end_pipe, PH(thread))
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads_feedback)
     {
        LKL(_ecore_pending_job_threads_mutex);

        /* Re-check under the lock: the queue may have been drained. */
        if (!_ecore_pending_job_threads_feedback)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                                    _ecore_pending_job_threads_feedback);

        LKU(_ecore_pending_job_threads_mutex);

        work->self = thread;
        if (!work->cancel)
          work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}
598
599 static void *
600 _ecore_direct_worker(Ecore_Pthread_Worker *work)
601 {
602    Ecore_Pthread_Data *pth;
603
604 #ifdef EFL_POSIX_THREADS
605    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
606    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
607 #endif
608
609    eina_sched_prio_drop();
610
611    pth = malloc(sizeof (Ecore_Pthread_Data));
612    if (!pth) return NULL;
613
614    pth->p = work->u.feedback_run.direct_pipe;
615    if (!pth->p)
616      {
617         free(pth);
618         return NULL;
619      }
620    pth->thread = PHS();
621
622    work->self = pth->thread;
623    work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
624
625    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
626
627    work = work->u.feedback_run.direct_worker;
628    if (!work)
629      {
630         free(pth);
631         return NULL;
632      }
633
634    work->data = pth;
635    work->u.short_run.func_blocking = NULL;
636    work->func_end = (void *) _ecore_thread_end;
637    work->func_cancel = NULL;
638    work->cancel = EINA_FALSE;
639    work->feedback_run = EINA_FALSE;
640    work->kill = EINA_FALSE;
641    work->hash = NULL;
642    CDI(work->cond);
643    LKI(work->mutex);
644
645    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
646
647    return pth->p;
648 }
649
650 static void *
651 _ecore_thread_worker(Ecore_Pthread_Data *pth)
652 {
653    Ecore_Pthread_Worker *work;
654
655 #ifdef EFL_POSIX_THREADS
656    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
657    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
658 #endif
659
660    eina_sched_prio_drop();
661
662    LKL(_ecore_pending_job_threads_mutex);
663    _ecore_thread_count++;
664    LKU(_ecore_pending_job_threads_mutex);
665
666  restart:
667    if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
668    if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread);
669
670    /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
671
672    LKL(_ecore_pending_job_threads_mutex);
673    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
674      {
675         LKU(_ecore_pending_job_threads_mutex);
676         goto restart;
677      }
678    LKU(_ecore_pending_job_threads_mutex);
679
680    /* Sleep a little to prevent premature death */
681 #ifdef _WIN32
682    Sleep(1); /* around 50ms */
683 #else
684    usleep(200);
685 #endif
686
687    LKL(_ecore_pending_job_threads_mutex);
688    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
689      {
690         LKU(_ecore_pending_job_threads_mutex);
691         goto restart;
692      }
693    _ecore_thread_count--;
694    LKU(_ecore_pending_job_threads_mutex);
695
696    work = pth->death_job;
697    if (!work) return NULL;
698
699    work->data = pth;
700    work->u.short_run.func_blocking = NULL;
701    work->func_end = (void *) _ecore_thread_end;
702    work->func_cancel = NULL;
703    work->cancel = EINA_FALSE;
704    work->feedback_run = EINA_FALSE;
705    work->kill = EINA_FALSE;
706    work->hash = NULL;
707    CDI(work->cond);
708    LKI(work->mutex);
709
710    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
711
712    return pth->p;
713 }
714
715 #endif
716
/* Get a worker, preferably recycled from the trash cache filled by
 * _ecore_thread_worker_free(); may return NULL on allocation failure. */
static Ecore_Pthread_Worker *
_ecore_thread_worker_new(void)
{
   Ecore_Pthread_Worker *result;

#ifdef EFL_HAVE_THREADS
   result = eina_trash_pop(&_ecore_thread_worker_trash);

   /* Account for the cache hit; _ecore_thread_worker_count tracks how
    * many workers sit in the trash. */
   if (!result) result = malloc(sizeof (Ecore_Pthread_Worker));
   else _ecore_thread_worker_count--;

   return result;
#else
   return malloc(sizeof (Ecore_Pthread_Worker));
#endif
}
733
/* Module init: size the pool from the CPU count, create the pipe cache,
 * the delayed-pipe-delete event and (with threads) every global lock
 * later torn down by _ecore_thread_shutdown(). */
void
_ecore_thread_init(void)
{
   _ecore_thread_count_max = eina_cpu_count();
   if (_ecore_thread_count_max <= 0)
     _ecore_thread_count_max = 1;

   ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
   _ecore_thread_pipe = eina_array_new(8);

#ifdef EFL_HAVE_THREADS
   del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
   main_loop_thread = PHS();
   have_main_loop_thread = 1;

   LKI(_ecore_pending_job_threads_mutex);
   LRWKI(_ecore_thread_global_hash_lock);
   LKI(_ecore_thread_global_hash_mutex);
   CDI(_ecore_thread_global_hash_cond);
#endif
}
755
/* Module shutdown: cancel queued jobs, kill and join running pool
 * threads, then release the locks, hash and cached pipes created in
 * _ecore_thread_init(). */
void
_ecore_thread_shutdown(void)
{
   /* FIXME: If function are still running in the background, should we kill them ? */
   Ecore_Pipe *p;
   Eina_Array_Iterator it;
   unsigned int i;

#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work;
   Ecore_Pthread_Data *pth;

   LKL(_ecore_pending_job_threads_mutex);

   /* Jobs that never got to run get their cancel callback. */
   EINA_LIST_FREE(_ecore_pending_job_threads, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);
        free(work);
     }

   EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);
        free(work);
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Improve emergency shutdown */
   EINA_LIST_FREE(_ecore_active_job_threads, pth)
     {
        Ecore_Pipe *ep;

        /* Forcefully cancel the thread, then join it. */
        PHA(pth->thread);
        PHJ(pth->thread, ep);

        /* NOTE(review): pth itself is not freed here — possible leak;
         * confirm. */
        ecore_pipe_del(pth->p);
     }
   if (_ecore_thread_global_hash)
     eina_hash_free(_ecore_thread_global_hash);
   ecore_event_handler_del(del_handler);
   have_main_loop_thread = 0;
   del_handler = NULL;

   /* Free the synchronization primitives created in _ecore_thread_init(). */
   LKD(_ecore_pending_job_threads_mutex);
   LRWKD(_ecore_thread_global_hash_lock);
   LKD(_ecore_thread_global_hash_mutex);
   CDD(_ecore_thread_global_hash_cond);
#endif

   EINA_ARRAY_ITER_NEXT(_ecore_thread_pipe, i, p, it)
     ecore_pipe_del(p);

   eina_array_free(_ecore_thread_pipe);
   _ecore_thread_pipe = NULL;
}
814
815 /**
816  * @addtogroup Ecore_Group Ecore - Main Loop and Job Functions.
817  *
818  * @{
819  */
820
821 /**
822  * @addtogroup Ecore_Thread_Group Ecore Thread functions
823  *
824  * These functions allow for ecore-managed threads which integrate with ecore's main loop.
825  *
826  * @{
827  */
828
829 /**
830  * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
831  * @param func_blocking The function that should run in another thread.
 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
 * @param data User context data to pass to all callbacks.
835  * @return A reference to the newly created thread instance, or NULL if it failed.
836  *
 * ecore_thread_run provides a facility for easily managing blocking tasks in
 * parallel threads. You should provide three functions. The first one,
 * func_blocking, will do the blocking work in another thread (so you should
 * not use the EFL in it, except Eina, and only if you are careful). The
 * second one, func_end, will be called in the Ecore main loop when
 * func_blocking is done, so you can use all of the EFL inside this function.
 * The last one, func_cancel, will be called in the main loop if the thread
 * is cancelled or could not run at all.
 *
 * Be aware that you can't make assumptions about the order in which the
 * func_end callbacks will be called after many calls to ecore_thread_run,
 * as we start as many threads as the host CPU can handle.
 */
849 EAPI Ecore_Thread *
850 ecore_thread_run(Ecore_Thread_Cb func_blocking,
851                  Ecore_Thread_Cb func_end,
852                  Ecore_Thread_Cb func_cancel,
853                  const void *data)
854 {
855    Ecore_Pthread_Worker *work;
856 #ifdef EFL_HAVE_THREADS
857    Ecore_Pthread_Data *pth = NULL;
858 #endif
859
860    if (!func_blocking) return NULL;
861
862    work = _ecore_thread_worker_new();
863    if (!work)
864      {
865         if (func_cancel)
866           func_cancel((void *) data, NULL);
867         return NULL;
868      }
869
870    work->u.short_run.func_blocking = func_blocking;
871    work->func_end = func_end;
872    work->func_cancel = func_cancel;
873    work->cancel = EINA_FALSE;
874    work->feedback_run = EINA_FALSE;
875    work->kill = EINA_FALSE;
876    work->reschedule = EINA_FALSE;
877    work->data = data;
878
879 #ifdef EFL_HAVE_THREADS
880    work->hash = NULL;
881    CDI(work->cond);
882    LKI(work->mutex);
883
884    LKL(_ecore_pending_job_threads_mutex);
885    _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
886
887    if (_ecore_thread_count == _ecore_thread_count_max)
888      {
889         LKU(_ecore_pending_job_threads_mutex);
890         return (Ecore_Thread *) work;
891      }
892
893    LKU(_ecore_pending_job_threads_mutex);
894
895    /* One more thread could be created. */
896    pth = malloc(sizeof (Ecore_Pthread_Data));
897    if (!pth) goto on_error;
898
899    pth->p = _ecore_thread_pipe_get();
900    pth->death_job = _ecore_thread_worker_new();
901    if (!pth->p || !pth->death_job) goto on_error;
902
903    eina_threads_init();
904
905    if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
906       return (Ecore_Thread *) work;
907
908    eina_threads_shutdown();
909
910  on_error:
911    if (pth)
912      {
913         if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
914         if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
915         free(pth);
916      }
917
918    if (_ecore_thread_count == 0)
919      {
920         LKL(_ecore_pending_job_threads_mutex);
921         _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
922         LKU(_ecore_pending_job_threads_mutex);
923
924         if (work->func_cancel)
925           work->func_cancel((void *) work->data, (Ecore_Thread *) work);
926         free(work);
927         work = NULL;
928      }
929    return (Ecore_Thread *) work;
930 #else
931    /*
932      If no thread and as we don't want to break app that rely on this
933      facility, we will lock the interface until we are done.
934     */
935    do {
936       /* Handle reschedule by forcing it here. That would mean locking the app,
937        * would be better with an idler, but really to complex for a case where
938        * thread should really exist.
939        */
940       work->reschedule = EINA_FALSE;
941
942       func_blocking((void *)data, (Ecore_Thread *) work);
943       if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *) work);
944       else func_end((void *)data, (Ecore_Thread *) work);
945
946    } while (work->reschedule == EINA_TRUE);
947
948    free(work);
949
950    return NULL;
951 #endif
952 }
953
954 /**
955  * @brief Cancel a running thread.
956  * @param thread The thread to cancel.
957  * @return Will return EINA_TRUE if the thread has been cancelled,
958  *         EINA_FALSE if it is pending.
959  *
960  * ecore_thread_cancel give the possibility to cancel a task still running. It
961  * will return EINA_FALSE, if the destruction is delayed or EINA_TRUE if it is
962  * cancelled after this call.
963  *
 * This function works in the main loop and in the thread, but you should not pass
965  * the Ecore_Thread variable from main loop to the worker thread in any structure.
966  * You should always use the one passed to the Ecore_Thread_Heavy_Cb.
967  *
968  * func_end, func_cancel will destroy the handler, so don't use it after.
969  * And if ecore_thread_cancel return EINA_TRUE, you should not use Ecore_Thread also.
970  */
EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;

   /* A NULL handle is treated as already gone. */
   if (!work)
     return EINA_TRUE;
   /* Already marked for delayed destruction. */
   if (work->cancel)
     return EINA_FALSE;

   if (work->feedback_run)
     {
        if (work->kill)
          return EINA_TRUE;
        /* Feedback still in flight: destruction must be delayed until the
         * main loop has drained the notifications. */
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   LKL(_ecore_pending_job_threads_mutex);

   /* Immediate removal is only possible from the main loop thread, and only
    * while the job is still queued (not yet picked up by a worker). */
   if ((have_main_loop_thread) &&
       (PHE(main_loop_thread, PHS())))
     {
        /* NOTE: EINA_LIST_FOREACH reuses 'work' as its iteration variable;
         * from here on 'thread' is the only reference to the original job. */
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    /* Unlock before running the user callback. */
                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Delay the destruction */
 on_exit:
   /* 'work' may have been clobbered by the FOREACH above: go through 'thread'. */
   ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
   return EINA_FALSE;
#else
   return EINA_TRUE;
#endif
}
1040
1041 /**
1042  * @brief Tell if a thread was canceled or not.
1043  * @param thread The thread to test.
1044  * @return EINA_TRUE if the thread is cancelled,
1045  *         EINA_FALSE if it is not.
1046  *
1047  * You can use this function in main loop and in the thread.
1048  */
1049 EAPI Eina_Bool
1050 ecore_thread_check(Ecore_Thread *thread)
1051 {
1052    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1053
1054    if (!worker) return EINA_TRUE;
1055    return worker->cancel;
1056 }
1057
1058 /**
1059  * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
1060  * @param func_heavy The function that should run in another thread.
1061  * @param func_notify The function that will receive the data send by func_heavy in the main loop.
1062  * @param func_end The function that will be called in the main loop if the thread terminate correctly.
1063  * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
1064  * @param data User context data to pass to all callback.
1065  * @param try_no_queue If you want to run outside of the thread pool.
1066  * @return A reference to the newly created thread instance, or NULL if it failed.
1067  *
1068  * ecore_thread_feedback_run provide a facility for easily managing heavy task in a
1069  * parallel thread. You should provide four functions. The first one, func_heavy,
1070  * that will do the heavy work in another thread (so you should not use the
1071  * EFL in it except Eina and Eet if you are careful). The second one, func_notify,
1072  * will receive the data send from the thread function (func_heavy) by ecore_thread_feedback
1073  * in the main loop (and so, can use all the EFL). The third, func_end,
1074  * that will be called in Ecore main loop when func_heavy is done. So you
1075  * can use all the EFL inside this function. The last one, func_cancel, will
1076  * be called in the main loop also, if the thread is cancelled or could not run at all.
1077  *
1078  * Be aware, that you can't make assumption on the result order of func_end
1079  * after many call to ecore_feedback_run, as we start as much thread as the
1080  * host CPU can handle.
1081  *
1082  * If you set try_no_queue, it will try to run outside of the thread pool, this can bring
1083  * the CPU down, so be careful with that. Of course if it can't start a new thread, it will
1084  * try to use one from the pool.
1085  */
1086 EAPI Ecore_Thread *ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
1087                                              Ecore_Thread_Notify_Cb func_notify,
1088                                              Ecore_Thread_Cb func_end,
1089                                              Ecore_Thread_Cb func_cancel,
1090                                              const void *data,
1091                                              Eina_Bool try_no_queue)
1092 {
1093
1094 #ifdef EFL_HAVE_THREADS
1095    Ecore_Pthread_Worker *worker;
1096    Ecore_Pthread_Data *pth = NULL;
1097
1098    if (!func_heavy) return NULL;
1099
1100    worker = _ecore_thread_worker_new();
1101    if (!worker) goto on_error;
1102
1103    worker->u.feedback_run.func_heavy = func_heavy;
1104    worker->u.feedback_run.func_notify = func_notify;
1105    worker->hash = NULL;
1106    CDI(worker->cond);
1107    LKI(worker->mutex);
1108    worker->func_cancel = func_cancel;
1109    worker->func_end = func_end;
1110    worker->data = data;
1111    worker->cancel = EINA_FALSE;
1112    worker->feedback_run = EINA_TRUE;
1113    worker->kill = EINA_FALSE;
1114    worker->reschedule = EINA_FALSE;
1115
1116    worker->u.feedback_run.send = 0;
1117    worker->u.feedback_run.received = 0;
1118
1119    worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
1120    worker->u.feedback_run.direct_pipe = NULL;
1121    worker->u.feedback_run.direct_worker = NULL;
1122
1123    if (!try_no_queue)
1124      {
1125         PH(t);
1126
1127         worker->u.feedback_run.direct_pipe = _ecore_thread_pipe_get();
1128         worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
1129
1130         if (PHC(t, _ecore_direct_worker, worker) == 0)
1131            return (Ecore_Thread *) worker;
1132      }
1133
1134    LKL(_ecore_pending_job_threads_mutex);
1135    _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
1136
1137    if (_ecore_thread_count == _ecore_thread_count_max)
1138      {
1139         LKU(_ecore_pending_job_threads_mutex);
1140         return (Ecore_Thread *) worker;
1141      }
1142
1143    LKU(_ecore_pending_job_threads_mutex);
1144
1145    /* One more thread could be created. */
1146    pth = malloc(sizeof (Ecore_Pthread_Data));
1147    if (!pth) goto on_error;
1148
1149    pth->p = _ecore_thread_pipe_get();
1150    pth->death_job = _ecore_thread_worker_new();
1151    if (!pth->p || !pth->death_job) goto on_error;
1152
1153    eina_threads_init();
1154
1155    if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
1156       return (Ecore_Thread *) worker;
1157
1158    eina_threads_shutdown();
1159
1160  on_error:
1161    if (pth)
1162      {
1163         if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
1164         if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
1165         free(pth);
1166      }
1167
1168    if (_ecore_thread_count == 0)
1169      {
1170         LKL(_ecore_pending_job_threads_mutex);
1171         _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
1172                                                                worker);
1173         LKU(_ecore_pending_job_threads_mutex);
1174
1175         if (func_cancel) func_cancel((void *) data, NULL);
1176
1177         if (worker)
1178           {
1179              ecore_pipe_del(worker->u.feedback_run.notify);
1180              free(worker);
1181              worker = NULL;
1182           }
1183      }
1184
1185    return (Ecore_Thread *) worker;
1186 #else
1187    Ecore_Pthread_Worker worker;
1188
1189    (void) try_no_queue;
1190
1191    /*
1192      If no thread and as we don't want to break app that rely on this
1193      facility, we will lock the interface until we are done.
1194     */
1195    worker.u.feedback_run.func_heavy = func_heavy;
1196    worker.u.feedback_run.func_notify = func_notify;
1197    worker.u.feedback_run.notify = NULL;
1198    worker.u.feedback_run.send = 0;
1199    worker.u.feedback_run.received = 0;
1200    worker.func_cancel = func_cancel;
1201    worker.func_end = func_end;
1202    worker.data = data;
1203    worker.cancel = EINA_FALSE;
1204    worker.feedback_run = EINA_TRUE;
1205    worker.kill = EINA_FALSE;
1206
1207    do {
1208       worker.reschedule = EINA_FALSE;
1209
1210       func_heavy((void *)data, (Ecore_Thread *) &worker);
1211
1212       if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *) &worker);
1213       else func_end((void *)data, (Ecore_Thread *) &worker);
1214    } while (worker.reschedule == EINA_FALSE);
1215
1216    return NULL;
1217 #endif
1218 }
1219
1220 /**
1221  * @brief Send data to main loop from worker thread.
1222  * @param thread The current Ecore_Thread context to send data from
1223  * @param data Data to be transmitted to the main loop
1224  * @return EINA_TRUE if data was successfully send to main loop,
1225  *         EINA_FALSE if anything goes wrong.
1226  *
 * After a successful call, the data should be considered owned
1228  * by the main loop.
1229  *
1230  * You should use this function only in the func_heavy call.
1231  */
1232 EAPI Eina_Bool
1233 ecore_thread_feedback(Ecore_Thread *thread, const void *data)
1234 {
1235    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1236
1237    if (!worker) return EINA_FALSE;
1238    if (!worker->feedback_run) return EINA_FALSE;
1239
1240 #ifdef EFL_HAVE_THREADS
1241    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1242
1243    worker->u.feedback_run.send++;
1244    ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *));
1245
1246    return EINA_TRUE;
1247 #else
1248    worker->u.feedback_run.func_notify((void*) worker->data, thread, (void*) data);
1249
1250    return EINA_TRUE;
1251 #endif
1252 }
1253
1254 /**
 * @brief Plan to recall the heavy function once it exits.
1256  * @param thread The current Ecore_Thread context to reschedule
1257  * @return EINA_TRUE if data was successfully send to main loop,
1258  *         EINA_FALSE if anything goes wrong.
1259  *
 * After a successful call, you can still do what you want in your thread, it
1261  * will only reschedule it once you exit the heavy loop.
1262  *
1263  * You should use this function only in the func_heavy call.
1264  */
1265 EAPI Eina_Bool
1266 ecore_thread_reschedule(Ecore_Thread *thread)
1267 {
1268    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1269
1270    if (!worker) return EINA_FALSE;
1271
1272 #ifdef EFL_HAVE_THREADS
1273    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1274 #endif
1275
1276    worker->reschedule = EINA_TRUE;
1277    return EINA_TRUE;
1278 }
1279
1280 /**
1281  * @brief Get number of active thread jobs
1282  * @return Number of active threads running jobs
1283  * This returns the number of threads currently running jobs through the
1284  * ecore_thread api.
1285  */
1286 EAPI int
1287 ecore_thread_active_get(void)
1288 {
1289 #ifdef EFL_HAVE_THREADS
1290    return _ecore_thread_count;
1291 #else
1292    return 0;
1293 #endif
1294 }
1295
1296 /**
1297  * @brief Get number of pending (short) thread jobs
1298  * @return Number of pending threads running "short" jobs
1299  * This returns the number of threads currently running jobs through the
1300  * ecore_thread_run api call.
1301  */
1302 EAPI int
1303 ecore_thread_pending_get(void)
1304 {
1305    int ret;
1306 #ifdef EFL_HAVE_THREADS
1307    LKL(_ecore_pending_job_threads_mutex);
1308    ret = eina_list_count(_ecore_pending_job_threads);
1309    LKU(_ecore_pending_job_threads_mutex);
1310    return ret;
1311 #else
1312    return 0;
1313 #endif
1314 }
1315
1316 /**
1317  * @brief Get number of pending feedback thread jobs
1318  * @return Number of pending threads running "feedback" jobs
1319  * This returns the number of threads currently running jobs through the
1320  * ecore_thread_feedback_run api call.
1321  */
1322 EAPI int
1323 ecore_thread_pending_feedback_get(void)
1324 {
1325    int ret;
1326 #ifdef EFL_HAVE_THREADS
1327    LKL(_ecore_pending_job_threads_mutex);
1328    ret = eina_list_count(_ecore_pending_job_threads_feedback);
1329    LKU(_ecore_pending_job_threads_mutex);
1330    return ret;
1331 #else
1332    return 0;
1333 #endif
1334 }
1335
1336 /**
1337  * @brief Get number of pending thread jobs
1338  * @return Number of pending threads running jobs
1339  * This returns the number of threads currently running jobs through the
1340  * ecore_thread_run and ecore_thread_feedback_run api calls combined.
1341  */
1342 EAPI int
1343 ecore_thread_pending_total_get(void)
1344 {
1345    int ret;
1346 #ifdef EFL_HAVE_THREADS
1347    LKL(_ecore_pending_job_threads_mutex);
1348    ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1349    LKU(_ecore_pending_job_threads_mutex);
1350    return ret;
1351 #else
1352    return 0;
1353 #endif
1354 }
1355
1356 /**
1357  * @brief Get the max number of threads that can run simultaneously
1358  * @return Max number of threads ecore will run
1359  * This returns the total number of threads that ecore will attempt to run
1360  * simultaneously.
1361  */
EAPI int
ecore_thread_max_get(void)
{
   /* Current cap on the number of simultaneously running worker threads. */
   return _ecore_thread_count_max;
}
1367
1368 /**
1369  * @brief Set the max number of threads that can run simultaneously
1370  * @param num The new maximum
1371  * This sets the maximum number of threads that ecore will try to run
1372  * simultaneously.  This number cannot be < 1 or >= 2x the number of active cpus.
1373  */
1374 EAPI void
1375 ecore_thread_max_set(int num)
1376 {
1377    if (num < 1) return;
1378    /* avoid doing something hilarious by blocking dumb users */
1379    if (num >= (2 * eina_cpu_count())) return;
1380
1381    _ecore_thread_count_max = num;
1382 }
1383
1384 /**
1385  * @brief Reset the max number of threads that can run simultaneously
1386  * This resets the maximum number of threads that ecore will try to run
1387  * simultaneously to the number of active cpus.
1388  */
EAPI void
ecore_thread_max_reset(void)
{
   /* Restore the default cap: one worker thread per active CPU. */
   _ecore_thread_count_max = eina_cpu_count();
}
1394
1395 /**
1396  * @brief Get the number of threads which are available to be used
1397  * @return The number of available threads
1398  * This returns the number of threads slots that ecore has currently available.
1399  * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
1400  * this should be equal to (num_cpus - (active_running + active_feedback_running))
1401  */
1402 EAPI int
1403 ecore_thread_available_get(void)
1404 {
1405    int ret;
1406 #ifdef EFL_HAVE_THREADS
1407    LKL(_ecore_pending_job_threads_mutex);
1408    ret = _ecore_thread_count_max - _ecore_thread_count;
1409    LKU(_ecore_pending_job_threads_mutex);
1410    return ret;
1411 #else
1412    return 0;
1413 #endif
1414 }
1415
1416 /**
1417  * @brief Add data to the thread for subsequent use
1418  * @param thread The thread context to add to
1419  * @param key The name string to add the data with
1420  * @param value The data to add
1421  * @param cb The callback to free the data with
1422  * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1423  * @return EINA_TRUE on success, EINA_FALSE on failure
1424  * This adds data to the thread context, allowing the thread
1425  * to retrieve and use it without complicated mutexing.  This function can only be called by a
1426  * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
1427  * All data added to the thread will be freed with its associated callback (if present)
1428  * upon thread termination.  If no callback is specified, it is expected that the user will free the
1429  * data, but this is most likely not what you want.
1430  */
1431 EAPI Eina_Bool
1432 ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1433 {
1434    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1435    Ecore_Thread_Data *d;
1436    Eina_Bool ret;
1437
1438    if ((!thread) || (!key) || (!value))
1439      return EINA_FALSE;
1440 #ifdef EFL_HAVE_THREADS
1441    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1442
1443    if (!worker->hash)
1444      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1445
1446    if (!worker->hash)
1447      return EINA_FALSE;
1448
1449    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1450      return EINA_FALSE;
1451
1452    d->data = value;
1453    d->cb = cb;
1454
1455    if (direct)
1456      ret = eina_hash_direct_add(worker->hash, key, d);
1457    else
1458      ret = eina_hash_add(worker->hash, key, d);
1459    CDB(worker->cond);
1460    return ret;
1461 #else
1462    return EINA_TRUE;
1463 #endif
1464 }
1465
1466 /**
1467  * @brief Modify data in the thread, or add if not found
1468  * @param thread The thread context
1469  * @param key The name string to add the data with
1470  * @param value The data to add
1471  * @param cb The callback to free the data with
1472  * @return The old data associated with @p key on success if modified, NULL if added
1473  * This adds/modifies data in the thread context, adding only if modify fails.
1474  * This function can only be called by a *_run thread INSIDE the thread.
1475  * All data added to the thread pool will be freed with its associated callback (if present)
1476  * upon thread termination.  If no callback is specified, it is expected that the user will free the
1477  * data, but this is most likely not what you want.
1478  */
1479 EAPI void *
1480 ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
1481 {
1482    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1483    Ecore_Thread_Data *d, *r;
1484    void *ret;
1485    if ((!thread) || (!key) || (!value))
1486      return NULL;
1487 #ifdef EFL_HAVE_THREADS
1488    if (!PHE(worker->self, PHS())) return NULL;
1489
1490    if (!worker->hash)
1491      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1492
1493    if (!worker->hash)
1494      return NULL;
1495
1496    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1497      return NULL;
1498
1499    d->data = value;
1500    d->cb = cb;
1501
1502    r = eina_hash_set(worker->hash, key, d);
1503    CDB(worker->cond);
1504    ret = r->data;
1505    free(r);
1506    return ret;
1507 #else
1508    return NULL;
1509 #endif
1510 }
1511
1512 /**
1513  * @brief Find data in the thread's data
1514  * @param thread The thread context
1515  * @param key The name string the data is associated with
1516  * @return The value, or NULL on error
1517  * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
1518  * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
1519  * in any case but success.
1520  */
1521
1522 EAPI void *
1523 ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
1524 {
1525    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1526    Ecore_Thread_Data *d;
1527
1528    if ((!thread) || (!key))
1529      return NULL;
1530 #ifdef EFL_HAVE_THREADS
1531    if (!PHE(worker->self, PHS())) return NULL;
1532
1533    if (!worker->hash)
1534      return NULL;
1535
1536    d = eina_hash_find(worker->hash, key);
1537    return d->data;
1538 #else
1539    return NULL;
1540 #endif
1541 }
1542
1543 /**
1544  * @brief Delete data from the thread's data
1545  * @param thread The thread context
1546  * @param key The name string the data is associated with
1547  * @return EINA_TRUE on success, EINA_FALSE on failure
1548  * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
1549  * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
1550  * in any case but success.  Note that this WILL free the data if a callback was specified.
1551  */
1552 EAPI Eina_Bool
1553 ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
1554 {
1555    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1556    Ecore_Thread_Data *d;
1557    if ((!thread) || (!key))
1558      return EINA_FALSE;
1559 #ifdef EFL_HAVE_THREADS
1560    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1561
1562    if (!worker->hash)
1563      return EINA_FALSE;
1564    if ((d = eina_hash_find(worker->hash, key)))
1565      _ecore_thread_data_free(d);
1566    return eina_hash_del_by_key(worker->hash, key);
1567 #else
1568    return EINA_TRUE;
1569 #endif
1570 }
1571
1572 /**
1573  * @brief Add data to the global data
1574  * @param key The name string to add the data with
1575  * @param value The data to add
1576  * @param cb The optional callback to free the data with once ecore is shut down
1577  * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1578  * @return EINA_TRUE on success, EINA_FALSE on failure
1579  * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
1580  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1581  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1582  * was specified for, you will most likely encounter a segv later on.
1583  */
1584 EAPI Eina_Bool
1585 ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1586 {
1587    Eina_Bool ret;
1588    Ecore_Thread_Data *d;
1589
1590    if ((!key) || (!value))
1591      return EINA_FALSE;
1592 #ifdef EFL_HAVE_THREADS
1593    LRWKWL(_ecore_thread_global_hash_lock);
1594    if (!_ecore_thread_global_hash)
1595      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1596    LRWKU(_ecore_thread_global_hash_lock);
1597
1598    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1599      return EINA_FALSE;
1600
1601    d->data = value;
1602    d->cb = cb;
1603
1604    if (!_ecore_thread_global_hash)
1605      return EINA_FALSE;
1606    LRWKWL(_ecore_thread_global_hash_lock);
1607    if (direct)
1608      ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1609    else
1610      ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1611    LRWKU(_ecore_thread_global_hash_lock);
1612    CDB(_ecore_thread_global_hash_cond);
1613    return ret;
1614 #else
1615    return EINA_TRUE;
1616 #endif
1617 }
1618
1619 /**
1620  * @brief Add data to the global data
1621  * @param key The name string to add the data with
1622  * @param value The data to add
1623  * @param cb The optional callback to free the data with once ecore is shut down
1624  * @return An Ecore_Thread_Data on success, NULL on failure
1625  * This adds data to the global thread data and returns NULL, or replaces the previous data
1626  * associated with @p key and returning the previous data if it existed.  To see if an error occurred,
1627  * one must use eina_error_get.
1628  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1629  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1630  * was specified for, you will most likely encounter a segv later on.
1631  */
1632 EAPI void *
1633 ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
1634 {
1635    Ecore_Thread_Data *d, *r;
1636    void *ret;
1637
1638    if ((!key) || (!value))
1639      return NULL;
1640 #ifdef EFL_HAVE_THREADS
1641    LRWKWL(_ecore_thread_global_hash_lock);
1642    if (!_ecore_thread_global_hash)
1643      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1644    LRWKU(_ecore_thread_global_hash_lock);
1645
1646    if (!_ecore_thread_global_hash)
1647      return NULL;
1648
1649    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1650      return NULL;
1651
1652    d->data = value;
1653    d->cb = cb;
1654
1655    LRWKWL(_ecore_thread_global_hash_lock);
1656    r = eina_hash_set(_ecore_thread_global_hash, key, d);
1657    LRWKU(_ecore_thread_global_hash_lock);
1658    CDB(_ecore_thread_global_hash_cond);
1659
1660    ret = r->data;
1661    free(r);
1662    return ret;
1663 #else
1664    return NULL;
1665 #endif
1666 }
1667
1668 /**
1669  * @brief Find data in the global data
1670  * @param key The name string the data is associated with
1671  * @return The value, or NULL on error
1672  * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1673  * This function will return NULL in any case but success.
1674  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1675  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1676  * was specified for, you will most likely encounter a segv later on.
1677  * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1678  * if you will be doing anything with it.
1679  */
1680
1681 EAPI void *
1682 ecore_thread_global_data_find(const char *key)
1683 {
1684    Ecore_Thread_Data *ret;
1685    if (!key)
1686      return NULL;
1687 #ifdef EFL_HAVE_THREADS
1688    if (!_ecore_thread_global_hash) return NULL;
1689
1690    LRWKRL(_ecore_thread_global_hash_lock);
1691    ret = eina_hash_find(_ecore_thread_global_hash, key);
1692    LRWKU(_ecore_thread_global_hash_lock);
1693    return ret->data;
1694 #else
1695    return NULL;
1696 #endif
1697 }
1698
1699 /**
1700  * @brief Delete data from the global data
1701  * @param key The name string the data is associated with
1702  * @return EINA_TRUE on success, EINA_FALSE on failure
1703  * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add
1704  * This function will return EINA_FALSE in any case but success.
1705  * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
1706  */
1707 EAPI Eina_Bool
1708 ecore_thread_global_data_del(const char *key)
1709 {
1710    Eina_Bool ret;
1711    Ecore_Thread_Data *d;
1712
1713    if (!key)
1714      return EINA_FALSE;
1715 #ifdef EFL_HAVE_THREADS
1716    if (!_ecore_thread_global_hash)
1717      return EINA_FALSE;
1718
1719    LRWKWL(_ecore_thread_global_hash_lock);
1720    if ((d = eina_hash_find(_ecore_thread_global_hash, key)))
1721      _ecore_thread_data_free(d);
1722    ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1723    LRWKU(_ecore_thread_global_hash_lock);
1724    return ret;
1725 #else
1726    return EINA_TRUE;
1727 #endif
1728 }
1729
1730 /**
1731  * @brief Find data in the global data and optionally wait for the data if not found
1732  * @param key The name string the data is associated with
1733  * @param seconds The amount of time in seconds to wait for the data.  If 0, the call will be async and not wait for data.
1734  * If < 0 the call will wait indefinitely for the data.
1735  * @return The value, or NULL on failure
1736  * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1737  * This function will return NULL in any case but success.
1738  * Use @p seconds to specify the amount of time to wait.  Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
1739  * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1740  * if you will be doing anything with it.
1741  */
EAPI void *
ecore_thread_global_data_wait(const char *key, double seconds)
{
   double tm = 0;
   Ecore_Thread_Data *ret = NULL;

   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash)
     return NULL;
   /* Absolute deadline; only meaningful when a positive timeout was asked. */
   if (seconds > 0)
     tm = ecore_time_get() + seconds;

   while (1)
     {
#ifndef _WIN32
        /* pthread_cond_timedwait() takes an absolute timespec deadline. */
        struct timespec t = { 0, 0 };

        t.tv_sec = (long int)tm;
        t.tv_nsec = (long int)((tm - (double)t.tv_sec) * 1000000000);
#else
        struct timeval t = { 0, 0 };

        t.tv_sec = (long int)tm;
        t.tv_usec = (long int)((tm - (double)t.tv_sec) * 1000000);
#endif
        /* Probe the hash under the read lock. */
        LRWKRL(_ecore_thread_global_hash_lock);
        ret = eina_hash_find(_ecore_thread_global_hash, key);
        LRWKU(_ecore_thread_global_hash_lock);
        /* Stop when found, when non-blocking (seconds == 0), or when the
         * deadline of a positive timeout has passed. */
        if ((ret) || (!seconds) || ((seconds > 0) && (tm <= ecore_time_get())))
          break;
        /* Sleep until a global-data broadcast (CDB) or the deadline.
         * NOTE(review): for seconds < 0, tm stays 0, so the deadline is
         * already in the past and this loop likely spins rather than
         * blocking indefinitely — confirm intent. */
        LKL(_ecore_thread_global_hash_mutex);
        CDW(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex, &t);
        LKU(_ecore_thread_global_hash_mutex);
     }
   if (ret) return ret->data;
   return NULL;
#else
   return NULL;
#endif
}
1784
1785 /**
1786  * @}
1787  */
1788
1789 /**
1790  * @}
1791  */