ecore: actually limit the number of pipes in the cache.
src/lib/ecore/ecore_thread.c
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <sys/time.h>

#ifdef HAVE_EVIL
# include <Evil.h>
#endif

#include "Ecore.h"
#include "ecore_private.h"

#ifdef EFL_HAVE_THREADS

# ifdef EFL_HAVE_POSIX_THREADS
#  include <pthread.h>
#  ifdef __linux__
#   include <sched.h>
#   include <sys/resource.h>
#   include <unistd.h>
#   include <sys/syscall.h>
#   include <errno.h>
#  endif

#  define PH(x)        pthread_t x
#  define PHE(x, y)    pthread_equal(x, y)
#  define PHS()        pthread_self()
#  define PHC(x, f, d) pthread_create(&(x), NULL, (void*) f, d)
#  define PHJ(x, p)    pthread_join(x, (void**)(&(p)))
#  define PHA(x)       pthread_cancel(x)

#  define CD(x)  pthread_cond_t x
#  define CDI(x) pthread_cond_init(&(x), NULL);
#  define CDD(x) pthread_cond_destroy(&(x));
#  define CDB(x) pthread_cond_broadcast(&(x));
#  define CDW(x, y, t) pthread_cond_timedwait(&(x), &(y), t);

#  define LK(x)  pthread_mutex_t x
#  define LKI(x) pthread_mutex_init(&(x), NULL);
#  define LKD(x) pthread_mutex_destroy(&(x));
#  define LKL(x) pthread_mutex_lock(&(x));
#  define LKU(x) pthread_mutex_unlock(&(x));

#  define LRWK(x)   pthread_rwlock_t x
#  define LRWKI(x)  pthread_rwlock_init(&(x), NULL);
#  define LRWKD(x)  pthread_rwlock_destroy(&(x));
#  define LRWKWL(x) pthread_rwlock_wrlock(&(x));
#  define LRWKRL(x) pthread_rwlock_rdlock(&(x));
#  define LRWKU(x)  pthread_rwlock_unlock(&(x));

# else /* EFL_HAVE_WIN32_THREADS */

#  define WIN32_LEAN_AND_MEAN
#  include <windows.h>
#  undef WIN32_LEAN_AND_MEAN

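/* Minimal pthread-like wrappers over the Win32 threading API, so the rest
 * of this file can use a single set of PH/LK/CD/LRWK macros on both
 * platforms. */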
typedef struct
{
  HANDLE thread;
  void *val;
} win32_thread;

#  define PH(x)        win32_thread *x
#  define PHE(x, y)    ((x) == (y))
#  define PHS()        (HANDLE)GetCurrentThreadId()

int _ecore_thread_win32_create(win32_thread **x, LPTHREAD_START_ROUTINE f, void *d)
{
  win32_thread *t;
  t = (win32_thread *)calloc(1, sizeof(win32_thread));
  if (!t)
    return -1;

  (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
  if (!t->thread)
    {
      free(t);
      return -1;
    }
  t->val = d;
  *x = t;

  return 0;
}
#  define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)

int _ecore_thread_win32_join(win32_thread *x, void **res)
{
  if (!PHE(x, PHS()))
    {
      WaitForSingleObject(x->thread, INFINITE);
      CloseHandle(x->thread);
    }
  if (res) *res = x->val;
  free(x);

  return 0;
}

#  define PHJ(x, p) _ecore_thread_win32_join(x, (void**)(&(p)))
#  define PHA(x) TerminateThread(x->thread, 0)

#  define LK(x)  HANDLE x
#  define LKI(x) x = CreateMutex(NULL, FALSE, NULL)
#  define LKD(x) CloseHandle(x)
#  define LKL(x) WaitForSingleObject(x, INFINITE)
#  define LKU(x) ReleaseMutex(x)

typedef struct
{
  HANDLE semaphore;
  LONG threads_count;
  CRITICAL_SECTION threads_count_lock;
} win32_cond;

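/* win32_cond emulates a pthread condition variable with a semaphore plus a
 * count of waiting threads: CDB releases one semaphore slot per registered
 * waiter. */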
#  define CD(x)  win32_cond *x

#  define CDI(x)                                                     \
   do {                                                              \
     x = (win32_cond *)calloc(1, sizeof(win32_cond));                \
     if (x)                                                          \
        {                                                            \
          x->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL); \
          if (x->semaphore)                                          \
            InitializeCriticalSection(&x->threads_count_lock);       \
          else                                                       \
            {                                                        \
              free(x);                                               \
              x = NULL;                                              \
            }                                                        \
        }                                                            \
   } while (0)

#  define CDD(x)               \
  do {                         \
    CloseHandle(x->semaphore); \
    free(x);                   \
    x = NULL;                  \
   } while (0)

#  define CDB(x)                                            \
do {                                                        \
  EnterCriticalSection(&x->threads_count_lock);             \
  if (x->threads_count > 0)                                 \
    ReleaseSemaphore(x->semaphore, x->threads_count, NULL); \
  LeaveCriticalSection (&x->threads_count_lock);            \
 } while (0)

int _ecore_thread_win32_cond_timedwait(win32_cond *c, HANDLE external_mutex, struct timeval *t)
{
  DWORD res;
  DWORD val = t->tv_sec * 1000 + (t->tv_usec / 1000);

  EnterCriticalSection (&c->threads_count_lock);
  c->threads_count++;
  LeaveCriticalSection (&c->threads_count_lock);

  /* Release the caller's mutex while sleeping and re-acquire it before
   * returning, mirroring pthread_cond_timedwait() semantics. */
  LKU(external_mutex);
  res = WaitForSingleObject(c->semaphore, val);
  LKL(external_mutex);

  EnterCriticalSection (&c->threads_count_lock);
  c->threads_count--;
  LeaveCriticalSection (&c->threads_count_lock);

  if (res == WAIT_OBJECT_0)
    return 0;
  else
    return -1;
}
#  define CDW(x, y, t) _ecore_thread_win32_cond_timedwait(x, y, t)

typedef struct
{
  LONG readers_count;
  LONG writers_count;
  int readers;
  int writers;
  LK(mutex);
  CD(cond_read);
  CD(cond_write);
} win32_rwl;

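/* win32_rwl emulates a pthread rwlock with a mutex and the two condition
 * objects above: cond_read wakes blocked readers, cond_write wakes blocked
 * writers. */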
#  define LRWK(x)   win32_rwl *x
#  define LRWKI(x)                                 \
  do {                                             \
    x = (win32_rwl *)calloc(1, sizeof(win32_rwl)); \
    if (x)                                         \
      {                                            \
        LKI(x->mutex);                             \
        if (x->mutex)                              \
          {                                        \
            CDI(x->cond_read);                     \
            if (x->cond_read)                      \
              {                                    \
                CDI(x->cond_write);                \
                if (!x->cond_write)                \
                  {                                \
                    CDD(x->cond_read);             \
                    LKD(x->mutex);                 \
                    free(x);                       \
                    x = NULL;                      \
                  }                                \
              }                                    \
            else                                   \
              {                                    \
                LKD(x->mutex);                     \
                free(x);                           \
                x = NULL;                          \
              }                                    \
          }                                        \
        else                                       \
          {                                        \
            free(x);                               \
            x = NULL;                              \
          }                                        \
      }                                            \
  } while (0)

#  define LRWKD(x)                   \
  do {                               \
    LKU(x->mutex);                   \
    LKD(x->mutex);                   \
    CDD(x->cond_write);              \
    CDD(x->cond_read);               \
    free(x);                         \
  } while (0)
#  define LRWKWL(x)                                                        \
  do {                                                                     \
    DWORD res = WAIT_OBJECT_0;                                             \
    LKL(x->mutex);                                                         \
    if (x->writers || x->readers > 0)                                      \
      {                                                                    \
        x->writers_count++;                                                \
        while (x->writers || x->readers > 0)                               \
          {                                                                \
            EnterCriticalSection(&x->cond_write->threads_count_lock);      \
            x->cond_write->threads_count++;                                \
            LeaveCriticalSection(&x->cond_write->threads_count_lock);      \
            LKU(x->mutex);                                                 \
            res = WaitForSingleObject(x->cond_write->semaphore, INFINITE); \
            LKL(x->mutex);                                                 \
            if (res != WAIT_OBJECT_0) break;                               \
          }                                                                \
        x->writers_count--;                                                \
      }                                                                    \
    if (res == WAIT_OBJECT_0) x->writers = 1;                              \
    LKU(x->mutex);                                                         \
  } while (0)
#  define LRWKRL(x)                                                        \
  do {                                                                     \
    DWORD res = WAIT_OBJECT_0;                                             \
    LKL(x->mutex);                                                         \
    if (x->writers)                                                        \
      {                                                                    \
        x->readers_count++;                                                \
        while (x->writers)                                                 \
          {                                                                \
            EnterCriticalSection(&x->cond_read->threads_count_lock);       \
            x->cond_read->threads_count++;                                 \
            LeaveCriticalSection(&x->cond_read->threads_count_lock);       \
            LKU(x->mutex);                                                 \
            res = WaitForSingleObject(x->cond_read->semaphore, INFINITE);  \
            LKL(x->mutex);                                                 \
            if (res != WAIT_OBJECT_0) break;                               \
          }                                                                \
        x->readers_count--;                                                \
      }                                                                    \
    if (res == WAIT_OBJECT_0)                                              \
      x->readers++;                                                        \
    LKU(x->mutex);                                                         \
  } while (0)
#  define LRWKU(x)                                                     \
  do {                                                                 \
    LKL(x->mutex);                                                     \
    if (x->writers)                                                    \
      {                                                                \
        x->writers = 0;                                                \
        if (x->readers_count == 1)                                     \
          {                                                            \
            EnterCriticalSection(&x->cond_read->threads_count_lock);   \
            if (x->cond_read->threads_count > 0)                       \
              ReleaseSemaphore(x->cond_read->semaphore, 1, 0);         \
            LeaveCriticalSection(&x->cond_read->threads_count_lock);   \
          }                                                            \
        else if (x->readers_count > 0)                                 \
          CDB(x->cond_read);                                           \
        else if (x->writers_count > 0)                                 \
          {                                                            \
            EnterCriticalSection (&x->cond_write->threads_count_lock); \
            if (x->cond_write->threads_count > 0)                      \
              ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
            LeaveCriticalSection (&x->cond_write->threads_count_lock); \
          }                                                            \
      }                                                                \
    else if (x->readers > 0)                                           \
      {                                                                \
        x->readers--;                                                  \
        if (x->readers == 0 && x->writers_count > 0)                   \
          {                                                            \
            EnterCriticalSection (&x->cond_write->threads_count_lock); \
            if (x->cond_write->threads_count > 0)                      \
              ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
            LeaveCriticalSection (&x->cond_write->threads_count_lock); \
          }                                                            \
      }                                                                \
    LKU(x->mutex);                                                     \
  } while (0)

# endif

#endif

typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
typedef struct _Ecore_Pthread Ecore_Pthread;
typedef struct _Ecore_Thread_Data  Ecore_Thread_Data;

struct _Ecore_Thread_Data
{
   void *data;
   Eina_Free_Cb cb;
};

struct _Ecore_Pthread_Worker
{
   union {
      struct {
         Ecore_Thread_Cb func_blocking;
      } short_run;
      struct {
         Ecore_Thread_Cb func_heavy;
         Ecore_Thread_Notify_Cb func_notify;
         Ecore_Pipe *notify;

         Ecore_Pipe *direct_pipe;
         Ecore_Pthread_Worker *direct_worker;

         int send;
         int received;
      } feedback_run;
   } u;

   Ecore_Thread_Cb func_cancel;
   Ecore_Thread_Cb func_end;
#ifdef EFL_HAVE_THREADS
   PH(self);
   Eina_Hash *hash;
   CD(cond);
   LK(mutex);
#endif

   const void *data;

   Eina_Bool cancel : 1;
   Eina_Bool feedback_run : 1;
   Eina_Bool kill : 1;
   Eina_Bool reschedule : 1;
};

#ifdef EFL_HAVE_THREADS
typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;

struct _Ecore_Pthread_Data
{
   Ecore_Pthread_Worker *death_job;
   Ecore_Pipe *p;
   void *data;
   PH(thread);
};
#endif

static void _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte);

static int _ecore_thread_count_max = 0;
static int ECORE_THREAD_PIPE_DEL = 0;
static Eina_Array *_ecore_thread_pipe = NULL;

static Ecore_Pipe*
_ecore_thread_pipe_get(void)
{
   if (eina_array_count_get(_ecore_thread_pipe) > 0)
     return eina_array_pop(_ecore_thread_pipe);

   return ecore_pipe_add(_ecore_thread_handler, NULL);
}

#ifdef EFL_HAVE_THREADS
static int _ecore_thread_count = 0;

static Ecore_Event_Handler *del_handler = NULL;
static Eina_List *_ecore_active_job_threads = NULL;
static Eina_List *_ecore_pending_job_threads = NULL;
static Eina_List *_ecore_pending_job_threads_feedback = NULL;
static LK(_ecore_pending_job_threads_mutex);

static Eina_Hash *_ecore_thread_global_hash = NULL;
static LRWK(_ecore_thread_global_hash_lock);
static LK(_ecore_thread_global_hash_mutex);
static CD(_ecore_thread_global_hash_cond);

static PH(main_loop_thread);
static Eina_Bool have_main_loop_thread = 0;

static Eina_Trash *_ecore_thread_worker_trash = NULL;
static int _ecore_thread_worker_count = 0;

static void
_ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
{
   if (_ecore_thread_worker_count > (_ecore_thread_count_max + 1) * 16)
     {
        free(worker);
        return ;
     }

   eina_trash_push(&_ecore_thread_worker_trash, worker);
}

static void
_ecore_thread_data_free(void *data)
{
   Ecore_Thread_Data *d = data;

   if (d->cb) d->cb(d->data);
   free(d);
}

static void
_ecore_thread_pipe_free(void *data __UNUSED__, void *event)
{
   Ecore_Pipe *p = event;

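   /* This enforces the actual limit on the pipe cache: keep at most
    * 50 pipes around for reuse and destroy the rest. */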
   if (eina_array_count_get(_ecore_thread_pipe) < 50)
     eina_array_push(_ecore_thread_pipe, p);
   else
     ecore_pipe_del(p);
   eina_threads_shutdown();
}

static Eina_Bool
_ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
{
   /* This is a hack to delay pipe destruction until we are out of its internal loop. */
   return ECORE_CALLBACK_CANCEL;
}

static void
_ecore_thread_end(Ecore_Pthread_Data *pth, __UNUSED__ Ecore_Thread *work)
{
   Ecore_Pipe *p;

   if (PHJ(pth->thread, p) != 0)
     return ;

   _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);

   ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
   free(pth);
}

static void
_ecore_thread_kill(Ecore_Pthread_Worker *work)
{
   if (work->cancel)
     {
        if (work->func_cancel)
          work->func_cancel((void *) work->data, (Ecore_Thread *) work);
     }
   else
     {
        if (work->func_end)
          work->func_end((void *) work->data, (Ecore_Thread *) work);
     }

   if (work->feedback_run)
     {
        ecore_pipe_del(work->u.feedback_run.notify);

        if (work->u.feedback_run.direct_pipe)
          eina_array_push(_ecore_thread_pipe, work->u.feedback_run.direct_pipe);
        if (work->u.feedback_run.direct_worker)
          _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
     }
   CDD(work->cond);
   LKD(work->mutex);
   if (work->hash)
     eina_hash_free(work->hash);
   free(work);
}

static void
_ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
{
   Ecore_Pthread_Worker *work;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;

   work = *(Ecore_Pthread_Worker **)buffer;

   if (work->feedback_run)
     {
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          {
             work->kill = EINA_TRUE;
             return ;
          }
     }

   _ecore_thread_kill(work);
}

static void
_ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
{
   Ecore_Pthread_Worker *work = data;
   void *user_data;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;

   user_data = *(void **)buffer;
   work->u.feedback_run.received++;

   if (work->u.feedback_run.func_notify)
     work->u.feedback_run.func_notify((void *) work->data, (Ecore_Thread *) work, user_data);

   /* Force reading all notify events before killing the thread */
   if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
     {
        _ecore_thread_kill(work);
     }
}

static void
_ecore_short_job(Ecore_Pipe *end_pipe)
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads)
     {
        LKL(_ecore_pending_job_threads_mutex);

        if (!_ecore_pending_job_threads)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads);
        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                           _ecore_pending_job_threads);

        LKU(_ecore_pending_job_threads_mutex);

        if (!work->cancel)
          work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}

static void
_ecore_feedback_job(Ecore_Pipe *end_pipe, PH(thread))
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads_feedback)
     {
        LKL(_ecore_pending_job_threads_mutex);

        if (!_ecore_pending_job_threads_feedback)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                                    _ecore_pending_job_threads_feedback);

        LKU(_ecore_pending_job_threads_mutex);

        work->self = thread;
        if (!work->cancel)
          work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}

static void *
_ecore_direct_worker(Ecore_Pthread_Worker *work)
{
   Ecore_Pthread_Data *pth;

#ifdef EFL_HAVE_POSIX_THREADS
   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

   eina_sched_prio_drop();

   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) return NULL;

   pth->p = work->u.feedback_run.direct_pipe;
   if (!pth->p)
     {
        free(pth);
        return NULL;
     }
   pth->thread = PHS();

   work->self = pth->thread;
   work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));

   work = work->u.feedback_run.direct_worker;
   if (!work)
     {
        free(pth);
        return NULL;
     }

   work->data = pth;
   work->u.short_run.func_blocking = NULL;
   work->func_end = (void *) _ecore_thread_end;
   work->func_cancel = NULL;
   work->cancel = EINA_FALSE;
   work->feedback_run = EINA_FALSE;
   work->kill = EINA_FALSE;
   work->hash = NULL;
   CDI(work->cond);
   LKI(work->mutex);

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));

   return pth->p;
}

static void *
_ecore_thread_worker(Ecore_Pthread_Data *pth)
{
   Ecore_Pthread_Worker *work;

#ifdef EFL_HAVE_POSIX_THREADS
   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
#endif

   eina_sched_prio_drop();

   LKL(_ecore_pending_job_threads_mutex);
   _ecore_thread_count++;
   LKU(_ecore_pending_job_threads_mutex);

 restart:
   if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
   if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread);

   /* FIXME: Check whether a feedback job is still pending and switch to the feedback run handler. */

   LKL(_ecore_pending_job_threads_mutex);
   if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
     {
        LKU(_ecore_pending_job_threads_mutex);
        goto restart;
     }
   LKU(_ecore_pending_job_threads_mutex);

   /* Sleep a little to prevent premature death */
#ifdef _WIN32
   Sleep(1); /* around 1ms; the real delay depends on the scheduler granularity */
#else
   usleep(200);
#endif

   LKL(_ecore_pending_job_threads_mutex);
   if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
     {
        LKU(_ecore_pending_job_threads_mutex);
        goto restart;
     }
   _ecore_thread_count--;
   LKU(_ecore_pending_job_threads_mutex);

   work = pth->death_job;
   if (!work) return NULL;

   work->data = pth;
   work->u.short_run.func_blocking = NULL;
   work->func_end = (void *) _ecore_thread_end;
   work->func_cancel = NULL;
   work->cancel = EINA_FALSE;
   work->feedback_run = EINA_FALSE;
   work->kill = EINA_FALSE;
   work->hash = NULL;
   CDI(work->cond);
   LKI(work->mutex);

   ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));

   return pth->p;
}

#endif

static Ecore_Pthread_Worker *
_ecore_thread_worker_new(void)
{
   Ecore_Pthread_Worker *result;

#ifdef EFL_HAVE_THREADS
   result = eina_trash_pop(&_ecore_thread_worker_trash);

   if (!result) result = malloc(sizeof (Ecore_Pthread_Worker));
   else _ecore_thread_worker_count--;

   return result;
#else
   return malloc(sizeof (Ecore_Pthread_Worker));
#endif
}

void
_ecore_thread_init(void)
{
   _ecore_thread_count_max = eina_cpu_count();
   if (_ecore_thread_count_max <= 0)
     _ecore_thread_count_max = 1;

   ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
   _ecore_thread_pipe = eina_array_new(8);

#ifdef EFL_HAVE_THREADS
   del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
   main_loop_thread = PHS();
   have_main_loop_thread = 1;

   LKI(_ecore_pending_job_threads_mutex);
   LRWKI(_ecore_thread_global_hash_lock);
   LKI(_ecore_thread_global_hash_mutex);
   CDI(_ecore_thread_global_hash_cond);
#endif
}

void
_ecore_thread_shutdown(void)
{
   /* FIXME: If functions are still running in the background, should we kill them? */
   Ecore_Pipe *p;
   Eina_Array_Iterator it;
   unsigned int i;

#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work;
   Ecore_Pthread_Data *pth;

   LKL(_ecore_pending_job_threads_mutex);

   EINA_LIST_FREE(_ecore_pending_job_threads, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);
        free(work);
     }

   EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
     {
        if (work->func_cancel)
          work->func_cancel((void *)work->data, (Ecore_Thread *) work);
        free(work);
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Improve emergency shutdown */
   EINA_LIST_FREE(_ecore_active_job_threads, pth)
     {
        Ecore_Pipe *ep;

        PHA(pth->thread);
        PHJ(pth->thread, ep);

        ecore_pipe_del(pth->p);
     }
   if (_ecore_thread_global_hash)
     eina_hash_free(_ecore_thread_global_hash);
   ecore_event_handler_del(del_handler);
   have_main_loop_thread = 0;
   del_handler = NULL;

   LKD(_ecore_pending_job_threads_mutex);
   LRWKD(_ecore_thread_global_hash_lock);
   LKD(_ecore_thread_global_hash_mutex);
   CDD(_ecore_thread_global_hash_cond);
#endif

   EINA_ARRAY_ITER_NEXT(_ecore_thread_pipe, i, p, it)
     ecore_pipe_del(p);

   eina_array_free(_ecore_thread_pipe);
   _ecore_thread_pipe = NULL;
}

/**
 * @addtogroup Ecore_Group Ecore - Main Loop and Job Functions.
 *
 * @{
 */

/**
 * @addtogroup Ecore_Thread_Group Ecore Thread functions
 *
 * These functions allow for ecore-managed threads which integrate with ecore's main loop.
 *
 * @{
 */

/**
 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
 * @param func_blocking The function that should run in another thread.
 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
 * @param data User context data to pass to all callbacks.
 * @return A reference to the newly created thread instance, or NULL if it failed.
 *
 * ecore_thread_run provides a facility for easily managing a blocking task in a
 * parallel thread. You should provide three functions. The first one, func_blocking,
 * will do the blocking work in another thread (so you should not use the
 * EFL in it, except Eina if you are careful). The second one, func_end,
 * will be called in the Ecore main loop when func_blocking is done, so you
 * can use all of the EFL inside this function. The last one, func_cancel, will
 * be called in the main loop if the thread is cancelled or could not run at all.
 *
 * Be aware that you cannot make any assumption about the completion order of
 * func_end after many calls to ecore_thread_run, as we start as many threads
 * as the host CPU can handle.
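 *
 * A minimal usage sketch (the callback names and my_data are illustrative,
 * not part of the API):
 * @code
 * static void
 * _job_blocking(void *data, Ecore_Thread *thread)
 * {
 *    // Runs in a worker thread: do the slow work here, with no EFL calls
 *    // besides careful Eina usage.
 * }
 *
 * static void
 * _job_end(void *data, Ecore_Thread *thread)
 * {
 *    // Runs in the main loop once _job_blocking() has returned.
 * }
 *
 * static void
 * _job_cancel(void *data, Ecore_Thread *thread)
 * {
 *    // Runs in the main loop if the job is cancelled or could not start.
 * }
 *
 * if (!ecore_thread_run(_job_blocking, _job_end, _job_cancel, my_data))
 *   fprintf(stderr, "failed to start thread job\n");
 * @endcode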
 */
EAPI Ecore_Thread *
ecore_thread_run(Ecore_Thread_Cb func_blocking,
                 Ecore_Thread_Cb func_end,
                 Ecore_Thread_Cb func_cancel,
                 const void *data)
{
   Ecore_Pthread_Worker *work;
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Data *pth = NULL;
#endif

   if (!func_blocking) return NULL;

   work = _ecore_thread_worker_new();
   if (!work)
     {
        if (func_cancel)
          func_cancel((void *) data, NULL);
        return NULL;
     }

   work->u.short_run.func_blocking = func_blocking;
   work->func_end = func_end;
   work->func_cancel = func_cancel;
   work->cancel = EINA_FALSE;
   work->feedback_run = EINA_FALSE;
   work->kill = EINA_FALSE;
   work->reschedule = EINA_FALSE;
   work->data = data;

#ifdef EFL_HAVE_THREADS
   work->hash = NULL;
   CDI(work->cond);
   LKI(work->mutex);

   LKL(_ecore_pending_job_threads_mutex);
   _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);

   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        LKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *) work;
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* One more thread could be created. */
   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) goto on_error;

   pth->p = _ecore_thread_pipe_get();
   pth->death_job = _ecore_thread_worker_new();
   if (!pth->p || !pth->death_job) goto on_error;

   eina_threads_init();

   if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
      return (Ecore_Thread *) work;

   eina_threads_shutdown();

 on_error:
   if (pth)
     {
        if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
        if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
        free(pth);
     }

   if (_ecore_thread_count == 0)
     {
        LKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
        LKU(_ecore_pending_job_threads_mutex);

        if (work->func_cancel)
          work->func_cancel((void *) work->data, (Ecore_Thread *) work);
        free(work);
        work = NULL;
     }
   return (Ecore_Thread *) work;
#else
   /*
     If we don't have threads, and as we don't want to break apps that rely
     on this facility, we block the caller until the work is done.
    */
   do {
      /* Handle reschedule by forcing it here. That means blocking the app;
       * an idler would be nicer, but that is really too complex for a case
       * where threads should really exist.
       */
      work->reschedule = EINA_FALSE;

      func_blocking((void *)data, (Ecore_Thread *) work);
      if (work->cancel == EINA_FALSE)
        {
           if (func_end) func_end((void *)data, (Ecore_Thread *) work);
        }
      else
        {
           if (func_cancel) func_cancel((void *)data, (Ecore_Thread *) work);
        }

   } while (work->reschedule == EINA_TRUE);

   free(work);

   return NULL;
#endif
}

/**
 * @brief Cancel a running thread.
 * @param thread The thread to cancel.
 * @return Will return EINA_TRUE if the thread has been cancelled,
 *         EINA_FALSE if it is pending.
 *
 * ecore_thread_cancel gives the possibility to cancel a task that is still
 * running. It will return EINA_FALSE if the destruction is delayed, or
 * EINA_TRUE if it is cancelled right after this call.
 *
 * This function works in the main loop and in the thread, but you should not
 * pass the Ecore_Thread variable from the main loop to the worker thread in
 * any structure. You should always use the one passed to the
 * Ecore_Thread_Heavy_Cb.
 *
 * func_end or func_cancel will destroy the handler, so don't use it afterwards.
 * And if ecore_thread_cancel returns EINA_TRUE, you should not use the
 * Ecore_Thread either.
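 *
 * A short sketch of how the return value is typically handled (assuming
 * my_thread was returned by one of the *_run functions):
 * @code
 * if (ecore_thread_cancel(my_thread))
 *   my_thread = NULL; // already destroyed, func_cancel has run
 * // otherwise destruction is delayed and func_cancel will run later
 * @endcode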
 */
EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;

   if (!work)
     return EINA_TRUE;
   if (work->cancel)
     return EINA_FALSE;

   if (work->feedback_run)
     {
        if (work->kill)
          return EINA_TRUE;
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   LKL(_ecore_pending_job_threads_mutex);

   if ((have_main_loop_thread) &&
       (PHE(main_loop_thread, PHS())))
     {
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Delay the destruction */
 on_exit:
   ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
   return EINA_FALSE;
#else
   return EINA_TRUE;
#endif
}

/**
 * @brief Tell whether a thread was canceled or not.
 * @param thread The thread to test.
 * @return EINA_TRUE if the thread is cancelled,
 *         EINA_FALSE if it is not.
 *
 * You can use this function both in the main loop and in the thread.
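 *
 * A typical pattern inside a func_heavy callback (the loop body is
 * illustrative):
 * @code
 * static void
 * _job_heavy(void *data, Ecore_Thread *thread)
 * {
 *    int i;
 *
 *    for (i = 0; i < 1000; i++)
 *      {
 *         if (ecore_thread_check(thread)) return; // cancelled: stop early
 *         // ... process step i ...
 *      }
 * }
 * @endcode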
 */
EAPI Eina_Bool
ecore_thread_check(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;

   if (!worker) return EINA_TRUE;
   return worker->cancel;
}

/**
 * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
 * @param func_heavy The function that should run in another thread.
 * @param func_notify The function that will receive in the main loop the data sent by func_heavy.
 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
 * @param data User context data to pass to all callbacks.
 * @param try_no_queue If you want to run outside of the thread pool.
 * @return A reference to the newly created thread instance, or NULL if it failed.
 *
 * ecore_thread_feedback_run provides a facility for easily managing a heavy task
 * in a parallel thread. You should provide four functions. The first one, func_heavy,
 * will do the heavy work in another thread (so you should not use the
 * EFL in it, except Eina and Eet if you are careful). The second one, func_notify,
 * will receive in the main loop the data sent from the thread function (func_heavy)
 * by ecore_thread_feedback (and so can use all of the EFL). The third one, func_end,
 * will be called in the Ecore main loop when func_heavy is done, so you
 * can use all of the EFL inside this function. The last one, func_cancel, will
 * also be called in the main loop if the thread is cancelled or could not run at all.
 *
 * Be aware that you cannot make any assumption about the completion order of
 * func_end after many calls to ecore_thread_feedback_run, as we start as many
 * threads as the host CPU can handle.
 *
 * If you set try_no_queue, it will try to run outside of the thread pool. This
 * can saturate the CPU, so be careful with that. Of course, if it can't start
 * a new thread, it will try to use one from the pool.
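 *
 * A minimal sketch of the feedback pattern (callback names, my_data and the
 * progress message type are illustrative):
 * @code
 * static void
 * _job_heavy(void *data, Ecore_Thread *thread)
 * {
 *    int step;
 *
 *    for (step = 0; step <= 100; step += 10)
 *      {
 *         int *progress = malloc(sizeof (int));
 *
 *         *progress = step;
 *         // The main loop takes ownership of what is sent here.
 *         ecore_thread_feedback(thread, progress);
 *      }
 * }
 *
 * static void
 * _job_notify(void *data, Ecore_Thread *thread, void *msg)
 * {
 *    int *progress = msg;
 *
 *    printf("%d%% done\n", *progress);
 *    free(progress);
 * }
 *
 * ecore_thread_feedback_run(_job_heavy, _job_notify,
 *                           _job_end, _job_cancel, my_data, EINA_FALSE);
 * @endcode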
 */
EAPI Ecore_Thread *ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
                                             Ecore_Thread_Notify_Cb func_notify,
                                             Ecore_Thread_Cb func_end,
                                             Ecore_Thread_Cb func_cancel,
                                             const void *data,
                                             Eina_Bool try_no_queue)
{

#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *worker;
   Ecore_Pthread_Data *pth = NULL;

   if (!func_heavy) return NULL;

   worker = _ecore_thread_worker_new();
   if (!worker) goto on_error;

   worker->u.feedback_run.func_heavy = func_heavy;
   worker->u.feedback_run.func_notify = func_notify;
   worker->hash = NULL;
   CDI(worker->cond);
   LKI(worker->mutex);
   worker->func_cancel = func_cancel;
   worker->func_end = func_end;
   worker->data = data;
   worker->cancel = EINA_FALSE;
   worker->feedback_run = EINA_TRUE;
   worker->kill = EINA_FALSE;
   worker->reschedule = EINA_FALSE;

   worker->u.feedback_run.send = 0;
   worker->u.feedback_run.received = 0;

   worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
   worker->u.feedback_run.direct_pipe = NULL;
   worker->u.feedback_run.direct_worker = NULL;

   if (try_no_queue)
     {
        PH(t);

        worker->u.feedback_run.direct_pipe = _ecore_thread_pipe_get();
        worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();

        if (PHC(t, _ecore_direct_worker, worker) == 0)
           return (Ecore_Thread *) worker;
     }

   LKL(_ecore_pending_job_threads_mutex);
   _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);

   if (_ecore_thread_count == _ecore_thread_count_max)
     {
        LKU(_ecore_pending_job_threads_mutex);
        return (Ecore_Thread *) worker;
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* One more thread could be created. */
   pth = malloc(sizeof (Ecore_Pthread_Data));
   if (!pth) goto on_error;

   pth->p = _ecore_thread_pipe_get();
   pth->death_job = _ecore_thread_worker_new();
   if (!pth->p || !pth->death_job) goto on_error;

   eina_threads_init();

   if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
      return (Ecore_Thread *) worker;

   eina_threads_shutdown();

 on_error:
   if (pth)
     {
        if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
        if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
        free(pth);
     }

   if (_ecore_thread_count == 0)
     {
        LKL(_ecore_pending_job_threads_mutex);
        _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
                                                               worker);
        LKU(_ecore_pending_job_threads_mutex);

        if (func_cancel) func_cancel((void *) data, NULL);

        if (worker)
          {
             ecore_pipe_del(worker->u.feedback_run.notify);
             free(worker);
             worker = NULL;
          }
     }

   return (Ecore_Thread *) worker;
#else
   Ecore_Pthread_Worker worker;

   (void) try_no_queue;

   /*
     If we don't have threads, and as we don't want to break apps that rely
     on this facility, we block the caller until the work is done.
    */
   worker.u.feedback_run.func_heavy = func_heavy;
   worker.u.feedback_run.func_notify = func_notify;
   worker.u.feedback_run.notify = NULL;
   worker.u.feedback_run.send = 0;
   worker.u.feedback_run.received = 0;
   worker.func_cancel = func_cancel;
   worker.func_end = func_end;
   worker.data = data;
   worker.cancel = EINA_FALSE;
   worker.feedback_run = EINA_TRUE;
   worker.kill = EINA_FALSE;

   do {
      worker.reschedule = EINA_FALSE;

      func_heavy((void *)data, (Ecore_Thread *) &worker);

      if (worker.cancel)
        {
           if (func_cancel) func_cancel((void *)data, (Ecore_Thread *) &worker);
        }
      else
        {
           if (func_end) func_end((void *)data, (Ecore_Thread *) &worker);
        }
   } while (worker.reschedule == EINA_TRUE);

   return NULL;
#endif
}

/**
 * @brief Send data to the main loop from a worker thread.
 * @param thread The current Ecore_Thread context to send data from
 * @param data Data to be transmitted to the main loop
 * @return EINA_TRUE if the data was successfully sent to the main loop,
 *         EINA_FALSE if anything goes wrong.
 *
 * After a successful call, the data should be considered owned
 * by the main loop.
 *
 * You should use this function only in the func_heavy call.
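 *
 * A minimal sketch, from inside a func_heavy callback (the message type is
 * up to you; here it is a heap-allocated int):
 * @code
 * int *msg = malloc(sizeof (int));
 *
 * *msg = 42;
 * if (!ecore_thread_feedback(thread, msg))
 *   free(msg); // not sent: still ours to release
 * @endcode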
 */
EAPI Eina_Bool
ecore_thread_feedback(Ecore_Thread *thread, const void *data)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;

   if (!worker) return EINA_FALSE;
   if (!worker->feedback_run) return EINA_FALSE;

#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   worker->u.feedback_run.send++;
   ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *));

   return EINA_TRUE;
#else
   worker->u.feedback_run.func_notify((void*) worker->data, thread, (void*) data);

   return EINA_TRUE;
#endif
}

/**
 * @brief Plan to recall the heavy function once it exits.
 * @param thread The current Ecore_Thread context to reschedule
 * @return EINA_TRUE if the reschedule was successfully requested,
 *         EINA_FALSE if anything goes wrong.
 *
 * After a successful call, you can still do whatever you want in your thread;
 * the job will only be rescheduled once you exit the heavy loop.
 *
 * You should use this function only in the func_heavy call.
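 *
 * A short sketch of incremental processing (the chunk helper is illustrative):
 * @code
 * static void
 * _job_heavy(void *data, Ecore_Thread *thread)
 * {
 *    // Process one chunk per invocation and ask to be called again
 *    // until nothing is left to do.
 *    if (process_one_chunk(data) > 0)
 *      ecore_thread_reschedule(thread);
 * }
 * @endcode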
 */
EAPI Eina_Bool
ecore_thread_reschedule(Ecore_Thread *thread)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;

   if (!worker) return EINA_FALSE;

#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;
#endif

   worker->reschedule = EINA_TRUE;
   return EINA_TRUE;
}

/**
 * @brief Get the number of active thread jobs
 * @return Number of active threads running jobs
 * This returns the number of threads currently running jobs through the
 * ecore_thread API.
 */
EAPI int
ecore_thread_active_get(void)
{
#ifdef EFL_HAVE_THREADS
   return _ecore_thread_count;
#else
   return 0;
#endif
}

/**
 * @brief Get the number of pending (short) thread jobs
 * @return Number of pending "short" jobs
 * This returns the number of jobs submitted through the ecore_thread_run
 * API call that are still waiting for a thread to pick them up.
 */
EAPI int
ecore_thread_pending_get(void)
{
   int ret;
#ifdef EFL_HAVE_THREADS
   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads);
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

/**
 * @brief Get the number of pending feedback thread jobs
 * @return Number of pending "feedback" jobs
 * This returns the number of jobs submitted through the
 * ecore_thread_feedback_run API call that are still waiting for a thread
 * to pick them up.
 */
EAPI int
ecore_thread_pending_feedback_get(void)
{
   int ret;
#ifdef EFL_HAVE_THREADS
   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads_feedback);
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

/**
 * @brief Get the total number of pending thread jobs
 * @return Number of pending jobs
 * This returns the number of jobs submitted through the ecore_thread_run and
 * ecore_thread_feedback_run API calls combined that are still waiting to run.
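 *
 * A small monitoring sketch combining the counters:
 * @code
 * printf("jobs: %d running, %d queued (%d short, %d feedback)\n",
 *        ecore_thread_active_get(),
 *        ecore_thread_pending_total_get(),
 *        ecore_thread_pending_get(),
 *        ecore_thread_pending_feedback_get());
 * @endcode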
 */
EAPI int
ecore_thread_pending_total_get(void)
{
   int ret;
#ifdef EFL_HAVE_THREADS
   LKL(_ecore_pending_job_threads_mutex);
   ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

/**
 * @brief Get the max number of threads that can run simultaneously
 * @return Max number of threads ecore will run
 * This returns the total number of threads that ecore will attempt to run
 * simultaneously.
 */
EAPI int
ecore_thread_max_get(void)
{
   return _ecore_thread_count_max;
}

/**
 * @brief Set the max number of threads that can run simultaneously
 * @param num The new maximum
 * This sets the maximum number of threads that ecore will try to run
 * simultaneously. This number cannot be < 1 or >= 2x the number of active cpus.
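 *
 * For example, the largest value accepted on the current machine would be:
 * @code
 * ecore_thread_max_set(2 * eina_cpu_count() - 1);
 * @endcode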
 */
EAPI void
ecore_thread_max_set(int num)
{
   if (num < 1) return;
   /* avoid doing something hilarious by blocking dumb users */
   if (num >= (2 * eina_cpu_count())) return;

   _ecore_thread_count_max = num;
}

/**
 * @brief Reset the max number of threads that can run simultaneously
 * This resets the maximum number of threads that ecore will try to run
 * simultaneously to the number of active cpus.
 */
EAPI void
ecore_thread_max_reset(void)
{
   _ecore_thread_count_max = eina_cpu_count();
}

/**
 * @brief Get the number of threads which are available to be used
 * @return The number of available threads
 * This returns the number of thread slots that ecore currently has available.
 * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
 * this should be equal to (num_cpus - (active_running + active_feedback_running))
 */
EAPI int
ecore_thread_available_get(void)
{
   int ret;
#ifdef EFL_HAVE_THREADS
   LKL(_ecore_pending_job_threads_mutex);
   ret = _ecore_thread_count_max - _ecore_thread_count;
   LKU(_ecore_pending_job_threads_mutex);
   return ret;
#else
   return 0;
#endif
}

/**
 * @brief Add data to the thread for subsequent use
 * @param thread The thread context to add to
 * @param key The name string to add the data with
 * @param value The data to add
 * @param cb The callback to free the data with
 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
 * @return EINA_TRUE on success, EINA_FALSE on failure
 * This adds data to the thread context, allowing the thread
 * to retrieve and use it without complicated mutexing. This function can only be called by a
 * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
 * All data added to the thread will be freed with its associated callback (if present)
 * upon thread termination. If no callback is specified, it is expected that the user will free the
 * data, but this is most likely not what you want.
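 *
 * A minimal sketch, run from inside a func_heavy callback (the value here is
 * a plain malloc'd string; any pointer with a matching free callback works):
 * @code
 * char *scratch = strdup("some reusable state");
 *
 * ecore_thread_local_data_add(thread, "scratch", scratch, free, EINA_FALSE);
 * // ... later, from the same thread:
 * scratch = ecore_thread_local_data_find(thread, "scratch");
 * @endcode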
 */
EAPI Eina_Bool
ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
   Ecore_Thread_Data *d;
   Eina_Bool ret;

   if ((!thread) || (!key) || (!value))
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   if (!worker->hash)
     worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);

   if (!worker->hash)
     return EINA_FALSE;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return EINA_FALSE;

   d->data = value;
   d->cb = cb;

   if (direct)
     ret = eina_hash_direct_add(worker->hash, key, d);
   else
     ret = eina_hash_add(worker->hash, key, d);
   CDB(worker->cond);
   return ret;
#else
   return EINA_TRUE;
#endif
}

/**
 * @brief Modify data in the thread, or add it if not found
 * @param thread The thread context
 * @param key The name string to add the data with
 * @param value The data to add
 * @param cb The callback to free the data with
 * @return The old data associated with @p key on success if modified, NULL if added
 * This adds/modifies data in the thread context, adding only if modify fails.
 * This function can only be called by a *_run thread INSIDE the thread.
 * All data added to the thread pool will be freed with its associated callback (if present)
 * upon thread termination. If no callback is specified, it is expected that the user will free the
 * data, but this is most likely not what you want.
 */
EAPI void *
ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
   Ecore_Thread_Data *d, *r;
   void *ret;
   if ((!thread) || (!key) || (!value))
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return NULL;

   if (!worker->hash)
     worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);

   if (!worker->hash)
     return NULL;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return NULL;

   d->data = value;
   d->cb = cb;

   r = eina_hash_set(worker->hash, key, d);
   CDB(worker->cond);
   if (!r) return NULL;
   ret = r->data;
   free(r);
   return ret;
#else
   return NULL;
#endif
}

/**
 * @brief Find data in the thread's data
 * @param thread The thread context
 * @param key The name string the data is associated with
 * @return The value, or NULL on error
 * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
 * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
 * in any case but success.
 */

EAPI void *
ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
   Ecore_Thread_Data *d;

   if ((!thread) || (!key))
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return NULL;

   if (!worker->hash)
     return NULL;

   d = eina_hash_find(worker->hash, key);
   if (!d) return NULL;
   return d->data;
#else
   return NULL;
#endif
}

/**
 * @brief Delete data from the thread's data
 * @param thread The thread context
 * @param key The name string the data is associated with
 * @return EINA_TRUE on success, EINA_FALSE on failure
 * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
 * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
 * in any case but success. Note that this WILL free the data if a callback was specified.
 */
EAPI Eina_Bool
ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
{
   Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
   if ((!thread) || (!key))
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   if (!PHE(worker->self, PHS())) return EINA_FALSE;

   if (!worker->hash)
     return EINA_FALSE;
   /* The hash was created with _ecore_thread_data_free as its free callback,
    * so deleting the entry also frees the associated data. */
   return eina_hash_del_by_key(worker->hash, key);
#else
   return EINA_TRUE;
#endif
}

/**
 * @brief Add data to the global data
 * @param key The name string to add the data with
 * @param value The data to add
 * @param cb The optional callback to free the data with once ecore is shut down
 * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
 * @return EINA_TRUE on success, EINA_FALSE on failure
 * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
 * be called upon ecore_thread shutting down. Note that if you have manually freed data that a callback
 * was specified for, you will most likely encounter a segv later on.
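 *
 * A minimal sketch (the config object and its free function are illustrative):
 * @code
 * ecore_thread_global_data_add("config", cfg, (Eina_Free_Cb)config_free, EINA_FALSE);
 * // ... from any thread:
 * My_Config *current = ecore_thread_global_data_find("config");
 * @endcode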
 */
EAPI Eina_Bool
ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
{
   Eina_Bool ret;
   Ecore_Thread_Data *d;

   if ((!key) || (!value))
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   LRWKWL(_ecore_thread_global_hash_lock);
   if (!_ecore_thread_global_hash)
     _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
   LRWKU(_ecore_thread_global_hash_lock);

   /* bail out before allocating the node so nothing leaks on failure */
   if (!_ecore_thread_global_hash)
     return EINA_FALSE;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return EINA_FALSE;

   d->data = value;
   d->cb = cb;

   LRWKWL(_ecore_thread_global_hash_lock);
   if (direct)
     ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
   else
     ret = eina_hash_add(_ecore_thread_global_hash, key, d);
   LRWKU(_ecore_thread_global_hash_lock);
   if (!ret)
     {
        /* the hash did not take ownership, so release the node ourselves */
        free(d);
        return EINA_FALSE;
     }
   CDB(_ecore_thread_global_hash_cond);
   return ret;
#else
   return EINA_TRUE;
#endif
}
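
/* Sketch (illustrative, made-up names): publishing shared state for all
 * threads.  A string literal outlives the hash, so direct mode is safe
 * here: the key pointer is stored instead of copied. */
#if 0
static Eina_Bool
_example_publish_config(void *config)
{
   /* NULL free callback: the caller keeps ownership of config */
   return ecore_thread_global_data_add("config", config, NULL, EINA_TRUE);
}
#endif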

/**
 * @brief Set data in the global data
 * @param key The name string to set the data with
 * @param value The data to set
 * @param cb The optional callback to free the data with once ecore is shut down
 * @return The previous data associated with @p key, or NULL if there was none or on failure
 * This sets data in the global thread data, replacing whatever was previously associated
 * with @p key and returning that previous data if it existed.  Since NULL is returned both
 * on failure and when no previous data existed, use eina_error_get to tell the two apart.
 * All data added to global can be manually freed, or a callback can be provided with @p cb which will
 * be called when ecore_thread shuts down.  Note that if you manually free data for which a callback
 * was specified, that callback will later run on the already-freed pointer and most likely segv.
 */
EAPI void *
ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
{
   Ecore_Thread_Data *d, *r;
   void *ret;

   if ((!key) || (!value))
     return NULL;
#ifdef EFL_HAVE_THREADS
   LRWKWL(_ecore_thread_global_hash_lock);
   if (!_ecore_thread_global_hash)
     _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
   LRWKU(_ecore_thread_global_hash_lock);

   if (!_ecore_thread_global_hash)
     return NULL;

   if (!(d = malloc(sizeof(Ecore_Thread_Data))))
     return NULL;

   d->data = value;
   d->cb = cb;

   LRWKWL(_ecore_thread_global_hash_lock);
   r = eina_hash_set(_ecore_thread_global_hash, key, d);
   LRWKU(_ecore_thread_global_hash_lock);
   CDB(_ecore_thread_global_hash_cond);

   /* no previous data under this key: nothing to hand back */
   if (!r) return NULL;
   ret = r->data;
   free(r);
   return ret;
#else
   return NULL;
#endif
}
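
/* Sketch (assumed usage): swapping shared state; the previous value comes
 * back to the caller, who becomes responsible for it. */
#if 0
static void
_example_swap_config(void *new_config)
{
   void *old = ecore_thread_global_data_set("config", new_config, NULL);

   free(old); /* only valid if the old value was heap-allocated by us */
}
#endif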

/**
 * @brief Find data in the global data
 * @param key The name string the data is associated with
 * @return The value, or NULL on error
 * This finds data in the global data that was previously added with @ref ecore_thread_global_data_add.
 * This function will return NULL in any case but success.
 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
 * it if you will be doing anything with it.
 */

EAPI void *
ecore_thread_global_data_find(const char *key)
{
   Ecore_Thread_Data *ret;

   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash) return NULL;

   LRWKRL(_ecore_thread_global_hash_lock);
   ret = eina_hash_find(_ecore_thread_global_hash, key);
   LRWKU(_ecore_thread_global_hash_lock);
   /* guard against a missing key instead of dereferencing NULL */
   return ret ? ret->data : NULL;
#else
   return NULL;
#endif
}
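
/* Sketch (made-up names): the lookup itself is guarded by the internal
 * rwlock, but the returned object is shared between threads, so any
 * mutation needs the caller's own locking, as the @note above says. */
#if 0
static void
_example_use_config(void)
{
   void *config = ecore_thread_global_data_find("config");

   if (!config) return;
   /* lock a caller-owned mutex here before touching *config */
}
#endif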

/**
 * @brief Delete data from the global data
 * @param key The name string the data is associated with
 * @return EINA_TRUE on success, EINA_FALSE on failure
 * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add.
 * This function will return EINA_FALSE in any case but success.
 * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
 */
EAPI Eina_Bool
ecore_thread_global_data_del(const char *key)
{
   Eina_Bool ret;

   if (!key)
     return EINA_FALSE;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash)
     return EINA_FALSE;

   LRWKWL(_ecore_thread_global_hash_lock);
   /* the hash's free callback (_ecore_thread_data_free) releases the data,
    * so it must not also be freed manually here: that would double free */
   ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
   LRWKU(_ecore_thread_global_hash_lock);
   return ret;
#else
   return EINA_TRUE;
#endif
}
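
/* Sketch: deleting runs the Eina_Free_Cb given when the data was added,
 * so only remove keys whose data may really be destroyed. */
#if 0
static void
_example_retract_config(void)
{
   if (!ecore_thread_global_data_del("config"))
     return; /* key was never added, or the hash does not exist yet */
}
#endif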

/**
 * @brief Find data in the global data and optionally wait for it if not found
 * @param key The name string the data is associated with
 * @param seconds The amount of time in seconds to wait for the data.  If 0, the call does not
 * block and returns immediately.  If < 0, the call waits indefinitely for the data.
 * @return The value, or NULL on failure
 * This finds data in the global data that was previously added with @ref ecore_thread_global_data_add.
 * This function will return NULL in any case but success.
 * Use @p seconds to specify the amount of time to wait: > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
 * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
 * it if you will be doing anything with it.
 */
EAPI void *
ecore_thread_global_data_wait(const char *key, double seconds)
{
   double tm = 0;
   Ecore_Thread_Data *ret = NULL;

   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash)
     return NULL;
   if (seconds > 0)
     tm = ecore_time_get() + seconds;

   while (1)
     {
        /* absolute wakeup time for the condition wait: the caller's
         * deadline when one was given; for an indefinite wait, poll about
         * once a second instead of passing a time in the past, which would
         * make the timedwait return immediately and busy-loop */
        double wt = (seconds > 0) ? tm : ecore_time_get() + 1.0;
#ifndef _WIN32
        struct timespec t = { 0, 0 };

        t.tv_sec = (long int)wt;
        t.tv_nsec = (long int)((wt - (double)t.tv_sec) * 1000000000);
#else
        struct timeval t = { 0, 0 };

        t.tv_sec = (long int)wt;
        t.tv_usec = (long int)((wt - (double)t.tv_sec) * 1000000);
#endif
        LRWKRL(_ecore_thread_global_hash_lock);
        ret = eina_hash_find(_ecore_thread_global_hash, key);
        LRWKU(_ecore_thread_global_hash_lock);
        if ((ret) || (seconds == 0) || ((seconds > 0) && (tm <= ecore_time_get())))
          break;
        LKL(_ecore_thread_global_hash_mutex);
        CDW(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex, &t);
        LKU(_ecore_thread_global_hash_mutex);
     }
   if (ret) return ret->data;
   return NULL;
#else
   return NULL;
#endif
}
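
/* Sketch (hypothetical producer/consumer, not part of the original file):
 * one worker publishes a result under a key while another blocks on it
 * with a bounded timeout, relying on the broadcast done by the add/set
 * functions to wake it up early. */
#if 0
static void
_example_producer(void *data, Ecore_Thread *thread)
{
   (void) thread;
   /* wakes any ecore_thread_global_data_wait() sleeper via CDB() */
   ecore_thread_global_data_add("result", data, NULL, EINA_FALSE);
}

static void *
_example_consumer(void)
{
   /* wait up to 5 seconds; 0 would poll once, < 0 would wait forever */
   return ecore_thread_global_data_wait("result", 5.0);
}
#endif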

/**
 * @}
 */

/**
 * @}
 */