4dc7381c7ed6cf80e6d2a2172c15338d18008527
[profile/ivi/ecore.git] / src / lib / ecore / ecore_thread.c
1 #ifdef HAVE_CONFIG_H
2 # include <config.h>
3 #endif
4
5 #include <sys/time.h>
6
7 #ifdef HAVE_EVIL
8 # include <Evil.h>
9 #endif
10
11 #include "Ecore.h"
12 #include "ecore_private.h"
13
14 #ifdef EFL_HAVE_THREADS
15
16 # ifdef EFL_HAVE_POSIX_THREADS
17 #  include <pthread.h>
18 #  ifdef __linux__
19 #   include <sched.h>
20 #   include <sys/resource.h>
21 #   include <unistd.h>
22 #   include <sys/syscall.h>
23 #   include <errno.h>
24 #  endif
25
/* Thread-abstraction macros (POSIX flavour).  The rest of this file is
 * written against these short names so the same logic can also build on
 * the Win32 emulation layer below:
 *   PH*   - thread handles (declare/equal/self/create/join/cancel)
 *   CD*   - condition variables
 *   LK*   - mutexes
 *   LRWK* - read/write locks
 * NOTE(review): most of these expansions end with ';', so they must be
 * used as full statements, never inside expressions. */
#  define PH(x)        pthread_t x
#  define PHE(x, y)    pthread_equal(x, y)
#  define PHS()        pthread_self()
#  define PHC(x, f, d) pthread_create(&(x), NULL, (void*) f, d)
#  define PHJ(x, p)    pthread_join(x, (void**)(&(p)))
#  define PHA(x)       pthread_cancel(x)

#  define CD(x)  pthread_cond_t x
#  define CDI(x) pthread_cond_init(&(x), NULL);
#  define CDD(x) pthread_cond_destroy(&(x));
#  define CDB(x) pthread_cond_broadcast(&(x));
#  define CDW(x, y, t) pthread_cond_timedwait(&(x), &(y), t);

#  define LK(x)  pthread_mutex_t x
#  define LKI(x) pthread_mutex_init(&(x), NULL);
#  define LKD(x) pthread_mutex_destroy(&(x));
#  define LKL(x) pthread_mutex_lock(&(x));
#  define LKU(x) pthread_mutex_unlock(&(x));

#  define LRWK(x)   pthread_rwlock_t x
#  define LRWKI(x)  pthread_rwlock_init(&(x), NULL);
#  define LRWKD(x)  pthread_rwlock_destroy(&(x));
#  define LRWKWL(x) pthread_rwlock_wrlock(&(x));
#  define LRWKRL(x) pthread_rwlock_rdlock(&(x));
#  define LRWKU(x)  pthread_rwlock_unlock(&(x));
51
52 # else /* EFL_HAVE_WIN32_THREADS */
53
54 #  define WIN32_LEAN_AND_MEAN
55 #  include <windows.h>
56 #  undef WIN32_LEAN_AND_MEAN
57
/* Win32 thread-handle wrapper: keeps the HANDLE together with the
 * argument passed at creation time so the join emulation can report it
 * back as the thread's pseudo "return value". */
typedef struct
{
  HANDLE thread;
  void *val;
} win32_thread;
63
64 #  define PH(x)        win32_thread *x
65 #  define PHE(x, y)    ((x) == (y))
66 #  define PHS()        (HANDLE)GetCurrentThreadId()
67
68 int _ecore_thread_win32_create(win32_thread **x, LPTHREAD_START_ROUTINE f, void *d)
69 {
70   win32_thread *t;
71   t = (win32_thread *)calloc(1, sizeof(win32_thread));
72   if (!t)
73     return -1;
74
75   (t)->thread = CreateThread(NULL, 0, f, d, 0, NULL);
76   if (!t->thread)
77     {
78       free(t);
79       return -1;
80     }
81   t->val = d;
82   *x = t;
83
84   return 0;
85 }
86 #  define PHC(x, f, d) _ecore_thread_win32_create(&(x), (LPTHREAD_START_ROUTINE)f, d)
87
/* pthread_join() emulation: wait for the thread to finish (unless we
 * are the thread being joined) and hand back the value stored at
 * creation time.  Always reports success.
 * NOTE(review): the win32_thread wrapper itself is never freed here,
 * so every create/join pair leaks one small allocation — confirm.
 * NOTE(review): PHE(x, PHS()) compares a win32_thread* against the
 * thread-id HANDLE returned by PHS(); these can never compare equal —
 * verify the self-join guard actually works. */
int _ecore_thread_win32_join(win32_thread *x, void **res)
{
  if (!PHE(x, PHS()))
    {
      WaitForSingleObject(x->thread, INFINITE);
      CloseHandle(x->thread);
    }
  if (res) *res = x->val;

  return 0;
}
99
#  define PHJ(x, p) _ecore_thread_win32_join(x, (void**)(&(p)))
/* NOTE(review): TerminateThread() is a hard kill — no cleanup runs. */
#  define PHA(x) TerminateThread(x->thread, 0)

/* Mutex emulation on top of unnamed Win32 mutex objects. */
#  define LK(x)  HANDLE x
#  define LKI(x) x = CreateMutex(NULL, FALSE, NULL)
#  define LKD(x) CloseHandle(x)
#  define LKL(x) WaitForSingleObject(x, INFINITE)
#  define LKU(x) ReleaseMutex(x)
108
/* Condition-variable emulation: a counting semaphore plus the number
 * of threads currently blocked on it, guarded by a private lock so the
 * broadcast can release exactly that many semaphore slots. */
typedef struct
{
  HANDLE semaphore;
  LONG threads_count;
  CRITICAL_SECTION threads_count_lock;
} win32_cond;

#  define CD(x)  win32_cond *x
117
/* CDI: allocate and initialise a condition; on any failure 'x' ends up
 * NULL so callers can test the handle afterwards. */
#  define CDI(x)                                                     \
   do {                                                              \
     x = (win32_cond *)calloc(1, sizeof(win32_cond));                \
     if (x)                                                          \
        {                                                            \
          x->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL); \
          if (x->semaphore)                                          \
            InitializeCriticalSection(&x->threads_count_lock);     \
          else                                                       \
            {                                                        \
              free(x);                                               \
              x = NULL;                                              \
            }                                                        \
        }                                                            \
   } while (0)

/* CDD: destroy a condition.
 * NOTE(review): threads_count_lock is never DeleteCriticalSection()d
 * here although CDI initialises it — confirm that cleanup was meant. */
#  define CDD(x)               \
  do {                         \
    CloseHandle(x->semaphore); \
    free(x);                   \
    x = NULL;                  \
   } while (0)

/* CDB: broadcast — release one semaphore slot per blocked waiter. */
#  define CDB(x)                                            \
do {                                                        \
  EnterCriticalSection(&x->threads_count_lock);             \
  if (x->threads_count > 0)                                 \
    ReleaseSemaphore(x->semaphore, x->threads_count, NULL); \
  LeaveCriticalSection (&x->threads_count_lock);            \
 } while (0)
148
/* pthread_cond_timedwait() emulation: register as a waiter while
 * holding the caller's mutex, drop the mutex, then block on the
 * semaphore for the requested timeout.  Returns 0 on wakeup, -1 on
 * timeout/error.
 * NOTE(review): threads_count is incremented but never decremented
 * once the wait returns, so CDB() will over-release over time.
 * NOTE(review): LKL/LKU are applied to the HANDLE* parameter itself,
 * but the LK* macros operate on a HANDLE value — verify callers pass
 * what is expected here. */
int _ecore_thread_win32_cond_timedwait(win32_cond *c, HANDLE *external_mutex, struct timeval *t)
{
  DWORD res;
  /* Convert the timeval into the millisecond timeout Win32 expects. */
  DWORD val = t->tv_sec * 1000 + (t->tv_usec / 1000);
  LKL(external_mutex);
  EnterCriticalSection (&c->threads_count_lock);
  c->threads_count++;
  LeaveCriticalSection (&c->threads_count_lock);
  LKU(external_mutex);
  res = WaitForSingleObject(c->semaphore, val);
  if (res == WAIT_OBJECT_0)
    return 0;
  else
    return -1;
}
#  define CDW(x, y, t) _ecore_thread_win32_cond_timedwait(x, y, t)
165
/* Read/write-lock emulation state:
 *   readers / writers             - current holders ('writers' is a flag)
 *   readers_count / writers_count - threads currently waiting
 * All fields are guarded by 'mutex'; the two conditions wake each side
 * of the lock separately. */
typedef struct
{
  LONG readers_count;
  LONG writers_count;
  int readers;
  int writers; 
  LK(mutex);
  CD(cond_read);
  CD(cond_write);
} win32_rwl;

#  define LRWK(x)   win32_rwl *x
/* LRWKI: allocate and initialise an rwlock.  Each failure path unwinds
 * whatever was already initialised and leaves x == NULL so callers can
 * detect the error from the handle alone. */
#  define LRWKI(x)                                 \
  do {                                             \
    x = (win32_rwl *)calloc(1, sizeof(win32_rwl)); \
    if (x)                                         \
      {                                            \
        LKI(x->mutex);                             \
        if (x->mutex)                              \
          {                                        \
            CDI(x->cond_read);                     \
            if (x->cond_read)                      \
              {                                    \
                CDI(x->cond_write);                \
                if (!x->cond_write)                \
                  {                                \
                    CDD(x->cond_read);             \
                    LKD(x->mutex);                 \
                    free(x);                       \
                    x = NULL;                      \
                  }                                \
              }                                    \
            else                                   \
              {                                    \
                LKD(x->mutex);                     \
                free(x);                           \
                x = NULL;                          \
              }                                    \
          }                                        \
        else                                       \
          {                                        \
            free(x);                               \
            x = NULL;                              \
          }                                        \
      }                                            \
  } while (0)
212
/* LRWKD: tear an rwlock down — release the guard mutex, destroy it,
 * destroy both conditions, then free the struct.
 * NOTE(review): the initial LKU() releases a mutex this thread may not
 * own; confirm callers are required to hold it on entry. */
#  define LRWKD(x)                   \
  do {                               \
    LKU(x->mutex);                   \
    LKD(x->mutex);                   \
    CDD(x->cond_write);              \
    CDD(x->cond_read);               \
    free(x);                         \
  } while (0)
/* LRWKWL: acquire the write lock.  Waits until there is neither an
 * active writer nor active readers, then marks the lock writer-owned.
 * Fixes vs. the previous revision:
 *  - the guard mutex is now LOCKED on entry (it was LKU()'d, so the
 *    macro unlocked twice and never actually locked);
 *  - 'res' is initialised, so the uncontended path no longer reads an
 *    indeterminate value;
 *  - the waiter counter bumped matches the semaphore actually waited
 *    on (cond_write, not cond_read);
 *  - the mutex is dropped around the INFINITE wait so LRWKU() can run
 *    and wake us (waiting while holding it would deadlock);
 *  - success sets the 'writers' ownership flag that LRWKU() checks,
 *    instead of clobbering the waiter count. */
#  define LRWKWL(x)                                                       \
  do {                                                                    \
    DWORD res = WAIT_OBJECT_0;                                            \
    LKL(x->mutex);                                                        \
    if (x->writers || x->readers > 0)                                     \
      {                                                                   \
        x->writers_count++;                                               \
        while (x->writers || x->readers > 0)                              \
          {                                                               \
            EnterCriticalSection(&x->cond_write->threads_count_lock);     \
            x->cond_write->threads_count++;                               \
            LeaveCriticalSection(&x->cond_write->threads_count_lock);     \
            LKU(x->mutex);                                                \
            res = WaitForSingleObject(x->cond_write->semaphore, INFINITE); \
            LKL(x->mutex);                                                \
            if (res != WAIT_OBJECT_0) break;                              \
          }                                                               \
        x->writers_count--;                                               \
      }                                                                   \
    if (res == WAIT_OBJECT_0) x->writers = 1;                             \
    LKU(x->mutex);                                                        \
  } while (0)
/* LRWKRL: acquire the lock for reading.  Waits while a writer holds
 * the lock, then bumps the active reader count.
 * Fixes vs. the previous revision:
 *  - 'res' is initialised, so the uncontended path no longer reads an
 *    indeterminate value;
 *  - readers now wait on cond_read — the condition LRWKU() actually
 *    broadcasts to wake readers — and register in cond_read's waiter
 *    count under cond_read's own lock (it mixed cond_write/cond_read);
 *  - the guard mutex is released around the INFINITE wait so the
 *    unlocking writer can acquire it (waiting while holding it would
 *    deadlock). */
#  define LRWKRL(x)                                                       \
  do {                                                                    \
    DWORD res = WAIT_OBJECT_0;                                            \
    LKL(x->mutex);                                                        \
    if (x->writers)                                                       \
      {                                                                   \
        x->readers_count++;                                               \
        while (x->writers)                                                \
          {                                                               \
            EnterCriticalSection(&x->cond_read->threads_count_lock);      \
            x->cond_read->threads_count++;                                \
            LeaveCriticalSection(&x->cond_read->threads_count_lock);      \
            LKU(x->mutex);                                                \
            res = WaitForSingleObject(x->cond_read->semaphore, INFINITE); \
            LKL(x->mutex);                                                \
            if (res != WAIT_OBJECT_0) break;                              \
          }                                                               \
        x->readers_count--;                                               \
      }                                                                   \
    if (res == WAIT_OBJECT_0)                                             \
      x->readers++;                                                       \
    LKU(x->mutex);                                                        \
  } while (0)
/* LRWKU: release the lock.  If held as a writer: clear the ownership
 * flag and wake waiting readers, or one waiting writer if there are no
 * readers.  If held as a reader: drop the active reader count and wake
 * one waiting writer once the last reader leaves.
 * NOTE(review): the readers_count == 1 special case releases a single
 * semaphore slot instead of broadcasting — confirm that is intended
 * rather than an optimisation gone wrong. */
#  define LRWKU(x)                                                     \
  do {                                                                 \
    LKL(x->mutex);                                                     \
    if (x->writers)                                                    \
      {                                                                \
        x->writers = 0;                                                \
        if (x->readers_count == 1)                                     \
          {                                                            \
            EnterCriticalSection(&x->cond_read->threads_count_lock);   \
            if (x->cond_read->threads_count > 0)                       \
              ReleaseSemaphore(x->cond_read->semaphore, 1, 0);         \
            LeaveCriticalSection(&x->cond_read->threads_count_lock);   \
          }                                                            \
        else if (x->readers_count > 0)                                 \
          CDB(x->cond_read);                                           \
        else if (x->writers_count > 0)                                 \
          {                                                            \
            EnterCriticalSection (&x->cond_write->threads_count_lock); \
            if (x->cond_write->threads_count > 0)                      \
              ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
            LeaveCriticalSection (&x->cond_write->threads_count_lock); \
          }                                                            \
      }                                                                \
    else if (x->readers > 0)                                           \
      {                                                                \
        x->readers--;                                                  \
        if (x->readers == 0 && x->writers_count > 0)                   \
          {                                                            \
            EnterCriticalSection (&x->cond_write->threads_count_lock); \
            if (x->cond_write->threads_count > 0)                      \
              ReleaseSemaphore(x->cond_write->semaphore, 1, 0);        \
            LeaveCriticalSection (&x->cond_write->threads_count_lock); \
          }                                                            \
      }                                                                \
    LKU(x->mutex);                                                     \
  } while (0)
298
299 # endif
300
301 #endif
302
303 typedef struct _Ecore_Pthread_Worker Ecore_Pthread_Worker;
304 typedef struct _Ecore_Pthread Ecore_Pthread;
305 typedef struct _Ecore_Thread_Data  Ecore_Thread_Data;
306
/* One per-thread user datum plus its optional destructor; released via
 * _ecore_thread_data_free(). */
struct _Ecore_Thread_Data
{
   void *data;        /* user-owned payload */
   Eina_Free_Cb cb;   /* optional destructor, called on the payload */
};
312
/* One scheduled unit of work.  The union distinguishes plain blocking
 * jobs (short_run) from feedback jobs that stream results back to the
 * main loop through a notify pipe (feedback_run); 'feedback_run' below
 * selects the active member. */
struct _Ecore_Pthread_Worker
{
   union {
      struct {
         Ecore_Thread_Cb func_blocking;   /* runs in a worker thread */
      } short_run;
      struct {
         Ecore_Thread_Cb func_heavy;         /* runs in a worker thread */
         Ecore_Thread_Notify_Cb func_notify; /* runs in the main loop per notification */
         Ecore_Pipe *notify;                 /* worker -> main-loop message pipe */

         Ecore_Pipe *direct_pipe;            /* completion pipe for dedicated threads */
         Ecore_Pthread_Worker *direct_worker; /* pre-allocated death job for dedicated threads */

         int send;       /* notifications written by the worker */
         int received;   /* notifications consumed by the main loop */
      } feedback_run;
   } u;

   Ecore_Thread_Cb func_cancel; /* main loop, when the job is cancelled */
   Ecore_Thread_Cb func_end;    /* main loop, on normal completion */
#ifdef EFL_HAVE_THREADS
   PH(self);        /* thread currently executing this job */
   Eina_Hash *hash; /* per-thread local data, freed in _ecore_thread_kill() */
   CD(cond);
   LK(mutex);
#endif

   const void *data; /* opaque user context handed to every callback */

   Eina_Bool cancel : 1;       /* cancellation requested */
   Eina_Bool feedback_run : 1; /* which union member is active */
   Eina_Bool kill : 1;         /* death deferred until notifies drain */
   Eina_Bool reschedule : 1;   /* requeue instead of finishing */
};
348
349 #ifdef EFL_HAVE_THREADS
typedef struct _Ecore_Pthread_Data Ecore_Pthread_Data;

/* Book-keeping for one worker thread: its completion pipe, the job
 * pre-allocated to announce the thread's own death, and its handle. */
struct _Ecore_Pthread_Data
{
   Ecore_Pthread_Worker *death_job;
   Ecore_Pipe *p;
   void *data;
   PH(thread);
};
359 #endif
360
361 static void _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte);
362
363 static int _ecore_thread_count_max = 0;
364 static int ECORE_THREAD_PIPE_DEL = 0;
365 static Eina_Array *_ecore_thread_pipe = NULL;
366
367 static Ecore_Pipe*
368 _ecore_thread_pipe_get(void)
369 {
370    if (eina_array_count_get(_ecore_thread_pipe) > 0)
371      return eina_array_pop(_ecore_thread_pipe);
372
373    return ecore_pipe_add(_ecore_thread_handler, NULL);
374 }
375
376 #ifdef EFL_HAVE_THREADS
377 static int _ecore_thread_count = 0;
378
379 static Ecore_Event_Handler *del_handler = NULL;
380 static Eina_List *_ecore_active_job_threads = NULL;
381 static Eina_List *_ecore_pending_job_threads = NULL;
382 static Eina_List *_ecore_pending_job_threads_feedback = NULL;
383 static LK(_ecore_pending_job_threads_mutex);
384
385 static Eina_Hash *_ecore_thread_global_hash = NULL;
386 static LRWK(_ecore_thread_global_hash_lock);
387 static LK(_ecore_thread_global_hash_mutex);
388 static CD(_ecore_thread_global_hash_cond);
389
390 static PH(main_loop_thread);
391 static Eina_Bool have_main_loop_thread = 0;
392
393 static Eina_Trash *_ecore_thread_worker_trash = NULL;
394 static int _ecore_thread_worker_count = 0;
395
396 static void
397 _ecore_thread_worker_free(Ecore_Pthread_Worker *worker)
398 {
399    if (_ecore_thread_worker_count > (_ecore_thread_count_max + 1) * 16)
400      {
401         free(worker);
402         return ;
403      }
404
405    eina_trash_push(&_ecore_thread_worker_trash, worker);
406 }
407
408 static void
409 _ecore_thread_data_free(void *data)
410 {
411    Ecore_Thread_Data *d = data;
412
413    if (d->cb) d->cb(d->data);
414    free(d);
415 }
416
417 static void
418 _ecore_thread_pipe_free(void *data __UNUSED__, void *event)
419 {
420    Ecore_Pipe *p = event;
421
422    eina_array_push(_ecore_thread_pipe, p);
423    eina_threads_shutdown();
424 }
425
/* Handler for the ECORE_THREAD_PIPE_DEL event; it exists only so the
 * event's end callback (_ecore_thread_pipe_free) runs after the event
 * loop has left the pipe's handler. */
static Eina_Bool
_ecore_thread_pipe_del(void *data __UNUSED__, int type __UNUSED__, void *event __UNUSED__)
{
   /* This is a hack to delay pipe destruction until we are out of its internal loop. */
   return ECORE_CALLBACK_CANCEL;
}
432
/* Main-loop side of a worker thread's death: join it, drop it from the
 * active list, queue its pipe for delayed destruction (we may still be
 * inside that pipe's handler), and free the book-keeping. */
static void
_ecore_thread_end(Ecore_Pthread_Data *pth, __UNUSED__ Ecore_Thread *work)
{
   Ecore_Pipe *p;

   if (PHJ(pth->thread, p) != 0)
     return ;

   _ecore_active_job_threads = eina_list_remove(_ecore_active_job_threads, pth);

   ecore_event_add(ECORE_THREAD_PIPE_DEL, pth->p, _ecore_thread_pipe_free, NULL);
   free(pth);
}
446
/* Final main-loop-side destruction of a job: fire exactly one of the
 * end/cancel callbacks, release feedback resources, then the worker
 * itself. */
static void
_ecore_thread_kill(Ecore_Pthread_Worker *work)
{
   if (work->cancel)
     {
        if (work->func_cancel)
          work->func_cancel((void *) work->data, (Ecore_Thread *) work);
     }
   else
     {
        if (work->func_end)
          work->func_end((void *) work->data, (Ecore_Thread *) work);
     }

   if (work->feedback_run)
     {
        ecore_pipe_del(work->u.feedback_run.notify);

        if (work->u.feedback_run.direct_pipe)
          eina_array_push(_ecore_thread_pipe, work->u.feedback_run.direct_pipe);
        if (work->u.feedback_run.direct_worker)
          _ecore_thread_worker_free(work->u.feedback_run.direct_worker);
     }
   CDD(work->cond);
   LKD(work->mutex);
   if (work->hash)
     eina_hash_free(work->hash);
   /* NOTE(review): freed with free() rather than
    * _ecore_thread_worker_free() — confirm bypassing the cache here
    * is deliberate. */
   free(work);
}
476
477 static void
478 _ecore_thread_handler(void *data __UNUSED__, void *buffer, unsigned int nbyte)
479 {
480    Ecore_Pthread_Worker *work;
481
482    if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;
483
484    work = *(Ecore_Pthread_Worker **)buffer;
485
486    if (work->feedback_run)
487      {
488         if (work->u.feedback_run.send != work->u.feedback_run.received)
489           {
490              work->kill = EINA_TRUE;
491              return ;
492           }
493      }
494
495    _ecore_thread_kill(work);
496 }
497
/* Main-loop handler for a feedback job's notify pipe: dispatch one
 * user notification, then complete a deferred kill once every sent
 * notification has been consumed. */
static void
_ecore_notify_handler(void *data, void *buffer, unsigned int nbyte)
{
   Ecore_Pthread_Worker *work = data;
   void *user_data;

   if (nbyte != sizeof (Ecore_Pthread_Worker *)) return ;

   user_data = *(void **)buffer;
   work->u.feedback_run.received++;

   if (work->u.feedback_run.func_notify)
     work->u.feedback_run.func_notify((void *) work->data, (Ecore_Thread *) work, user_data);

   /* Force reading all notify event before killing the thread */
   if (work->kill && work->u.feedback_run.send == work->u.feedback_run.received)
     {
        _ecore_thread_kill(work);
     }
}
518
/* Worker-thread loop draining the plain (short_run) job queue.  Each
 * finished job's address is written to end_pipe so the main loop runs
 * its completion callback; rescheduled jobs are re-queued instead. */
static void
_ecore_short_job(Ecore_Pipe *end_pipe)
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads)
     {
        LKL(_ecore_pending_job_threads_mutex);

        /* Re-check under the lock: another worker may have emptied the
         * queue between the unlocked test above and here. */
        if (!_ecore_pending_job_threads)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        /* Pop the head of the queue. */
        work = eina_list_data_get(_ecore_pending_job_threads);
        _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads,
                                                           _ecore_pending_job_threads);

        LKU(_ecore_pending_job_threads_mutex);

        if (!work->cancel)
          work->u.short_run.func_blocking((void *) work->data, (Ecore_Thread*) work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}
557
/* Worker-thread loop draining the feedback job queue.  Records the
 * executing thread in work->self, runs the heavy callback, and reports
 * completions through end_pipe; rescheduled jobs are re-queued. */
static void
_ecore_feedback_job(Ecore_Pipe *end_pipe, PH(thread))
{
   Ecore_Pthread_Worker *work;

   while (_ecore_pending_job_threads_feedback)
     {
        LKL(_ecore_pending_job_threads_mutex);

        /* Re-check under the lock: another worker may have raced us. */
        if (!_ecore_pending_job_threads_feedback)
          {
             LKU(_ecore_pending_job_threads_mutex);
             break;
          }

        /* Pop the head of the queue. */
        work = eina_list_data_get(_ecore_pending_job_threads_feedback);
        _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback,
                                                                    _ecore_pending_job_threads_feedback);

        LKU(_ecore_pending_job_threads_mutex);

        work->self = thread;
        if (!work->cancel)
          work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);

        if (work->reschedule)
          {
             work->reschedule = EINA_FALSE;

             LKL(_ecore_pending_job_threads_mutex);
             _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, work);
             LKU(_ecore_pending_job_threads_mutex);
          }
        else
          {
             ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
          }
     }
}
597
598 static void *
599 _ecore_direct_worker(Ecore_Pthread_Worker *work)
600 {
601    Ecore_Pthread_Data *pth;
602
603 #ifdef EFL_POSIX_THREADS
604    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
605    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
606 #endif
607
608    eina_sched_prio_drop();
609
610    pth = malloc(sizeof (Ecore_Pthread_Data));
611    if (!pth) return NULL;
612
613    pth->p = work->u.feedback_run.direct_pipe;
614    if (!pth->p)
615      {
616         free(pth);
617         return NULL;
618      }
619    pth->thread = PHS();
620
621    work->self = pth->thread;
622    work->u.feedback_run.func_heavy((void *) work->data, (Ecore_Thread *) work);
623
624    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
625
626    work = work->u.feedback_run.direct_worker;
627    if (!work)
628      {
629         free(pth);
630         return NULL;
631      }
632
633    work->data = pth;
634    work->u.short_run.func_blocking = NULL;
635    work->func_end = (void *) _ecore_thread_end;
636    work->func_cancel = NULL;
637    work->cancel = EINA_FALSE;
638    work->feedback_run = EINA_FALSE;
639    work->kill = EINA_FALSE;
640    work->hash = NULL;
641    CDI(work->cond);
642    LKI(work->mutex);
643
644    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
645
646    return pth->p;
647 }
648
649 static void *
650 _ecore_thread_worker(Ecore_Pthread_Data *pth)
651 {
652    Ecore_Pthread_Worker *work;
653
654 #ifdef EFL_POSIX_THREADS
655    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
656    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
657 #endif
658
659    eina_sched_prio_drop();
660
661    LKL(_ecore_pending_job_threads_mutex);
662    _ecore_thread_count++;
663    LKU(_ecore_pending_job_threads_mutex);
664
665  restart:
666    if (_ecore_pending_job_threads) _ecore_short_job(pth->p);
667    if (_ecore_pending_job_threads_feedback) _ecore_feedback_job(pth->p, pth->thread);
668
669    /* FIXME: Check if there is feedback running task todo, and switch to feedback run handler. */
670
671    LKL(_ecore_pending_job_threads_mutex);
672    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
673      {
674         LKU(_ecore_pending_job_threads_mutex);
675         goto restart;
676      }
677    LKU(_ecore_pending_job_threads_mutex);
678
679    /* Sleep a little to prevent premature death */
680    usleep(200);
681
682    LKL(_ecore_pending_job_threads_mutex);
683    if (_ecore_pending_job_threads || _ecore_pending_job_threads_feedback)
684      {
685         LKU(_ecore_pending_job_threads_mutex);
686         goto restart;
687      }
688    _ecore_thread_count--;
689    LKU(_ecore_pending_job_threads_mutex);
690
691    work = pth->death_job;
692    if (!work) return NULL;
693
694    work->data = pth;
695    work->u.short_run.func_blocking = NULL;
696    work->func_end = (void *) _ecore_thread_end;
697    work->func_cancel = NULL;
698    work->cancel = EINA_FALSE;
699    work->feedback_run = EINA_FALSE;
700    work->kill = EINA_FALSE;
701    work->hash = NULL;
702    CDI(work->cond);
703    LKI(work->mutex);
704
705    ecore_pipe_write(pth->p, &work, sizeof (Ecore_Pthread_Worker *));
706
707    return pth->p;
708 }
709
710 #endif
711
712 static Ecore_Pthread_Worker *
713 _ecore_thread_worker_new(void)
714 {
715    Ecore_Pthread_Worker *result;
716
717 #ifdef EFL_HAVE_THREADS
718    result = eina_trash_pop(&_ecore_thread_worker_trash);
719
720    if (!result) result = malloc(sizeof (Ecore_Pthread_Worker));
721    else _ecore_thread_worker_count--;
722
723    return result;
724 #else
725    return malloc(sizeof (Ecore_Pthread_Worker));
726 #endif
727 }
728
/* Module initialisation: size the worker pool from the CPU count,
 * register the delayed-pipe-destruction event type, and set up the
 * shared locks and globals. */
void
_ecore_thread_init(void)
{
   _ecore_thread_count_max = eina_cpu_count();
   if (_ecore_thread_count_max <= 0)
     _ecore_thread_count_max = 1;  /* always allow at least one worker */

   ECORE_THREAD_PIPE_DEL = ecore_event_type_new();
   _ecore_thread_pipe = eina_array_new(8);

#ifdef EFL_HAVE_THREADS
   del_handler = ecore_event_handler_add(ECORE_THREAD_PIPE_DEL, _ecore_thread_pipe_del, NULL);
   main_loop_thread = PHS();
   have_main_loop_thread = 1;

   LKI(_ecore_pending_job_threads_mutex);
   LRWKI(_ecore_thread_global_hash_lock);
   LKI(_ecore_thread_global_hash_mutex);
   CDI(_ecore_thread_global_hash_cond);
#endif
}
750
751 void
752 _ecore_thread_shutdown(void)
753 {
754    /* FIXME: If function are still running in the background, should we kill them ? */
755    Ecore_Pipe *p;
756    Eina_Array_Iterator it;
757    unsigned int i;
758
759 #ifdef EFL_HAVE_THREADS
760    Ecore_Pthread_Worker *work;
761    Ecore_Pthread_Data *pth;
762
763    LKL(_ecore_pending_job_threads_mutex);
764
765    EINA_LIST_FREE(_ecore_pending_job_threads, work)
766      {
767         if (work->func_cancel)
768           work->func_cancel((void *)work->data, (Ecore_Thread *) work);
769         free(work);
770      }
771
772    EINA_LIST_FREE(_ecore_pending_job_threads_feedback, work)
773      {
774         if (work->func_cancel)
775           work->func_cancel((void *)work->data, (Ecore_Thread *) work);
776         free(work);
777      }
778
779    LKU(_ecore_pending_job_threads_mutex);
780
781    /* Improve emergency shutdown */
782    EINA_LIST_FREE(_ecore_active_job_threads, pth)
783      {
784         Ecore_Pipe *p;
785
786         PHA(pth->thread);
787         PHJ(pth->thread, p);
788
789         ecore_pipe_del(pth->p);
790      }
791    if (_ecore_thread_global_hash)
792      eina_hash_free(_ecore_thread_global_hash);
793    ecore_event_handler_del(del_handler);
794    have_main_loop_thread = 0;
795    del_handler = NULL;
796
797    LKD(_ecore_pending_job_threads_mutex);
798    LRWKD(_ecore_thread_global_hash_lock);
799    LKD(_ecore_thread_global_hash_mutex);
800    CDD(_ecore_thread_global_hash_cond);
801 #endif
802
803    EINA_ARRAY_ITER_NEXT(_ecore_thread_pipe, i, p, it)
804      ecore_pipe_del(p);
805
806    eina_array_free(_ecore_thread_pipe);
807    _ecore_thread_pipe = NULL;
808 }
809
810 /**
811  * @addtogroup Ecore_Group Ecore - Main Loop and Job Functions.
812  *
813  * @{
814  */
815
816 /**
817  * @addtogroup Ecore_Thread_Group Ecore Thread functions
818  *
819  * These functions allow for ecore-managed threads which integrate with ecore's main loop.
820  *
821  * @{
822  */
823
/**
 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
 * @param func_blocking The function that should run in another thread.
 * @param func_end The function that will be called in the main loop if the thread terminates correctly.
 * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
 * @param data User context data to pass to all callbacks.
 * @return A reference to the newly created thread instance, or NULL if it failed.
 *
 * ecore_thread_run provides a facility for easily managing a blocking task in a
 * parallel thread. You should provide three functions. The first one, func_blocking,
 * will do the blocking work in another thread (so you should not use the
 * EFL in it, except Eina if you are careful). The second one, func_end,
 * will be called in the Ecore main loop when func_blocking is done, so you
 * can use all of the EFL inside this function. The last one, func_cancel, will
 * be called in the main loop if the thread is cancelled or could not run at all.
 *
 * Be aware that you cannot make any assumption about the order of the func_end
 * calls after many calls to ecore_thread_run, as we start as many threads as the
 * host CPU can handle.
 */
844 EAPI Ecore_Thread *
845 ecore_thread_run(Ecore_Thread_Cb func_blocking,
846                  Ecore_Thread_Cb func_end,
847                  Ecore_Thread_Cb func_cancel,
848                  const void *data)
849 {
850    Ecore_Pthread_Worker *work;
851 #ifdef EFL_HAVE_THREADS
852    Ecore_Pthread_Data *pth = NULL;
853 #endif
854
855    if (!func_blocking) return NULL;
856
857    work = _ecore_thread_worker_new();
858    if (!work)
859      {
860         if (func_cancel)
861           func_cancel((void *) data, NULL);
862         return NULL;
863      }
864
865    work->u.short_run.func_blocking = func_blocking;
866    work->func_end = func_end;
867    work->func_cancel = func_cancel;
868    work->cancel = EINA_FALSE;
869    work->feedback_run = EINA_FALSE;
870    work->kill = EINA_FALSE;
871    work->reschedule = EINA_FALSE;
872    work->data = data;
873
874 #ifdef EFL_HAVE_THREADS
875    work->hash = NULL;
876    CDI(work->cond);
877    LKI(work->mutex);
878
879    LKL(_ecore_pending_job_threads_mutex);
880    _ecore_pending_job_threads = eina_list_append(_ecore_pending_job_threads, work);
881
882    if (_ecore_thread_count == _ecore_thread_count_max)
883      {
884         LKU(_ecore_pending_job_threads_mutex);
885         return (Ecore_Thread *) work;
886      }
887
888    LKU(_ecore_pending_job_threads_mutex);
889
890    /* One more thread could be created. */
891    pth = malloc(sizeof (Ecore_Pthread_Data));
892    if (!pth) goto on_error;
893
894    pth->p = _ecore_thread_pipe_get();
895    pth->death_job = _ecore_thread_worker_new();
896    if (!pth->p || !pth->death_job) goto on_error;
897
898    eina_threads_init();
899
900    if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
901       return (Ecore_Thread *) work;
902
903    eina_threads_shutdown();
904
905  on_error:
906    if (pth)
907      {
908         if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
909         if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
910         free(pth);
911      }
912
913    if (_ecore_thread_count == 0)
914      {
915         LKL(_ecore_pending_job_threads_mutex);
916         _ecore_pending_job_threads = eina_list_remove(_ecore_pending_job_threads, work);
917         LKU(_ecore_pending_job_threads_mutex);
918
919         if (work->func_cancel)
920           work->func_cancel((void *) work->data, (Ecore_Thread *) work);
921         free(work);
922         work = NULL;
923      }
924    return (Ecore_Thread *) work;
925 #else
926    /*
927      If no thread and as we don't want to break app that rely on this
928      facility, we will lock the interface until we are done.
929     */
930    do {
931       /* Handle reschedule by forcing it here. That would mean locking the app,
932        * would be better with an idler, but really to complex for a case where
933        * thread should really exist.
934        */
935       work->reschedule = EINA_FALSE;
936
937       func_blocking((void *)data, (Ecore_Thread *) work);
938       if (work->cancel == EINA_FALSE) func_end((void *)data, (Ecore_Thread *) work);
939       else func_end((void *)data, (Ecore_Thread *) work);
940
941    } while (work->reschedule == EINA_TRUE);
942
943    free(work);
944
945    return NULL;
946 #endif
947 }
948
949 /**
950  * @brief Cancel a running thread.
951  * @param thread The thread to cancel.
952  * @return Will return EINA_TRUE if the thread has been cancelled,
953  *         EINA_FALSE if it is pending.
954  *
955  * ecore_thread_cancel give the possibility to cancel a task still running. It
956  * will return EINA_FALSE, if the destruction is delayed or EINA_TRUE if it is
957  * cancelled after this call.
958  *
959  * This function work in the main loop and in the thread, but you should not pass
960  * the Ecore_Thread variable from main loop to the worker thread in any structure.
961  * You should always use the one passed to the Ecore_Thread_Heavy_Cb.
962  *
963  * func_end, func_cancel will destroy the handler, so don't use it after.
964  * And if ecore_thread_cancel return EINA_TRUE, you should not use Ecore_Thread also.
965  */
EAPI Eina_Bool
ecore_thread_cancel(Ecore_Thread *thread)
{
#ifdef EFL_HAVE_THREADS
   Ecore_Pthread_Worker *work = (Ecore_Pthread_Worker *)thread;
   Eina_List *l;

   /* A NULL handle counts as already destroyed. */
   if (!work)
     return EINA_TRUE;
   /* Already flagged: destruction is pending, nothing more to do. */
   if (work->cancel)
     return EINA_FALSE;

   if (work->feedback_run)
     {
        /* Feedback job already marked for death: treat as cancelled. */
        if (work->kill)
          return EINA_TRUE;
        /* Feedback data still in flight through the pipe: we cannot free
         * the worker now, only flag it for delayed destruction. */
        if (work->u.feedback_run.send != work->u.feedback_run.received)
          goto on_exit;
     }

   LKL(_ecore_pending_job_threads_mutex);

   /* Immediate destruction is only safe from the main loop thread, and only
    * while the job is still queued (no worker has picked it up yet). */
   if ((have_main_loop_thread) &&
       (PHE(main_loop_thread, PHS())))
     {
        /* NOTE: `work` is reused as the loop cursor below; the original
         * handle is still available as `thread` for the comparison. */
        if (!work->feedback_run)
          EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);

                    /* Unlock before running the user callback. */
                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
        else
          EINA_LIST_FOREACH(_ecore_pending_job_threads_feedback, l, work)
            {
               if ((void *) work == (void *) thread)
                 {
                    _ecore_pending_job_threads_feedback = eina_list_remove_list(_ecore_pending_job_threads_feedback, l);

                    LKU(_ecore_pending_job_threads_mutex);

                    if (work->func_cancel)
                      work->func_cancel((void *) work->data, (Ecore_Thread *) work);
                    free(work);

                    return EINA_TRUE;
                 }
            }
     }

   LKU(_ecore_pending_job_threads_mutex);

   /* Delay the destruction */
 on_exit:
   ((Ecore_Pthread_Worker *)thread)->cancel = EINA_TRUE;
   return EINA_FALSE;
#else
   /* Without threads jobs run synchronously, so nothing is ever pending. */
   return EINA_TRUE;
#endif
}
1035
1036 /**
1037  * @brief Tell if a thread was canceled or not.
1038  * @param thread The thread to test.
1039  * @return EINA_TRUE if the thread is cancelled,
1040  *         EINA_FALSE if it is not.
1041  *
1042  * You can use this function in main loop and in the thread.
1043  */
1044 EAPI Eina_Bool
1045 ecore_thread_check(Ecore_Thread *thread)
1046 {
1047    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1048
1049    if (!worker) return EINA_TRUE;
1050    return worker->cancel;
1051 }
1052
1053 /**
1054  * @brief Run some heavy code in a parallel thread to avoid locking the main loop.
1055  * @param func_heavy The function that should run in another thread.
1056  * @param func_notify The function that will receive the data send by func_heavy in the main loop.
1057  * @param func_end The function that will be called in the main loop if the thread terminate correctly.
1058  * @param func_cancel The function that will be called in the main loop if the thread is cancelled.
1059  * @param data User context data to pass to all callback.
1060  * @param try_no_queue If you want to run outside of the thread pool.
1061  * @return A reference to the newly created thread instance, or NULL if it failed.
1062  *
1063  * ecore_thread_feedback_run provide a facility for easily managing heavy task in a
1064  * parallel thread. You should provide four functions. The first one, func_heavy,
1065  * that will do the heavy work in another thread (so you should not use the
1066  * EFL in it except Eina and Eet if you are careful). The second one, func_notify,
1067  * will receive the data send from the thread function (func_heavy) by ecore_thread_feedback
1068  * in the main loop (and so, can use all the EFL). The third, func_end,
1069  * that will be called in Ecore main loop when func_heavy is done. So you
1070  * can use all the EFL inside this function. The last one, func_cancel, will
1071  * be called in the main loop also, if the thread is cancelled or could not run at all.
1072  *
 * Be aware that you can't make any assumption about the order in which the
 * func_end callbacks will run after multiple calls to ecore_thread_feedback_run,
 * as we start as many threads as the host CPU can handle.
1076  *
1077  * If you set try_no_queue, it will try to run outside of the thread pool, this can bring
1078  * the CPU down, so be careful with that. Of course if it can't start a new thread, it will
1079  * try to use one from the pool.
1080  */
1081 EAPI Ecore_Thread *ecore_thread_feedback_run(Ecore_Thread_Cb func_heavy,
1082                                              Ecore_Thread_Notify_Cb func_notify,
1083                                              Ecore_Thread_Cb func_end,
1084                                              Ecore_Thread_Cb func_cancel,
1085                                              const void *data,
1086                                              Eina_Bool try_no_queue)
1087 {
1088
1089 #ifdef EFL_HAVE_THREADS
1090    Ecore_Pthread_Worker *worker;
1091    Ecore_Pthread_Data *pth = NULL;
1092
1093    if (!func_heavy) return NULL;
1094
1095    worker = _ecore_thread_worker_new();
1096    if (!worker) goto on_error;
1097
1098    worker->u.feedback_run.func_heavy = func_heavy;
1099    worker->u.feedback_run.func_notify = func_notify;
1100    worker->hash = NULL;
1101    CDI(worker->cond);
1102    LKI(worker->mutex);
1103    worker->func_cancel = func_cancel;
1104    worker->func_end = func_end;
1105    worker->data = data;
1106    worker->cancel = EINA_FALSE;
1107    worker->feedback_run = EINA_TRUE;
1108    worker->kill = EINA_FALSE;
1109    worker->reschedule = EINA_FALSE;
1110
1111    worker->u.feedback_run.send = 0;
1112    worker->u.feedback_run.received = 0;
1113
1114    worker->u.feedback_run.notify = ecore_pipe_add(_ecore_notify_handler, worker);
1115    worker->u.feedback_run.direct_pipe = NULL;
1116    worker->u.feedback_run.direct_worker = NULL;
1117
1118    if (!try_no_queue)
1119      {
1120         PH(t);
1121
1122         worker->u.feedback_run.direct_pipe = _ecore_thread_pipe_get();
1123         worker->u.feedback_run.direct_worker = _ecore_thread_worker_new();
1124
1125         if (PHC(t, _ecore_direct_worker, worker) == 0)
1126            return (Ecore_Thread *) worker;
1127      }
1128
1129    LKL(_ecore_pending_job_threads_mutex);
1130    _ecore_pending_job_threads_feedback = eina_list_append(_ecore_pending_job_threads_feedback, worker);
1131
1132    if (_ecore_thread_count == _ecore_thread_count_max)
1133      {
1134         LKU(_ecore_pending_job_threads_mutex);
1135         return (Ecore_Thread *) worker;
1136      }
1137
1138    LKU(_ecore_pending_job_threads_mutex);
1139
1140    /* One more thread could be created. */
1141    pth = malloc(sizeof (Ecore_Pthread_Data));
1142    if (!pth) goto on_error;
1143
1144    pth->p = _ecore_thread_pipe_get();
1145    pth->death_job = _ecore_thread_worker_new();
1146    if (!pth->p || !pth->death_job) goto on_error;
1147
1148    eina_threads_init();
1149
1150    if (PHC(pth->thread, _ecore_thread_worker, pth) == 0)
1151       return (Ecore_Thread *) worker;
1152
1153    eina_threads_shutdown();
1154
1155  on_error:
1156    if (pth)
1157      {
1158         if (pth->p) eina_array_push(_ecore_thread_pipe, pth->p);
1159         if (pth->death_job) _ecore_thread_worker_free(pth->death_job);
1160         free(pth);
1161      }
1162
1163    if (_ecore_thread_count == 0)
1164      {
1165         LKL(_ecore_pending_job_threads_mutex);
1166         _ecore_pending_job_threads_feedback = eina_list_remove(_ecore_pending_job_threads_feedback,
1167                                                                worker);
1168         LKU(_ecore_pending_job_threads_mutex);
1169
1170         if (func_cancel) func_cancel((void *) data, NULL);
1171
1172         if (worker)
1173           {
1174              ecore_pipe_del(worker->u.feedback_run.notify);
1175              free(worker);
1176              worker = NULL;
1177           }
1178      }
1179
1180    return (Ecore_Thread *) worker;
1181 #else
1182    Ecore_Pthread_Worker worker;
1183
1184    (void) try_no_queue;
1185
1186    /*
1187      If no thread and as we don't want to break app that rely on this
1188      facility, we will lock the interface until we are done.
1189     */
1190    worker.u.feedback_run.func_heavy = func_heavy;
1191    worker.u.feedback_run.func_notify = func_notify;
1192    worker.u.feedback_run.notify = NULL;
1193    worker.u.feedback_run.send = 0;
1194    worker.u.feedback_run.received = 0;
1195    worker.func_cancel = func_cancel;
1196    worker.func_end = func_end;
1197    worker.data = data;
1198    worker.cancel = EINA_FALSE;
1199    worker.feedback_run = EINA_TRUE;
1200    worker.kill = EINA_FALSE;
1201
1202    do {
1203       worker.reschedule = EINA_FALSE;
1204
1205       func_heavy((void *)data, (Ecore_Thread *) &worker);
1206
1207       if (worker.cancel) func_cancel((void *)data, (Ecore_Thread *) &worker);
1208       else func_end((void *)data, (Ecore_Thread *) &worker);
1209    } while (worker.reschedule == EINA_FALSE);
1210
1211    return NULL;
1212 #endif
1213 }
1214
1215 /**
1216  * @brief Send data to main loop from worker thread.
1217  * @param thread The current Ecore_Thread context to send data from
1218  * @param data Data to be transmitted to the main loop
1219  * @return EINA_TRUE if data was successfully send to main loop,
1220  *         EINA_FALSE if anything goes wrong.
1221  *
 * After a successful call, the data should be considered owned
1223  * by the main loop.
1224  *
1225  * You should use this function only in the func_heavy call.
1226  */
1227 EAPI Eina_Bool
1228 ecore_thread_feedback(Ecore_Thread *thread, const void *data)
1229 {
1230    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1231
1232    if (!worker) return EINA_FALSE;
1233    if (!worker->feedback_run) return EINA_FALSE;
1234
1235 #ifdef EFL_HAVE_THREADS
1236    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1237
1238    worker->u.feedback_run.send++;
1239    ecore_pipe_write(worker->u.feedback_run.notify, &data, sizeof (void *));
1240
1241    return EINA_TRUE;
1242 #else
1243    worker->u.feedback_run.func_notify((void*) worker->data, thread, (void*) data);
1244
1245    return EINA_TRUE;
1246 #endif
1247 }
1248
1249 /**
 * @brief Plan to run the heavy function again once its current run exits.
1251  * @param thread The current Ecore_Thread context to reschedule
1252  * @return EINA_TRUE if data was successfully send to main loop,
1253  *         EINA_FALSE if anything goes wrong.
1254  *
 * After a successful call, you can still do what you want in your thread; it
1256  * will only reschedule it once you exit the heavy loop.
1257  *
1258  * You should use this function only in the func_heavy call.
1259  */
1260 EAPI Eina_Bool
1261 ecore_thread_reschedule(Ecore_Thread *thread)
1262 {
1263    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1264
1265    if (!worker) return EINA_FALSE;
1266
1267 #ifdef EFL_HAVE_THREADS
1268    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1269 #endif
1270
1271    worker->reschedule = EINA_TRUE;
1272    return EINA_TRUE;
1273 }
1274
1275 /**
1276  * @brief Get number of active thread jobs
1277  * @return Number of active threads running jobs
1278  * This returns the number of threads currently running jobs through the
1279  * ecore_thread api.
1280  */
1281 EAPI int
1282 ecore_thread_active_get(void)
1283 {
1284 #ifdef EFL_HAVE_THREADS
1285    return _ecore_thread_count;
1286 #else
1287    return 0;
1288 #endif
1289 }
1290
1291 /**
1292  * @brief Get number of pending (short) thread jobs
1293  * @return Number of pending threads running "short" jobs
1294  * This returns the number of threads currently running jobs through the
1295  * ecore_thread_run api call.
1296  */
1297 EAPI int
1298 ecore_thread_pending_get(void)
1299 {
1300    int ret;
1301 #ifdef EFL_HAVE_THREADS
1302    LKL(_ecore_pending_job_threads_mutex);
1303    ret = eina_list_count(_ecore_pending_job_threads);
1304    LKU(_ecore_pending_job_threads_mutex);
1305    return ret;
1306 #else
1307    return 0;
1308 #endif
1309 }
1310
1311 /**
1312  * @brief Get number of pending feedback thread jobs
1313  * @return Number of pending threads running "feedback" jobs
1314  * This returns the number of threads currently running jobs through the
1315  * ecore_thread_feedback_run api call.
1316  */
1317 EAPI int
1318 ecore_thread_pending_feedback_get(void)
1319 {
1320    int ret;
1321 #ifdef EFL_HAVE_THREADS
1322    LKL(_ecore_pending_job_threads_mutex);
1323    ret = eina_list_count(_ecore_pending_job_threads_feedback);
1324    LKU(_ecore_pending_job_threads_mutex);
1325    return ret;
1326 #else
1327    return 0;
1328 #endif
1329 }
1330
1331 /**
1332  * @brief Get number of pending thread jobs
1333  * @return Number of pending threads running jobs
1334  * This returns the number of threads currently running jobs through the
1335  * ecore_thread_run and ecore_thread_feedback_run api calls combined.
1336  */
1337 EAPI int
1338 ecore_thread_pending_total_get(void)
1339 {
1340    int ret;
1341 #ifdef EFL_HAVE_THREADS
1342    LKL(_ecore_pending_job_threads_mutex);
1343    ret = eina_list_count(_ecore_pending_job_threads) + eina_list_count(_ecore_pending_job_threads_feedback);
1344    LKU(_ecore_pending_job_threads_mutex);
1345    return ret;
1346 #else
1347    return 0;
1348 #endif
1349 }
1350
1351 /**
1352  * @brief Get the max number of threads that can run simultaneously
1353  * @return Max number of threads ecore will run
1354  * This returns the total number of threads that ecore will attempt to run
1355  * simultaneously.
1356  */
EAPI int
ecore_thread_max_get(void)
{
   /* Upper bound on how many worker threads the pool may run at once. */
   return _ecore_thread_count_max;
}
1362
1363 /**
1364  * @brief Set the max number of threads that can run simultaneously
1365  * @param num The new maximum
1366  * This sets the maximum number of threads that ecore will try to run
1367  * simultaneously.  This number cannot be < 1 or >= 2x the number of active cpus.
1368  */
1369 EAPI void
1370 ecore_thread_max_set(int num)
1371 {
1372    if (num < 1) return;
1373    /* avoid doing something hilarious by blocking dumb users */
1374    if (num >= (2 * eina_cpu_count())) return;
1375
1376    _ecore_thread_count_max = num;
1377 }
1378
1379 /**
1380  * @brief Reset the max number of threads that can run simultaneously
1381  * This resets the maximum number of threads that ecore will try to run
1382  * simultaneously to the number of active cpus.
1383  */
EAPI void
ecore_thread_max_reset(void)
{
   /* Back to the default policy: one worker per active CPU. */
   _ecore_thread_count_max = eina_cpu_count();
}
1389
1390 /**
1391  * @brief Get the number of threads which are available to be used
1392  * @return The number of available threads
1393  * This returns the number of threads slots that ecore has currently available.
1394  * Assuming that you haven't changed the max number of threads with @ref ecore_thread_max_set
1395  * this should be equal to (num_cpus - (active_running + active_feedback_running))
1396  */
1397 EAPI int
1398 ecore_thread_available_get(void)
1399 {
1400    int ret;
1401 #ifdef EFL_HAVE_THREADS
1402    LKL(_ecore_pending_job_threads_mutex);
1403    ret = _ecore_thread_count_max - _ecore_thread_count;
1404    LKU(_ecore_pending_job_threads_mutex);
1405    return ret;
1406 #else
1407    return 0;
1408 #endif
1409 }
1410
1411 /**
1412  * @brief Add data to the thread for subsequent use
1413  * @param thread The thread context to add to
1414  * @param key The name string to add the data with
1415  * @param value The data to add
1416  * @param cb The callback to free the data with
1417  * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1418  * @return EINA_TRUE on success, EINA_FALSE on failure
1419  * This adds data to the thread context, allowing the thread
1420  * to retrieve and use it without complicated mutexing.  This function can only be called by a
1421  * *_run thread INSIDE the thread and will return EINA_FALSE in any case but success.
1422  * All data added to the thread will be freed with its associated callback (if present)
1423  * upon thread termination.  If no callback is specified, it is expected that the user will free the
1424  * data, but this is most likely not what you want.
1425  */
1426 EAPI Eina_Bool
1427 ecore_thread_local_data_add(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1428 {
1429    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1430    Ecore_Thread_Data *d;
1431    Eina_Bool ret;
1432
1433    if ((!thread) || (!key) || (!value))
1434      return EINA_FALSE;
1435 #ifdef EFL_HAVE_THREADS
1436    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1437
1438    if (!worker->hash)
1439      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1440
1441    if (!worker->hash)
1442      return EINA_FALSE;
1443
1444    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1445      return EINA_FALSE;
1446
1447    d->data = value;
1448    d->cb = cb;
1449
1450    if (direct)
1451      ret = eina_hash_direct_add(worker->hash, key, d);
1452    else
1453      ret = eina_hash_add(worker->hash, key, d);
1454    CDB(worker->cond);
1455    return ret;
1456 #else
1457    return EINA_TRUE;
1458 #endif
1459 }
1460
1461 /**
1462  * @brief Modify data in the thread, or add if not found
1463  * @param thread The thread context
1464  * @param key The name string to add the data with
1465  * @param value The data to add
1466  * @param cb The callback to free the data with
1467  * @return The old data associated with @p key on success if modified, NULL if added
1468  * This adds/modifies data in the thread context, adding only if modify fails.
1469  * This function can only be called by a *_run thread INSIDE the thread.
1470  * All data added to the thread pool will be freed with its associated callback (if present)
1471  * upon thread termination.  If no callback is specified, it is expected that the user will free the
1472  * data, but this is most likely not what you want.
1473  */
1474 EAPI void *
1475 ecore_thread_local_data_set(Ecore_Thread *thread, const char *key, void *value, Eina_Free_Cb cb)
1476 {
1477    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1478    Ecore_Thread_Data *d, *r;
1479    void *ret;
1480    if ((!thread) || (!key) || (!value))
1481      return NULL;
1482 #ifdef EFL_HAVE_THREADS
1483    if (!PHE(worker->self, PHS())) return NULL;
1484
1485    if (!worker->hash)
1486      worker->hash = eina_hash_string_small_new(_ecore_thread_data_free);
1487
1488    if (!worker->hash)
1489      return NULL;
1490
1491    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1492      return NULL;
1493
1494    d->data = value;
1495    d->cb = cb;
1496
1497    r = eina_hash_set(worker->hash, key, d);
1498    CDB(worker->cond);
1499    ret = r->data;
1500    free(r);
1501    return ret;
1502 #else
1503    return NULL;
1504 #endif
1505 }
1506
1507 /**
1508  * @brief Find data in the thread's data
1509  * @param thread The thread context
1510  * @param key The name string the data is associated with
1511  * @return The value, or NULL on error
1512  * This finds data in the thread context that has been previously added with @ref ecore_thread_local_data_add
1513  * This function can only be called by a *_run thread INSIDE the thread, and will return NULL
1514  * in any case but success.
1515  */
1516
1517 EAPI void *
1518 ecore_thread_local_data_find(Ecore_Thread *thread, const char *key)
1519 {
1520    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1521    Ecore_Thread_Data *d;
1522
1523    if ((!thread) || (!key))
1524      return NULL;
1525 #ifdef EFL_HAVE_THREADS
1526    if (!PHE(worker->self, PHS())) return NULL;
1527
1528    if (!worker->hash)
1529      return NULL;
1530
1531    d = eina_hash_find(worker->hash, key);
1532    return d->data;
1533 #else
1534    return NULL;
1535 #endif
1536 }
1537
1538 /**
1539  * @brief Delete data from the thread's data
1540  * @param thread The thread context
1541  * @param key The name string the data is associated with
1542  * @return EINA_TRUE on success, EINA_FALSE on failure
1543  * This deletes the data pointer from the thread context which was previously added with @ref ecore_thread_local_data_add
1544  * This function can only be called by a *_run thread INSIDE the thread, and will return EINA_FALSE
1545  * in any case but success.  Note that this WILL free the data if a callback was specified.
1546  */
1547 EAPI Eina_Bool
1548 ecore_thread_local_data_del(Ecore_Thread *thread, const char *key)
1549 {
1550    Ecore_Pthread_Worker *worker = (Ecore_Pthread_Worker *) thread;
1551    Ecore_Thread_Data *d;
1552    if ((!thread) || (!key))
1553      return EINA_FALSE;
1554 #ifdef EFL_HAVE_THREADS
1555    if (!PHE(worker->self, PHS())) return EINA_FALSE;
1556
1557    if (!worker->hash)
1558      return EINA_FALSE;
1559    if ((d = eina_hash_find(worker->hash, key)))
1560      _ecore_thread_data_free(d);
1561    return eina_hash_del_by_key(worker->hash, key);
1562 #else
1563    return EINA_TRUE;
1564 #endif
1565 }
1566
1567 /**
1568  * @brief Add data to the global data
1569  * @param key The name string to add the data with
1570  * @param value The data to add
1571  * @param cb The optional callback to free the data with once ecore is shut down
1572  * @param direct If true, this will not copy the key string (like eina_hash_direct_add)
1573  * @return EINA_TRUE on success, EINA_FALSE on failure
1574  * This adds data to the global thread data, and will return EINA_FALSE in any case but success.
1575  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1576  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1577  * was specified for, you will most likely encounter a segv later on.
1578  */
1579 EAPI Eina_Bool
1580 ecore_thread_global_data_add(const char *key, void *value, Eina_Free_Cb cb, Eina_Bool direct)
1581 {
1582    Eina_Bool ret;
1583    Ecore_Thread_Data *d;
1584
1585    if ((!key) || (!value))
1586      return EINA_FALSE;
1587 #ifdef EFL_HAVE_THREADS
1588    LRWKWL(_ecore_thread_global_hash_lock);
1589    if (!_ecore_thread_global_hash)
1590      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1591    LRWKU(_ecore_thread_global_hash_lock);
1592
1593    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1594      return EINA_FALSE;
1595
1596    d->data = value;
1597    d->cb = cb;
1598
1599    if (!_ecore_thread_global_hash)
1600      return EINA_FALSE;
1601    LRWKWL(_ecore_thread_global_hash_lock);
1602    if (direct)
1603      ret = eina_hash_direct_add(_ecore_thread_global_hash, key, d);
1604    else
1605      ret = eina_hash_add(_ecore_thread_global_hash, key, d);
1606    LRWKU(_ecore_thread_global_hash_lock);
1607    CDB(_ecore_thread_global_hash_cond);
1608    return ret;
1609 #else
1610    return EINA_TRUE;
1611 #endif
1612 }
1613
1614 /**
1615  * @brief Add data to the global data
1616  * @param key The name string to add the data with
1617  * @param value The data to add
1618  * @param cb The optional callback to free the data with once ecore is shut down
1619  * @return An Ecore_Thread_Data on success, NULL on failure
1620  * This adds data to the global thread data and returns NULL, or replaces the previous data
1621  * associated with @p key and returning the previous data if it existed.  To see if an error occurred,
1622  * one must use eina_error_get.
1623  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1624  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1625  * was specified for, you will most likely encounter a segv later on.
1626  */
1627 EAPI void *
1628 ecore_thread_global_data_set(const char *key, void *value, Eina_Free_Cb cb)
1629 {
1630    Ecore_Thread_Data *d, *r;
1631    void *ret;
1632
1633    if ((!key) || (!value))
1634      return NULL;
1635 #ifdef EFL_HAVE_THREADS
1636    LRWKWL(_ecore_thread_global_hash_lock);
1637    if (!_ecore_thread_global_hash)
1638      _ecore_thread_global_hash = eina_hash_string_small_new(_ecore_thread_data_free);
1639    LRWKU(_ecore_thread_global_hash_lock);
1640
1641    if (!_ecore_thread_global_hash)
1642      return NULL;
1643
1644    if (!(d = malloc(sizeof(Ecore_Thread_Data))))
1645      return NULL;
1646
1647    d->data = value;
1648    d->cb = cb;
1649
1650    LRWKWL(_ecore_thread_global_hash_lock);
1651    r = eina_hash_set(_ecore_thread_global_hash, key, d);
1652    LRWKU(_ecore_thread_global_hash_lock);
1653    CDB(_ecore_thread_global_hash_cond);
1654
1655    ret = r->data;
1656    free(r);
1657    return ret;
1658 #else
1659    return NULL;
1660 #endif
1661 }
1662
1663 /**
1664  * @brief Find data in the global data
1665  * @param key The name string the data is associated with
1666  * @return The value, or NULL on error
1667  * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1668  * This function will return NULL in any case but success.
1669  * All data added to global can be manually freed, or a callback can be provided with @p cb which will
1670  * be called upon ecore_thread shutting down.  Note that if you have manually freed data that a callback
1671  * was specified for, you will most likely encounter a segv later on.
1672  * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1673  * if you will be doing anything with it.
1674  */
1675
1676 EAPI void *
1677 ecore_thread_global_data_find(const char *key)
1678 {
1679    Ecore_Thread_Data *ret;
1680    if (!key)
1681      return NULL;
1682 #ifdef EFL_HAVE_THREADS
1683    if (!_ecore_thread_global_hash) return NULL;
1684
1685    LRWKRL(_ecore_thread_global_hash_lock);
1686    ret = eina_hash_find(_ecore_thread_global_hash, key);
1687    LRWKU(_ecore_thread_global_hash_lock);
1688    return ret->data;
1689 #else
1690    return NULL;
1691 #endif
1692 }
1693
1694 /**
1695  * @brief Delete data from the global data
1696  * @param key The name string the data is associated with
1697  * @return EINA_TRUE on success, EINA_FALSE on failure
1698  * This deletes the data pointer from the global data which was previously added with @ref ecore_thread_global_data_add
1699  * This function will return EINA_FALSE in any case but success.
1700  * Note that this WILL free the data if an @c Eina_Free_Cb was specified when the data was added.
1701  */
1702 EAPI Eina_Bool
1703 ecore_thread_global_data_del(const char *key)
1704 {
1705    Eina_Bool ret;
1706    Ecore_Thread_Data *d;
1707
1708    if (!key)
1709      return EINA_FALSE;
1710 #ifdef EFL_HAVE_THREADS
1711    if (!_ecore_thread_global_hash)
1712      return EINA_FALSE;
1713
1714    LRWKWL(_ecore_thread_global_hash_lock);
1715    if ((d = eina_hash_find(_ecore_thread_global_hash, key)))
1716      _ecore_thread_data_free(d);
1717    ret = eina_hash_del_by_key(_ecore_thread_global_hash, key);
1718    LRWKU(_ecore_thread_global_hash_lock);
1719    return ret;
1720 #else
1721    return EINA_TRUE;
1722 #endif
1723 }
1724
1725 /**
1726  * @brief Find data in the global data and optionally wait for the data if not found
1727  * @param key The name string the data is associated with
1728  * @param seconds The amount of time in seconds to wait for the data.  If 0, the call will be async and not wait for data.
1729  * If < 0 the call will wait indefinitely for the data.
1730  * @return The value, or NULL on failure
1731  * This finds data in the global data that has been previously added with @ref ecore_thread_global_data_add
1732  * This function will return NULL in any case but success.
1733  * Use @p seconds to specify the amount of time to wait.  Use > 0 for an actual wait time, 0 to not wait, and < 0 to wait indefinitely.
1734  * @note Keep in mind that the data returned can be used by multiple threads at a time, so you will most likely want to mutex
1735  * if you will be doing anything with it.
1736  */
EAPI void *
ecore_thread_global_data_wait(const char *key, double seconds)
{
   double time = 0;
   Ecore_Thread_Data *ret = NULL;
   if (!key)
     return NULL;
#ifdef EFL_HAVE_THREADS
   if (!_ecore_thread_global_hash)
     return NULL;
   /* Absolute deadline for the timed wait; stays 0 when seconds <= 0. */
   if (seconds > 0)
     time = ecore_time_get() + seconds;

   while (1)
     {
        /* Build the absolute timeout in the platform's native format.
         * NOTE(review): when seconds < 0 the timeout stays at epoch 0, so
         * the timedwait below likely returns immediately and this loop
         * spins rather than blocks — confirm intended behavior for the
         * "wait indefinitely" case. */
#ifndef _WIN32
        struct timespec t = { 0, 0 };

        t.tv_sec = (long int)time;
        t.tv_nsec = (long int)((time - (double)t.tv_sec) * 1000000000);
#else
        struct timeval t = { 0, 0 };

        t.tv_sec = (long int)time;
        t.tv_usec = (long int)((time - (double)t.tv_sec) * 1000000);
#endif
        /* Probe the hash under the read lock. */
        LRWKRL(_ecore_thread_global_hash_lock);
        ret = eina_hash_find(_ecore_thread_global_hash, key);
        LRWKU(_ecore_thread_global_hash_lock);
        /* Stop when found, when asked not to wait (seconds == 0), or when
         * the deadline has passed. */
        if ((ret) || (!seconds) || ((seconds > 0) && (time <= ecore_time_get())))
          break;
        /* Sleep until a writer broadcasts _ecore_thread_global_hash_cond
         * (see ecore_thread_global_data_add/set) or the timeout expires. */
        LKL(_ecore_thread_global_hash_mutex);
        CDW(_ecore_thread_global_hash_cond, _ecore_thread_global_hash_mutex, &t);
        LKU(_ecore_thread_global_hash_mutex);
     }
   if (ret) return ret->data;
   return NULL;
#else
   return NULL;
#endif
}
1778
1779 /**
1780  * @}
1781  */
1782
1783 /**
1784  * @}
1785  */