{
union {
struct {
- void (*func_blocking)(void *data);
+ void (*func_blocking)(void *data);
} short_run;
struct {
- void (*func_heavy)(Ecore_Thread *thread, void *data);
- void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data);
+ void (*func_heavy)(Ecore_Thread *thread, void *data);
+ void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data);
- Ecore_Pipe *notify;
+ Ecore_Pipe *notify;
#ifdef EFL_HAVE_PTHREAD
- pthread_t self;
+ pthread_t self;
#endif
} long_run;
} u;
if (work->cancel)
{
- if (work->func_cancel)
- work->func_cancel((void *) work->data);
+ if (work->func_cancel)
+ work->func_cancel((void *) work->data);
}
else
{
- if (work->func_end)
- work->func_end((void *) work->data);
+ if (work->func_end)
+ work->func_end((void *) work->data);
}
if (work->long_run) ecore_pipe_del(work->u.long_run.notify);
while (_ecore_pending_job_threads)
{
- pthread_mutex_lock(&_mutex);
+ pthread_mutex_lock(&_mutex);
- if (!_ecore_pending_job_threads)
- {
- pthread_mutex_unlock(&_mutex);
- break;
- }
+ if (!_ecore_pending_job_threads)
+ {
+ pthread_mutex_unlock(&_mutex);
+ break;
+ }
- work = eina_list_data_get(_ecore_pending_job_threads);
- _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, _ecore_pending_job_threads);
+ work = eina_list_data_get(_ecore_pending_job_threads);
+ _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, _ecore_pending_job_threads);
- pthread_mutex_unlock(&_mutex);
+ pthread_mutex_unlock(&_mutex);
- work->u.short_run.func_blocking((void *) work->data);
+ work->u.short_run.func_blocking((void *) work->data);
- ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
+ ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
}
}
while (_ecore_pending_job_threads_long)
{
- pthread_mutex_lock(&_mutex);
+ pthread_mutex_lock(&_mutex);
- if (!_ecore_pending_job_threads_long)
- {
- pthread_mutex_unlock(&_mutex);
- break;
- }
+ if (!_ecore_pending_job_threads_long)
+ {
+ pthread_mutex_unlock(&_mutex);
+ break;
+ }
- work = eina_list_data_get(_ecore_pending_job_threads_long);
- _ecore_pending_job_threads_long = eina_list_remove_list(_ecore_pending_job_threads_long, _ecore_pending_job_threads_long);
+ work = eina_list_data_get(_ecore_pending_job_threads_long);
+ _ecore_pending_job_threads_long = eina_list_remove_list(_ecore_pending_job_threads_long, _ecore_pending_job_threads_long);
- pthread_mutex_unlock(&_mutex);
+ pthread_mutex_unlock(&_mutex);
- work->u.long_run.self = thread;
- work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
+ work->u.long_run.self = thread;
+ work->u.long_run.func_heavy((Ecore_Thread *) work, (void *) work->data);
- ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
+ ecore_pipe_write(end_pipe, &work, sizeof (Ecore_Pthread_Worker *));
}
}
pth->p = ecore_pipe_add(_ecore_thread_handler, NULL);
if (!pth->p)
{
- free(pth);
- return NULL;
+ free(pth);
+ return NULL;
}
pth->thread = pthread_self();
work = malloc(sizeof (Ecore_Pthread_Worker));
if (!work)
{
- ecore_pipe_del(pth->p);
- free(pth);
- return NULL;
+ ecore_pipe_del(pth->p);
+ free(pth);
+ return NULL;
}
work->data = pth;
pthread_mutex_lock(&_mutex);
if (_ecore_pending_job_threads)
{
- pthread_mutex_unlock(&_mutex);
- goto on_error;
+ pthread_mutex_unlock(&_mutex);
+ goto on_error;
}
if (_ecore_pending_job_threads_long)
{
- pthread_mutex_unlock(&_mutex);
- goto on_error;
+ pthread_mutex_unlock(&_mutex);
+ goto on_error;
}
_ecore_thread_count--;
EINA_LIST_FREE(_ecore_pending_job_threads, work)
{
- if (work->func_cancel)
- work->func_cancel((void *)work->data);
- free(work);
+ if (work->func_cancel)
+ work->func_cancel((void *)work->data);
+ free(work);
}
pthread_mutex_unlock(&_mutex);
EINA_LIST_FREE(_ecore_active_job_threads, pth)
{
- Ecore_Pipe *p;
+ Ecore_Pipe *p;
- pthread_cancel(pth->thread);
- pthread_join(pth->thread, (void **) &p);
+ pthread_cancel(pth->thread);
+ pthread_join(pth->thread, (void **) &p);
- ecore_pipe_del(pth->p);
+ ecore_pipe_del(pth->p);
}
ecore_event_handler_del(del_handler);
/**
 * @defgroup Ecore_Thread Ecore Thread Functions
 * These functions allow for ecore-managed threads which integrate with ecore's main loop.
 */
/**
 * @brief Run some blocking code in a parallel thread to avoid locking the main loop.
 */
EAPI Ecore_Thread *
ecore_thread_run(void (*func_blocking)(void *data),
- void (*func_end)(void *data),
- void (*func_cancel)(void *data),
- const void *data)
+ void (*func_end)(void *data),
+ void (*func_cancel)(void *data),
+ const void *data)
{
#ifdef EFL_HAVE_PTHREAD
Ecore_Pthread_Worker *work;
if (!work)
{
func_cancel((void *) data);
- return NULL;
+ return NULL;
}
work->u.short_run.func_blocking = func_blocking;
if (_ecore_thread_count == _ecore_thread_count_max)
{
- pthread_mutex_unlock(&_mutex);
- return (Ecore_Thread *) work;
+ pthread_mutex_unlock(&_mutex);
+ return (Ecore_Thread *) work;
}
pthread_mutex_unlock(&_mutex);
on_error:
if (pth)
{
- if (pth->p) ecore_pipe_del(pth->p);
- free(pth);
+ if (pth->p) ecore_pipe_del(pth->p);
+ free(pth);
}
if (_ecore_thread_count == 0)
{
- if (work->func_cancel)
- work->func_cancel((void *) work->data);
- free(work);
- work = NULL;
+ if (work->func_cancel)
+ work->func_cancel((void *) work->data);
+ free(work);
+ work = NULL;
}
return (Ecore_Thread *) work;
#else
EINA_LIST_FOREACH(_ecore_pending_job_threads, l, work)
if ((void *) work == (void *) thread)
{
- _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
+ _ecore_pending_job_threads = eina_list_remove_list(_ecore_pending_job_threads, l);
- pthread_mutex_unlock(&_mutex);
+ pthread_mutex_unlock(&_mutex);
- if (work->func_cancel)
- work->func_cancel((void *) work->data);
- free(work);
+ if (work->func_cancel)
+ work->func_cancel((void *) work->data);
+ free(work);
- return EINA_TRUE;
+ return EINA_TRUE;
}
pthread_mutex_unlock(&_mutex);
*/
EAPI Ecore_Thread *
ecore_long_run(void (*func_heavy)(Ecore_Thread *thread, void *data),
- void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data),
- void (*func_end)(void *data),
- void (*func_cancel)(void *data),
- const void *data,
- Eina_Bool try_no_queue)
+ void (*func_notify)(Ecore_Thread *thread, void *msg_data, void *data),
+ void (*func_end)(void *data),
+ void (*func_cancel)(void *data),
+ const void *data,
+ Eina_Bool try_no_queue)
{
#ifdef EFL_HAVE_PTHREAD
if (!try_no_queue)
{
- pthread_t t;
+ pthread_t t;
- if (pthread_create(&t, NULL, (void *) _ecore_direct_worker, worker) == 0)
- return (Ecore_Thread *) worker;
+ if (pthread_create(&t, NULL, (void *) _ecore_direct_worker, worker) == 0)
+ return (Ecore_Thread *) worker;
}
pthread_mutex_lock(&_mutex);
if (_ecore_thread_count == _ecore_thread_count_max)
{
- pthread_mutex_unlock(&_mutex);
- return (Ecore_Thread *) worker;
+ pthread_mutex_unlock(&_mutex);
+ return (Ecore_Thread *) worker;
}
pthread_mutex_unlock(&_mutex);
on_error:
if (pth)
{
- if (pth->p) ecore_pipe_del(pth->p);
- free(pth);
+ if (pth->p) ecore_pipe_del(pth->p);
+ free(pth);
}
if (_ecore_thread_count == 0)
{
- if (func_cancel) func_cancel((void *) data);
-
- if (worker)
- {
- ecore_pipe_del(worker->u.long_run.notify);
- free(worker);
- worker = NULL;
- }
+ if (func_cancel) func_cancel((void *) data);
+
+ if (worker)
+ {
+ ecore_pipe_del(worker->u.long_run.notify);
+ free(worker);
+ worker = NULL;
+ }
}
return (Ecore_Thread *) worker;
EAPI int
ecore_thread_max_get(void)
{
- return _ecore_thread_count_max;
+ return _ecore_thread_count_max;
}
/**