68db3983b26e3e7c2b2fe4fd2db4e5c36d6b526e
[framework/uifw/evas.git] / src / lib / engines / common / evas_pipe.c
// THIS IS DEPRECATED. WILL GO EVENTUALLY. NO NEED TO SUPPORT ANYMORE
2
3 #include "evas_common.h"
4
5 #ifdef BUILD_PIPE_RENDER
6
7 #ifdef EVAS_FRAME_QUEUING
8 #define SCALECACHE
9 static Evas_FrameQ gframeq;     // global frameQ
10
11 static Evas_Surface *
12 evas_common_surface_alloc(void *surface, int x, int y, int w, int h)
13 {
14    Evas_Surface *e_surface;
15
16    e_surface = calloc(1, sizeof(Evas_Surface));
17    e_surface->im = surface;
18    LKL(e_surface->im->cache_entry.ref_fq_add);
19    e_surface->im->cache_entry.ref_fq[0]++;
20    LKU(e_surface->im->cache_entry.ref_fq_add);
21    e_surface->x = x;
22    e_surface->y = y;
23    e_surface->w = w;
24    e_surface->h = h;
25
26    return e_surface;
27 }
28
29 static void
30 evas_common_surface_dealloc(Evas_Surface *surface)
31 {
32    Evas_Surface *d_surface;
33
34    while (surface)
35      {
36         d_surface = surface;
37         surface = (Evas_Surface *)eina_inlist_remove(EINA_INLIST_GET(surface), EINA_INLIST_GET(d_surface));
38         LKL(d_surface->im->cache_entry.ref_fq_del);
39         d_surface->im->cache_entry.ref_fq[1]++;
40         LKU(d_surface->im->cache_entry.ref_fq_del);
41         free(d_surface);
42      }
43 }
44
45 static void
46 evas_common_surface_add(Evas_Frame *frame, Evas_Surface *surface)
47 {
48    frame->surfaces = (Evas_Surface *)eina_inlist_append(EINA_INLIST_GET(frame->surfaces), EINA_INLIST_GET(surface));
49 }
50
51 static Evas_Frame * 
52 evas_common_frame_alloc(void)
53 {
54    Evas_Frame *frame;
55
56    frame = calloc(1, sizeof(Evas_Frame));
57    frame->surfaces = NULL;
58    return frame;
59 }
60
/* Release a frame: free all surfaces attached to it, then the frame. */
static void 
evas_common_frame_dealloc(Evas_Frame *frame)
{
   evas_common_surface_dealloc(frame->surfaces);
   free(frame);
}
67
/* Enqueue a finished-composing frame onto the frame queue, blocking while
 * the queue is full, then wake one worker thread.
 * Also decides whether this frame should skip flush-time scheduling: if
 * every frame queued ahead of it is already ready (i.e. the first
 * not-ready frame found is this very frame), flushing it late would only
 * add latency, so dont_schedule is set. */
static void
evas_common_frame_add(Evas_FrameQ *frameq, Evas_Frame *frame)
{
   Evas_Frame *temp_frame;
   
   LKL(frameq->mutex);
   while ((int)eina_inlist_count(EINA_INLIST_GET(frameq->frames)) >= frameq->frameq_sz)
     {
        /* queue full: wait for a worker thread to finish a previous frame
         * (cond_done is broadcast by the post-worker on each dequeue) */
        pthread_cond_wait(&(frameq->cond_done), &(frameq->mutex));
     }
   frameq->frames = (Evas_Frame *) eina_inlist_append(EINA_INLIST_GET(frameq->frames), EINA_INLIST_GET(frame));

   /* find the first frame that is not yet ready; NOTE: relies on
    * EINA_INLIST_FOREACH leaving temp_frame set when the loop breaks
    * and NULL when it runs to completion */
   EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
     {
       if (!temp_frame->ready)
         {
            break;
         }
     }
   if (temp_frame && temp_frame == frame)
      frame->dont_schedule = 1;

   LKU(frameq->mutex);

   pthread_cond_signal(&(frameq->cond_new));
}
96
97 EAPI Evas_Surface *
98 evas_common_frameq_new_surface(void *surface, int x, int y, int w, int h)
99 {
100    return evas_common_surface_alloc(surface, x, y, w, h);
101 }
102
103 EAPI void
104 evas_common_frameq_add_surface(Evas_Surface *surface)
105 {
106    evas_common_surface_add(gframeq.cur_frame, surface);
107 }
108
109 EAPI void 
110 evas_common_frameq_set_frame_data(void *data, 
111                                   void (*fn_output_redraws_next_update_push) (void *data, void *surface, int x, int y, int w, int h),
112                                   void (*fn_output_flush)  (void *data),
113                                   void (*fn_output_set_priv)(void *data, void *cur, void *prev))
114 {
115    if (gframeq.cur_frame) 
116      {
117         gframeq.cur_frame->data = data;
118         gframeq.cur_frame->output_redraws_next_update_push = fn_output_redraws_next_update_push;
119         gframeq.cur_frame->output_flush = fn_output_flush;
120         gframeq.cur_frame->output_set_priv = fn_output_set_priv;
121      }
122 }
123
124 EAPI void
125 evas_common_frameq_prepare_frame(void)
126 {
127    if (!gframeq.cur_frame )
128      {
129         gframeq.cur_frame = evas_common_frame_alloc();
130      }
131 }
132
133 EAPI void
134 evas_common_frameq_ready_frame(void)
135 {
136    if (gframeq.cur_frame)
137      {
138         evas_common_frame_add(&gframeq, gframeq.cur_frame);
139         gframeq.cur_frame = NULL; // create a new frame for the next frame later
140      }
141 }
142
143
144 EAPI void
145 evas_common_frameq_init(void)
146 {
147    gframeq.frames = NULL;
148    pthread_cond_init(&(gframeq.cond_new), NULL);
149    pthread_cond_init(&(gframeq.cond_ready), NULL);
150    pthread_cond_init(&(gframeq.cond_done), NULL);
151    LKI(gframeq.mutex);
152    gframeq.initialised = 0;     // worker thread are not created yet
153    gframeq.frameq_sz = 1;       // this value ensures the first frame can be enqueued.
154 }
155
/* Tear down the global frame queue.  The condition variables are
 * deliberately NOT destroyed (see the #if 0 block) — they are left to be
 * reclaimed at process exit; only the mutex is destroyed here. */
EAPI void
evas_common_frameq_destroy(void)
{
#if 0 // let them destroyed indirectly with program exit
   LKL(gframeq.mutex);
   pthread_cond_destroy(&(gframeq.cond_new));
   pthread_cond_destroy(&(gframeq.cond_ready));
   pthread_cond_destroy(&(gframeq.cond_done));
   LKU(gframeq.mutex);
#endif
   LKD(gframeq.mutex);
   
   gframeq.frames = NULL;
   gframeq.initialised = 0;
}
171
172 EAPI void
173 evas_common_frameq_flush(void)
174 {
175    if (! evas_common_frameq_enabled())
176       return;
177
178    LKL(gframeq.mutex);
179    while(eina_inlist_count(EINA_INLIST_GET(gframeq.frames)) > 0)
180      {
181         /* wait a worker thread finish previous frame */
182         pthread_cond_wait(&(gframeq.cond_done), &(gframeq.mutex));
183    }
184    LKU(gframeq.mutex);
185 }
186
187
188 EAPI void
189 evas_common_frameq_flush_ready(void)
190 {
191    return;
192 }
193
/* Return the maximum number of frames the frame queue may hold. */
EAPI int
evas_common_frameq_get_frameq_sz(void)
{
   return gframeq.frameq_sz;
}
199
/* Nonzero once evas_common_frameq_begin() has spawned the worker
 * threads; used as the "frame queuing active" flag throughout. */
EAPI int
evas_common_frameq_enabled(void)
{
   return gframeq.initialised;
}
205 #endif
206
207 static RGBA_Pipe *evas_common_pipe_add(RGBA_Pipe *pipe, RGBA_Pipe_Op **op);
208 static void evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op);
209 static void evas_common_pipe_op_free(RGBA_Pipe_Op *op);
210
211 /* utils */
/* Grow @pipe (an inline list of RGBA_Pipe segments, each holding up to
 * PIPE_LEN ops) by one op slot and return the updated list head; *op is
 * pointed at the freshly reserved slot.
 * NOTE(review): on calloc failure this returns NULL while the caller
 * overwrites its pipe pointer with the return value — the existing pipe
 * segments are then leaked.  Fixing this needs a caller-side contract
 * change, so it is only flagged here. */
static RGBA_Pipe *
evas_common_pipe_add(RGBA_Pipe *pipe, RGBA_Pipe_Op **op)
{
   RGBA_Pipe *p;
   int first_pipe = 0;

   if (!pipe)
     {
        first_pipe = 1;
        p = calloc(1, sizeof(RGBA_Pipe));
        if (!p) return NULL;
        pipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(pipe), EINA_INLIST_GET(p));
     }
   /* work on the last segment; start a new one when it is full */
   p = (RGBA_Pipe *)(EINA_INLIST_GET(pipe))->last;
   if (p->op_num == PIPE_LEN)
     {
        p = calloc(1, sizeof(RGBA_Pipe));
        if (!p) return NULL;
        pipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(pipe), EINA_INLIST_GET(p));
     }
   p->op_num++;
   *op = &(p->op[p->op_num - 1]);
   if (first_pipe)
     {
        /* FIXME: PTHREAD init any thread locks etc */
     }
   return pipe;
}
240
241 static void
242 evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op)
243 {
244    memcpy(&(op->context), dc, sizeof(RGBA_Draw_Context));
245    if (op->context.cutout.active > 0)
246      {
247         op->context.cutout.rects = malloc(sizeof(Cutout_Rect) * op->context.cutout.active);
248         memcpy(op->context.cutout.rects, dc->cutout.rects, sizeof(Cutout_Rect) * op->context.cutout.active);
249      }
250    else
251      {
252         op->context.cutout.rects = NULL;
253      }
254 }
255
/* Release per-op resources: the deep-copied cutout rects held in the
 * op's private draw context (see evas_common_pipe_draw_context_copy()). */
static void
evas_common_pipe_op_free(RGBA_Pipe_Op *op)
{
   evas_common_draw_context_apply_clean_cutouts(&op->context.cutout);
}
261
262 #ifdef BUILD_PTHREAD
263 /* main api calls */
/* Pipe worker thread body (non-frame-queuing path).  Lifecycle per
 * iteration: wait on barrier[0] until evas_common_pipe_begin() publishes
 * a slice in thinfo->info, run every queued op against that slice, free
 * the slice info, then meet the main thread at barrier[1] (waited on by
 * evas_common_pipe_flush()). */
static void *
evas_common_pipe_thread(void *data)
{
   Thinfo *thinfo;

// INF("TH [...........");
   thinfo = data;
   for (;;)
     {
        RGBA_Pipe_Thread_Info *info;
        RGBA_Pipe *p;

        /* wait for start signal */
// INF(" TH %i START...", thinfo->thread_num);
        pthread_barrier_wait(&(thinfo->barrier[0]));
        info = thinfo->info;
// if (info)
//   {
//      thinfo->info = NULL;
//      INF(" TH %i GO", thinfo->thread_num);
        EINA_INLIST_FOREACH(EINA_INLIST_GET(info->im->cache_entry.pipe), p)
          {
             int i;

             for (i = 0; i < p->op_num; i++)
               {
                  if (p->op[i].op_func)
                  p->op[i].op_func(info->im, &(p->op[i]), info);
               }
          }
        free(info);
//   }
// INF(" TH %i DONE", thinfo->thread_num);
        /* send finished signal */
        pthread_barrier_wait(&(thinfo->barrier[1]));
     }
   return NULL;
}
302
303 #ifdef EVAS_FRAME_QUEUING
/* Thread-cancellation cleanup handler for the frame-queue worker and
 * post-worker threads: make sure the queue mutex is not left held by a
 * cancelled thread. */
static void
evas_common_frameq_release(void *data)
{
   Evas_FrameQ *frameq;
   Evas_Frameq_Thread_Info *fq_info;
   Thinfo *thinfo;

   thinfo = data;
   fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
   frameq = fq_info->frameq;

   /* This thread may or may not own the mutex.
    * But there's no way to determine the ownership of the mutex, so release it anyway.
    * NOTE(review): unlocking a mutex this thread does not own is
    * undefined behaviour for a default pthread mutex — a deliberate
    * legacy workaround kept as-is; confirm the LK* mutex type tolerates it. */
   LKU(frameq->mutex);
}
320
/* Frame-queue worker thread body.  Loop: claim the first unclaimed frame
 * from the queue (waiting on cond_new when none), draw every surface of
 * that frame by replaying its image's op pipe, push the updated regions
 * to the engine, timestamp the frame and mark it ready (cond_ready wakes
 * the post-worker).  Cancellation is asynchronous; the cleanup handler
 * releases the queue mutex if held. */
static void *
evas_common_frameq_thread(void *data)
{
   Evas_FrameQ *frameq;
   Evas_Frame *frame;
   Evas_Surface *surface;
   RGBA_Pipe *p;
   Thinfo *thinfo;
   Evas_Frameq_Thread_Info *fq_info;
   RGBA_Pipe_Thread_Info p_info;

   thinfo = data;
   fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
   frameq = fq_info->frameq;

   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
   /* install  thread cancelation cleanup handler */
   pthread_cleanup_push(evas_common_frameq_release, data); 

   for (;;)
     {
        frame = NULL;
                
        /* 1. pick a frame to draw */
        LKL(frameq->mutex);
        while(!frame)
          {
             EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), frame)
               {
                  if (!frame->in_process)
                    {
                       /* claim it under the lock so no other worker takes it */
                       frame->in_process = 1;
                       break;
                    }
               }
             if (frame)
               {
                  break;
               }
             pthread_testcancel();
             pthread_cond_wait(&(frameq->cond_new), &(frameq->mutex));
          }
        LKU(frameq->mutex);

        /* 2. draw selected frame */
        EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
          {
             /* whole-surface thread info: this worker draws the full image */
             p_info.im = surface->im;
             p_info.x = 0;
             p_info.y = 0;
             p_info.w = surface->im->cache_entry.w;
             p_info.h = surface->im->cache_entry.h;

             EINA_INLIST_FOREACH(EINA_INLIST_GET(p_info.im->cache_entry.pipe), p)
               {
                  int i;

                  for (i = 0; i < p->op_num; i++)
                    {
                       if (p->op[i].op_func)
                         {
                            p->op[i].op_func(p_info.im, &(p->op[i]), &p_info);
                         }
                    }
               }

             /* push surface out */
             if (! surface->dontpush)
               {
                  frame->output_redraws_next_update_push(frame->data,  
                              surface->im, surface->x, surface->y, surface->w, surface->h);
               }
          }

        // record frame ready time, will be used in post worker thread, evas_common_frameq_thread_post()
        gettimeofday(&frame->ready_time, NULL);

        LKL(frameq->mutex);
        frame->ready = 1;
        pthread_cond_signal(&frameq->cond_ready);
        LKU(frameq->mutex);
     }

   // Remove cleanup handler
   pthread_cleanup_pop(0);
   return NULL;
}
409
410
411 #define INTERVAL_QSIZE 17  // Actual size is 'INTERVAL_QSIZE - 1' because of not using index
412 #define SATISFACTION_THRESHOLD 4 // 4 ms --> 250 FPS
413 #define RESET_RATIO 4   // RESET_RATIO * [Average Ready Gap | get_max_interval()] --> Reset Threshold
414 #define DOUBLE_RESET_TIME_INTERVAL_THRESHOLD 16000 // make it double in case of less 16ms
415 #define RESET_ABSOLUTE_INTERVAL 600000  // 600 msec
416
struct iq_node
{
   long long rt;   /* frame ready time, microseconds */
   long long ri;   /* interval between this ready time and the previous one */
};

/* Circular buffer of recent frame-ready intervals, kept ordered by ready
 * time; one slot stays unused so full and empty states are distinguishable. */
static struct iq_node _IQ[INTERVAL_QSIZE];
static int _IQ_head = 0, _IQ_tail = 0;
static int _IQ_length = 0;                /* number of live entries */
static long long min_ready, max_ready;    /* oldest / newest ready time held */
static long long average_interval;        /* (max_ready - min_ready) / _IQ_length */
428
429 static int 
430 _IQ_next_index(int i)
431 {
432    return (i + 1) % INTERVAL_QSIZE;
433 }
434
435 static int 
436 _IQ_previous_index(int i)
437 {
438    if (--i < 0) i += INTERVAL_QSIZE;
439    return i;
440 }
441
442 static void 
443 _IQ_init(void)
444 {
445    _IQ_length = _IQ_head = _IQ_tail = 0;
446    min_ready = LLONG_MAX, max_ready = LLONG_MIN;
447    average_interval = 0;
448 }
449
450 static int 
451 _IQ_empty(void)
452 {
453    return (_IQ_head == _IQ_tail) ? 1 : 0;
454 }
455
456 static int 
457 _IQ_full(void)
458 {
459    return (_IQ_head == ((_IQ_tail + 1) % INTERVAL_QSIZE)) ? 1 : 0;
460 }
461
/* Insert a (ready_time, interval) sample into the ring, keeping entries
 * sorted by ready time, then recompute the running average interval.
 * Three cases: newest sample (append at tail), oldest sample (prepend at
 * head, swapping intervals with the old head), or an out-of-order sample
 * (shift the tail section right and splice it in, patching the
 * neighbouring intervals).  Silently dropped when the ring is full. */
static void 
_IQ_insert(long long ready_time, long long last_interval)
{
   if (_IQ_full()) return;

   if (_IQ_empty())
     {
        /* first sample: take |last_interval| as its interval */
        if (last_interval < 0)
          {
             last_interval = -last_interval;
          }
        _IQ[_IQ_tail].rt = ready_time;
        _IQ[_IQ_tail].ri = last_interval;
        min_ready = ready_time - last_interval;
        max_ready = ready_time;
        _IQ_tail = _IQ_next_index(_IQ_tail);
        _IQ_length++;
     }
   else
     {
        if (max_ready < ready_time)
          {
             /* newest sample: append at tail */
             _IQ[_IQ_tail].rt = ready_time;
             _IQ[_IQ_tail].ri = ready_time - max_ready;
             _IQ_tail = _IQ_next_index(_IQ_tail);
             _IQ_length++;
             max_ready = ready_time;
          }
        else if (ready_time < min_ready)
          {
             /* oldest sample: prepend at head, handing the old head's
              * interval to the new entry */
             last_interval = _IQ[_IQ_head].ri;
             _IQ[_IQ_head].ri = _IQ[_IQ_head].rt - ready_time;
             _IQ_head = _IQ_previous_index(_IQ_head);
             _IQ[_IQ_head].rt = ready_time;
             _IQ[_IQ_head].ri = last_interval;
             min_ready = ready_time;
             _IQ_length++;
          }
        else
          {
             /* out-of-order sample: find insert position j (first entry
              * with rt >= ready_time; the ring is non-empty so the loop
              * body runs at least once and j is always set) */
             int i, j, k, l = 0;
             for (i = _IQ_head; i != _IQ_tail; i = j)
               {
                  j = _IQ_next_index(i);
                  if (_IQ[j].rt < ready_time)
                    {
                       continue;
                    }
                  break;
               }
             /* shift entries [j, tail) one slot towards the tail */
             for (k = _IQ_tail; k != j; k = l)
               {
                  l = _IQ_previous_index(k);
                  _IQ[k] = _IQ[l];
               }
             /* splice the new sample in at j and patch both intervals */
             i = _IQ_next_index(j);
             _IQ[j].ri -= (_IQ[j].rt - ready_time);
             _IQ[j].rt = ready_time;
             _IQ[i].ri = _IQ[i].rt - ready_time;
             _IQ_tail = _IQ_next_index(_IQ_tail);
             _IQ_length++;
          }
     }
   average_interval = (max_ready - min_ready) / _IQ_length;
}
527
528 static long long 
529 _IQ_delete(void)
530 {
531    struct iq_node oldest;
532   
533    if (_IQ_empty()) return 0;
534    oldest = _IQ[_IQ_head];
535    _IQ_head = (_IQ_head + 1) % INTERVAL_QSIZE;
536    if ((--_IQ_length) == 0)
537      {
538         _IQ_init();
539      }
540    else
541      {
542         min_ready = _IQ[_IQ_head].rt;
543         average_interval = (max_ready - min_ready) / _IQ_length;
544      }
545
546    return oldest.ri;
547 }
548
549 static long long 
550 get_max_interval(void)
551 {
552    int i;
553    long long max = LLONG_MIN;
554
555    for ( i= _IQ_head ; i != _IQ_tail ; i = _IQ_next_index(i))
556      {
557         if (_IQ[i].ri > max)
558           {
559              max = _IQ[i].ri;
560           }
561      }
562
563    return max;
564 }
565
/* Convert a timeval to microseconds; a NULL pointer yields 0. */
static long long 
tv_to_long_long(struct timeval *tv)
{
   if (!tv) return 0;

   return tv->tv_sec * 1000000LL + tv->tv_usec;
}
576
577 static long long
578 evas_common_frameq_schedule_flush_time(int frameq_sz, int thread_no, 
579                                        long long last_ready_time, long long current_ready_time,
580                                        long long last_flush_time, int ready_frames_num,
581                                        int dont_schedule)
582 {
583    // to get each time and to do others
584    long long current_time = 0LL;
585    long long current_ready_interval = 0LL;
586    long long theshold_time = SATISFACTION_THRESHOLD * 1000LL;   // ms -> usec
587    long long reset_time_interval;
588    long long sleep_time = 0LL;
589    long long saved_ready_time, saved_ready_interval;
590    long long time_slept = 0LL;
591    static long long time_lag = 0;
592    struct timeval now;
593    int frameq_full_threshold =0;
594    int need_reset = 0;
595    int need_schedule = 0;
596
597    frameq_full_threshold = frameq_sz -thread_no;        // Qsize - threads#
598    
599    /* 1.5 defer flush time of current frame if need */
600    // in case of the first time, just keep ready time only
601    if (last_ready_time == 0LL)
602      {
603         last_ready_time = current_ready_time;
604      }
605    else
606      {
607         /* 1.5.1 get current ready time & interval */
608         saved_ready_time = current_ready_time;
609         saved_ready_interval = current_ready_interval = current_ready_time - last_ready_time;
610         // compensate a case which current ready time is older than previous one, 
611         // doesn't work on the interval queue
612         if (current_ready_interval < 0)
613           {
614              current_ready_time = last_ready_time;
615              current_ready_interval = 0;
616           }
617
618         /* 1.5.2 get the reset time interval before keeping a new one */
619         if (!_IQ_empty())
620           {
621              reset_time_interval = RESET_RATIO * average_interval;
622              if (average_interval < DOUBLE_RESET_TIME_INTERVAL_THRESHOLD) 
623                {
624                   reset_time_interval *= 2;
625                }
626           }
627
628         /* 1.5.3 reset - if too late, discard all saved interval and start from here */
629         if (current_ready_interval > RESET_ABSOLUTE_INTERVAL)
630           {
631              need_reset = 1;
632           }
633         else if (_IQ_length >= thread_no * 2 && current_ready_interval > reset_time_interval)
634           {
635              need_reset = 1;
636           }
637         else if (_IQ_length >= thread_no && _IQ_length < thread_no * 2 
638              && current_ready_interval > get_max_interval() * RESET_RATIO)
639           {
640              need_reset = 1;
641           }
642        
643         if (need_reset)
644           {
645              _IQ_init();
646           }
647         else
648           {
649              /* 1.5.4 enqueue - keep a new interval for next average interval */
650              if (_IQ_full())
651                {
652                   _IQ_delete();
653                }
654              _IQ_insert(saved_ready_time, saved_ready_interval);
655
656              /* 1.5.5 schedule - if faster than average interval, figure out sleep time to meet it */
657              if (!dont_schedule)
658                {
659                   need_schedule = 0;
660                   sleep_time = 0;
661                   if (_IQ_length >= thread_no * 2 && average_interval > theshold_time)
662                     {
663                        need_schedule = 1;
664                     }
665                   // compensate the case that postworker blocks the workers from getting a new fresh frame
666                   // It's actually occurred when during the wait time of postworker, the frame queue is full
667                   // Consequently check the number of currently ready frames and apply some time drop to average time according to the number
668                   if (ready_frames_num >= frameq_full_threshold)
669                     {
670                        need_schedule = 0;
671                     }
672                   if (need_schedule)
673                     {
674                        gettimeofday(&now, NULL);
675                        current_time = tv_to_long_long(&now);
676                        time_lag += (current_time - last_flush_time);
677                        sleep_time = (average_interval < time_lag) ? 0 : (average_interval - time_lag);
678                     }
679                }
680
681              /* 1.5.6 sleep - actually sleep and get over-slept time (time_lag) for next frame */
682              if (sleep_time > 0)
683                {
684                   sleep_time = sleep_time * 9 / 10;
685                   usleep((unsigned int)sleep_time);
686                   gettimeofday(&now, NULL);
687                   time_slept = tv_to_long_long(&now) - current_time;
688                   time_lag = time_slept - sleep_time;
689                }
690              else
691                {
692                   time_lag = 0;
693                }
694           }
695         last_ready_time = current_ready_time;
696      }
697
698    return last_ready_time;
699 }
700
/* Post-worker thread body.  In FIFO order: wait until the head frame of
 * the queue is marked ready, collect its surfaces into pending_writes,
 * optionally delay the flush via the interval-queue pacing logic, invoke
 * the engine's flush callbacks, then dequeue and free the frame
 * (broadcasting cond_done for producers blocked on a full queue).
 * Cancellation is deferred; the cleanup handler releases the mutex. */
static void *
evas_common_frameq_thread_post(void *data)
{
   Evas_FrameQ *frameq;
   Evas_Frame *frame;
   Evas_Surface *surface;
   Thinfo *thinfo;
   Evas_Frameq_Thread_Info *fq_info;
   Eina_List   *pending_writes = NULL;
   Eina_List   *prev_pending_writes = NULL;

   long long last_ready_time = 0LL;
   long long current_ready_time;
   Evas_Frame *temp_frame = NULL;
   int ready_frames_num;
   long long last_flush_time = 0LL;
   struct timeval now;
   int dont_schedule = 0;

   thinfo = data;
   fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
   frameq = fq_info->frameq;

   pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
   pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
   /* install  thread cancelation cleanup handler */
   pthread_cleanup_push(evas_common_frameq_release, data); 

   _IQ_init();

   for (;;)
     {
        /* 1. wait until the head frame (flush order is FIFO) is ready */
        LKL(frameq->mutex);
        while(!frameq->frames || !frameq->frames->ready)
          {
             pthread_cond_wait(&(frameq->cond_ready), &(frameq->mutex));
          }
        frame = frameq->frames;

        /* 1.5. prepare to schedule flush time */
        current_ready_time = tv_to_long_long(&frame->ready_time);
        ready_frames_num = 0;
        EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
          {
             if (temp_frame->ready == 1)
               {
                  ready_frames_num++;
               }
          }
        dont_schedule = (frame->dont_schedule)?1:0;
        LKU(frameq->mutex);

        /* 2. generate pending_writes */
        EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
         {
            evas_common_pipe_flush(surface->im);
            if (! surface->dontpush)
              {
                 pending_writes = eina_list_append(pending_writes, surface->im);
              }
         }

        /* 2.5. schedule flush time */
        last_ready_time = evas_common_frameq_schedule_flush_time(
                                       frameq->frameq_sz, frameq->thread_num, 
                                       last_ready_time, current_ready_time,
                                       last_flush_time, ready_frames_num, dont_schedule);

        /* 3. flush redraws */
        frame->output_set_priv(frame->data, pending_writes, prev_pending_writes);
        frame->output_flush(frame->data);
        gettimeofday(&now, NULL);
        // keep as the last flush time
        last_flush_time = now.tv_sec * 1000000LL + now.tv_usec;

        prev_pending_writes = pending_writes;
        pending_writes = NULL;

        /* 4. remove this frame from the frame queue */
        LKL(frameq->mutex);
        frameq->frames = 
            (Evas_Frame *)eina_inlist_remove(EINA_INLIST_GET(frameq->frames), 
                  EINA_INLIST_GET(frame));

        LKU(frameq->mutex);
        pthread_cond_broadcast(&frameq->cond_done);
        evas_common_frame_dealloc(frame);
     }

   // Remove cleanup handler
   pthread_cleanup_pop(0);
   return NULL;
}
795
796 #endif /* EVAS_FRAME_QUEUING */
797 #endif
798
#ifdef BUILD_PTHREAD
static int               thread_num = 0;  /* pipe worker count; 1 disables threading */
static Thinfo            thinfo[TH_MAX];  /* per-worker state (slice info, barriers) */
static pthread_barrier_t thbarrier[2];    /* [0] start workers, [1] workers done */
#endif
804
/* Kick off threaded rendering of @im's op pipe: slice the image into one
 * horizontal band per worker (or one interleaved line set per worker
 * under EVAS_SLI), publish a slice descriptor to each worker's thinfo,
 * and release them via the start barrier.  Workers are later re-joined
 * in evas_common_pipe_flush() on thbarrier[1].
 * No-op without pthreads, with a single thread, with an empty pipe, or
 * when frame queuing takes over rendering entirely.
 * NOTE(review): the per-slice calloc is unchecked; a failure here would
 * crash — fixing it safely requires rethinking the barrier accounting. */
static void
evas_common_pipe_begin(RGBA_Image *im)
{
#ifdef BUILD_PTHREAD
   int i, y, h;

#ifdef EVAS_FRAME_QUEUING
   return;
#endif

   if (!im->cache_entry.pipe) return;
   if (thread_num == 1) return;
   y = 0;
   h = im->cache_entry.h / thread_num;
   if (h < 1) h = 1;
   for (i = 0; i < thread_num; i++)
     {
        RGBA_Pipe_Thread_Info *info;

//      if (y >= im->cache_entry.h) break;
        info = calloc(1, sizeof(RGBA_Pipe_Thread_Info));
        info->im = im;
#ifdef EVAS_SLI
        /* scan-line interleave: worker i handles every thread_num-th line */
        info->x = 0;
        info->w = im->cache_entry.w;
        info->y = i;
        info->h = thread_num;
#else
        /* contiguous band; the last worker absorbs the rounding remainder */
        info->x = 0;
        info->y = y;
        info->w = im->cache_entry.w;
        if (i == (thread_num - 1))
          {
             info->h = im->cache_entry.h - y;
          }
        else
          {
             info->h = h;
          }
        y += info->h;
#endif
        thinfo[i].info = info;
     }
   /* tell worker threads to start */
   pthread_barrier_wait(&(thbarrier[0]));
#endif
}
852
853 #ifdef EVAS_FRAME_QUEUING
/* Spawn the frame-queue threads on first call: one detached worker per
 * CPU running evas_common_frameq_thread(), plus one detached post-worker
 * running evas_common_frameq_thread_post().  Subsequent calls are no-ops
 * (gframeq.initialised).  CPU affinity pinning exists but is compiled
 * out (set_cpu_affinity stays 0). */
EAPI void
evas_common_frameq_begin(void)
{
#ifdef BUILD_PTHREAD
   int i;
   Evas_Frameq_Thread_Info *fp_info;
   pthread_attr_t attr;
   cpu_set_t cpu;

   if (!gframeq.initialised)
     {
        int cpunum, set_cpu_affinity = 0;

        cpunum = eina_cpu_count();
        gframeq.thread_num = cpunum;
        gframeq.frameq_sz = cpunum * FRAMEQ_SZ_PER_THREAD;

        for (i = 0; i < gframeq.thread_num; i++)
          {

             fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
             fp_info->frameq = &gframeq;

             gframeq.thinfo[i].thread_num = i;
             gframeq.thinfo[i].fq_info = fp_info;

             pthread_attr_init(&attr);
             if (set_cpu_affinity)
               {
                  CPU_ZERO(&cpu);
                  CPU_SET((i+1) % cpunum, &cpu);
                  pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
               }

             pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
                     evas_common_frameq_thread, &(gframeq.thinfo[i]));

             pthread_attr_destroy(&attr);
             pthread_detach(gframeq.thinfo[i].thread_id);
          }

          /* post-worker occupies slot i == thread_num after the loop.
           * NOTE(review): assumes gframeq.thinfo has at least
           * thread_num + 1 slots — confirm against the Evas_FrameQ
           * declaration. */
          {
             fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
             fp_info->frameq = &gframeq;

             gframeq.thinfo[i].thread_num = i;
             gframeq.thinfo[i].fq_info = fp_info;

             pthread_attr_init(&attr);
             if (set_cpu_affinity)
               {
                  CPU_ZERO(&cpu);
                  CPU_SET((i+1) % cpunum, &cpu);
                  pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
               }

             pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
                     evas_common_frameq_thread_post, &(gframeq.thinfo[i]));
             pthread_attr_destroy(&attr);
             pthread_detach(gframeq.thinfo[i].thread_id);
          }
        gframeq.initialised = 1;        // now worker threads are created.

        INF("initialised");
        DBG("%d cpus, set_cpu_affinity=%d, frameq_sz=%d",
            cpunum, set_cpu_affinity, gframeq.frameq_sz);
     }
#endif /* BUILD_PTHREAD */
}
923
/* Request shutdown of all frame-queue threads: cancel each worker plus
 * the post-worker, then signal the condition variables they may be
 * blocked on so each reaches its cancellation point and runs the cleanup
 * handler.  Threads were created detached, so no join is needed. */
EAPI void
evas_common_frameq_finish(void)
{
   int i;
   
   /* 1. cancel all worker threads */
   for (i = 0; i < gframeq.thread_num; i++)
     {
        pthread_cancel(gframeq.thinfo[i].thread_id);
     }
     /* cancel post-worker thread — after the loop i == thread_num,
      * the slot evas_common_frameq_begin() gave the post-worker */
     pthread_cancel(gframeq.thinfo[i].thread_id);

   /* 2. send signal to worker threads so that they enter to the thread cancelation cleanup handler */
   for (i = 0; i < gframeq.thread_num; i++)
     {
        pthread_cond_signal(&(gframeq.cond_new));
     }
   // send signal to post-worker thread
   pthread_cond_signal(&(gframeq.cond_ready));

   /* all the workers were created and detached before
    *  so don't need to join them here.
    */

}
950
951 #endif /* EVAS_FRAME_QUEUING */
952
/* Complete rendering of @im's op pipe and free it.  In the threaded,
 * non-frame-queuing build this waits on the "done" barrier for the
 * workers started by evas_common_pipe_begin(); otherwise the ops are
 * executed inline here.  Under EVAS_FRAME_QUEUING the ops were already
 * run by the frame-queue workers, so only the cleanup remains. */
EAPI void
evas_common_pipe_flush(RGBA_Image *im)
{
   if (!im->cache_entry.pipe) return;
#ifndef EVAS_FRAME_QUEUING
#ifdef BUILD_PTHREAD
   if (thread_num > 1)
     {
        /* sync worker threads */
        pthread_barrier_wait(&(thbarrier[1]));
     }
   else
#endif
     {
       RGBA_Pipe *p;
       int i;

        /* process pipe - single threaded fallback */
        for (p = im->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
          {
             for (i = 0; i < p->op_num; i++)
               {
                  if (p->op[i].op_func)
                    {
                       p->op[i].op_func(im, &(p->op[i]), NULL);
                    }
               }
          }
     }
#endif /* !EVAS_FRAME_QUEUING */
   evas_common_cpu_end_opt();
   evas_common_pipe_free(im);
}
986
987 EAPI void
988 evas_common_pipe_free(RGBA_Image *im)
989 {
990
991    RGBA_Pipe *p;
992    int i;
993
994    if (!im->cache_entry.pipe) return;
995    /* FIXME: PTHREAD join all threads here (if not finished) */
996
997    /* free pipe */
998    while (im->cache_entry.pipe)
999      {
1000         p = im->cache_entry.pipe;
1001         for (i = 0; i < p->op_num; i++)
1002           {
1003              if (p->op[i].free_func)
1004                {
1005                   p->op[i].free_func(&(p->op[i]));
1006                }
1007           }
1008         im->cache_entry.pipe = (RGBA_Pipe *)eina_inlist_remove(EINA_INLIST_GET(im->cache_entry.pipe), EINA_INLIST_GET(p));
1009         free(p);
1010      }
1011 }
1012
1013
1014
1015 /* draw ops */
1016 /**************** RECT ******************/
/* Pipe-op callback: draw the queued rectangle into @dst.
 * @info is non-NULL when called from a worker thread; it carries the
 * sub-region (or scanline interleave parameters under EVAS_SLI) owned
 * by that thread, so the op's context is copied and clipped to keep
 * each thread rendering a disjoint part of the surface. */
static void
evas_common_pipe_rectangle_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
        RGBA_Draw_Context context;

        /* private copy so the shared op context is left untouched */
        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
        evas_common_rectangle_draw(dst, &(context),
               op->op.rect.x, op->op.rect.y,
               op->op.rect.w, op->op.rect.h);
     }
   else
     {
        /* single-threaded path: draw with the op's own context */
        evas_common_rectangle_draw(dst, &(op->context),
               op->op.rect.x, op->op.rect.y,
               op->op.rect.w, op->op.rect.h);
     }
}
1041
1042 EAPI void
1043 evas_common_pipe_rectangle_draw(RGBA_Image *dst, RGBA_Draw_Context *dc, int x, int y, int w, int h)
1044 {
1045    RGBA_Pipe_Op *op;
1046
1047    if ((w < 1) || (h < 1)) return;
1048    dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1049    if (!dst->cache_entry.pipe) return;
1050    op->op.rect.x = x;
1051    op->op.rect.y = y;
1052    op->op.rect.w = w;
1053    op->op.rect.h = h;
1054    op->op_func = evas_common_pipe_rectangle_draw_do;
1055    op->free_func = evas_common_pipe_op_free;
1056    evas_common_pipe_draw_context_copy(dc, op);
1057 }
1058
1059 /**************** LINE ******************/
/* Pipe-op callback: draw the queued line into @dst.  Same threading
 * pattern as evas_common_pipe_rectangle_draw_do(): a non-NULL @info
 * means a worker thread owns only a clipped region (or scanlines
 * under EVAS_SLI). */
static void
evas_common_pipe_line_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
        RGBA_Draw_Context context;

        /* private copy so the shared op context is left untouched */
        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
        evas_common_line_draw(dst, &(context),
               op->op.line.x0, op->op.line.y0,
               op->op.line.x1, op->op.line.y1);
     }
   else
     {
        evas_common_line_draw(dst, &(op->context),
               op->op.line.x0, op->op.line.y0,
               op->op.line.x1, op->op.line.y1);
     }
}
1084
1085 EAPI void
1086 evas_common_pipe_line_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1087                            int x0, int y0, int x1, int y1)
1088 {
1089    RGBA_Pipe_Op *op;
1090
1091    dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1092    if (!dst->cache_entry.pipe) return;
1093    op->op.line.x0 = x0;
1094    op->op.line.y0 = y0;
1095    op->op.line.x1 = x1;
1096    op->op.line.y1 = y1;
1097    op->op_func = evas_common_pipe_line_draw_do;
1098    op->free_func = evas_common_pipe_op_free;
1099    evas_common_pipe_draw_context_copy(dc, op);
1100 }
1101
1102 /**************** POLY ******************/
1103 static void
1104 evas_common_pipe_op_poly_free(RGBA_Pipe_Op *op)
1105 {
1106    RGBA_Polygon_Point *p;
1107
1108    while (op->op.poly.points)
1109      {
1110         p = op->op.poly.points;
1111         op->op.poly.points = (RGBA_Polygon_Point *)eina_inlist_remove(EINA_INLIST_GET(op->op.poly.points),
1112                                                       EINA_INLIST_GET(p));
1113         free(p);
1114      }
1115    evas_common_pipe_op_free(op);
1116 }
1117
/* Pipe-op callback: draw the queued polygon into @dst.  Same threading
 * pattern as evas_common_pipe_rectangle_draw_do().  The (0, 0) offset
 * is correct: the x/y offset was already baked into the copied points
 * by evas_common_pipe_poly_draw(). */
static void
evas_common_pipe_poly_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
        RGBA_Draw_Context context;

        /* private copy so the shared op context is left untouched */
        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
        evas_common_polygon_draw(dst, &(context),
                     op->op.poly.points, 0, 0);
     }
   else
     {
        evas_common_polygon_draw(dst, &(op->context),
                     op->op.poly.points, 0, 0);
     }
}
1140
/* Queue a polygon draw on @dst's render pipe.  The point list is
 * deep-copied with the (x, y) offset applied, so the caller keeps
 * ownership of @points; the copy is freed by
 * evas_common_pipe_op_poly_free().  NOTE(review): a failed calloc
 * silently drops that single point rather than aborting the op. */
EAPI void
evas_common_pipe_poly_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
                           RGBA_Polygon_Point *points, int x, int y)
{
   RGBA_Pipe_Op *op;
   RGBA_Polygon_Point *pts = NULL, *p, *pp;

   if (!points) return;
   dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
   if (!dst->cache_entry.pipe) return;
   /* FIXME: copy points - maybe we should refcount? */
   for (p = points; p; p = (RGBA_Polygon_Point *)(EINA_INLIST_GET(p))->next)
     {
        pp = calloc(1, sizeof(RGBA_Polygon_Point));
        if (pp)
          {
             pp->x = p->x + x;
             pp->y = p->y + y;
             pts = (RGBA_Polygon_Point *)eina_inlist_append(EINA_INLIST_GET(pts), EINA_INLIST_GET(pp));
          }
     }
   op->op.poly.points = pts;
   op->op_func = evas_common_pipe_poly_draw_do;
   op->free_func = evas_common_pipe_op_poly_free;
   evas_common_pipe_draw_context_copy(dc, op);
}
1167
1168 /**************** TEXT ******************/
/* Free a text op: drop the font reference, the shared text props and
 * the copied string.  Under EVAS_FRAME_QUEUING the font is not freed
 * here; instead the "deleted" counter is bumped and any thread blocked
 * in evas_common_pipe_op_text_flush() is woken. */
static void
evas_common_pipe_op_text_free(RGBA_Pipe_Op *op)
{
#ifdef EVAS_FRAME_QUEUING
   LKL(op->op.text.font->ref_fq_del);
   op->op.text.font->ref_fq[1]++;
   LKU(op->op.text.font->ref_fq_del);
   pthread_cond_signal(&(op->op.text.font->cond_fq_del));
#else
   evas_common_font_free(op->op.text.font);
#endif
   evas_common_text_props_content_unref(&(op->op.text.intl_props));
   free(op->op.text.text);
   evas_common_pipe_op_free(op);
}
1184
#ifdef EVAS_FRAME_QUEUING
/* Block until every queued text op referencing @fn has been freed,
 * i.e. until the add/del reference counters converge.  Woken by the
 * pthread_cond_signal() in evas_common_pipe_op_text_free().  No-op
 * when frame queuing is not enabled at runtime. */
EAPI void
evas_common_pipe_op_text_flush(RGBA_Font *fn)
{
   if (! evas_common_frameq_enabled())
      return;

   /* hold the "add" lock too so no new reference can appear while we
    * wait for the counters to converge */
   LKL(fn->ref_fq_add);
   LKL(fn->ref_fq_del);

   while (fn->ref_fq[0] != fn->ref_fq[1])
      pthread_cond_wait(&(fn->cond_fq_del), &(fn->ref_fq_del));

   LKU(fn->ref_fq_del);
   LKU(fn->ref_fq_add);
}
#endif
1203
/* Pipe-op callback: draw the queued text run into @dst.  Same
 * threading pattern as evas_common_pipe_rectangle_draw_do(). */
static void
evas_common_pipe_text_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
        RGBA_Draw_Context context;

        /* private copy so the shared op context is left untouched */
        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif
        evas_common_font_draw(dst, &(context),
                  op->op.text.font, op->op.text.x, op->op.text.y,
                  op->op.text.text, &op->op.text.intl_props);
     }
   else
     {
        evas_common_font_draw(dst, &(op->context),
                  op->op.text.font, op->op.text.x, op->op.text.y,
                  op->op.text.text, &op->op.text.intl_props);
     }
}
1228
/* Queue a text draw on @dst's render pipe.  The unicode string is
 * duplicated and the text props copied+referenced so the caller's
 * copies may go away immediately.  A reference is taken on @fn and
 * released later by evas_common_pipe_op_text_free(). */
EAPI void
evas_common_pipe_text_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
               RGBA_Font *fn, int x, int y, const Eina_Unicode *text, const Evas_Text_Props *intl_props)
{
   RGBA_Pipe_Op *op;

   if ((!fn) || (!text)) return;
   dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
   if (!dst->cache_entry.pipe) return;
   op->op.text.x = x;
   op->op.text.y = y;
   /* private copy -- freed in evas_common_pipe_op_text_free() */
   op->op.text.text = eina_unicode_strdup(text);
   evas_common_text_props_content_copy_and_ref(&(op->op.text.intl_props),
         intl_props);
#ifdef EVAS_FRAME_QUEUING
   /* frameq tracks font lifetime with an add/del counter pair instead
    * of the plain refcount */
   LKL(fn->ref_fq_add);
   fn->ref_fq[0]++;
   LKU(fn->ref_fq_add);
#else
   fn->references++;
#endif
   op->op.text.font = fn;
   op->op_func = evas_common_pipe_text_draw_do;
   op->free_func = evas_common_pipe_op_text_free;
   evas_common_pipe_draw_context_copy(dc, op);
}
1255
1256 /**************** IMAGE *****************/
/* Free an image op: drop the reference on its source image taken in
 * evas_common_pipe_image_draw().  Under EVAS_FRAME_QUEUING only the
 * "deleted" counter is bumped and any waiter in
 * evas_common_pipe_op_image_flush() is signalled; otherwise the plain
 * refcount is decremented and the image dropped at zero. */
static void
evas_common_pipe_op_image_free(RGBA_Pipe_Op *op)
{
#ifdef EVAS_FRAME_QUEUING
   LKL(op->op.image.src->cache_entry.ref_fq_del);
   op->op.image.src->cache_entry.ref_fq[1]++;
   LKU(op->op.image.src->cache_entry.ref_fq_del);
   pthread_cond_signal(&(op->op.image.src->cache_entry.cond_fq_del)); 
#else
   op->op.image.src->ref--;
   if (op->op.image.src->ref == 0)
     {
        evas_cache_image_drop(&op->op.image.src->cache_entry);
     }
#endif
   evas_common_pipe_op_free(op);
}
1274
#ifdef EVAS_FRAME_QUEUING
/* Block until every queued op referencing @im has been freed, i.e.
 * until the add/del reference counters converge.  Woken by
 * evas_common_pipe_op_image_free().  No-op when frame queuing is not
 * enabled at runtime. */
EAPI void
evas_common_pipe_op_image_flush(RGBA_Image *im)
{
   if (! evas_common_frameq_enabled())
      return;
   
   /* hold the "add" lock too so no new reference appears while waiting */
   LKL(im->cache_entry.ref_fq_add);
   LKL(im->cache_entry.ref_fq_del);

   while (im->cache_entry.ref_fq[0] != im->cache_entry.ref_fq[1])
      pthread_cond_wait(&(im->cache_entry.cond_fq_del), &(im->cache_entry.ref_fq_del));

   LKU(im->cache_entry.ref_fq_del);
   LKU(im->cache_entry.ref_fq_add);
}
#endif
1292
/* Pipe-op callback: scale/blit the op's source image region into @dst.
 * Same threading pattern as evas_common_pipe_rectangle_draw_do().
 * With SCALECACHE the scale-cache path handles both smooth and sampled
 * scaling; otherwise the smooth flag selects between the smooth and
 * sample scalers directly. */
static void
evas_common_pipe_image_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
        RGBA_Draw_Context context;

        /* private copy so the shared op context is left untouched */
        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif

#ifdef SCALECACHE
        evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
                                             dst, &(context),
                                             op->op.image.smooth,
                                             op->op.image.sx,
                                             op->op.image.sy,
                                             op->op.image.sw,
                                             op->op.image.sh,
                                             op->op.image.dx,
                                             op->op.image.dy,
                                             op->op.image.dw,
                                             op->op.image.dh);
#else
        if (op->op.image.smooth)
          {
             evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
                           dst, &(context),
                           op->op.image.sx,
                           op->op.image.sy,
                           op->op.image.sw,
                           op->op.image.sh,
                           op->op.image.dx,
                           op->op.image.dy,
                           op->op.image.dw,
                           op->op.image.dh);
          }
        else
          {
             evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
                           dst, &(context),
                           op->op.image.sx,
                           op->op.image.sy,
                           op->op.image.sw,
                           op->op.image.sh,
                           op->op.image.dx,
                           op->op.image.dy,
                           op->op.image.dw,
                           op->op.image.dh);
                    }
#endif
     }
   else
     {
#ifdef SCALECACHE
        evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
                                             dst, &(op->context),
                                             op->op.image.smooth,
                                             op->op.image.sx,
                                             op->op.image.sy,
                                             op->op.image.sw,
                                             op->op.image.sh,
                                             op->op.image.dx,
                                             op->op.image.dy,
                                             op->op.image.dw,
                                             op->op.image.dh);
#else
        if (op->op.image.smooth)
          {
             evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
                                 dst, &(op->context),
                                 op->op.image.sx,
                                 op->op.image.sy,
                                 op->op.image.sw,
                                 op->op.image.sh,
                                 op->op.image.dx,
                                 op->op.image.dy,
                                 op->op.image.dw,
                                 op->op.image.dh);
          }
        else
         {
             evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
                                 dst, &(op->context),
                                 op->op.image.sx,
                                 op->op.image.sy,
                                 op->op.image.sw,
                                 op->op.image.sh,
                                 op->op.image.dx,
                                 op->op.image.dy,
                                 op->op.image.dw,
                                 op->op.image.dh);
           }
#endif
     }
}
1392
/* Queue a scaled blit of @src region (sx,sy,sw,sh) onto @dst region
 * (dx,dy,dw,dh).  Takes a reference on @src (released later by
 * evas_common_pipe_op_image_free()) and schedules the pixel-data
 * load so it is ready when the pipe runs. */
EAPI void
evas_common_pipe_image_draw(RGBA_Image *src, RGBA_Image *dst,
                           RGBA_Draw_Context *dc, int smooth,
                           int src_region_x, int src_region_y,
                           int src_region_w, int src_region_h,
                           int dst_region_x, int dst_region_y,
                           int dst_region_w, int dst_region_h)
{
   RGBA_Pipe_Op *op;

   if (!src) return;
//   evas_common_pipe_flush(src);
   dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
   if (!dst->cache_entry.pipe) return;
   op->op.image.smooth = smooth;
   op->op.image.sx = src_region_x;
   op->op.image.sy = src_region_y;
   op->op.image.sw = src_region_w;
   op->op.image.sh = src_region_h;
   op->op.image.dx = dst_region_x;
   op->op.image.dy = dst_region_y;
   op->op.image.dw = dst_region_w;
   op->op.image.dh = dst_region_h;
#ifdef EVAS_FRAME_QUEUING
   /* frameq tracks image lifetime with an add/del counter pair */
   LKL(src->cache_entry.ref_fq_add);
   src->cache_entry.ref_fq[0]++;
   LKU(src->cache_entry.ref_fq_add);
#else
   src->ref++;
#endif
   op->op.image.src = src;
   op->op_func = evas_common_pipe_image_draw_do;
   op->free_func = evas_common_pipe_op_image_free;
   evas_common_pipe_draw_context_copy(dc, op);

#ifdef EVAS_FRAME_QUEUING
   /* load every src image synchronously here.
    * frameq already utilizes all cpu cores via its worker threads,
    * so extra loader threads and barrier waits would bring no benefit;
    * therefore do not use the loader thread pool.
    */
   if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
      evas_cache_image_load_data(&src->cache_entry);
   evas_common_image_colorspace_normalize(src);
#else
   evas_common_pipe_image_load(src);
#endif
}
1441
1442 static void
1443 evas_common_pipe_op_map4_free(RGBA_Pipe_Op *op)
1444 {
1445 #ifdef EVAS_FRAME_QUEUING
1446    LKL(op->op.image.src->cache_entry.ref_fq_del);
1447    op->op.image.src->cache_entry.ref_fq[1]++;
1448    LKU(op->op.image.src->cache_entry.ref_fq_del);
1449 #else
1450    op->op.map4.src->ref--;
1451    if (op->op.map4.src->ref == 0)
1452      evas_cache_image_drop(&op->op.map4.src->cache_entry);
1453 #endif
1454    free(op->op.map4.p);
1455    evas_common_pipe_op_free(op);
1456 }
1457
/* Pipe-op callback: texture-map the op's source image onto @dst
 * through the 4 stored map points.  Same threading pattern as
 * evas_common_pipe_rectangle_draw_do(). */
static void
evas_common_pipe_map4_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
{
   if (info)
     {
        RGBA_Draw_Context context;

        /* private copy so the shared op context is left untouched */
        memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
#ifdef EVAS_SLI
        evas_common_draw_context_set_sli(&(context), info->y, info->h);
#else
        evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
#endif

        evas_common_map4_rgba(op->op.map4.src, dst,
                              &context, op->op.map4.p,
                              op->op.map4.smooth, op->op.map4.level);
     }
   else
     {
        evas_common_map4_rgba(op->op.map4.src, dst,
                              &(op->context), op->op.map4.p,
                              op->op.map4.smooth, op->op.map4.level);
     }
}
1483
1484 EAPI void
1485 evas_common_pipe_map4_draw(RGBA_Image *src, RGBA_Image *dst,
1486                            RGBA_Draw_Context *dc, RGBA_Map_Point *p,
1487                            int smooth, int level)
1488 {
1489    RGBA_Pipe_Op *op;
1490    RGBA_Map_Point *pts_copy;
1491    int i;
1492
1493    if (!src) return;
1494    pts_copy = malloc(sizeof (RGBA_Map_Point) * 4);
1495    if (!pts_copy) return;
1496    dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1497    if (!dst->cache_entry.pipe) 
1498      {
1499        free(pts_copy);
1500        return; 
1501      }
1502
1503    for (i = 0; i < 4; ++i)
1504      pts_copy[i] = p[i];
1505
1506    op->op.map4.smooth = smooth;
1507    op->op.map4.level = level;
1508 #ifdef EVAS_FRAME_QUEUING
1509    LKL(src->cache_entry.ref_fq_add);
1510    src->cache_entry.ref_fq[0]++;
1511    LKU(src->cache_entry.ref_fq_add);
1512 #else
1513    src->ref++;
1514 #endif
1515    op->op.map4.src = src;
1516    op->op.map4.p = pts_copy;
1517    op->op_func = evas_common_pipe_map4_draw_do;
1518    op->free_func = evas_common_pipe_op_map4_free;
1519    evas_common_pipe_draw_context_copy(dc, op);
1520
1521 #ifdef EVAS_FRAME_QUEUING
1522    /* laod every src image here.
1523     * frameq utilize all cpu cores already by worker threads
1524     * so another threads and barrier waiting can't be of any benefit.
1525     * therefore, not instantiate loader threads.
1526     */
1527    if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1528       evas_cache_image_load_data(&src->cache_entry);
1529    evas_common_image_colorspace_normalize(src);
1530 #else
1531    evas_common_pipe_image_load(src);
1532 #endif
1533 }
1534
/* Render @root's pipe.  Map4/image ops sample pixels from other
 * images, so any source image that itself has a pending pipe is
 * rendered recursively first; only then is @root's own pipe kicked
 * off and flushed. */
static void
evas_common_pipe_map4_render(RGBA_Image *root)
{
  RGBA_Pipe *p;
  int i;

  /* Map imply that we need to process them recursively first. */
  for (p = root->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
    {
      for (i = 0; i < p->op_num; i++) 
        {
          if (p->op[i].op_func == evas_common_pipe_map4_draw_do)
            {
              if (p->op[i].op.map4.src->cache_entry.pipe)
                evas_common_pipe_map4_render(p->op[i].op.map4.src);
            }
          else if (p->op[i].op_func == evas_common_pipe_image_draw_do)
            {
              if (p->op[i].op.image.src->cache_entry.pipe)
                evas_common_pipe_map4_render(p->op[i].op.image.src);
            }
        }
    }

  evas_common_pipe_begin(root);
  evas_common_pipe_flush(root);
}
1562
#ifdef BUILD_PTHREAD
/* Images queued for asynchronous pixel loading (popped under
 * task_mutext by the loader workers in evas_common_pipe_load()). */
static Eina_List *task = NULL;
/* Per-thread state for the loader worker pool. */
static Thinfo task_thinfo[TH_MAX];
/* [0] releases the loaders, [1] signals they have all finished. */
static pthread_barrier_t task_thbarrier[2];
/* NOTE(review): "mutext" looks like a typo for "mutex", but the
 * identifier is referenced elsewhere in this file, so it is kept. */
static LK(task_mutext) = PTHREAD_MUTEX_INITIALIZER;
#endif
1569
#ifdef BUILD_PTHREAD
/* Loader worker main loop.  Each cycle is framed by the two task
 * barriers: wait for the "go" barrier, drain the shared task list
 * (entries popped under task_mutext so workers don't race), load and
 * colorspace-normalize each image, then meet the "done" barrier.
 * Runs forever; per the note above, the workers are detached and
 * never joined. */
static void*
evas_common_pipe_load(void *data)
{
  Thinfo *thinfo;

  thinfo = data;
  for (;;)
    {
      /* wait for start signal */
      pthread_barrier_wait(&(thinfo->barrier[0]));

      /* NOTE(review): the loop condition reads "task" without the
       * lock; presumably benign because only these workers mutate it
       * between the barriers -- verify before relying on it */
      while (task)
        {
          RGBA_Image *im = NULL;

          LKL(task_mutext);
          im = eina_list_data_get(task);
          task = eina_list_remove_list(task, task);
          LKU(task_mutext);

          if (im)
            {
              if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
                evas_cache_image_load_data(&im->cache_entry);
              evas_common_image_colorspace_normalize(im);

              im->flags &= ~RGBA_IMAGE_TODO_LOAD;
            }
        }

      /* send finished signal */
      pthread_barrier_wait(&(thinfo->barrier[1]));
    }

  return NULL;
}
#endif
1608
/* NOTE(review): bval appears unused in this part of the file --
 * candidate for removal; kept here to avoid a behavior/linkage change. */
static volatile int bval = 0;
1610
/* Run one cycle of the loader worker pool: release the workers at
 * task barrier[0], then wait at task barrier[1] until they have
 * drained the task list (see evas_common_pipe_load()).  No-op when
 * built without pthreads. */
static void
evas_common_pipe_image_load_do(void)
{
#ifdef BUILD_PTHREAD
  /* Notify worker thread. */
  pthread_barrier_wait(&(task_thbarrier[0]));

  /* sync worker threads */
  pthread_barrier_wait(&(task_thbarrier[1]));
#endif
}
1622
/* Lazily create the render and loader worker pools, one thread per
 * CPU, each pinned to a core and coordinated through a pair of
 * start/done barriers (the barrier count is thread_num + 1: workers
 * plus the caller).  Returns EINA_TRUE when multi-threaded rendering
 * is usable, EINA_FALSE on a single core or a non-pthread build --
 * the pools are still created on a single core so later barrier
 * waits don't deadlock. */
static Eina_Bool
evas_common_pipe_init(void)
{
#ifdef BUILD_PTHREAD
   if (thread_num == 0)
     {
        int cpunum;
        int i;

        cpunum = eina_cpu_count();
        thread_num = cpunum;
// on a single cpu we still want this initted.. otherwise we block forever
// waiting on pthread barriers for async rendering on a single core!
//      if (thread_num == 1) return EINA_FALSE;

        pthread_barrier_init(&(thbarrier[0]), NULL, thread_num + 1);
        pthread_barrier_init(&(thbarrier[1]), NULL, thread_num + 1);
        for (i = 0; i < thread_num; i++)
          {
             pthread_attr_t attr;
             cpu_set_t cpu;

             pthread_attr_init(&attr);
             CPU_ZERO(&cpu);
             CPU_SET(i % cpunum, &cpu);
             /* pin each render worker to a distinct core */
             pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
             thinfo[i].thread_num = i;
             thinfo[i].info = NULL;
             thinfo[i].barrier = thbarrier;
             /* setup initial locks */
             pthread_create(&(thinfo[i].thread_id), &attr,
                            evas_common_pipe_thread, &(thinfo[i]));
             pthread_attr_destroy(&attr);
          }

        pthread_barrier_init(&(task_thbarrier[0]), NULL, thread_num + 1);
        pthread_barrier_init(&(task_thbarrier[1]), NULL, thread_num + 1);
        for (i = 0; i < thread_num; i++)
          {
             pthread_attr_t attr;
             cpu_set_t cpu;

             pthread_attr_init(&attr);
             CPU_ZERO(&cpu);
             CPU_SET(i % cpunum, &cpu);
             /* loader workers get the same per-core pinning */
             pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
             task_thinfo[i].thread_num = i;
             task_thinfo[i].info = NULL;
             task_thinfo[i].barrier = task_thbarrier;
             /* setup initial locks */
             pthread_create(&(task_thinfo[i].thread_id), &attr,
                            evas_common_pipe_load, &(task_thinfo[i]));
             pthread_attr_destroy(&attr);
          }

#if defined(METRIC_CACHE) || defined(WORD_CACHE)
        eina_threads_init();
#endif
     }
   if (thread_num == 1) return EINA_FALSE;
   return EINA_TRUE;
#endif
   return EINA_FALSE;
}
1687
/* Schedule @im for asynchronous pixel loading by the loader pool.
 * Images already queued (RGBA_IMAGE_TODO_LOAD set) are skipped.  An
 * image is queued when its ARGB8888 data is not loaded yet, or per
 * the second test below.  NOTE(review): that test queues when cs.data
 * is absent OR when neither dirty flag is set -- this reads oddly
 * (one would expect a dirty colorspace to need work); verify against
 * callers before changing it. */
EAPI void
evas_common_pipe_image_load(RGBA_Image *im)
{
  if (im->flags & RGBA_IMAGE_TODO_LOAD)
    return ;

  if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888
      && !evas_cache_image_is_loaded(&(im->cache_entry)))
    goto add_task;

  if ((!im->cs.data) || ((!im->cs.dirty) && (!(im->flags & RGBA_IMAGE_IS_DIRTY))))
    goto add_task;

  return ;

 add_task:
  task = eina_list_append(task, im);
  im->flags |= RGBA_IMAGE_TODO_LOAD;
}
1707
/* Entry point for rendering the map4 pipes rooted at @root.  When the
 * worker pools are unusable (single core or non-pthread build), the
 * pending load tasks are drained synchronously here; otherwise the
 * loader pool processes them in evas_common_pipe_image_load_do().
 * Finally the pipes are rendered recursively via
 * evas_common_pipe_map4_render(). */
EAPI void
evas_common_pipe_map4_begin(RGBA_Image *root)
{
  if (!evas_common_pipe_init())
    {
      RGBA_Image *im;

      /* no usable thread pool: drain the load queue inline */
      EINA_LIST_FREE(task, im)
        {
          if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
            evas_cache_image_load_data(&im->cache_entry);
          evas_common_image_colorspace_normalize(im);

          im->flags &= ~RGBA_IMAGE_TODO_LOAD;
        }
    }

  evas_common_pipe_image_load_do();

  evas_common_pipe_map4_render(root);
}
1729
1730 #endif