1 // THIS IS DEPRECATED. WILL GO EVENTUALLY. NO NEED TO SUPPORT ANYMORE
3 #include "evas_common.h"
5 #ifdef BUILD_PIPE_RENDER
7 #ifdef EVAS_FRAME_QUEUING
9 static Evas_FrameQ gframeq; // global frameQ
// Wrap an RGBA_Image in a freshly allocated Evas_Surface and bump the
// image's frame-queue "added" refcount (ref_fq[0]) under its ref_fq_add lock.
// NOTE(review): calloc() result is dereferenced unchecked — TODO confirm
// OOM is considered fatal/impossible here.
12 evas_common_surface_alloc(void *surface, int x, int y, int w, int h)
14 Evas_Surface *e_surface;
16 e_surface = calloc(1, sizeof(Evas_Surface));
17 e_surface->im = surface;
18 LKL(e_surface->im->cache_entry.ref_fq_add);
19 e_surface->im->cache_entry.ref_fq[0]++;
20 LKU(e_surface->im->cache_entry.ref_fq_add);
// Tear down a surface list: unlink nodes and bump each image's frame-queue
// "deleted" refcount (ref_fq[1]) under its ref_fq_del lock.
30 evas_common_surface_dealloc(Evas_Surface *surface)
32 Evas_Surface *d_surface;
37 surface = (Evas_Surface *)eina_inlist_remove(EINA_INLIST_GET(surface), EINA_INLIST_GET(d_surface));
38 LKL(d_surface->im->cache_entry.ref_fq_del);
39 d_surface->im->cache_entry.ref_fq[1]++;
40 LKU(d_surface->im->cache_entry.ref_fq_del);
// Append @surface to @frame's inline surface list.
46 evas_common_surface_add(Evas_Frame *frame, Evas_Surface *surface)
48 frame->surfaces = (Evas_Surface *)eina_inlist_append(EINA_INLIST_GET(frame->surfaces), EINA_INLIST_GET(surface));
// Allocate a zeroed Evas_Frame with an empty surface list.
52 evas_common_frame_alloc(void)
56 frame = calloc(1, sizeof(Evas_Frame));
57 frame->surfaces = NULL;
// Release a frame's surfaces; the frame itself is presumably freed in the
// elided remainder of this function — TODO confirm against full source.
62 evas_common_frame_dealloc(Evas_Frame *frame)
64 evas_common_surface_dealloc(frame->surfaces);
// Enqueue @frame on @frameq, blocking on cond_done while the queue is full,
// then wake a worker via cond_new. Also marks the frame dont_schedule when
// it follows only already-ready frames.
69 evas_common_frame_add(Evas_FrameQ *frameq, Evas_Frame *frame)
71 Evas_Frame *temp_frame;
74 while ((int)eina_inlist_count(EINA_INLIST_GET(frameq->frames)) >= frameq->frameq_sz)
76 /* wait for a worker thread to finish the previous frame */
77 pthread_cond_wait(&(frameq->cond_done), &(frameq->mutex));
79 frameq->frames = (Evas_Frame *) eina_inlist_append(EINA_INLIST_GET(frameq->frames), EINA_INLIST_GET(frame));
81 // this frame need not be scheduled at flushing time
82 EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
84 if (!temp_frame->ready)
89 if (temp_frame && temp_frame == frame)
90 frame->dont_schedule = 1;
94 pthread_cond_signal(&(frameq->cond_new));
// Public wrapper: allocate a new surface for the current frame queue.
98 evas_common_frameq_new_surface(void *surface, int x, int y, int w, int h)
100 return evas_common_surface_alloc(surface, x, y, w, h);
// Attach @surface to the frame currently being built (gframeq.cur_frame).
104 evas_common_frameq_add_surface(Evas_Surface *surface)
106 evas_common_surface_add(gframeq.cur_frame, surface);
// Store the engine callbacks and opaque @data on the current frame; the
// post-worker thread invokes these to push updates and flush output.
110 evas_common_frameq_set_frame_data(void *data,
111 void (*fn_output_redraws_next_update_push) (void *data, void *surface, int x, int y, int w, int h),
112 void (*fn_output_flush) (void *data),
113 void (*fn_output_set_priv)(void *data, void *cur, void *prev))
115 if (gframeq.cur_frame)
117 gframeq.cur_frame->data = data;
118 gframeq.cur_frame->output_redraws_next_update_push = fn_output_redraws_next_update_push;
119 gframeq.cur_frame->output_flush = fn_output_flush;
120 gframeq.cur_frame->output_set_priv = fn_output_set_priv;
// Lazily allocate the frame that subsequent draw calls will target.
125 evas_common_frameq_prepare_frame(void)
127 if (!gframeq.cur_frame )
129 gframeq.cur_frame = evas_common_frame_alloc();
// Hand the completed current frame to the queue and reset cur_frame so the
// next render pass starts a fresh one.
134 evas_common_frameq_ready_frame(void)
136 if (gframeq.cur_frame)
138 evas_common_frame_add(&gframeq, gframeq.cur_frame);
139 gframeq.cur_frame = NULL; // create a new frame for the next frame later
// Initialize the global frame queue: condition variables plus initial state.
// Worker threads are created later, in evas_common_frameq_begin().
145 evas_common_frameq_init(void)
147 gframeq.frames = NULL;
148 pthread_cond_init(&(gframeq.cond_new), NULL);
149 pthread_cond_init(&(gframeq.cond_ready), NULL);
150 pthread_cond_init(&(gframeq.cond_done), NULL);
152 gframeq.initialised = 0; // worker threads are not created yet
153 gframeq.frameq_sz = 1; // this value ensures the first frame can be enqueued.
// Reset the queue state; the condition variables are deliberately left to
// die with the process (see the #if 0 block).
157 evas_common_frameq_destroy(void)
159 #if 0 // let them be destroyed indirectly at program exit
161 pthread_cond_destroy(&(gframeq.cond_new));
162 pthread_cond_destroy(&(gframeq.cond_ready));
163 pthread_cond_destroy(&(gframeq.cond_done));
168 gframeq.frames = NULL;
169 gframeq.initialised = 0;
// Block the caller until every queued frame has been processed (cond_done
// is broadcast by the post-worker as frames are retired).
173 evas_common_frameq_flush(void)
175 if (! evas_common_frameq_enabled())
179 while(eina_inlist_count(EINA_INLIST_GET(gframeq.frames)) > 0)
181 /* wait for a worker thread to finish the previous frame */
182 pthread_cond_wait(&(gframeq.cond_done), &(gframeq.mutex));
// Body elided in this excerpt — TODO confirm semantics against full source.
189 evas_common_frameq_flush_ready(void)
// Accessor: configured frame-queue capacity.
195 evas_common_frameq_get_frameq_sz(void)
197 return gframeq.frameq_sz;
// Nonzero once worker threads have been spawned (frame queuing active).
201 evas_common_frameq_enabled(void)
203 return gframeq.initialised;
207 static RGBA_Pipe *evas_common_pipe_add(RGBA_Pipe *pipe, RGBA_Pipe_Op **op);
208 static void evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op);
209 static void evas_common_pipe_op_free(RGBA_Pipe_Op *op);
// Reserve the next op slot in @pipe, growing the inline list by a new
// PIPE_LEN-sized RGBA_Pipe node when the tail node is full. Returns the
// (possibly new) list head; *op points at the reserved slot.
213 evas_common_pipe_add(RGBA_Pipe *pipe, RGBA_Pipe_Op **op)
221 p = calloc(1, sizeof(RGBA_Pipe));
223 pipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(pipe), EINA_INLIST_GET(p));
225 p = (RGBA_Pipe *)(EINA_INLIST_GET(pipe))->last;
226 if (p->op_num == PIPE_LEN)
228 p = calloc(1, sizeof(RGBA_Pipe));
230 pipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(pipe), EINA_INLIST_GET(p));
233 *op = &(p->op[p->op_num - 1]);
236 /* FIXME: PTHREAD init any thread locks etc */
// Snapshot @dc into the op, deep-copying the cutout rect array so the op
// stays valid after the caller's context changes.
// NOTE(review): malloc() result used unchecked — TODO confirm policy.
242 evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op)
244 memcpy(&(op->context), dc, sizeof(RGBA_Draw_Context));
245 if (op->context.cutout.active > 0)
247 op->context.cutout.rects = malloc(sizeof(Cutout_Rect) * op->context.cutout.active);
248 memcpy(op->context.cutout.rects, dc->cutout.rects, sizeof(Cutout_Rect) * op->context.cutout.active);
252 op->context.cutout.rects = NULL;
// Release per-op resources (the copied cutout rects).
257 evas_common_pipe_op_free(RGBA_Pipe_Op *op)
259 evas_common_draw_context_apply_clean_cutouts(&op->context.cutout);
// Worker thread body for the barrier-synchronized pipe renderer: wait on
// barrier[0] for a start signal, run every op in the assigned image's pipe
// against this thread's slice info, then meet the main thread at barrier[1].
265 evas_common_pipe_thread(void *data)
269 // INF("TH [...........");
273 RGBA_Pipe_Thread_Info *info;
276 /* wait for start signal */
277 // INF(" TH %i START...", thinfo->thread_num);
278 pthread_barrier_wait(&(thinfo->barrier[0]));
282 // thinfo->info = NULL;
283 // INF(" TH %i GO", thinfo->thread_num);
284 EINA_INLIST_FOREACH(EINA_INLIST_GET(info->im->cache_entry.pipe), p)
288 for (i = 0; i < p->op_num; i++)
290 if (p->op[i].op_func)
291 p->op[i].op_func(info->im, &(p->op[i]), info);
296 // INF(" TH %i DONE", thinfo->thread_num);
297 /* send finished signal */
298 pthread_barrier_wait(&(thinfo->barrier[1]));
303 #ifdef EVAS_FRAME_QUEUING
// Cancellation cleanup handler for frame-queue threads: recover the per-
// thread frameq pointer and release the queue mutex unconditionally.
305 evas_common_frameq_release(void *data)
308 Evas_Frameq_Thread_Info *fq_info;
312 fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
313 frameq = fq_info->frameq;
315 /* This thread may or may not own the mutex.
316 * But there's no way to determine the ownership of the mutex, so release it anyway
// Frame-queue worker thread: loop forever picking the first unprocessed
// frame, rendering every pipe op of each of its surfaces, pushing finished
// surfaces to the engine, stamping ready_time, and signalling cond_ready.
// Async cancellation + the cleanup handler above let finish() stop it.
322 evas_common_frameq_thread(void *data)
326 Evas_Surface *surface;
329 Evas_Frameq_Thread_Info *fq_info;
330 RGBA_Pipe_Thread_Info p_info;
333 fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
334 frameq = fq_info->frameq;
336 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
337 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
338 /* install thread cancellation cleanup handler */
339 pthread_cleanup_push(evas_common_frameq_release, data);
345 /* 1. pick a frame to draw */
349 EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), frame)
351 if (!frame->in_process)
353 frame->in_process = 1;
// No claimable frame: honour pending cancellation, then sleep for work.
361 pthread_testcancel();
362 pthread_cond_wait(&(frameq->cond_new), &(frameq->mutex));
366 /* 2. draw selected frame */
367 EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
369 p_info.im = surface->im;
372 p_info.w = surface->im->cache_entry.w;
373 p_info.h = surface->im->cache_entry.h;
375 EINA_INLIST_FOREACH(EINA_INLIST_GET(p_info.im->cache_entry.pipe), p)
379 for (i = 0; i < p->op_num; i++)
381 if (p->op[i].op_func)
383 p->op[i].op_func(p_info.im, &(p->op[i]), &p_info);
388 /* push surface out */
389 if (! surface->dontpush)
391 frame->output_redraws_next_update_push(frame->data,
392 surface->im, surface->x, surface->y, surface->w, surface->h);
396 // record frame ready time, will be used in post worker thread, evas_common_frameq_thread_post()
397 gettimeofday(&frame->ready_time, NULL);
401 pthread_cond_signal(&frameq->cond_ready);
405 // Remove cleanup handler
406 pthread_cleanup_pop(0);
// ---- Frame ready-interval history queue ("_IQ") ---------------------------
// Fixed-size circular buffer of frame ready-times and the intervals between
// them, used by evas_common_frameq_schedule_flush_time() to estimate an
// average frame interval for pacing flushes.
411 #define INTERVAL_QSIZE 17 // Actual size is 'INTERVAL_QSIZE - 1' because of not using index
412 #define SATISFACTION_THRESHOLD 4 // 4 ms --> 250 FPS
413 #define RESET_RATIO 4 // RESET_RATIO * [Average Ready Gap | get_max_interval()] --> Reset Threshold
414 #define DOUBLE_RESET_TIME_INTERVAL_THRESHOLD 16000 // make it double in case of less 16ms
415 #define RESET_ABSOLUTE_INTERVAL 600000 // 600 msec
423 static struct iq_node _IQ[INTERVAL_QSIZE];
424 static int _IQ_head = 0, _IQ_tail = 0;
425 static int _IQ_length = 0;
426 static long long min_ready, max_ready; // oldest/newest ready-times tracked
427 static long long average_interval;     // (max_ready - min_ready) / length
// Circular successor of index @i.
430 _IQ_next_index(int i)
432 return (i + 1) % INTERVAL_QSIZE;
// Circular predecessor of index @i.
436 _IQ_previous_index(int i)
438 if (--i < 0) i += INTERVAL_QSIZE;
// Reset the queue to empty and clear the running statistics.
445 _IQ_length = _IQ_head = _IQ_tail = 0;
446 min_ready = LLONG_MAX, max_ready = LLONG_MIN;
447 average_interval = 0;
// Empty/full predicates for the circular buffer (one slot kept unused).
453 return (_IQ_head == _IQ_tail) ? 1 : 0;
459 return (_IQ_head == ((_IQ_tail + 1) % INTERVAL_QSIZE)) ? 1 : 0;
// Insert a (ready_time, interval) sample keeping the queue ordered by
// ready-time: fast-paths for newest/oldest, otherwise an insertion shift.
// Drops the sample silently when the queue is full.
463 _IQ_insert(long long ready_time, long long last_interval)
465 if (_IQ_full()) return;
// First sample: normalize a negative interval and seed min/max.
469 if (last_interval < 0)
471 last_interval = -last_interval;
473 _IQ[_IQ_tail].rt = ready_time;
474 _IQ[_IQ_tail].ri = last_interval;
475 min_ready = ready_time - last_interval;
476 max_ready = ready_time;
477 _IQ_tail = _IQ_next_index(_IQ_tail);
// Newest sample so far: append at the tail.
482 if (max_ready < ready_time)
484 _IQ[_IQ_tail].rt = ready_time;
485 _IQ[_IQ_tail].ri = ready_time - max_ready;
486 _IQ_tail = _IQ_next_index(_IQ_tail);
488 max_ready = ready_time;
// Older than everything recorded: prepend at the head.
490 else if (ready_time < min_ready)
492 last_interval = _IQ[_IQ_head].ri;
493 _IQ[_IQ_head].ri = _IQ[_IQ_head].rt - ready_time;
494 _IQ_head = _IQ_previous_index(_IQ_head);
495 _IQ[_IQ_head].rt = ready_time;
496 _IQ[_IQ_head].ri = last_interval;
497 min_ready = ready_time;
// Middle insertion: find the slot, shift the tail section up, then fix the
// neighbouring intervals around the inserted sample.
503 for (i = _IQ_head; i != _IQ_tail; i = j)
505 j = _IQ_next_index(i);
506 if (_IQ[j].rt < ready_time)
512 for (k = _IQ_tail; k != j; k = l)
514 l = _IQ_previous_index(k);
517 i = _IQ_next_index(j);
518 _IQ[j].ri -= (_IQ[j].rt - ready_time);
519 _IQ[j].rt = ready_time;
520 _IQ[i].ri = _IQ[i].rt - ready_time;
521 _IQ_tail = _IQ_next_index(_IQ_tail);
525 average_interval = (max_ready - min_ready) / _IQ_length;
// Pop the oldest sample and recompute min_ready / average_interval.
531 struct iq_node oldest;
533 if (_IQ_empty()) return 0;
534 oldest = _IQ[_IQ_head];
535 _IQ_head = (_IQ_head + 1) % INTERVAL_QSIZE;
536 if ((--_IQ_length) == 0)
542 min_ready = _IQ[_IQ_head].rt;
543 average_interval = (max_ready - min_ready) / _IQ_length;
// Largest single interval currently in the queue (LLONG_MIN when empty).
550 get_max_interval(void)
553 long long max = LLONG_MIN;
555 for ( i= _IQ_head ; i != _IQ_tail ; i = _IQ_next_index(i))
// Convert a struct timeval to microseconds as a 64-bit integer.
567 tv_to_long_long(struct timeval *tv)
574 return tv->tv_sec * 1000000LL + tv->tv_usec;
// Pace the post-worker's flushes: given the previous/current frame ready
// times, decide how long to sleep so flushes track the measured average
// frame interval, and return the ready-time to carry into the next call.
// Uses the _IQ interval history above; resets it when intervals spike
// (absolute 600 ms cap, or RESET_RATIO times the average/max interval).
578 evas_common_frameq_schedule_flush_time(int frameq_sz, int thread_no,
579 long long last_ready_time, long long current_ready_time,
580 long long last_flush_time, int ready_frames_num,
583 // to get each time and to do others
584 long long current_time = 0LL;
585 long long current_ready_interval = 0LL;
586 long long theshold_time = SATISFACTION_THRESHOLD * 1000LL; // ms -> usec
587 long long reset_time_interval;
588 long long sleep_time = 0LL;
589 long long saved_ready_time, saved_ready_interval;
590 long long time_slept = 0LL;
591 static long long time_lag = 0; // carry-over of over-sleep across frames
593 int frameq_full_threshold =0;
595 int need_schedule = 0;
597 frameq_full_threshold = frameq_sz -thread_no; // Qsize - threads#
599 /* 1.5 defer flush time of current frame if need */
600 // in case of the first time, just keep ready time only
601 if (last_ready_time == 0LL)
603 last_ready_time = current_ready_time;
607 /* 1.5.1 get current ready time & interval */
608 saved_ready_time = current_ready_time;
609 saved_ready_interval = current_ready_interval = current_ready_time - last_ready_time;
610 // compensate a case which current ready time is older than previous one,
611 // doesn't work on the interval queue
612 if (current_ready_interval < 0)
614 current_ready_time = last_ready_time;
615 current_ready_interval = 0;
618 /* 1.5.2 get the reset time interval before keeping a new one */
621 reset_time_interval = RESET_RATIO * average_interval;
622 if (average_interval < DOUBLE_RESET_TIME_INTERVAL_THRESHOLD)
624 reset_time_interval *= 2;
628 /* 1.5.3 reset - if too late, discard all saved interval and start from here */
629 if (current_ready_interval > RESET_ABSOLUTE_INTERVAL)
633 else if (_IQ_length >= thread_no * 2 && current_ready_interval > reset_time_interval)
637 else if (_IQ_length >= thread_no && _IQ_length < thread_no * 2
638 && current_ready_interval > get_max_interval() * RESET_RATIO)
649 /* 1.5.4 enqueue - keep a new interval for next average interval */
654 _IQ_insert(saved_ready_time, saved_ready_interval);
656 /* 1.5.5 schedule - if faster than average interval, figure out sleep time to meet it */
661 if (_IQ_length >= thread_no * 2 && average_interval > theshold_time)
665 // compensate the case that postworker blocks the workers from getting a new fresh frame
666 // It's actually occurred when during the wait time of postworker, the frame queue is full
667 // Consequently check the number of currently ready frames and apply some time drop to average time according to the number
668 if (ready_frames_num >= frameq_full_threshold)
674 gettimeofday(&now, NULL);
675 current_time = tv_to_long_long(&now);
676 time_lag += (current_time - last_flush_time);
677 sleep_time = (average_interval < time_lag) ? 0 : (average_interval - time_lag);
681 /* 1.5.6 sleep - actually sleep and get over-slept time (time_lag) for next frame */
// Sleep only 90% of the target; the remainder is absorbed into time_lag.
684 sleep_time = sleep_time * 9 / 10;
685 usleep((unsigned int)sleep_time);
686 gettimeofday(&now, NULL);
687 time_slept = tv_to_long_long(&now) - current_time;
688 time_lag = time_slept - sleep_time;
695 last_ready_time = current_ready_time;
698 return last_ready_time;
// Post-worker thread: waits until the oldest queued frame is fully rendered
// (cond_ready), collects its surfaces into pending_writes, paces the flush
// via evas_common_frameq_schedule_flush_time(), invokes the engine's
// output_set_priv/output_flush callbacks, then retires the frame and
// broadcasts cond_done so producers/waiters can continue.
702 evas_common_frameq_thread_post(void *data)
706 Evas_Surface *surface;
708 Evas_Frameq_Thread_Info *fq_info;
709 Eina_List *pending_writes = NULL;
710 Eina_List *prev_pending_writes = NULL;
712 long long last_ready_time = 0LL;
713 long long current_ready_time;
714 Evas_Frame *temp_frame = NULL;
715 int ready_frames_num;
716 long long last_flush_time = 0LL;
718 int dont_schedule = 0;
721 fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
722 frameq = fq_info->frameq;
724 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
725 pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
726 /* install thread cancellation cleanup handler */
727 pthread_cleanup_push(evas_common_frameq_release, data);
733 /* 1. wait the first frame being done */
735 while(!frameq->frames || !frameq->frames->ready)
737 pthread_cond_wait(&(frameq->cond_ready), &(frameq->mutex));
739 frame = frameq->frames;
741 /* 1.5. prepare to schedule flush time */
742 current_ready_time = tv_to_long_long(&frame->ready_time);
743 ready_frames_num = 0;
744 EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
746 if (temp_frame->ready == 1)
751 dont_schedule = (frame->dont_schedule)?1:0;
754 /* 2. generate pending_writes */
755 EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
757 evas_common_pipe_flush(surface->im);
758 if (! surface->dontpush)
760 pending_writes = eina_list_append(pending_writes, surface->im);
764 /* 2.5. schedule flush time */
765 last_ready_time = evas_common_frameq_schedule_flush_time(
766 frameq->frameq_sz, frameq->thread_num,
767 last_ready_time, current_ready_time,
768 last_flush_time, ready_frames_num, dont_schedule);
770 /* 3. flush redraws */
771 frame->output_set_priv(frame->data, pending_writes, prev_pending_writes);
772 frame->output_flush(frame->data);
773 gettimeofday(&now, NULL);
774 // keep as the last flush time
775 last_flush_time = now.tv_sec * 1000000LL + now.tv_usec;
// The just-flushed list becomes "previous" for the engine's next swap.
777 prev_pending_writes = pending_writes;
778 pending_writes = NULL;
780 /* 4. remove this frame from the frame queue */
783 (Evas_Frame *)eina_inlist_remove(EINA_INLIST_GET(frameq->frames),
784 EINA_INLIST_GET(frame));
787 pthread_cond_broadcast(&frameq->cond_done);
788 evas_common_frame_dealloc(frame);
791 // Remove cleanup handler
792 pthread_cleanup_pop(0);
796 #endif /* EVAS_FRAME_QUEUING */
// State for the barrier-based (non-frameq) pipe renderer.
800 static int thread_num = 0;
801 static Thinfo thinfo[TH_MAX];
802 static pthread_barrier_t thbarrier[2];
// Kick off parallel rendering of @im's pipe: split the image into
// horizontal slices (one per worker), hand each worker its slice info,
// and release them through barrier[0]. With frame queuing compiled in,
// the early returns below bypass this path.
// NOTE(review): info/h slice math is partially elided here — the visible
// 'info->h = thread_num' line looks suspicious; verify against full source.
806 evas_common_pipe_begin(RGBA_Image *im)
811 #ifdef EVAS_FRAME_QUEUING
815 if (!im->cache_entry.pipe) return;
816 if (thread_num == 1) return;
818 h = im->cache_entry.h / thread_num;
820 for (i = 0; i < thread_num; i++)
822 RGBA_Pipe_Thread_Info *info;
824 // if (y >= im->cache_entry.h) break;
825 info = calloc(1, sizeof(RGBA_Pipe_Thread_Info));
829 info->w = im->cache_entry.w;
831 info->h = thread_num;
835 info->w = im->cache_entry.w;
836 if (i == (thread_num - 1))
838 info->h = im->cache_entry.h - y;
846 thinfo[i].info = info;
848 /* tell worker threads to start */
849 pthread_barrier_wait(&(thbarrier[0]));
853 #ifdef EVAS_FRAME_QUEUING
// Spawn the frame-queue thread pool on first use: one worker per CPU
// (optionally pinned via pthread_attr_setaffinity_np) plus one extra
// post-worker thread for flushing. All threads are detached; the queue
// size scales with the CPU count.
855 evas_common_frameq_begin(void)
859 Evas_Frameq_Thread_Info *fp_info;
863 if (!gframeq.initialised)
865 int cpunum, set_cpu_affinity = 0;
867 cpunum = eina_cpu_count();
868 gframeq.thread_num = cpunum;
869 gframeq.frameq_sz = cpunum * FRAMEQ_SZ_PER_THREAD;
871 for (i = 0; i < gframeq.thread_num; i++)
874 fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
875 fp_info->frameq = &gframeq;
877 gframeq.thinfo[i].thread_num = i;
878 gframeq.thinfo[i].fq_info = fp_info;
880 pthread_attr_init(&attr);
881 if (set_cpu_affinity)
884 CPU_SET((i+1) % cpunum, &cpu);
885 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
888 pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
889 evas_common_frameq_thread, &(gframeq.thinfo[i]));
891 pthread_attr_destroy(&attr);
892 pthread_detach(gframeq.thinfo[i].thread_id);
// Create the single post-worker (index i == thread_num after the loop).
896 fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
897 fp_info->frameq = &gframeq;
899 gframeq.thinfo[i].thread_num = i;
900 gframeq.thinfo[i].fq_info = fp_info;
902 pthread_attr_init(&attr);
903 if (set_cpu_affinity)
906 CPU_SET((i+1) % cpunum, &cpu);
907 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
910 pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
911 evas_common_frameq_thread_post, &(gframeq.thinfo[i]));
912 pthread_attr_destroy(&attr);
913 pthread_detach(gframeq.thinfo[i].thread_id);
915 gframeq.initialised = 1; // now worker threads are created.
918 DBG("%d cpus, set_cpu_affinity=%d, frameq_sz=%d",
919 cpunum, set_cpu_affinity, gframeq.frameq_sz);
921 #endif /* BUILD_PTHREAD */
// Cancel all frame-queue threads, then wake them (cond_new / cond_ready)
// so each reaches its cancellation point and runs the cleanup handler.
// Threads are detached, so no join is required.
925 evas_common_frameq_finish(void)
929 /* 1. cancel all worker threads */
930 for (i = 0; i < gframeq.thread_num; i++)
932 pthread_cancel(gframeq.thinfo[i].thread_id);
934 // cancel post-worker thread
935 pthread_cancel(gframeq.thinfo[i].thread_id);
937 /* 2. send signal to worker threads so that they enter to the thread cancellation cleanup handler */
938 for (i = 0; i < gframeq.thread_num; i++)
940 pthread_cond_signal(&(gframeq.cond_new));
942 // send signal to post-worker thread
943 pthread_cond_signal(&(gframeq.cond_ready));
945 /* all the workers were created and detached before
946 * so don't need to join them here.
// Execute (or wait for) every op queued on @im's pipe, then free the pipe.
// Without frame queuing: meet the workers at barrier[1] (multi-thread) or
// run all ops inline (single thread). With frame queuing the ops were run
// by the frameq workers, so only cleanup remains.
954 evas_common_pipe_flush(RGBA_Image *im)
956 if (!im->cache_entry.pipe) return;
957 #ifndef EVAS_FRAME_QUEUING
961 /* sync worker threads */
962 pthread_barrier_wait(&(thbarrier[1]));
970 /* process pipe - 1 thread */
971 for (p = im->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
973 for (i = 0; i < p->op_num; i++)
975 if (p->op[i].op_func)
977 p->op[i].op_func(im, &(p->op[i]), NULL);
982 #endif /* !EVAS_FRAME_QUEUING */
983 evas_common_cpu_end_opt();
984 evas_common_pipe_free(im);
// Free @im's pipe: run each op's free_func, then unlink and release every
// RGBA_Pipe node until the list is empty.
988 evas_common_pipe_free(RGBA_Image *im)
994 if (!im->cache_entry.pipe) return;
995 /* FIXME: PTHREAD join all threads here (if not finished) */
998 while (im->cache_entry.pipe)
1000 p = im->cache_entry.pipe;
1001 for (i = 0; i < p->op_num; i++)
1003 if (p->op[i].free_func)
1005 p->op[i].free_func(&(p->op[i]));
1008 im->cache_entry.pipe = (RGBA_Pipe *)eina_inlist_remove(EINA_INLIST_GET(im->cache_entry.pipe), EINA_INLIST_GET(p));
1016 /**************** RECT ******************/
1018 evas_common_pipe_rectangle_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1022 RGBA_Draw_Context context;
1024 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1026 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1028 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1030 evas_common_rectangle_draw(dst, &(context),
1031 op->op.rect.x, op->op.rect.y,
1032 op->op.rect.w, op->op.rect.h);
1036 evas_common_rectangle_draw(dst, &(op->context),
1037 op->op.rect.x, op->op.rect.y,
1038 op->op.rect.w, op->op.rect.h);
1043 evas_common_pipe_rectangle_draw(RGBA_Image *dst, RGBA_Draw_Context *dc, int x, int y, int w, int h)
1047 if ((w < 1) || (h < 1)) return;
1048 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1049 if (!dst->cache_entry.pipe) return;
1054 op->op_func = evas_common_pipe_rectangle_draw_do;
1055 op->free_func = evas_common_pipe_op_free;
1056 evas_common_pipe_draw_context_copy(dc, op);
1059 /**************** LINE ******************/
1061 evas_common_pipe_line_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1065 RGBA_Draw_Context context;
1067 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1069 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1071 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1073 evas_common_line_draw(dst, &(context),
1074 op->op.line.x0, op->op.line.y0,
1075 op->op.line.x1, op->op.line.y1);
1079 evas_common_line_draw(dst, &(op->context),
1080 op->op.line.x0, op->op.line.y0,
1081 op->op.line.x1, op->op.line.y1);
1086 evas_common_pipe_line_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1087 int x0, int y0, int x1, int y1)
1091 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1092 if (!dst->cache_entry.pipe) return;
1093 op->op.line.x0 = x0;
1094 op->op.line.y0 = y0;
1095 op->op.line.x1 = x1;
1096 op->op.line.y1 = y1;
1097 op->op_func = evas_common_pipe_line_draw_do;
1098 op->free_func = evas_common_pipe_op_free;
1099 evas_common_pipe_draw_context_copy(dc, op);
1102 /**************** POLY ******************/
1104 evas_common_pipe_op_poly_free(RGBA_Pipe_Op *op)
1106 RGBA_Polygon_Point *p;
1108 while (op->op.poly.points)
1110 p = op->op.poly.points;
1111 op->op.poly.points = (RGBA_Polygon_Point *)eina_inlist_remove(EINA_INLIST_GET(op->op.poly.points),
1112 EINA_INLIST_GET(p));
1115 evas_common_pipe_op_free(op);
1119 evas_common_pipe_poly_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1123 RGBA_Draw_Context context;
1125 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1127 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1129 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1131 evas_common_polygon_draw(dst, &(context),
1132 op->op.poly.points, 0, 0);
1136 evas_common_polygon_draw(dst, &(op->context),
1137 op->op.poly.points, 0, 0);
1142 evas_common_pipe_poly_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1143 RGBA_Polygon_Point *points, int x, int y)
1146 RGBA_Polygon_Point *pts = NULL, *p, *pp;
1148 if (!points) return;
1149 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1150 if (!dst->cache_entry.pipe) return;
1151 /* FIXME: copy points - maybe we should refcount? */
1152 for (p = points; p; p = (RGBA_Polygon_Point *)(EINA_INLIST_GET(p))->next)
1154 pp = calloc(1, sizeof(RGBA_Polygon_Point));
1159 pts = (RGBA_Polygon_Point *)eina_inlist_append(EINA_INLIST_GET(pts), EINA_INLIST_GET(pp));
1162 op->op.poly.points = pts;
1163 op->op_func = evas_common_pipe_poly_draw_do;
1164 op->free_func = evas_common_pipe_op_poly_free;
1165 evas_common_pipe_draw_context_copy(dc, op);
1168 /**************** TEXT ******************/
1170 evas_common_pipe_op_text_free(RGBA_Pipe_Op *op)
1172 #ifdef EVAS_FRAME_QUEUING
1173 LKL(op->op.text.font->ref_fq_del);
1174 op->op.text.font->ref_fq[1]++;
1175 LKU(op->op.text.font->ref_fq_del);
1176 pthread_cond_signal(&(op->op.text.font->cond_fq_del));
1178 evas_common_font_free(op->op.text.font);
1180 evas_common_text_props_content_unref(&(op->op.text.intl_props));
1181 free(op->op.text.text);
1182 evas_common_pipe_op_free(op);
1185 #ifdef EVAS_FRAME_QUEUING
1186 /* flush all op using @fn */
1188 evas_common_pipe_op_text_flush(RGBA_Font *fn)
1190 if (! evas_common_frameq_enabled())
1193 LKL(fn->ref_fq_add);
1194 LKL(fn->ref_fq_del);
1196 while (fn->ref_fq[0] != fn->ref_fq[1])
1197 pthread_cond_wait(&(fn->cond_fq_del), &(fn->ref_fq_del));
1199 LKU(fn->ref_fq_del);
1200 LKU(fn->ref_fq_add);
1205 evas_common_pipe_text_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1209 RGBA_Draw_Context context;
1211 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1213 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1215 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1217 evas_common_font_draw(dst, &(context),
1218 op->op.text.font, op->op.text.x, op->op.text.y,
1219 op->op.text.text, &op->op.text.intl_props);
1223 evas_common_font_draw(dst, &(op->context),
1224 op->op.text.font, op->op.text.x, op->op.text.y,
1225 op->op.text.text, &op->op.text.intl_props);
1230 evas_common_pipe_text_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1231 RGBA_Font *fn, int x, int y, const Eina_Unicode *text, const Evas_Text_Props *intl_props)
1235 if ((!fn) || (!text)) return;
1236 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1237 if (!dst->cache_entry.pipe) return;
1240 op->op.text.text = eina_unicode_strdup(text);
1241 evas_common_text_props_content_copy_and_ref(&(op->op.text.intl_props),
1243 #ifdef EVAS_FRAME_QUEUING
1244 LKL(fn->ref_fq_add);
1246 LKU(fn->ref_fq_add);
1250 op->op.text.font = fn;
1251 op->op_func = evas_common_pipe_text_draw_do;
1252 op->free_func = evas_common_pipe_op_text_free;
1253 evas_common_pipe_draw_context_copy(dc, op);
1256 /**************** IMAGE *****************/
1258 evas_common_pipe_op_image_free(RGBA_Pipe_Op *op)
1260 #ifdef EVAS_FRAME_QUEUING
1261 LKL(op->op.image.src->cache_entry.ref_fq_del);
1262 op->op.image.src->cache_entry.ref_fq[1]++;
1263 LKU(op->op.image.src->cache_entry.ref_fq_del);
1264 pthread_cond_signal(&(op->op.image.src->cache_entry.cond_fq_del));
1266 op->op.image.src->ref--;
1267 if (op->op.image.src->ref == 0)
1269 evas_cache_image_drop(&op->op.image.src->cache_entry);
1272 evas_common_pipe_op_free(op);
1275 #ifdef EVAS_FRAME_QUEUING
1277 evas_common_pipe_op_image_flush(RGBA_Image *im)
1279 if (! evas_common_frameq_enabled())
1282 LKL(im->cache_entry.ref_fq_add);
1283 LKL(im->cache_entry.ref_fq_del);
1285 while (im->cache_entry.ref_fq[0] != im->cache_entry.ref_fq[1])
1286 pthread_cond_wait(&(im->cache_entry.cond_fq_del), &(im->cache_entry.ref_fq_del));
1288 LKU(im->cache_entry.ref_fq_del);
1289 LKU(im->cache_entry.ref_fq_add);
1294 evas_common_pipe_image_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1298 RGBA_Draw_Context context;
1300 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1302 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1304 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1308 evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
1310 op->op.image.smooth,
1320 if (op->op.image.smooth)
1322 evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
1335 evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
1351 evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
1352 dst, &(op->context),
1353 op->op.image.smooth,
1363 if (op->op.image.smooth)
1365 evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
1366 dst, &(op->context),
1378 evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
1379 dst, &(op->context),
1394 evas_common_pipe_image_draw(RGBA_Image *src, RGBA_Image *dst,
1395 RGBA_Draw_Context *dc, int smooth,
1396 int src_region_x, int src_region_y,
1397 int src_region_w, int src_region_h,
1398 int dst_region_x, int dst_region_y,
1399 int dst_region_w, int dst_region_h)
1404 // evas_common_pipe_flush(src);
1405 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1406 if (!dst->cache_entry.pipe) return;
1407 op->op.image.smooth = smooth;
1408 op->op.image.sx = src_region_x;
1409 op->op.image.sy = src_region_y;
1410 op->op.image.sw = src_region_w;
1411 op->op.image.sh = src_region_h;
1412 op->op.image.dx = dst_region_x;
1413 op->op.image.dy = dst_region_y;
1414 op->op.image.dw = dst_region_w;
1415 op->op.image.dh = dst_region_h;
1416 #ifdef EVAS_FRAME_QUEUING
1417 LKL(src->cache_entry.ref_fq_add);
1418 src->cache_entry.ref_fq[0]++;
1419 LKU(src->cache_entry.ref_fq_add);
1423 op->op.image.src = src;
1424 op->op_func = evas_common_pipe_image_draw_do;
1425 op->free_func = evas_common_pipe_op_image_free;
1426 evas_common_pipe_draw_context_copy(dc, op);
1428 #ifdef EVAS_FRAME_QUEUING
1429 /* laod every src image here.
1430 * frameq utilize all cpu cores already by worker threads
1431 * so another threads and barrier waiting can't be of any benefit.
1432 * therefore, not instantiate loader threads.
1434 if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1435 evas_cache_image_load_data(&src->cache_entry);
1436 evas_common_image_colorspace_normalize(src);
1438 evas_common_pipe_image_load(src);
// Free a map4 op: frame-queue "del" refcount on the source image, then
// drop the map4 image ref (dropping the cache entry at zero) and free the
// copied map points.
// NOTE(review): the #ifdef branch uses op->op.image.src while the rest
// uses op->op.map4.src — likely aliases of the same union member, but
// verify against the full source.
1443 evas_common_pipe_op_map4_free(RGBA_Pipe_Op *op)
1445 #ifdef EVAS_FRAME_QUEUING
1446 LKL(op->op.image.src->cache_entry.ref_fq_del);
1447 op->op.image.src->cache_entry.ref_fq[1]++;
1448 LKU(op->op.image.src->cache_entry.ref_fq_del);
1450 op->op.map4.src->ref--;
1451 if (op->op.map4.src->ref == 0)
1452 evas_cache_image_drop(&op->op.map4.src->cache_entry);
1454 free(op->op.map4.p);
1455 evas_common_pipe_op_free(op);
// Op executor: render the queued 4-point mapped image, with the usual
// per-thread context copy/clip when @info is provided.
1459 evas_common_pipe_map4_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1463 RGBA_Draw_Context context;
1465 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1467 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1469 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1472 evas_common_map4_rgba(op->op.map4.src, dst,
1473 &context, op->op.map4.p,
1474 op->op.map4.smooth, op->op.map4.level);
1478 evas_common_map4_rgba(op->op.map4.src, dst,
1479 &(op->context), op->op.map4.p,
1480 op->op.map4.smooth, op->op.map4.level);
// Queue a map4 (4-point texture map) op: copies the 4 map points, refs the
// source image under frame queuing, and eagerly loads/normalizes its data
// (same rationale as the image op).
1485 evas_common_pipe_map4_draw(RGBA_Image *src, RGBA_Image *dst,
1486 RGBA_Draw_Context *dc, RGBA_Map_Point *p,
1487 int smooth, int level)
1490 RGBA_Map_Point *pts_copy;
1494 pts_copy = malloc(sizeof (RGBA_Map_Point) * 4);
1495 if (!pts_copy) return;
1496 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1497 if (!dst->cache_entry.pipe)
1503 for (i = 0; i < 4; ++i)
1506 op->op.map4.smooth = smooth;
1507 op->op.map4.level = level;
1508 #ifdef EVAS_FRAME_QUEUING
1509 LKL(src->cache_entry.ref_fq_add);
1510 src->cache_entry.ref_fq[0]++;
1511 LKU(src->cache_entry.ref_fq_add);
1515 op->op.map4.src = src;
1516 op->op.map4.p = pts_copy;
1517 op->op_func = evas_common_pipe_map4_draw_do;
1518 op->free_func = evas_common_pipe_op_map4_free;
1519 evas_common_pipe_draw_context_copy(dc, op);
1521 #ifdef EVAS_FRAME_QUEUING
1522 /* load every src image here.
1523 * frameq utilize all cpu cores already by worker threads
1524 * so another threads and barrier waiting can't be of any benefit.
1525 * therefore, not instantiate loader threads.
1527 if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1528 evas_cache_image_load_data(&src->cache_entry);
1529 evas_common_image_colorspace_normalize(src);
1531 evas_common_pipe_image_load(src);
// Render @root's pipe, first recursing into any source images (of map4 or
// image ops) that themselves have pending pipes, so dependencies are drawn
// before they are sampled; then begin+flush @root's own pipe.
1536 evas_common_pipe_map4_render(RGBA_Image *root)
1541 /* Map imply that we need to process them recursively first. */
1542 for (p = root->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
1544 for (i = 0; i < p->op_num; i++)
1546 if (p->op[i].op_func == evas_common_pipe_map4_draw_do)
1548 if (p->op[i].op.map4.src->cache_entry.pipe)
1549 evas_common_pipe_map4_render(p->op[i].op.map4.src);
1551 else if (p->op[i].op_func == evas_common_pipe_image_draw_do)
1553 if (p->op[i].op.image.src->cache_entry.pipe)
1554 evas_common_pipe_map4_render(p->op[i].op.image.src);
1559 evas_common_pipe_begin(root);
1560 evas_common_pipe_flush(root);
1563 #ifdef BUILD_PTHREAD
// Shared work list of images awaiting pixel load, its worker-thread pool,
// start/finish barriers, and the mutex guarding the task list.
1564 static Eina_List *task = NULL;
1565 static Thinfo task_thinfo[TH_MAX];
1566 static pthread_barrier_t task_thbarrier[2];
1567 static LK(task_mutext) = PTHREAD_MUTEX_INITIALIZER;
1570 #ifdef BUILD_PTHREAD
// Loader worker: wait at barrier[0], pop images off the shared task list,
// load + colorspace-normalize each (clearing its TODO_LOAD flag), and meet
// the caller at barrier[1] when the list drains.
1572 evas_common_pipe_load(void *data)
1579 /* wait for start signal */
1580 pthread_barrier_wait(&(thinfo->barrier[0]));
1584 RGBA_Image *im = NULL;
1587 im = eina_list_data_get(task);
1588 task = eina_list_remove_list(task, task);
1593 if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1594 evas_cache_image_load_data(&im->cache_entry);
1595 evas_common_image_colorspace_normalize(im);
1597 im->flags &= ~RGBA_IMAGE_TODO_LOAD;
1601 /* send finished signal */
1602 pthread_barrier_wait(&(thinfo->barrier[1]));
1609 static volatile int bval = 0;
// Run one round of the loader pool: release the workers (barrier[0]) and
// wait until every queued image has been loaded (barrier[1]).
1612 evas_common_pipe_image_load_do(void)
1614 #ifdef BUILD_PTHREAD
1615 /* Notify worker thread. */
1616 pthread_barrier_wait(&(task_thbarrier[0]));
1618 /* sync worker threads */
1619 pthread_barrier_wait(&(task_thbarrier[1]));
// One-time setup of the barrier-based pipe renderer: spawn one render
// worker and one loader worker per CPU (optionally CPU-pinned), with the
// two barrier pairs used for start/finish synchronization.
// Returns whether multithreaded rendering is in effect (EINA_FALSE on a
// single-core machine, per the visible tail).
1624 evas_common_pipe_init(void)
1626 #ifdef BUILD_PTHREAD
1627 if (thread_num == 0)
1632 cpunum = eina_cpu_count();
1633 thread_num = cpunum;
1634 // on single cpu we still want this initted.. otherwise we block forever
1635 // waiting on pthread barriers for async rendering on a single core!
1636 // if (thread_num == 1) return EINA_FALSE;
1638 pthread_barrier_init(&(thbarrier[0]), NULL, thread_num + 1);
1639 pthread_barrier_init(&(thbarrier[1]), NULL, thread_num + 1);
1640 for (i = 0; i < thread_num; i++)
1642 pthread_attr_t attr;
1645 pthread_attr_init(&attr);
1647 CPU_SET(i % cpunum, &cpu);
1648 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
1649 thinfo[i].thread_num = i;
1650 thinfo[i].info = NULL;
1651 thinfo[i].barrier = thbarrier;
1652 /* setup initial locks */
1653 pthread_create(&(thinfo[i].thread_id), &attr,
1654 evas_common_pipe_thread, &(thinfo[i]));
1655 pthread_attr_destroy(&attr);
// Second pool: image loader threads sharing the task list above.
1658 pthread_barrier_init(&(task_thbarrier[0]), NULL, thread_num + 1);
1659 pthread_barrier_init(&(task_thbarrier[1]), NULL, thread_num + 1);
1660 for (i = 0; i < thread_num; i++)
1662 pthread_attr_t attr;
1665 pthread_attr_init(&attr);
1667 CPU_SET(i % cpunum, &cpu);
1668 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
1669 task_thinfo[i].thread_num = i;
1670 task_thinfo[i].info = NULL;
1671 task_thinfo[i].barrier = task_thbarrier;
1672 /* setup initial locks */
1673 pthread_create(&(task_thinfo[i].thread_id), &attr,
1674 evas_common_pipe_load, &(task_thinfo[i]));
1675 pthread_attr_destroy(&attr);
1678 #if defined(METRIC_CACHE) || defined(WORD_CACHE)
1679 eina_threads_init();
1682 if (thread_num == 1) return EINA_FALSE;
// Queue @im for background loading unless it is already queued, already
// loaded, or its colorspace data needs no work; marks it TODO_LOAD.
1689 evas_common_pipe_image_load(RGBA_Image *im)
1691 if (im->flags & RGBA_IMAGE_TODO_LOAD)
1694 if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888
1695 && !evas_cache_image_is_loaded(&(im->cache_entry)))
1698 if ((!im->cs.data) || ((!im->cs.dirty) && (!(im->flags & RGBA_IMAGE_IS_DIRTY))))
1704 task = eina_list_append(task, im);
1705 im->flags |= RGBA_IMAGE_TODO_LOAD;
// Entry point for a map4 render pass: ensure threads exist (falling back
// to loading queued images synchronously when init reports single-thread),
// run the loader pool, then render @root's pipe recursively.
1709 evas_common_pipe_map4_begin(RGBA_Image *root)
1711 if (!evas_common_pipe_init())
1715 EINA_LIST_FREE(task, im)
1717 if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1718 evas_cache_image_load_data(&im->cache_entry);
1719 evas_common_image_colorspace_normalize(im);
1721 im->flags &= ~RGBA_IMAGE_TODO_LOAD;
1725 evas_common_pipe_image_load_do();
1727 evas_common_pipe_map4_render(root);