1 // THIS IS DEPRECATED. WILL GO EVENTUALLY. NO NEED TO SUPPORT ANYMORE
3 #include "evas_common.h"
6 #ifdef BUILD_PIPE_RENDER
8 #ifdef EVAS_FRAME_QUEUING
10 static Evas_FrameQ gframeq; // global frameQ
13 evas_common_surface_alloc(void *surface, int x, int y, int w, int h)
15 Evas_Surface *e_surface;
17 e_surface = calloc(1, sizeof(Evas_Surface));
18 e_surface->im = surface;
19 LKL(e_surface->im->cache_entry.ref_fq_add);
20 e_surface->im->cache_entry.ref_fq[0]++;
21 LKU(e_surface->im->cache_entry.ref_fq_add);
31 evas_common_surface_dealloc(Evas_Surface *surface)
33 Evas_Surface *d_surface;
38 surface = (Evas_Surface *)eina_inlist_remove(EINA_INLIST_GET(surface), EINA_INLIST_GET(d_surface));
39 LKL(d_surface->im->cache_entry.ref_fq_del);
40 d_surface->im->cache_entry.ref_fq[1]++;
41 LKU(d_surface->im->cache_entry.ref_fq_del);
47 evas_common_surface_add(Evas_Frame *frame, Evas_Surface *surface)
49 frame->surfaces = (Evas_Surface *)eina_inlist_append(EINA_INLIST_GET(frame->surfaces), EINA_INLIST_GET(surface));
53 evas_common_frame_alloc(void)
57 frame = calloc(1, sizeof(Evas_Frame));
58 frame->surfaces = NULL;
63 evas_common_frame_dealloc(Evas_Frame *frame)
65 evas_common_surface_dealloc(frame->surfaces);
70 evas_common_frame_add(Evas_FrameQ *frameq, Evas_Frame *frame)
72 Evas_Frame *temp_frame;
75 while ((int)eina_inlist_count(EINA_INLIST_GET(frameq->frames)) >= frameq->frameq_sz)
77 /* wait a worker thread finish previous frame */
78 eina_condition_wait(&(frameq->cond_done));
80 frameq->frames = (Evas_Frame *) eina_inlist_append(EINA_INLIST_GET(frameq->frames), EINA_INLIST_GET(frame));
82 // this frame need not to be scheduled for flushing time
83 EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
85 if (!temp_frame->ready)
90 if (temp_frame && temp_frame == frame)
91 frame->dont_schedule = 1;
95 eina_condition_signal(&(frameq->cond_new));
99 evas_common_frameq_new_surface(void *surface, int x, int y, int w, int h)
101 return evas_common_surface_alloc(surface, x, y, w, h);
105 evas_common_frameq_add_surface(Evas_Surface *surface)
107 evas_common_surface_add(gframeq.cur_frame, surface);
111 evas_common_frameq_set_frame_data(void *data,
112 void (*fn_output_redraws_next_update_push) (void *data, void *surface, int x, int y, int w, int h),
113 void (*fn_output_flush) (void *data),
114 void (*fn_output_set_priv)(void *data, void *cur, void *prev))
116 if (gframeq.cur_frame)
118 gframeq.cur_frame->data = data;
119 gframeq.cur_frame->output_redraws_next_update_push = fn_output_redraws_next_update_push;
120 gframeq.cur_frame->output_flush = fn_output_flush;
121 gframeq.cur_frame->output_set_priv = fn_output_set_priv;
126 evas_common_frameq_prepare_frame(void)
128 if (!gframeq.cur_frame )
130 gframeq.cur_frame = evas_common_frame_alloc();
135 evas_common_frameq_ready_frame(void)
137 if (gframeq.cur_frame)
139 evas_common_frame_add(&gframeq, gframeq.cur_frame);
140 gframeq.cur_frame = NULL; // create a new frame for the next frame later
146 evas_common_frameq_init(void)
148 gframeq.frames = NULL;
150 eina_condition_new(&(gframeq.cond_new), &(gframeq.mutex));
151 eina_condition_new(&(gframeq.cond_ready), &(gframeq.mutex));
152 eina_condition_new(&(gframeq.cond_done), &(gframeq.mutex));
153 gframeq.initialised = 0; // worker thread are not created yet
154 gframeq.frameq_sz = 1; // this value ensures the first frame can be enqueued.
158 evas_common_frameq_destroy(void)
160 #if 0 // let them destroyed indirectly with program exit
162 eina_condition_free(&(gframeq.cond_new));
163 eina_condition_free(&(gframeq.cond_ready));
164 eina_condition_free(&(gframeq.cond_done));
169 gframeq.frames = NULL;
170 gframeq.initialised = 0;
174 evas_common_frameq_flush(void)
176 if (! evas_common_frameq_enabled())
180 while(eina_inlist_count(EINA_INLIST_GET(gframeq.frames)) > 0)
182 /* wait a worker thread finish previous frame */
183 eina_condition_wait(&(gframeq.cond_done));
190 evas_common_frameq_flush_ready(void)
196 evas_common_frameq_get_frameq_sz(void)
198 return gframeq.frameq_sz;
202 evas_common_frameq_enabled(void)
204 return gframeq.initialised;
208 static RGBA_Pipe *evas_common_pipe_add(RGBA_Pipe *pipe, RGBA_Pipe_Op **op);
209 static void evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op);
210 static void evas_common_pipe_op_free(RGBA_Pipe_Op *op);
214 evas_common_pipe_add(RGBA_Pipe *rpipe, RGBA_Pipe_Op **op)
222 p = calloc(1, sizeof(RGBA_Pipe));
224 rpipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(rpipe), EINA_INLIST_GET(p));
226 p = (RGBA_Pipe *)(EINA_INLIST_GET(rpipe))->last;
227 if (p->op_num == PIPE_LEN)
229 p = calloc(1, sizeof(RGBA_Pipe));
231 rpipe = (RGBA_Pipe *)eina_inlist_append(EINA_INLIST_GET(rpipe), EINA_INLIST_GET(p));
234 *op = &(p->op[p->op_num - 1]);
237 /* FIXME: PTHREAD init any thread locks etc */
243 evas_common_pipe_draw_context_copy(RGBA_Draw_Context *dc, RGBA_Pipe_Op *op)
245 memcpy(&(op->context), dc, sizeof(RGBA_Draw_Context));
246 if (op->context.cutout.active > 0)
248 op->context.cutout.rects = malloc(sizeof(Cutout_Rect) * op->context.cutout.active);
249 memcpy(op->context.cutout.rects, dc->cutout.rects, sizeof(Cutout_Rect) * op->context.cutout.active);
253 op->context.cutout.rects = NULL;
258 evas_common_pipe_op_free(RGBA_Pipe_Op *op)
260 evas_common_draw_context_apply_clean_cutouts(&op->context.cutout);
266 evas_common_pipe_thread(void *data)
270 // INF("TH [...........");
274 RGBA_Pipe_Thread_Info *info;
277 /* wait for start signal */
278 // INF(" TH %i START...", thinfo->thread_num);
279 pthread_barrier_wait(&(thinfo->barrier[0]));
283 // thinfo->info = NULL;
284 // INF(" TH %i GO", thinfo->thread_num);
285 EINA_INLIST_FOREACH(EINA_INLIST_GET(info->im->cache_entry.pipe), p)
289 for (i = 0; i < p->op_num; i++)
291 if (p->op[i].op_func)
292 p->op[i].op_func(info->im, &(p->op[i]), info);
297 // INF(" TH %i DONE", thinfo->thread_num);
298 /* send finished signal */
299 pthread_barrier_wait(&(thinfo->barrier[1]));
304 #ifdef EVAS_FRAME_QUEUING
306 evas_common_frameq_release(void *data)
309 Evas_Frameq_Thread_Info *fq_info;
313 fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
314 frameq = fq_info->frameq;
316 /* This thread may or may not own the mutex.
317 * But there's no way to determine the ownership of the mutex, so release it anyway
323 evas_common_frameq_thread(void *data)
327 Evas_Surface *surface;
330 Evas_Frameq_Thread_Info *fq_info;
331 RGBA_Pipe_Thread_Info p_info;
334 fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
335 frameq = fq_info->frameq;
337 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
338 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
339 /* install thread cancelation cleanup handler */
340 pthread_cleanup_push(evas_common_frameq_release, data);
346 /* 1. pick a frame to draw */
350 EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), frame)
352 if (!frame->in_process)
354 frame->in_process = 1;
362 pthread_testcancel();
363 eina_condition_wait(&(frameq->cond_new));
367 /* 2. draw selected frame */
368 EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
370 p_info.im = surface->im;
373 p_info.w = surface->im->cache_entry.w;
374 p_info.h = surface->im->cache_entry.h;
376 EINA_INLIST_FOREACH(EINA_INLIST_GET(p_info.im->cache_entry.pipe), p)
380 for (i = 0; i < p->op_num; i++)
382 if (p->op[i].op_func)
384 p->op[i].op_func(p_info.im, &(p->op[i]), &p_info);
389 /* push surface out */
390 if (! surface->dontpush)
392 frame->output_redraws_next_update_push(frame->data,
393 surface->im, surface->x, surface->y, surface->w, surface->h);
397 // record frame ready time, will be used in post worker thread, evas_common_frameq_thread_post()
398 gettimeofday(&frame->ready_time, NULL);
402 eina_condition_signal(&(frameq->cond_ready));
406 // Remove cleanup handler
407 pthread_cleanup_pop(0);
412 #define INTERVAL_QSIZE 17 // Actual size is 'INTERVAL_QSIZE - 1' because of not using index
413 #define SATISFACTION_THRESHOLD 4 // 4 ms --> 250 FPS
414 #define RESET_RATIO 4 // RESET_RATIO * [Average Ready Gap | get_max_interval()] --> Reset Threshold
415 #define DOUBLE_RESET_TIME_INTERVAL_THRESHOLD 16000 // make it double in case of less 16ms
416 #define RESET_ABSOLUTE_INTERVAL 600000 // 600 msec
424 static struct iq_node _IQ[INTERVAL_QSIZE];
425 static int _IQ_head = 0, _IQ_tail = 0;
426 static int _IQ_length = 0;
427 static long long min_ready, max_ready;
428 static long long average_interval;
431 _IQ_next_index(int i)
433 return (i + 1) % INTERVAL_QSIZE;
437 _IQ_previous_index(int i)
439 if (--i < 0) i += INTERVAL_QSIZE;
446 _IQ_length = _IQ_head = _IQ_tail = 0;
447 min_ready = LLONG_MAX, max_ready = LLONG_MIN;
448 average_interval = 0;
454 return (_IQ_head == _IQ_tail) ? 1 : 0;
460 return (_IQ_head == ((_IQ_tail + 1) % INTERVAL_QSIZE)) ? 1 : 0;
464 _IQ_insert(long long ready_time, long long last_interval)
466 if (_IQ_full()) return;
470 if (last_interval < 0)
472 last_interval = -last_interval;
474 _IQ[_IQ_tail].rt = ready_time;
475 _IQ[_IQ_tail].ri = last_interval;
476 min_ready = ready_time - last_interval;
477 max_ready = ready_time;
478 _IQ_tail = _IQ_next_index(_IQ_tail);
483 if (max_ready < ready_time)
485 _IQ[_IQ_tail].rt = ready_time;
486 _IQ[_IQ_tail].ri = ready_time - max_ready;
487 _IQ_tail = _IQ_next_index(_IQ_tail);
489 max_ready = ready_time;
491 else if (ready_time < min_ready)
493 last_interval = _IQ[_IQ_head].ri;
494 _IQ[_IQ_head].ri = _IQ[_IQ_head].rt - ready_time;
495 _IQ_head = _IQ_previous_index(_IQ_head);
496 _IQ[_IQ_head].rt = ready_time;
497 _IQ[_IQ_head].ri = last_interval;
498 min_ready = ready_time;
504 for (i = _IQ_head; i != _IQ_tail; i = j)
506 j = _IQ_next_index(i);
507 if (_IQ[j].rt < ready_time)
513 for (k = _IQ_tail; k != j; k = l)
515 l = _IQ_previous_index(k);
518 i = _IQ_next_index(j);
519 _IQ[j].ri -= (_IQ[j].rt - ready_time);
520 _IQ[j].rt = ready_time;
521 _IQ[i].ri = _IQ[i].rt - ready_time;
522 _IQ_tail = _IQ_next_index(_IQ_tail);
526 average_interval = (max_ready - min_ready) / _IQ_length;
532 struct iq_node oldest;
534 if (_IQ_empty()) return 0;
535 oldest = _IQ[_IQ_head];
536 _IQ_head = (_IQ_head + 1) % INTERVAL_QSIZE;
537 if ((--_IQ_length) == 0)
543 min_ready = _IQ[_IQ_head].rt;
544 average_interval = (max_ready - min_ready) / _IQ_length;
551 get_max_interval(void)
554 long long max = LLONG_MIN;
556 for ( i= _IQ_head ; i != _IQ_tail ; i = _IQ_next_index(i))
568 tv_to_long_long(struct timeval *tv)
575 return tv->tv_sec * 1000000LL + tv->tv_usec;
579 evas_common_frameq_schedule_flush_time(int frameq_sz, int thread_no,
580 long long last_ready_time, long long current_ready_time,
581 long long last_flush_time, int ready_frames_num,
584 // to get each time and to do others
585 long long current_time = 0LL;
586 long long current_ready_interval = 0LL;
587 long long theshold_time = SATISFACTION_THRESHOLD * 1000LL; // ms -> usec
588 long long reset_time_interval = 0LL;
589 long long sleep_time = 0LL;
590 long long saved_ready_time, saved_ready_interval;
591 long long time_slept = 0LL;
592 static long long time_lag = 0;
594 int frameq_full_threshold =0;
596 int need_schedule = 0;
598 frameq_full_threshold = frameq_sz -thread_no; // Qsize - threads#
600 /* 1.5 defer flush time of current frame if need */
601 // in case of the first time, just keep ready time only
602 if (last_ready_time == 0LL)
604 last_ready_time = current_ready_time;
608 /* 1.5.1 get current ready time & interval */
609 saved_ready_time = current_ready_time;
610 saved_ready_interval = current_ready_interval = current_ready_time - last_ready_time;
611 // compensate a case which current ready time is older than previous one,
612 // doesn't work on the interval queue
613 if (current_ready_interval < 0)
615 current_ready_time = last_ready_time;
616 current_ready_interval = 0;
619 /* 1.5.2 get the reset time interval before keeping a new one */
622 reset_time_interval = RESET_RATIO * average_interval;
623 if (average_interval < DOUBLE_RESET_TIME_INTERVAL_THRESHOLD)
625 reset_time_interval *= 2;
629 /* 1.5.3 reset - if too late, discard all saved interval and start from here */
630 if (current_ready_interval > RESET_ABSOLUTE_INTERVAL)
634 else if (_IQ_length >= thread_no * 2 && current_ready_interval > reset_time_interval)
638 else if (_IQ_length >= thread_no && _IQ_length < thread_no * 2
639 && current_ready_interval > get_max_interval() * RESET_RATIO)
650 /* 1.5.4 enqueue - keep a new interval for next average interval */
655 _IQ_insert(saved_ready_time, saved_ready_interval);
657 /* 1.5.5 schedule - if faster than average interval, figure out sleep time to meet it */
662 if (_IQ_length >= thread_no * 2 && average_interval > theshold_time)
666 // compensate the case that postworker blocks the workers from getting a new fresh frame
667 // It's actually occurred when during the wait time of postworker, the frame queue is full
668 // Consequently check the number of currently ready frames and apply some time drop to average time according to the number
669 if (ready_frames_num >= frameq_full_threshold)
675 gettimeofday(&now, NULL);
676 current_time = tv_to_long_long(&now);
677 time_lag += (current_time - last_flush_time);
678 sleep_time = (average_interval < time_lag) ? 0 : (average_interval - time_lag);
682 /* 1.5.6 sleep - actually sleep and get over-slept time (time_lag) for next frame */
685 sleep_time = sleep_time * 9 / 10;
686 usleep((unsigned int)sleep_time);
687 gettimeofday(&now, NULL);
688 time_slept = tv_to_long_long(&now) - current_time;
689 time_lag = time_slept - sleep_time;
696 last_ready_time = current_ready_time;
699 return last_ready_time;
703 evas_common_frameq_thread_post(void *data)
707 Evas_Surface *surface;
709 Evas_Frameq_Thread_Info *fq_info;
710 Eina_List *pending_writes = NULL;
711 Eina_List *prev_pending_writes = NULL;
713 long long last_ready_time = 0LL;
714 long long current_ready_time;
715 Evas_Frame *temp_frame = NULL;
716 int ready_frames_num;
717 long long last_flush_time = 0LL;
719 int dont_schedule = 0;
722 fq_info = (Evas_Frameq_Thread_Info *)(thinfo->fq_info);
723 frameq = fq_info->frameq;
725 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
726 pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
727 /* install thread cancelation cleanup handler */
728 pthread_cleanup_push(evas_common_frameq_release, data);
734 /* 1. wait the first frame being done */
736 while(!frameq->frames || !frameq->frames->ready)
738 eina_condition_wait(&(frameq->cond_ready));
740 frame = frameq->frames;
742 /* 1.5. prepare to schedule flush time */
743 current_ready_time = tv_to_long_long(&frame->ready_time);
744 ready_frames_num = 0;
745 EINA_INLIST_FOREACH(EINA_INLIST_GET(frameq->frames), temp_frame)
747 if (temp_frame->ready == 1)
752 dont_schedule = (frame->dont_schedule)?1:0;
755 /* 2. generate pending_writes */
756 EINA_INLIST_FOREACH(EINA_INLIST_GET(frame->surfaces), surface)
758 evas_common_pipe_flush(surface->im);
759 if (! surface->dontpush)
761 pending_writes = eina_list_append(pending_writes, surface->im);
765 /* 2.5. schedule flush time */
766 last_ready_time = evas_common_frameq_schedule_flush_time(
767 frameq->frameq_sz, frameq->thread_num,
768 last_ready_time, current_ready_time,
769 last_flush_time, ready_frames_num, dont_schedule);
771 /* 3. flush redraws */
772 frame->output_set_priv(frame->data, pending_writes, prev_pending_writes);
773 frame->output_flush(frame->data);
774 gettimeofday(&now, NULL);
775 // keep as the last flush time
776 last_flush_time = now.tv_sec * 1000000LL + now.tv_usec;
778 prev_pending_writes = pending_writes;
779 pending_writes = NULL;
781 /* 4. remove this frame from the frame queue */
784 (Evas_Frame *)eina_inlist_remove(EINA_INLIST_GET(frameq->frames),
785 EINA_INLIST_GET(frame));
788 eina_condition_broadcast(&frameq->cond_done);
789 evas_common_frame_dealloc(frame);
792 // Remove cleanup handler
793 pthread_cleanup_pop(0);
797 #endif /* EVAS_FRAME_QUEUING */
801 static int thread_num = 0;
802 static Thinfo thinfo[TH_MAX];
803 static pthread_barrier_t thbarrier[2];
807 evas_common_pipe_begin(RGBA_Image *im)
812 #ifdef EVAS_FRAME_QUEUING
816 if (!im->cache_entry.pipe) return;
817 if (thread_num == 1) return;
819 h = im->cache_entry.h / thread_num;
821 for (i = 0; i < thread_num; i++)
823 RGBA_Pipe_Thread_Info *info;
825 // if (y >= im->cache_entry.h) break;
826 info = calloc(1, sizeof(RGBA_Pipe_Thread_Info));
830 info->w = im->cache_entry.w;
832 info->h = thread_num;
836 info->w = im->cache_entry.w;
837 if (i == (thread_num - 1))
839 info->h = im->cache_entry.h - y;
847 thinfo[i].info = info;
849 /* tell worker threads to start */
850 pthread_barrier_wait(&(thbarrier[0]));
854 #ifdef EVAS_FRAME_QUEUING
856 evas_common_frameq_begin(void)
860 Evas_Frameq_Thread_Info *fp_info;
864 if (!gframeq.initialised)
866 int cpunum, set_cpu_affinity = 0;
868 cpunum = eina_cpu_count();
869 gframeq.thread_num = cpunum;
870 gframeq.frameq_sz = cpunum * FRAMEQ_SZ_PER_THREAD;
874 for (i = 0; i < gframeq.thread_num; i++)
877 fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
878 fp_info->frameq = &gframeq;
880 gframeq.thinfo[i].thread_num = i;
881 gframeq.thinfo[i].fq_info = fp_info;
883 pthread_attr_init(&attr);
884 if (set_cpu_affinity)
887 CPU_SET((i+1) % cpunum, &cpu);
888 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
891 pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
892 evas_common_frameq_thread, &(gframeq.thinfo[i]));
894 pthread_attr_destroy(&attr);
895 pthread_detach(gframeq.thinfo[i].thread_id);
899 fp_info = calloc(1, sizeof(Evas_Frameq_Thread_Info));
900 fp_info->frameq = &gframeq;
902 gframeq.thinfo[i].thread_num = i;
903 gframeq.thinfo[i].fq_info = fp_info;
905 pthread_attr_init(&attr);
906 if (set_cpu_affinity)
909 CPU_SET((i+1) % cpunum, &cpu);
910 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
913 pthread_create(&(gframeq.thinfo[i].thread_id), &attr,
914 evas_common_frameq_thread_post, &(gframeq.thinfo[i]));
915 pthread_attr_destroy(&attr);
916 pthread_detach(gframeq.thinfo[i].thread_id);
918 gframeq.initialised = 1; // now worker threads are created.
921 DBG("%d cpus, set_cpu_affinity=%d, frameq_sz=%d",
922 cpunum, set_cpu_affinity, gframeq.frameq_sz);
924 #endif /* BUILD_PTHREAD */
928 evas_common_frameq_finish(void)
932 /* 1. cancel all worker threads */
933 for (i = 0; i < gframeq.thread_num; i++)
935 pthread_cancel(gframeq.thinfo[i].thread_id);
937 // cancel post-worker thread
938 pthread_cancel(gframeq.thinfo[i].thread_id);
940 /* 2. send signal to worker threads so that they enter to the thread cancelation cleanup handler */
941 for (i = 0; i < gframeq.thread_num; i++)
943 eina_condition_signal(&(gframeq.cond_new));
945 // send signal to post-worker thread
946 eina_condition_signal(&(gframeq.cond_ready));
948 /* all the workers were created and detached before
949 * so don't need to join them here.
954 #endif /* EVAS_FRAME_QUEUING */
957 evas_common_pipe_flush(RGBA_Image *im)
959 if (!im->cache_entry.pipe) return;
960 #ifndef EVAS_FRAME_QUEUING
964 /* sync worker threads */
965 pthread_barrier_wait(&(thbarrier[1]));
973 /* process pipe - 1 thead */
974 for (p = im->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
976 for (i = 0; i < p->op_num; i++)
978 if (p->op[i].op_func)
980 p->op[i].op_func(im, &(p->op[i]), NULL);
985 #endif /* !EVAS_FRAME_QUEUING */
986 evas_common_cpu_end_opt();
987 evas_common_pipe_free(im);
991 evas_common_pipe_free(RGBA_Image *im)
997 if (!im->cache_entry.pipe) return;
998 /* FIXME: PTHREAD join all threads here (if not finished) */
1001 while (im->cache_entry.pipe)
1003 p = im->cache_entry.pipe;
1004 for (i = 0; i < p->op_num; i++)
1006 if (p->op[i].free_func)
1008 p->op[i].free_func(&(p->op[i]));
1011 im->cache_entry.pipe = (RGBA_Pipe *)eina_inlist_remove(EINA_INLIST_GET(im->cache_entry.pipe), EINA_INLIST_GET(p));
1019 /**************** RECT ******************/
1021 evas_common_pipe_rectangle_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1025 RGBA_Draw_Context context;
1027 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1029 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1031 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1033 evas_common_rectangle_draw(dst, &(context),
1034 op->op.rect.x, op->op.rect.y,
1035 op->op.rect.w, op->op.rect.h);
1039 evas_common_rectangle_draw(dst, &(op->context),
1040 op->op.rect.x, op->op.rect.y,
1041 op->op.rect.w, op->op.rect.h);
1046 evas_common_pipe_rectangle_draw(RGBA_Image *dst, RGBA_Draw_Context *dc, int x, int y, int w, int h)
1050 if ((w < 1) || (h < 1)) return;
1051 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1052 if (!dst->cache_entry.pipe) return;
1057 op->op_func = evas_common_pipe_rectangle_draw_do;
1058 op->free_func = evas_common_pipe_op_free;
1059 evas_common_pipe_draw_context_copy(dc, op);
1062 /**************** LINE ******************/
1064 evas_common_pipe_line_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1068 RGBA_Draw_Context context;
1070 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1072 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1074 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1076 evas_common_line_draw(dst, &(context),
1077 op->op.line.x0, op->op.line.y0,
1078 op->op.line.x1, op->op.line.y1);
1082 evas_common_line_draw(dst, &(op->context),
1083 op->op.line.x0, op->op.line.y0,
1084 op->op.line.x1, op->op.line.y1);
1089 evas_common_pipe_line_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1090 int x0, int y0, int x1, int y1)
1094 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1095 if (!dst->cache_entry.pipe) return;
1096 op->op.line.x0 = x0;
1097 op->op.line.y0 = y0;
1098 op->op.line.x1 = x1;
1099 op->op.line.y1 = y1;
1100 op->op_func = evas_common_pipe_line_draw_do;
1101 op->free_func = evas_common_pipe_op_free;
1102 evas_common_pipe_draw_context_copy(dc, op);
1105 /**************** POLY ******************/
1107 evas_common_pipe_op_poly_free(RGBA_Pipe_Op *op)
1109 RGBA_Polygon_Point *p;
1111 while (op->op.poly.points)
1113 p = op->op.poly.points;
1114 op->op.poly.points = (RGBA_Polygon_Point *)eina_inlist_remove(EINA_INLIST_GET(op->op.poly.points),
1115 EINA_INLIST_GET(p));
1118 evas_common_pipe_op_free(op);
1122 evas_common_pipe_poly_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1126 RGBA_Draw_Context context;
1128 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1130 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1132 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1134 evas_common_polygon_draw(dst, &(context),
1135 op->op.poly.points, 0, 0);
1139 evas_common_polygon_draw(dst, &(op->context),
1140 op->op.poly.points, 0, 0);
1145 evas_common_pipe_poly_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1146 RGBA_Polygon_Point *points, int x, int y)
1149 RGBA_Polygon_Point *pts = NULL, *p, *pp;
1151 if (!points) return;
1152 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1153 if (!dst->cache_entry.pipe) return;
1154 /* FIXME: copy points - maybe we should refcount? */
1155 for (p = points; p; p = (RGBA_Polygon_Point *)(EINA_INLIST_GET(p))->next)
1157 pp = calloc(1, sizeof(RGBA_Polygon_Point));
1162 pts = (RGBA_Polygon_Point *)eina_inlist_append(EINA_INLIST_GET(pts), EINA_INLIST_GET(pp));
1165 op->op.poly.points = pts;
1166 op->op_func = evas_common_pipe_poly_draw_do;
1167 op->free_func = evas_common_pipe_op_poly_free;
1168 evas_common_pipe_draw_context_copy(dc, op);
1171 /**************** TEXT ******************/
1173 evas_common_pipe_op_text_free(RGBA_Pipe_Op *op)
1175 #ifdef EVAS_FRAME_QUEUING
1176 LKL(op->op.text.font->ref_fq_del);
1177 op->op.text.font->ref_fq[1]++;
1178 LKU(op->op.text.font->ref_fq_del);
1179 eina_condition_signal(&(op->op.text.font->cond_fq_del));
1181 evas_common_font_free(op->op.text.font);
1183 evas_common_text_props_content_unref(&(op->op.text.intl_props));
1184 evas_common_pipe_op_free(op);
1187 #ifdef EVAS_FRAME_QUEUING
1188 /* flush all op using @fn */
1190 evas_common_pipe_op_text_flush(RGBA_Font *fn)
1192 if (! evas_common_frameq_enabled())
1195 LKL(fn->ref_fq_add);
1196 LKL(fn->ref_fq_del);
1198 while (fn->ref_fq[0] != fn->ref_fq[1])
1199 eina_condition_wait(&(fn->cond_fq_del));
1201 LKU(fn->ref_fq_del);
1202 LKU(fn->ref_fq_add);
1207 evas_common_pipe_text_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1211 RGBA_Draw_Context context;
1213 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1215 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1217 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1219 evas_common_font_draw(dst, &(context),
1220 op->op.text.font, op->op.text.x, op->op.text.y,
1221 &op->op.text.intl_props);
1225 evas_common_font_draw(dst, &(op->context),
1226 op->op.text.font, op->op.text.x, op->op.text.y,
1227 &op->op.text.intl_props);
1232 evas_common_pipe_text_draw(RGBA_Image *dst, RGBA_Draw_Context *dc,
1233 RGBA_Font *fn, int x, int y, const Evas_Text_Props *intl_props)
1238 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1239 if (!dst->cache_entry.pipe) return;
1242 evas_common_text_props_content_copy_and_ref(&(op->op.text.intl_props),
1244 #ifdef EVAS_FRAME_QUEUING
1245 LKL(fn->ref_fq_add);
1247 LKU(fn->ref_fq_add);
1251 op->op.text.font = fn;
1252 op->op_func = evas_common_pipe_text_draw_do;
1253 op->free_func = evas_common_pipe_op_text_free;
1254 evas_common_pipe_draw_context_copy(dc, op);
1257 /**************** IMAGE *****************/
1259 evas_common_pipe_op_image_free(RGBA_Pipe_Op *op)
1261 #ifdef EVAS_FRAME_QUEUING
1262 LKL(op->op.image.src->cache_entry.ref_fq_del);
1263 op->op.image.src->cache_entry.ref_fq[1]++;
1264 LKU(op->op.image.src->cache_entry.ref_fq_del);
1265 eina_condition_signal(&(op->op.image.src->cache_entry.cond_fq_del));
1267 op->op.image.src->ref--;
1268 if (op->op.image.src->ref == 0)
1270 evas_cache_image_drop(&op->op.image.src->cache_entry);
1273 evas_common_pipe_op_free(op);
1276 #ifdef EVAS_FRAME_QUEUING
1278 evas_common_pipe_op_image_flush(RGBA_Image *im)
1280 if (! evas_common_frameq_enabled())
1283 LKL(im->cache_entry.ref_fq_add);
1284 LKL(im->cache_entry.ref_fq_del);
1286 while (im->cache_entry.ref_fq[0] != im->cache_entry.ref_fq[1])
1287 eina_condition_wait(&(im->cache_entry.cond_fq_del));
1289 LKU(im->cache_entry.ref_fq_del);
1290 LKU(im->cache_entry.ref_fq_add);
1295 evas_common_pipe_image_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1299 RGBA_Draw_Context context;
1301 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1303 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1305 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1309 evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
1311 op->op.image.smooth,
1321 if (op->op.image.smooth)
1323 evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
1336 evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
1352 evas_common_rgba_image_scalecache_do((Image_Entry *)(op->op.image.src),
1353 dst, &(op->context),
1354 op->op.image.smooth,
1364 if (op->op.image.smooth)
1366 evas_common_scale_rgba_in_to_out_clip_smooth(op->op.image.src,
1367 dst, &(op->context),
1379 evas_common_scale_rgba_in_to_out_clip_sample(op->op.image.src,
1380 dst, &(op->context),
1395 evas_common_pipe_image_draw(RGBA_Image *src, RGBA_Image *dst,
1396 RGBA_Draw_Context *dc, int smooth,
1397 int src_region_x, int src_region_y,
1398 int src_region_w, int src_region_h,
1399 int dst_region_x, int dst_region_y,
1400 int dst_region_w, int dst_region_h)
1405 // evas_common_pipe_flush(src);
1406 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1407 if (!dst->cache_entry.pipe) return;
1408 op->op.image.smooth = smooth;
1409 op->op.image.sx = src_region_x;
1410 op->op.image.sy = src_region_y;
1411 op->op.image.sw = src_region_w;
1412 op->op.image.sh = src_region_h;
1413 op->op.image.dx = dst_region_x;
1414 op->op.image.dy = dst_region_y;
1415 op->op.image.dw = dst_region_w;
1416 op->op.image.dh = dst_region_h;
1417 #ifdef EVAS_FRAME_QUEUING
1418 LKL(src->cache_entry.ref_fq_add);
1419 src->cache_entry.ref_fq[0]++;
1420 LKU(src->cache_entry.ref_fq_add);
1424 op->op.image.src = src;
1425 op->op_func = evas_common_pipe_image_draw_do;
1426 op->free_func = evas_common_pipe_op_image_free;
1427 evas_common_pipe_draw_context_copy(dc, op);
1429 #ifdef EVAS_FRAME_QUEUING
1430 /* laod every src image here.
1431 * frameq utilize all cpu cores already by worker threads
1432 * so another threads and barrier waiting can't be of any benefit.
1433 * therefore, not instantiate loader threads.
1435 if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1436 evas_cache_image_load_data(&src->cache_entry);
1437 evas_common_image_colorspace_normalize(src);
1439 evas_common_pipe_image_load(src);
1444 evas_common_pipe_op_map_free(RGBA_Pipe_Op *op)
1446 #ifdef EVAS_FRAME_QUEUING
1447 LKL(op->op.image.src->cache_entry.ref_fq_del);
1448 op->op.image.src->cache_entry.ref_fq[1]++;
1449 LKU(op->op.image.src->cache_entry.ref_fq_del);
1451 op->op.map.src->ref--;
1452 if (op->op.map.src->ref == 0)
1453 evas_cache_image_drop(&op->op.map.src->cache_entry);
1456 evas_common_pipe_op_free(op);
1460 evas_common_pipe_map_draw_do(RGBA_Image *dst, RGBA_Pipe_Op *op, RGBA_Pipe_Thread_Info *info)
1464 RGBA_Draw_Context context;
1466 memcpy(&(context), &(op->context), sizeof(RGBA_Draw_Context));
1468 evas_common_draw_context_set_sli(&(context), info->y, info->h);
1470 evas_common_draw_context_clip_clip(&(context), info->x, info->y, info->w, info->h);
1473 evas_common_map_rgba(op->op.map.src, dst,
1474 &context, op->op.map.npoints, op->op.map.p,
1475 op->op.map.smooth, op->op.map.level);
1479 evas_common_map_rgba(op->op.map.src, dst,
1480 &(op->context), op->op.map.npoints, op->op.map.p,
1481 op->op.map.smooth, op->op.map.level);
1486 evas_common_pipe_map_draw(RGBA_Image *src, RGBA_Image *dst,
1487 RGBA_Draw_Context *dc, int npoints, RGBA_Map_Point *p,
1488 int smooth, int level)
1491 RGBA_Map_Point *pts_copy;
1495 pts_copy = malloc(sizeof (RGBA_Map_Point) * 4);
1496 if (!pts_copy) return;
1497 dst->cache_entry.pipe = evas_common_pipe_add(dst->cache_entry.pipe, &op);
1498 if (!dst->cache_entry.pipe)
1504 for (i = 0; i < 4; ++i)
1507 op->op.map.npoints = npoints;
1508 op->op.map.smooth = smooth;
1509 op->op.map.level = level;
1510 #ifdef EVAS_FRAME_QUEUING
1511 LKL(src->cache_entry.ref_fq_add);
1512 src->cache_entry.ref_fq[0]++;
1513 LKU(src->cache_entry.ref_fq_add);
1517 op->op.map.src = src;
1518 op->op.map.p = pts_copy;
1519 op->op_func = evas_common_pipe_map_draw_do;
1520 op->free_func = evas_common_pipe_op_map_free;
1521 evas_common_pipe_draw_context_copy(dc, op);
1523 #ifdef EVAS_FRAME_QUEUING
1524 /* laod every src image here.
1525 * frameq utilize all cpu cores already by worker threads
1526 * so another threads and barrier waiting can't be of any benefit.
1527 * therefore, not instantiate loader threads.
1529 if (src->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1530 evas_cache_image_load_data(&src->cache_entry);
1531 evas_common_image_colorspace_normalize(src);
1533 evas_common_pipe_image_load(src);
1538 evas_common_pipe_map_render(RGBA_Image *root)
1543 /* Map imply that we need to process them recursively first. */
1544 for (p = root->cache_entry.pipe; p; p = (RGBA_Pipe *)(EINA_INLIST_GET(p))->next)
1546 for (i = 0; i < p->op_num; i++)
1548 if (p->op[i].op_func == evas_common_pipe_map_draw_do)
1550 if (p->op[i].op.map.src->cache_entry.pipe)
1551 evas_common_pipe_map_render(p->op[i].op.map.src);
1553 else if (p->op[i].op_func == evas_common_pipe_image_draw_do)
1555 if (p->op[i].op.image.src->cache_entry.pipe)
1556 evas_common_pipe_map_render(p->op[i].op.image.src);
1561 evas_common_pipe_begin(root);
1562 evas_common_pipe_flush(root);
1565 #ifdef BUILD_PTHREAD
1566 static Eina_List *task = NULL;
1567 static Thinfo task_thinfo[TH_MAX];
1568 static pthread_barrier_t task_thbarrier[2];
1569 static LK(task_mutext);
1572 #ifdef BUILD_PTHREAD
1574 evas_common_pipe_load(void *data)
1581 /* wait for start signal */
1582 pthread_barrier_wait(&(tinfo->barrier[0]));
1586 RGBA_Image *im = NULL;
1589 im = eina_list_data_get(task);
1590 task = eina_list_remove_list(task, task);
1595 if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1596 evas_cache_image_load_data(&im->cache_entry);
1597 evas_common_image_colorspace_normalize(im);
1599 im->flags &= ~RGBA_IMAGE_TODO_LOAD;
1603 /* send finished signal */
1604 pthread_barrier_wait(&(tinfo->barrier[1]));
1611 static volatile int bval = 0;
1614 evas_common_pipe_image_load_do(void)
1616 #ifdef BUILD_PTHREAD
1617 /* Notify worker thread. */
1618 pthread_barrier_wait(&(task_thbarrier[0]));
1620 /* sync worker threads */
1621 pthread_barrier_wait(&(task_thbarrier[1]));
1626 evas_common_pipe_init(void)
1628 #ifdef BUILD_PTHREAD
1629 if (thread_num == 0)
1634 cpunum = eina_cpu_count();
1635 thread_num = cpunum;
1636 // on single cpu we still want this initted.. otherwise we block forever
1637 // waiting onm pthread barriers for async rendering on a single core!
1638 // if (thread_num == 1) return EINA_FALSE;
1640 eina_threads_init();
1644 pthread_barrier_init(&(thbarrier[0]), NULL, thread_num + 1);
1645 pthread_barrier_init(&(thbarrier[1]), NULL, thread_num + 1);
1646 for (i = 0; i < thread_num; i++)
1648 pthread_attr_t attr;
1651 pthread_attr_init(&attr);
1653 CPU_SET(i % cpunum, &cpu);
1654 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
1655 thinfo[i].thread_num = i;
1656 thinfo[i].info = NULL;
1657 thinfo[i].barrier = thbarrier;
1658 /* setup initial locks */
1659 pthread_create(&(thinfo[i].thread_id), &attr,
1660 evas_common_pipe_thread, &(thinfo[i]));
1661 pthread_attr_destroy(&attr);
1664 pthread_barrier_init(&(task_thbarrier[0]), NULL, thread_num + 1);
1665 pthread_barrier_init(&(task_thbarrier[1]), NULL, thread_num + 1);
1666 for (i = 0; i < thread_num; i++)
1668 pthread_attr_t attr;
1671 pthread_attr_init(&attr);
1673 CPU_SET(i % cpunum, &cpu);
1674 pthread_attr_setaffinity_np(&attr, sizeof(cpu), &cpu);
1675 task_thinfo[i].thread_num = i;
1676 task_thinfo[i].info = NULL;
1677 task_thinfo[i].barrier = task_thbarrier;
1678 /* setup initial locks */
1679 pthread_create(&(task_thinfo[i].thread_id), &attr,
1680 evas_common_pipe_load, &(task_thinfo[i]));
1681 pthread_attr_destroy(&attr);
1685 if (thread_num == 1) return EINA_FALSE;
1692 evas_common_pipe_image_load(RGBA_Image *im)
1694 if (im->flags & RGBA_IMAGE_TODO_LOAD)
1697 if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888
1698 && !evas_cache_image_is_loaded(&(im->cache_entry)))
1701 if ((!im->cs.data) || ((!im->cs.dirty) && (!(im->flags & RGBA_IMAGE_IS_DIRTY))))
1707 task = eina_list_append(task, im);
1708 im->flags |= RGBA_IMAGE_TODO_LOAD;
1712 evas_common_pipe_map_begin(RGBA_Image *root)
1714 if (!evas_common_pipe_init())
1718 EINA_LIST_FREE(task, im)
1720 if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
1721 evas_cache_image_load_data(&im->cache_entry);
1722 evas_common_image_colorspace_normalize(im);
1724 im->flags &= ~RGBA_IMAGE_TODO_LOAD;
1728 evas_common_pipe_image_load_do();
1730 evas_common_pipe_map_render(root);