/***************************************************************************/
/***                  emotion xine display engine                        ***/
/***************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>

#include <Eina.h>
#include <Evas.h>
#include <Ecore.h>

#include "Emotion.h"
#include "emotion_private.h"
#include "emotion_xine.h"

#include <xine.h>
#include <xine/video_out.h>
#include <xine/xine_internal.h>
#include <xine/xineutils.h>
#include <xine/vo_scale.h>

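/* Blend "src" over "dst" with a 4-bit opacity o (0x0 keeps dst, 0xf takes src);
 * used by the overlay blending code below */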
#define BLEND_BYTE(dst, src, o) (((src)*o + ((dst)*(0xf-o)))/0xf)

/***************************************************************************/
typedef struct _Emotion_Frame  Emotion_Frame;
typedef struct _Emotion_Driver Emotion_Driver;
typedef struct _Emotion_Class  Emotion_Class;
typedef struct _Emotion_Lut    Emotion_Lut;

struct _Emotion_Frame
{
   vo_frame_t           vo_frame;
   int                  width;
   int                  height;
   double               ratio;
   int                  format;
   xine_t               *xine;

   Emotion_Xine_Video_Frame frame;
   unsigned char            in_use : 1;
};

struct _Emotion_Driver
{
   vo_driver_t          vo_driver;
   config_values_t     *config;
   int                  ratio;
   xine_t               *xine;
   Emotion_Xine_Video   *ev;
};

struct _Emotion_Class
{
   video_driver_class_t  driver_class;
   config_values_t      *config;
   xine_t               *xine;
};

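/* One entry of a xine overlay palette (CLUT): Cb, Cr and Y components plus a
 * padding byte, matching the layout the overlay blender below expects */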
struct _Emotion_Lut
{
   uint8_t cb    : 8;
   uint8_t cr    : 8;
   uint8_t y     : 8;
   uint8_t foo   : 8;
} __attribute__ ((packed));

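/* Type of the callback stored in each delivered frame; the Emotion side calls
 * it (with done_data) once it has finished with the frame */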
typedef void (*done_func_type)(void *data);

/***************************************************************************/
static void        *_emotion_class_init            (xine_t *xine, void *visual);
static void         _emotion_class_dispose         (video_driver_class_t *driver_class);
static char        *_emotion_class_identifier_get  (video_driver_class_t *driver_class);
static char        *_emotion_class_description_get (video_driver_class_t *driver_class);

static vo_driver_t *_emotion_open                  (video_driver_class_t *driver_class, const void *visual);
static void         _emotion_dispose               (vo_driver_t *vo_driver);

static int          _emotion_redraw                (vo_driver_t *vo_driver);

static uint32_t     _emotion_capabilities_get      (vo_driver_t *vo_driver);
static int          _emotion_gui_data_exchange     (vo_driver_t *vo_driver, int data_type, void *data);

static int          _emotion_property_set          (vo_driver_t *vo_driver, int property, int value);
static int          _emotion_property_get          (vo_driver_t *vo_driver, int property);
static void         _emotion_property_min_max_get  (vo_driver_t *vo_driver, int property, int *min, int *max);

static vo_frame_t  *_emotion_frame_alloc           (vo_driver_t *vo_driver);
static void         _emotion_frame_dispose         (vo_frame_t *vo_frame);
static void         _emotion_frame_format_update   (vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags);
static void         _emotion_frame_display         (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
static void         _emotion_frame_field           (vo_frame_t *vo_frame, int which_field);

static void         _emotion_frame_data_free       (Emotion_Frame *fr);
static void         _emotion_frame_data_unlock     (Emotion_Frame *fr);

static void         _emotion_overlay_begin         (vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed);
static void         _emotion_overlay_end           (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
static void         _emotion_overlay_blend         (vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay);

static void         _emotion_overlay_mem_blend_8   (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
static void         _emotion_overlay_blend_yuv     (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);

static void         _emotion_yuy2_to_bgra32        (int width, int height, unsigned char *src, unsigned char *dst);

/***************************************************************************/
static vo_info_t _emotion_info =
{
   1,                        /* priority */
   XINE_VISUAL_TYPE_NONE   /* visual type */
};

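/* Plugin table exported to xine: a single video-out plugin named "emotion",
 * terminated by the empty PLUGIN_NONE entry */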
plugin_info_t emotion_xine_plugin_info[] =
{
     { PLUGIN_VIDEO_OUT, 21, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
     { PLUGIN_NONE, 0, "", 0, NULL, NULL }
};

/***************************************************************************/
static void *
_emotion_class_init(xine_t *xine, void *visual __UNUSED__)
{
   Emotion_Class *cl;

//   DBG("");
   cl = (Emotion_Class *) malloc(sizeof(Emotion_Class));
   if (!cl) return NULL;
   cl->driver_class.open_plugin     = _emotion_open;
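   /* xine 1.2 replaced the get_identifier()/get_description() callbacks with
    * plain string fields, so fill in whichever form this xine provides */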
#if XINE_MAJOR_VERSION < 1 || (XINE_MAJOR_VERSION == 1 && XINE_MINOR_VERSION < 2)
   cl->driver_class.get_identifier  = _emotion_class_identifier_get;
   cl->driver_class.get_description = _emotion_class_description_get;
#else
   cl->driver_class.identifier      = _emotion_class_identifier_get(NULL);
   cl->driver_class.description     = _emotion_class_description_get(NULL);
#endif
   cl->driver_class.dispose         = _emotion_class_dispose;
   cl->config                       = xine->config;
   cl->xine                         = xine;

   return cl;
}

static void
_emotion_class_dispose(video_driver_class_t *driver_class)
{
   Emotion_Class *cl;

   cl = (Emotion_Class *)driver_class;
   free(cl);
}

static char *
_emotion_class_identifier_get(video_driver_class_t *driver_class __UNUSED__)
{
   return "emotion";
}

static char *
_emotion_class_description_get(video_driver_class_t *driver_class __UNUSED__)
{
   return "Emotion xine video output plugin";
}

/***************************************************************************/
static vo_driver_t *
_emotion_open(video_driver_class_t *driver_class, const void *visual)
{
   Emotion_Class *cl;
   Emotion_Driver *dv;

   cl = (Emotion_Class *)driver_class;
   /* visual here is the data ptr passed to xine_open_video_driver() */
//   DBG("");
   dv = (Emotion_Driver *)malloc(sizeof(Emotion_Driver));
   if (!dv) return NULL;

   dv->config                         = cl->config;
   dv->xine                           = cl->xine;
   dv->ratio                          = XINE_VO_ASPECT_AUTO;
   dv->vo_driver.get_capabilities     = _emotion_capabilities_get;
   dv->vo_driver.alloc_frame          = _emotion_frame_alloc;
   dv->vo_driver.update_frame_format  = _emotion_frame_format_update;
   dv->vo_driver.overlay_begin        = _emotion_overlay_begin;
   dv->vo_driver.overlay_blend        = _emotion_overlay_blend;
   dv->vo_driver.overlay_end          = _emotion_overlay_end;
   dv->vo_driver.display_frame        = _emotion_frame_display;
   dv->vo_driver.get_property         = _emotion_property_get;
   dv->vo_driver.set_property         = _emotion_property_set;
   dv->vo_driver.get_property_min_max = _emotion_property_min_max_get;
   dv->vo_driver.gui_data_exchange    = _emotion_gui_data_exchange;
   dv->vo_driver.dispose              = _emotion_dispose;
   dv->vo_driver.redraw_needed        = _emotion_redraw;
   dv->ev                             = (Emotion_Xine_Video *)visual;
   dv->ev->have_vo = 1;
   DBG("vo_driver = %p", &dv->vo_driver);
   return &dv->vo_driver;
}

static void
_emotion_dispose(vo_driver_t *vo_driver)
{
   Emotion_Driver *dv;

   dv = (Emotion_Driver *)vo_driver;
   dv->ev->have_vo = 0;
   DBG("vo_driver = %p", dv);
   free(dv);
}

/***************************************************************************/
static int
_emotion_redraw(vo_driver_t *vo_driver __UNUSED__)
{
//   DBG("");
   return 0;
}

/***************************************************************************/
static uint32_t
_emotion_capabilities_get(vo_driver_t *vo_driver __UNUSED__)
{
//   DBG("");
   return VO_CAP_YV12 | VO_CAP_YUY2;
}

/***************************************************************************/
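/* There is no window or display behind this driver, so the GUI data exchange
 * requests coming from xine are acknowledged but otherwise ignored */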
static int
_emotion_gui_data_exchange(vo_driver_t *vo_driver __UNUSED__, int data_type, void *data __UNUSED__)
{
//   DBG("");
   switch (data_type)
     {
      case XINE_GUI_SEND_COMPLETION_EVENT:
        break;
      case XINE_GUI_SEND_DRAWABLE_CHANGED:
        break;
      case XINE_GUI_SEND_EXPOSE_EVENT:
        break;
      case XINE_GUI_SEND_TRANSLATE_GUI_TO_VIDEO:
        break;
      case XINE_GUI_SEND_VIDEOWIN_VISIBLE:
        break;
      case XINE_GUI_SEND_SELECT_VISUAL:
        break;
      default:
        break;
     }
   return 0;
}

254
255 /***************************************************************************/
256 static int
257 _emotion_property_set(vo_driver_t *vo_driver, int property, int value)
258 {
259    Emotion_Driver *dv;
260    
261    dv = (Emotion_Driver *)vo_driver;
262 //   DBG("");
263    switch (property)
264      {
265       case VO_PROP_ASPECT_RATIO:
266         if (value >= XINE_VO_ASPECT_NUM_RATIOS)
267           value = XINE_VO_ASPECT_AUTO;
268 //      DBG("DRIVER RATIO SET %i!", value);
269         dv->ratio = value;
270         break;
271       default:
272         break;
273      }
274    return value;
275 }
276
277 static int
278 _emotion_property_get(vo_driver_t *vo_driver, int property)
279 {
280    Emotion_Driver *dv;
281    
282    dv = (Emotion_Driver *)vo_driver;
283 //   DBG("");
284    switch (property)
285      {
286       case VO_PROP_ASPECT_RATIO:
287         return dv->ratio;
288         break;
289       default:
290         break;
291      }
292   return 0;
293 }
294
295 static void
296 _emotion_property_min_max_get(vo_driver_t *vo_driver __UNUSED__, int property __UNUSED__, int *min, int *max)
297 {
298 //   DBG("");
299    *min = 0;
300    *max = 0;
301 }
302
303 /***************************************************************************/
304 static vo_frame_t *
_emotion_frame_alloc(vo_driver_t *vo_driver)
{
   Emotion_Frame *fr;

//   DBG("");
   fr = (Emotion_Frame *)calloc(1, sizeof(Emotion_Frame));
   if (!fr) return NULL;

   fr->vo_frame.base[0]    = NULL;
   fr->vo_frame.base[1]    = NULL;
   fr->vo_frame.base[2]    = NULL;

   fr->vo_frame.proc_slice = NULL;
   fr->vo_frame.proc_frame = NULL;
   fr->vo_frame.field      = _emotion_frame_field;
   fr->vo_frame.dispose    = _emotion_frame_dispose;
   fr->vo_frame.driver     = vo_driver;

   return (vo_frame_t *)fr;
}

static void
_emotion_frame_dispose(vo_frame_t *vo_frame)
{
   Emotion_Frame *fr;

   fr = (Emotion_Frame *)vo_frame;
//   DBG("");
   _emotion_frame_data_free(fr);
   free(fr);
}

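/* update_frame_format hook: (re)allocate the frame buffers when the size or
 * pixel format changes and refresh the Emotion_Xine_Video_Frame that gets
 * handed to the Emotion front end */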
static void
_emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags __UNUSED__)
{
   Emotion_Driver *dv;
   Emotion_Frame *fr;

   dv = (Emotion_Driver *)vo_driver;
   fr = (Emotion_Frame *)vo_frame;

   if ((fr->width != (int)width) || (fr->height != (int)height) ||
       (fr->format != format) || (!fr->vo_frame.base[0]))
     {
//   DBG("");
        _emotion_frame_data_free(fr);

        fr->width  = width;
        fr->height = height;
        fr->format = format;

        switch (format)
          {
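           /* Planar YV12: one allocation holds the Y plane followed by the V
            * and U planes (YV12 order); xine's base[1]/base[2] are pointed at
            * the U and V planes inside it and handed to Emotion as-is */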
           case XINE_IMGFMT_YV12:
               {
                  int y_size, uv_size;

                  fr->frame.format = EMOTION_FORMAT_YV12;
                  fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
                  fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
                  fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);

                  y_size  = fr->vo_frame.pitches[0] * height;
                  uv_size = fr->vo_frame.pitches[1] * ((height + 1) / 2);

                  fr->vo_frame.base[0] = malloc(y_size + (2 * uv_size));
                  fr->vo_frame.base[1] = fr->vo_frame.base[0] + y_size + uv_size;
                  fr->vo_frame.base[2] = fr->vo_frame.base[0] + y_size;
                  fr->frame.w = fr->width;
                  fr->frame.h = fr->height;
                  fr->frame.ratio = fr->vo_frame.ratio;
                  fr->frame.y = fr->vo_frame.base[0];
                  fr->frame.u = fr->vo_frame.base[1];
                  fr->frame.v = fr->vo_frame.base[2];
                  fr->frame.bgra_data = NULL;
                  fr->frame.y_stride = fr->vo_frame.pitches[0];
                  fr->frame.u_stride = fr->vo_frame.pitches[1];
                  fr->frame.v_stride = fr->vo_frame.pitches[2];
                  fr->frame.obj = dv->ev->obj;
               }
             break;
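           /* Packed YUY2: xine decodes into base[0]; the display path later
            * converts that into the separate BGRA buffer allocated here, which
            * is what Emotion actually receives */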
           case XINE_IMGFMT_YUY2:
               {
                  fr->frame.format = EMOTION_FORMAT_BGRA;
                  fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
                  fr->vo_frame.pitches[1] = 0;
                  fr->vo_frame.pitches[2] = 0;

                  fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
                  fr->vo_frame.base[1] = NULL;
                  fr->vo_frame.base[2] = NULL;

                  fr->frame.w = fr->width;
                  fr->frame.h = fr->height;
                  fr->frame.ratio = fr->vo_frame.ratio;
                  fr->frame.y = NULL;
                  fr->frame.u = NULL;
                  fr->frame.v = NULL;
                  fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
                  fr->frame.y_stride = 0;
                  fr->frame.u_stride = 0;
                  fr->frame.v_stride = 0;
                  fr->frame.obj = dv->ev->obj;
               }
             break;
           default:
             break;
          }
        if (((format == XINE_IMGFMT_YV12)
             && ((!fr->vo_frame.base[0])
                 || (!fr->vo_frame.base[1])
                 || (!fr->vo_frame.base[2])))
            || ((format == XINE_IMGFMT_YUY2)
                && ((!fr->vo_frame.base[0])
                    || (!fr->frame.bgra_data))))
          {
             _emotion_frame_data_free(fr);
          }
     }
   fr->frame.ratio = fr->vo_frame.ratio;
   fr->ratio = ratio;
}

static void
_emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
{
   Emotion_Driver *dv;
   Emotion_Frame *fr;

   dv = (Emotion_Driver *)vo_driver;
   fr = (Emotion_Frame *)vo_frame;
//   DBG("fq %i %p", dv->ev->fq, dv->ev);
// if my frame queue is too deep ( > 4 frames) simply block and wait for them
// to drain
//   while (dv->ev->fq > 4) usleep(1);
   if (dv->ev)
     {
        void *buf;

        if (dv->ev->closing) return;
        if (fr->format == XINE_IMGFMT_YUY2)
          {
             _emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
          }

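        /* Hand the frame over: write its pointer to the fd_write pipe for the
         * Emotion side to pick up, and record done_func/done_data so that side
         * can release the frame once it has been consumed */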
        buf = &(fr->frame);
        fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
        fr->frame.done_func = (done_func_type)_emotion_frame_data_unlock;
        fr->frame.done_data = fr;
//      DBG("FRAME FOR %p", dv->ev);
        if (write(dv->ev->fd_write, &buf, sizeof(void *)) < 0) perror("write");
//      DBG("-- FRAME DEC %p == %i", fr->frame.obj, ret);
        fr->in_use = 1;
        dv->ev->fq++;
     }
   /* hmm - must find a way to sanely copy data out... FIXME problem */
//   fr->vo_frame.free(&fr->vo_frame);
}

static void
_emotion_frame_field(vo_frame_t *vo_frame __UNUSED__, int which_field __UNUSED__)
{
//   DBG("");
}

/***************************************************************************/
static void
_emotion_frame_data_free(Emotion_Frame *fr)
{
   if (fr->vo_frame.base[0])
     {
        free(fr->vo_frame.base[0]);
        fr->vo_frame.base[0] = NULL;
        fr->vo_frame.base[1] = NULL;
        fr->vo_frame.base[2] = NULL;
        fr->frame.y = fr->vo_frame.base[0];
        fr->frame.u = fr->vo_frame.base[1];
        fr->frame.v = fr->vo_frame.base[2];
     }
   if (fr->frame.bgra_data)
     {
        free(fr->frame.bgra_data);
        fr->frame.bgra_data = NULL;
     }
}

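/* Invoked (via frame.done_func) once the Emotion side is finished with a
 * delivered frame: give the vo_frame back to xine */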
static void
_emotion_frame_data_unlock(Emotion_Frame *fr)
{
//   DBG("");
   if (fr->in_use)
     {
        fr->vo_frame.free(&fr->vo_frame);
        fr->in_use = 0;
     }
}

/***************************************************************************/
static void
_emotion_overlay_begin(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__, int changed __UNUSED__)
{
//   DBG("");
}

static void
_emotion_overlay_end(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__)
{
//   DBG("");
}

static void
_emotion_overlay_blend(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay)
{
   Emotion_Frame *fr;

   fr = (Emotion_Frame *)vo_frame;
//   DBG("");
   _emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
                              fr->width, fr->height,
                              fr->vo_frame.pitches);
}

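/* Blend the single value "val" over "sz" bytes of "mem" with 4-bit opacity o */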
static void _emotion_overlay_mem_blend_8(uint8_t *mem, uint8_t val, uint8_t o, size_t sz)
{
   uint8_t *limit = mem + sz;
   while (mem < limit)
     {
        *mem = BLEND_BYTE(*mem, val, o);
        mem++;
     }
}

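/* Software-blend an RLE-encoded overlay (subtitles, DVD menus) into the YV12
 * planes: runs inside the highlight rectangle use the highlight palette and
 * transparency table, everything else uses the overlay's normal palette */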
static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3])
{
   Emotion_Lut *my_clut;
   uint8_t *my_trans;
   int src_width;
   int src_height;
   rle_elem_t *rle;
   rle_elem_t *rle_limit;
   int x_off;
   int y_off;
   int ymask, xmask;
   int rle_this_bite;
   int rle_remainder;
   int rlelen;
   int x, y;
   int hili_right;
   uint8_t clr = 0;

   src_width = img_overl->width;
   src_height = img_overl->height;
   rle = img_overl->rle;
   rle_limit = rle + img_overl->num_rle;
   x_off = img_overl->x;
   y_off = img_overl->y;

   if (!rle) return;

   uint8_t *dst_y = dst_base[0] + dst_pitches[0] * y_off + x_off;
   uint8_t *dst_cr = dst_base[2] + (y_off / 2) * dst_pitches[2] + (x_off / 2) + 1;
   uint8_t *dst_cb = dst_base[1] + (y_off / 2) * dst_pitches[1] + (x_off / 2) + 1;
   my_clut = (Emotion_Lut *) img_overl->hili_color;
   my_trans = img_overl->hili_trans;

   /* avoid wrapping the overlay if drawing to a small image */
   if ((x_off + img_overl->hili_right) < dst_width)
     hili_right = img_overl->hili_right;
   else
     hili_right = dst_width - 1 - x_off;

   /* avoid buffer overflow */
   if ((src_height + y_off) >= dst_height)
     src_height = dst_height - 1 - y_off;

   rlelen = rle_remainder = 0;
   for (y = 0; y < src_height; y++)
     {
        ymask = ((img_overl->hili_top > y) || (img_overl->hili_bottom < y));
        xmask = 0;

        for (x = 0; x < src_width;)
          {
             uint16_t o;

             if (rlelen == 0)
               {
                  rle_remainder = rlelen = rle->len;
                  clr = rle->color;
                  rle++;
               }
             if (rle_remainder == 0)
               {
                  rle_remainder = rlelen;
               }
             if ((rle_remainder + x) > src_width)
               {
                  /* Do something for long RLE lengths */
                  rle_remainder = src_width - x;
               }

             if (ymask == 0)
               {
                  if (x <= img_overl->hili_left)
                    {
                       /* Starts outside clip area */
                       if ((x + rle_remainder - 1) > img_overl->hili_left)
                         {
                            /* Cutting needed, starts outside, ends inside */
                            rle_this_bite = (img_overl->hili_left - x + 1);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
                  else if (x < hili_right)
                    {
                       /* Starts inside clip area */
                       if ((x + rle_remainder) > hili_right)
                         {
                            /* Cutting needed, starts inside, ends outside */
                            rle_this_bite = (hili_right - x);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                       else
                         {
                            /* no cutting needed, starts inside, ends inside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                    }
                  else if (x >= hili_right)
                    {
                       /* Starts outside clip area, ends outside clip area */
                       if ((x + rle_remainder) > src_width)
                         {
                            /* Cutting needed, starts outside, ends at right edge */
                            /* It should never reach here due to the earlier test of src_width */
                            rle_this_bite = (src_width - x);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
               }
             else
               {
                  /* Outside clip area due to y */
                  /* no cutting needed, starts outside, ends outside */
                  rle_this_bite = rle_remainder;
                  rle_remainder = 0;
                  rlelen -= rle_this_bite;
                  my_clut = (Emotion_Lut *) img_overl->color;
                  my_trans = img_overl->trans;
                  xmask = 0;
               }
             o = my_trans[clr];
             if (o)
               {
                  if (o >= 15)
                    {
                       memset(dst_y + x, my_clut[clr].y, rle_this_bite);
                       if (y & 1)
                         {
                            memset(dst_cr + (x >> 1), my_clut[clr].cr, (rle_this_bite + 1) >> 1);
                            memset(dst_cb + (x >> 1), my_clut[clr].cb, (rle_this_bite + 1) >> 1);
                         }
                    }
                  else
                    {
                       _emotion_overlay_mem_blend_8(dst_y + x, my_clut[clr].y, o, rle_this_bite);
                       if (y & 1)
                         {
                            /* Blending cr and cb should use a different function, with pre -128 to each sample */
                            _emotion_overlay_mem_blend_8(dst_cr + (x >> 1), my_clut[clr].cr, o, (rle_this_bite + 1) >> 1);
                            _emotion_overlay_mem_blend_8(dst_cb + (x >> 1), my_clut[clr].cb, o, (rle_this_bite + 1) >> 1);
                         }
                    }
               }
             x += rle_this_bite;
             if (rle >= rle_limit)
               {
                  break;
               }
          }
        if (rle >= rle_limit)
          {
             break;
          }

        dst_y += dst_pitches[0];

        if (y & 1)
          {
             dst_cr += dst_pitches[2];
             dst_cb += dst_pitches[1];
          }
     }
}

//TODO: Really need to improve this converter!
#define LIMIT(x)  ((x) > 0xff ? 0xff : ((x) < 0 ? 0 : (x)))

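/* Convert packed YUY2 (Y0 U Y1 V ...) into 32-bit BGRA using BT.601
 * coefficients. The source is walked linearly, so this assumes the YUY2
 * buffer has a pitch of exactly width * 2 and an even width */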
static void
_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
{
   int i, j;
   unsigned char *y, *u, *v;

   y = src;
   u = src + 1;
   v = src + 3;
   for (i = 0; i < height; i++)
     {
        for (j = 0; j < width; j++)
          {
             *dst++ = LIMIT(1.164 * (*y - 16) + 2.018 * (*u - 128));
             *dst++ = LIMIT(1.164 * (*y - 16) - 0.813 * (*v - 128) - 0.391 * (*u - 128));
             *dst++ = LIMIT(1.164 * (*y - 16) + 1.596 * (*v - 128));
             *dst++ = 0;

             y += 2;
             /* each u/v pair is shared by two horizontally adjacent pixels */
             if (j % 2 == 1)
               {
                  u += 4;
                  v += 4;
               }
          }
     }
}