e0502f631148dc2f42d0d59f4edc584a444451cb
[profile/ivi/emotion.git] / src / modules / xine / emotion_xine_vo_out.c
1 /***************************************************************************/
2 /***                  emotion xine display engine                        ***/
3 /***************************************************************************/
4 #ifdef HAVE_CONFIG_H
5 # include "config.h"
6 #endif
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <unistd.h>
11 #include <string.h>
12
13 #include <Eina.h>
14 #include <Evas.h>
15 #include <Ecore.h>
16
17 #include "Emotion.h"
18 #include "emotion_private.h"
19 #include "emotion_xine.h"
20
21 #include <xine.h>
22 #include <xine/video_out.h>
23 #include <xine/xine_internal.h>
24 #include <xine/xineutils.h>
25 #include <xine/vo_scale.h>
26
/* 4-bit alpha blend: mix 'src' over 'dst' with opacity o in [0, 0xf]
 * (0 = keep dst, 0xf = full src).  BUGFIX: every macro parameter is now
 * parenthesized; 'o' was previously expanded bare, which would misparse
 * for compound arguments such as BLEND_BYTE(d, s, a | b). */
#define BLEND_BYTE(dst, src, o) ((((src) * (o)) + ((dst) * (0xf - (o)))) / 0xf)
28
29 /***************************************************************************/
30 typedef struct _Emotion_Frame  Emotion_Frame;
31 typedef struct _Emotion_Driver Emotion_Driver;
32 typedef struct _Emotion_Class  Emotion_Class;
33 typedef struct _Emotion_Lut    Emotion_Lut;
34
/* One xine video frame plus the Emotion-side description of its pixel
 * buffers, which is shipped over a pipe to the Evas frontend
 * (see _emotion_frame_display()). */
struct _Emotion_Frame
{
   vo_frame_t           vo_frame; /* must stay first: xine casts vo_frame_t* <-> Emotion_Frame* */
   int                  width;    /* frame width in pixels */
   int                  height;   /* frame height in pixels */
   double               ratio;    /* display aspect ratio reported by xine */
   int                  format;   /* XINE_IMGFMT_YV12 or XINE_IMGFMT_YUY2 */
   xine_t               *xine;    /* owning xine instance */
   
   Emotion_Xine_Video_Frame frame;       /* the view of this frame sent to the frontend */
   unsigned char            in_use : 1;  /* set while the frontend still holds the frame */
};
47
/* Per-instance state of the video-out driver: xine's vo_driver vtable plus
 * the Emotion frontend the decoded frames are delivered to. */
struct _Emotion_Driver
{
   vo_driver_t          vo_driver; /* must stay first: xine casts vo_driver_t* <-> Emotion_Driver* */
   config_values_t     *config;    /* xine config registry (borrowed from the class) */
   int                  ratio;     /* current VO_PROP_ASPECT_RATIO value */
   xine_t               *xine;     /* owning xine instance */
   Emotion_Xine_Video   *ev;       /* frontend state; arrives as 'visual' in _emotion_open() */
};
56
/* The driver class registered with xine: a factory for Emotion_Driver
 * instances (created in _emotion_class_init(), used by _emotion_open()). */
struct _Emotion_Class
{
   video_driver_class_t  driver_class; /* must stay first: xine casts the pointer */
   config_values_t      *config;       /* xine config registry */
   xine_t               *xine;         /* owning xine instance */
};
63
/* One entry of an overlay colour lookup table.  The packed 4-byte layout
 * (cb, cr, y, filler) is presumably dictated by xine's overlay CLUT
 * format, since img_overl->color/hili_color are cast straight to this
 * type in _emotion_overlay_blend_yuv() - TODO confirm against xine headers. */
struct _Emotion_Lut
{
   uint8_t cb    : 8; /* chroma blue (Cb) */
   uint8_t cr    : 8; /* chroma red (Cr) */
   uint8_t y     : 8; /* luma */
   uint8_t foo   : 8; /* unused filler byte */
} __attribute__ ((packed));
71
/* Completion callback handed to the frontend with each frame; invoked once
 * the frame has been consumed (see _emotion_frame_data_unlock()). */
typedef void (*done_func_type)(void *data);
73
74 /***************************************************************************/
75 static void        *_emotion_class_init            (xine_t *xine, void *visual);
76 static void         _emotion_class_dispose         (video_driver_class_t *driver_class);
77 static char        *_emotion_class_identifier_get  (video_driver_class_t *driver_class);
78 static char        *_emotion_class_description_get (video_driver_class_t *driver_class);
79
80 static vo_driver_t *_emotion_open                  (video_driver_class_t *driver_class, const void *visual);
81 static void         _emotion_dispose               (vo_driver_t *vo_driver);
82
83 static int          _emotion_redraw                (vo_driver_t *vo_driver);
84
85 static uint32_t     _emotion_capabilities_get      (vo_driver_t *vo_driver);
86 static int          _emotion_gui_data_exchange     (vo_driver_t *vo_driver, int data_type, void *data);
87
88 static int          _emotion_property_set          (vo_driver_t *vo_driver, int property, int value);
89 static int          _emotion_property_get          (vo_driver_t *vo_driver, int property);
90 static void         _emotion_property_min_max_get  (vo_driver_t *vo_driver, int property, int *min, int *max);
91
92 static vo_frame_t  *_emotion_frame_alloc           (vo_driver_t *vo_driver);
93 static void         _emotion_frame_dispose         (vo_frame_t *vo_frame);
94 static void         _emotion_frame_format_update   (vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags);
95 static void         _emotion_frame_display         (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
96 static void         _emotion_frame_field           (vo_frame_t *vo_frame, int which_field);
97
98 static void         _emotion_frame_data_free       (Emotion_Frame *fr);
99 static void         _emotion_frame_data_unlock     (Emotion_Frame *fr);
100
101 static void         _emotion_overlay_begin         (vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed);
102 static void         _emotion_overlay_end           (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
103 static void         _emotion_overlay_blend         (vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay);
104
105 static void         _emotion_overlay_mem_blend_8   (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
106 static void         _emotion_overlay_blend_yuv     (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
107
108 static void         _emotion_yuy2_to_bgra32        (int width, int height, unsigned char *src, unsigned char *dst);
109
110 /***************************************************************************/
/* Static description of this video-out plugin as exported to xine. */
static vo_info_t _emotion_info = 
{
   1,                        /* priority */
   XINE_VISUAL_TYPE_NONE   /* visual type */
};

/* Plugin catalog xine scans at load time; NULL/PLUGIN_NONE terminated.
 * NOTE(review): 21 looks like the xine video-out plugin API version this
 * was built against - confirm against the xine headers when bumping. */
plugin_info_t emotion_xine_plugin_info[] =
{
     { PLUGIN_VIDEO_OUT, 21, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
     { PLUGIN_NONE, 0, "", 0, NULL, NULL }
};
122
123 /***************************************************************************/
124 static void *
125 _emotion_class_init(xine_t *xine, void *visual __UNUSED__)
126 {
127    Emotion_Class *cl;
128    
129 //   DBG("");
130    cl = (Emotion_Class *) malloc(sizeof(Emotion_Class));
131    if (!cl) return NULL;
132    cl->driver_class.open_plugin     = _emotion_open;
133    cl->driver_class.get_identifier  = _emotion_class_identifier_get;
134    cl->driver_class.get_description = _emotion_class_description_get;
135    cl->driver_class.dispose         = _emotion_class_dispose;
136    cl->config                       = xine->config;
137    cl->xine                         = xine;
138
139    return cl;
140 }
141
142 static void
143 _emotion_class_dispose(video_driver_class_t *driver_class)
144 {
145    Emotion_Class *cl;
146    
147    cl = (Emotion_Class *)driver_class;
148    free(cl);
149 }
150
151 static char *
152 _emotion_class_identifier_get(video_driver_class_t *driver_class __UNUSED__)
153 {
154    return "emotion";
155 }
156
157 static char *
158 _emotion_class_description_get(video_driver_class_t *driver_class __UNUSED__)
159 {
160    return "Emotion xine video output plugin";
161 }
162
163 /***************************************************************************/
164 static vo_driver_t *
165 _emotion_open(video_driver_class_t *driver_class, const void *visual)
166 {
167    Emotion_Class *cl;
168    Emotion_Driver *dv;
169    
170    cl = (Emotion_Class *)driver_class;
171    /* visual here is the data ptr passed to xine_open_video_driver() */
172 //   DBG("");
173    dv = (Emotion_Driver *)malloc(sizeof(Emotion_Driver));
174    if (!dv) return NULL;
175    
176    dv->config                         = cl->config;
177    dv->xine                           = cl->xine;
178    dv->ratio                          = XINE_VO_ASPECT_AUTO;
179    dv->vo_driver.get_capabilities     = _emotion_capabilities_get;
180    dv->vo_driver.alloc_frame          = _emotion_frame_alloc;
181    dv->vo_driver.update_frame_format  = _emotion_frame_format_update;
182    dv->vo_driver.overlay_begin        = _emotion_overlay_begin;
183    dv->vo_driver.overlay_blend        = _emotion_overlay_blend;
184    dv->vo_driver.overlay_end          = _emotion_overlay_end;
185    dv->vo_driver.display_frame        = _emotion_frame_display;
186    dv->vo_driver.get_property         = _emotion_property_get;
187    dv->vo_driver.set_property         = _emotion_property_set;
188    dv->vo_driver.get_property_min_max = _emotion_property_min_max_get;
189    dv->vo_driver.gui_data_exchange    = _emotion_gui_data_exchange;
190    dv->vo_driver.dispose              = _emotion_dispose;
191    dv->vo_driver.redraw_needed        = _emotion_redraw;
192    dv->ev                             = (Emotion_Xine_Video *)visual;
193    dv->ev->have_vo = 1;
194    DBG("vo_driver = %p", &dv->vo_driver);
195    return &dv->vo_driver;
196 }    
197
198 static void
199 _emotion_dispose(vo_driver_t *vo_driver)
200 {
201    Emotion_Driver *dv;
202    
203    dv = (Emotion_Driver *)vo_driver;
204    dv->ev->have_vo = 0;
205    DBG("vo_driver = %p", dv);
206    free(dv);
207 }
208
209 /***************************************************************************/
210 static int
211 _emotion_redraw(vo_driver_t *vo_driver __UNUSED__)
212 {
213 //   DBG("");
214    return 0;
215 }
216
217 /***************************************************************************/
218 static uint32_t
219 _emotion_capabilities_get(vo_driver_t *vo_driver __UNUSED__)
220 {
221 //   DBG("");
222    return VO_CAP_YV12 | VO_CAP_YUY2;
223 }
224
225 /***************************************************************************/
226 static int
227 _emotion_gui_data_exchange(vo_driver_t *vo_driver __UNUSED__, int data_type, void *data __UNUSED__)
228 {
229 //   DBG("");
230    switch (data_type)
231      {
232       case XINE_GUI_SEND_COMPLETION_EVENT:
233         break;
234       case XINE_GUI_SEND_DRAWABLE_CHANGED:
235         break;
236       case XINE_GUI_SEND_EXPOSE_EVENT:
237         break;
238       case XINE_GUI_SEND_TRANSLATE_GUI_TO_VIDEO:
239         break;
240       case XINE_GUI_SEND_VIDEOWIN_VISIBLE:
241         break;
242       case XINE_GUI_SEND_SELECT_VISUAL:
243         break;
244       default:
245         break;
246      }
247   return 0;
248 }
249
250 /***************************************************************************/
251 static int
252 _emotion_property_set(vo_driver_t *vo_driver, int property, int value)
253 {
254    Emotion_Driver *dv;
255    
256    dv = (Emotion_Driver *)vo_driver;
257 //   DBG("");
258    switch (property)
259      {
260       case VO_PROP_ASPECT_RATIO:
261         if (value >= XINE_VO_ASPECT_NUM_RATIOS)
262           value = XINE_VO_ASPECT_AUTO;
263 //      DBG("DRIVER RATIO SET %i!", value);
264         dv->ratio = value;
265         break;
266       default:
267         break;
268      }
269    return value;
270 }
271
272 static int
273 _emotion_property_get(vo_driver_t *vo_driver, int property)
274 {
275    Emotion_Driver *dv;
276    
277    dv = (Emotion_Driver *)vo_driver;
278 //   DBG("");
279    switch (property)
280      {
281       case VO_PROP_ASPECT_RATIO:
282         return dv->ratio;
283         break;
284       default:
285         break;
286      }
287   return 0;
288 }
289
290 static void
291 _emotion_property_min_max_get(vo_driver_t *vo_driver __UNUSED__, int property __UNUSED__, int *min, int *max)
292 {
293 //   DBG("");
294    *min = 0;
295    *max = 0;
296 }
297
298 /***************************************************************************/
299 static vo_frame_t *
300 _emotion_frame_alloc(vo_driver_t *vo_driver __UNUSED__)
301 {
302    Emotion_Frame *fr;
303    
304 //   DBG("");
305    fr = (Emotion_Frame *)calloc(1, sizeof(Emotion_Frame));
306    if (!fr) return NULL;
307    
308    fr->vo_frame.base[0]    = NULL;
309    fr->vo_frame.base[1]    = NULL;
310    fr->vo_frame.base[2]    = NULL;
311    
312    fr->vo_frame.proc_slice = NULL;
313    fr->vo_frame.proc_frame = NULL;
314    fr->vo_frame.field      = _emotion_frame_field;
315    fr->vo_frame.dispose    = _emotion_frame_dispose;
316    fr->vo_frame.driver     = vo_driver;
317    
318    return (vo_frame_t *)fr;
319 }
320
321 static void
322 _emotion_frame_dispose(vo_frame_t *vo_frame)
323 {
324    Emotion_Frame *fr;
325    
326    fr = (Emotion_Frame *)vo_frame;
327 //   DBG("");
328    _emotion_frame_data_free(fr);  
329    free(fr);
330 }
331
332 static void
333 _emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags __UNUSED__)
334 {
335    Emotion_Driver *dv;
336    Emotion_Frame *fr;
337    
338    dv = (Emotion_Driver *)vo_driver;
339    fr = (Emotion_Frame *)vo_frame;
340    
341    if ((fr->width != (int)width) ||  (fr->height != (int)height) || 
342        (fr->format != format) || (!fr->vo_frame.base[0]))
343      {
344 //   DBG("");
345         _emotion_frame_data_free(fr);
346         
347         fr->width  = width;
348         fr->height = height;
349         fr->format = format;
350         
351         switch (format)
352           {
353            case XINE_IMGFMT_YV12: 
354                {
355                   int y_size, uv_size;
356                   
357                   fr->frame.format = EMOTION_FORMAT_YV12;
358                   fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
359                   fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
360                   fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);
361                   
362                   y_size  = fr->vo_frame.pitches[0] * height;
363                   uv_size = fr->vo_frame.pitches[1] * ((height + 1) / 2);
364                   
365                   fr->vo_frame.base[0] = malloc(y_size + (2 * uv_size));
366                   fr->vo_frame.base[1] = fr->vo_frame.base[0] + y_size + uv_size;
367                   fr->vo_frame.base[2] = fr->vo_frame.base[0] + y_size;
368                   fr->frame.w = fr->width;
369                   fr->frame.h = fr->height;
370                   fr->frame.ratio = fr->vo_frame.ratio;
371                   fr->frame.y = fr->vo_frame.base[0];
372                   fr->frame.u = fr->vo_frame.base[1];
373                   fr->frame.v = fr->vo_frame.base[2];
374                   fr->frame.bgra_data = NULL;
375                   fr->frame.y_stride = fr->vo_frame.pitches[0];
376                   fr->frame.u_stride = fr->vo_frame.pitches[1];
377                   fr->frame.v_stride = fr->vo_frame.pitches[2];
378                   fr->frame.obj = dv->ev->obj;
379                }
380              break;
381            case XINE_IMGFMT_YUY2: 
382                {
383                   fr->frame.format = EMOTION_FORMAT_BGRA;
384                   fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
385                   fr->vo_frame.pitches[1] = 0;
386                   fr->vo_frame.pitches[2] = 0;
387                   
388                   fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
389                   fr->vo_frame.base[1] = NULL;
390                   fr->vo_frame.base[2] = NULL;
391                   
392                   fr->frame.w = fr->width;
393                   fr->frame.h = fr->height;
394                   fr->frame.ratio = fr->vo_frame.ratio;
395                   fr->frame.y = NULL;
396                   fr->frame.u = NULL;
397                   fr->frame.v = NULL;
398                   fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
399                   fr->frame.y_stride = 0;
400                   fr->frame.u_stride = 0;
401                   fr->frame.v_stride = 0;
402                   fr->frame.obj = dv->ev->obj;
403                }
404              break;
405            default:
406              break;
407           }
408         if (((format == XINE_IMGFMT_YV12)
409              && ((!fr->vo_frame.base[0])
410                  || (!fr->vo_frame.base[1])
411                  || (!fr->vo_frame.base[2])))
412             || ((format == XINE_IMGFMT_YUY2)
413                 && ((!fr->vo_frame.base[0])
414                     || (!fr->frame.bgra_data))))
415           {
416              _emotion_frame_data_free(fr);
417           }
418      }
419    fr->frame.ratio = fr->vo_frame.ratio;
420    fr->ratio = ratio;
421 }
422
423 static void
424 _emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
425 {
426    Emotion_Driver *dv;
427    Emotion_Frame *fr;
428    
429    dv = (Emotion_Driver *)vo_driver;
430    fr = (Emotion_Frame *)vo_frame;
431 //   DBG("fq %i %p", dv->ev->fq, dv->ev);
432 // if my frame queue is too deep ( > 4 frames) simply block and wait for them
433 // to drain
434 //   while (dv->ev->fq > 4) usleep(1);
435    if (dv->ev)
436      {
437         void *buf;
438
439         if (dv->ev->closing) return;
440         if (fr->format == XINE_IMGFMT_YUY2)
441           {
442              _emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
443           }
444         
445         buf = &(fr->frame);
446         fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
447         fr->frame.done_func = (done_func_type)_emotion_frame_data_unlock;
448         fr->frame.done_data = fr;
449 //      DBG("FRAME FOR %p", dv->ev);
450         if (write(dv->ev->fd_write, &buf, sizeof(void *)) < 0) perror("write");
451 //      DBG("-- FRAME DEC %p == %i", fr->frame.obj, ret);
452         fr->in_use = 1;
453         dv->ev->fq++;
454      }
455    /* hmm - must find a way to sanely copy data out... FIXME problem */
456 //   fr->vo_frame.free(&fr->vo_frame);
457 }
458
459 static void
460 _emotion_frame_field(vo_frame_t *vo_frame __UNUSED__, int which_field __UNUSED__)
461 {
462 //   DBG("");
463 }
464
465 /***************************************************************************/
466 static void
467 _emotion_frame_data_free(Emotion_Frame *fr)
468 {
469    if (fr->vo_frame.base[0])
470      {
471         free(fr->vo_frame.base[0]);
472         fr->vo_frame.base[0] = NULL;
473         fr->vo_frame.base[1] = NULL;
474         fr->vo_frame.base[2] = NULL;
475         fr->frame.y = fr->vo_frame.base[0];
476         fr->frame.u = fr->vo_frame.base[1];
477         fr->frame.v = fr->vo_frame.base[2];
478      }
479    if (fr->frame.bgra_data)
480      {
481         free(fr->frame.bgra_data);
482         fr->frame.bgra_data = NULL;
483      }
484 }
485
486 static void
487 _emotion_frame_data_unlock(Emotion_Frame *fr)
488 {
489 //   DBG("");
490    if (fr->in_use)
491      {
492         fr->vo_frame.free(&fr->vo_frame);
493         fr->in_use = 0;
494      }
495 }
496
497 /***************************************************************************/
498 static void
499 _emotion_overlay_begin(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__, int changed __UNUSED__)
500 {
501 //   DBG("");
502 }
503
504 static void
505 _emotion_overlay_end(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__)
506 {
507 //   DBG("");
508 }
509
510 static void
511 _emotion_overlay_blend(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay __UNUSED__)
512 {
513    Emotion_Frame *fr;
514    
515    fr = (Emotion_Frame *)vo_frame;
516 //   DBG("");
517    _emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
518                               fr->width, fr->height, 
519                               fr->vo_frame.pitches);
520 }
521
/* Alpha-blend 'val' over 'sz' bytes at 'mem' with 4-bit opacity 'o'
 * (0 = keep destination, 0xf = full source). */
static void _emotion_overlay_mem_blend_8(uint8_t *mem, uint8_t val, uint8_t o, size_t sz)
{
   size_t i;

   for (i = 0; i < sz; i++)
     mem[i] = ((val * o) + (mem[i] * (0xf - o))) / 0xf;
}
531
/* Blend an RLE-encoded overlay (xine OSD/subtitle) into a planar YV12
 * frame.  Each RLE run is split ("bitten") at the overlay's highlight
 * rectangle (hili_left/right/top/bottom) so that pixels inside it use the
 * highlight palette (hili_color/hili_trans) and pixels outside use the
 * normal palette (color/trans).  Runs are drawn with memset at full
 * opacity (o >= 15) or alpha-blended via _emotion_overlay_mem_blend_8;
 * chroma is touched only on odd lines since YV12 chroma is subsampled 2x2.
 *
 * NOTE(review): dst_cr is derived from dst_base[2] using dst_pitches[1]
 * but later advanced by dst_pitches[2] (and vice versa for dst_cb).  This
 * is harmless only because this driver allocates identical U and V
 * pitches - confirm before reusing with unequal pitches. */
static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3])
{
   Emotion_Lut *my_clut;
   uint8_t *my_trans;
   int src_width;
   int src_height;
   rle_elem_t *rle;
   rle_elem_t *rle_limit;
   int x_off;
   int y_off;
   int ymask, xmask;
   int rle_this_bite;
   int rle_remainder;
   int rlelen;
   int x, y;
   int hili_right;
   uint8_t clr = 0;
   
   src_width = img_overl->width;
   src_height = img_overl->height;
   rle = img_overl->rle;
   rle_limit = rle + img_overl->num_rle;
   x_off = img_overl->x;
   y_off = img_overl->y;
   
   if (!rle) return;
   
   /* plane cursors at the overlay's top-left corner; chroma at half
    * resolution in both directions */
   uint8_t *dst_y = dst_base[0] + dst_pitches[0] * y_off + x_off;
   uint8_t *dst_cr = dst_base[2] + (y_off / 2) * dst_pitches[1] + (x_off / 2) + 1;
   uint8_t *dst_cb = dst_base[1] + (y_off / 2) * dst_pitches[2] + (x_off / 2) + 1;
   my_clut = (Emotion_Lut *) img_overl->hili_color;
   my_trans = img_overl->hili_trans;
   
   /* avoid wraping overlay if drawing to small image */
   if( (x_off + img_overl->hili_right) < dst_width )
     hili_right = img_overl->hili_right;
   else
     hili_right = dst_width - 1 - x_off;
   
   /* avoid buffer overflow */
   if( (src_height + y_off) >= dst_height )
     src_height = dst_height - 1 - y_off;
   
   rlelen=rle_remainder=0;
   for (y = 0; y < src_height; y++)
     {
        /* ymask set -> this whole line lies outside the highlight rect */
        ymask = ((img_overl->hili_top > y) || (img_overl->hili_bottom < y));
        xmask = 0;
        
        for (x = 0; x < src_width;)
          {
             uint16_t o;
             
             /* fetch the next RLE run when the current one is exhausted */
             if (rlelen == 0)
               {
                  rle_remainder = rlelen = rle->len;
                  clr = rle->color;
                  rle++;
               }
             if (rle_remainder == 0)
               {
                  rle_remainder = rlelen;
               }
             if ((rle_remainder + x) > src_width)
               {
                  /* Do something for long rlelengths */
                  rle_remainder = src_width - x;
               } 
             
             if (ymask == 0)
               {
                  if (x <= img_overl->hili_left)
                    {
                       /* Starts outside clip area */
                       if ((x + rle_remainder - 1) > img_overl->hili_left )
                         {
                            /* Cutting needed, starts outside, ends inside */
                            rle_this_bite = (img_overl->hili_left - x + 1);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
                  else if (x < hili_right)
                    {
                       /* Starts inside clip area */
                       if ((x + rle_remainder) > hili_right )
                         {
                            /* Cutting needed, starts inside, ends outside */
                            rle_this_bite = (hili_right - x);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                       else
                         {
                            /* no cutting needed, starts inside, ends inside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                    }
                  else if (x >= hili_right)
                    {
                       /* Starts outside clip area, ends outsite clip area */
                       if ((x + rle_remainder ) > src_width )
                         {
                            /* Cutting needed, starts outside, ends at right edge */
                            /* It should never reach here due to the earlier test of src_width */
                            rle_this_bite = (src_width - x );
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
               }
             else
               {
                  /* Outside clip are due to y */
                  /* no cutting needed, starts outside, ends outside */
                  rle_this_bite = rle_remainder;
                  rle_remainder = 0;
                  rlelen -= rle_this_bite;
                  my_clut = (Emotion_Lut *) img_overl->color;
                  my_trans = img_overl->trans;
                  xmask = 0;
               }
             /* draw the bite: solid memset at (near) full opacity,
              * alpha blend otherwise; chroma only on odd lines (2x2
              * subsampling), at half the run length */
             o   = my_trans[clr];
             if (o)
               {
                  if (o >= 15)
                    {
                       memset(dst_y + x, my_clut[clr].y, rle_this_bite);
                       if (y & 1)
                         {
                            memset(dst_cr + (x >> 1), my_clut[clr].cr, (rle_this_bite+1) >> 1);
                            memset(dst_cb + (x >> 1), my_clut[clr].cb, (rle_this_bite+1) >> 1);
                         }
                    }
                  else
                    {
                       _emotion_overlay_mem_blend_8(dst_y + x, my_clut[clr].y, o, rle_this_bite);
                       if (y & 1)
                         {
                            /* Blending cr and cb should use a different function, with pre -128 to each sample */
                            _emotion_overlay_mem_blend_8(dst_cr + (x >> 1), my_clut[clr].cr, o, (rle_this_bite+1) >> 1);
                            _emotion_overlay_mem_blend_8(dst_cb + (x >> 1), my_clut[clr].cb, o, (rle_this_bite+1) >> 1);
                         }
                    }
               }
             x += rle_this_bite;
             if (rle >= rle_limit)
               {
                  break;
               }
          }
        if (rle >= rle_limit)
          {
             break;
          }
        
        dst_y += dst_pitches[0];
        
        if (y & 1)
          {
             dst_cr += dst_pitches[2];
             dst_cb += dst_pitches[1];
          }
     }
}
731
//TODO: Really need to improve this converter! 
#define LIMIT(x)  ((x) > 0xff ? 0xff : ((x) < 0 ? 0 : (x)))

/* Convert a packed YUY2 (Y0 U Y1 V) buffer to 32-bit BGRA; the alpha byte
 * is written as 0, matching the original converter.  BT.601 conversion:
 *   B = 1.164*(Y-16) + 2.018*(U-128)
 *   G = 1.164*(Y-16) - 0.813*(V-128) - 0.391*(U-128)
 *   R = 1.164*(Y-16) + 1.596*(V-128)
 * BUGFIX: the original looped "for (i < width) for (j < height)" and
 * advanced the chroma pointers on odd values of the *inner* counter, which
 * mis-paired chroma with luma whenever the inner run length was odd.
 * Walking the buffer in 4-byte two-pixel groups pairs chroma correctly
 * for any frame size (assumes pitch == width * 2, as allocated by
 * _emotion_frame_format_update()). */
static void
_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
{
   int pairs = (width * height) / 2; /* number of Y0-U-Y1-V groups */
   int k;

   for (k = 0; k < pairs; k++)
     {
        int y0 = src[0], u = src[1], y1 = src[2], v = src[3];

        *dst++ = LIMIT(1.164 * (y0 - 16) + 2.018 * (u - 128));
        *dst++ = LIMIT(1.164 * (y0 - 16) - 0.813 * (v - 128) - 0.391 * (u - 128));
        *dst++ = LIMIT(1.164 * (y0 - 16) + 1.596 * (v - 128));
        *dst++ = 0;

        *dst++ = LIMIT(1.164 * (y1 - 16) + 2.018 * (u - 128));
        *dst++ = LIMIT(1.164 * (y1 - 16) - 0.813 * (v - 128) - 0.391 * (u - 128));
        *dst++ = LIMIT(1.164 * (y1 - 16) + 1.596 * (v - 128));
        *dst++ = 0;

        src += 4;
     }
}