1 /***************************************************************************/
2 /*** emotion xine display engine ***/
3 /***************************************************************************/
18 #include "emotion_private.h"
19 #include "emotion_xine.h"
22 #include <xine/video_out.h>
23 #include <xine/xine_internal.h>
24 #include <xine/xineutils.h>
25 #include <xine/vo_scale.h>
27 #define BLEND_BYTE(dst, src, o) (((src)*o + ((dst)*(0xf-o)))/0xf)
29 /***************************************************************************/
/* Forward typedefs for the private structures of this video-out driver.
 * NOTE(review): this view of the file is missing several struct member
 * lines, so the struct bodies below are incomplete fragments — gaps are
 * marked; confirm against the full source before editing. */
typedef struct _Emotion_Frame Emotion_Frame;
typedef struct _Emotion_Driver Emotion_Driver;
typedef struct _Emotion_Class Emotion_Class;
typedef struct _Emotion_Lut Emotion_Lut;

/* (fragment of struct _Emotion_Frame — opening lines missing from view) */
   Emotion_Xine_Video_Frame frame;   /* payload handed to the emotion core */
   unsigned char in_use : 1;         /* flag bit; presumably set while emotion
                                      * still reads the frame — TODO confirm */

/* One driver instance per opened xine video output. */
struct _Emotion_Driver
/* NOTE(review): opening brace + some members missing from view */
   vo_driver_t vo_driver;            /* must stay first: xine casts the
                                      * vo_driver_t* back to this struct */
   config_values_t *config;          /* xine config store, borrowed */
   Emotion_Xine_Video *ev;           /* emotion-side video object ("visual") */

/* (fragment of struct _Emotion_Class — header lines missing from view) */
   video_driver_class_t driver_class; /* must stay first for the same cast */
   config_values_t *config;

/* (fragment of struct _Emotion_Lut — CLUT entry; packed so it can overlay
 * xine's raw palette bytes one-to-one) */
} __attribute__ ((packed));

/* Signature of the per-frame completion callback (frame.done_func). */
typedef void (*done_func_type)(void *data);
/***************************************************************************/
/* Forward declarations for every xine video-out callback implemented in
 * this file.                                                              */

/* driver class lifecycle */
static void *_emotion_class_init (xine_t *xine, void *visual);
static void _emotion_class_dispose (video_driver_class_t *driver_class);
static char *_emotion_class_identifier_get (video_driver_class_t *driver_class);
static char *_emotion_class_description_get (video_driver_class_t *driver_class);

/* driver instance lifecycle */
static vo_driver_t *_emotion_open (video_driver_class_t *driver_class, const void *visual);
static void _emotion_dispose (vo_driver_t *vo_driver);

static int _emotion_redraw (vo_driver_t *vo_driver);

static uint32_t _emotion_capabilities_get (vo_driver_t *vo_driver);
static int _emotion_gui_data_exchange (vo_driver_t *vo_driver, int data_type, void *data);

/* property plumbing (only aspect ratio is meaningful here) */
static int _emotion_property_set (vo_driver_t *vo_driver, int property, int value);
static int _emotion_property_get (vo_driver_t *vo_driver, int property);
static void _emotion_property_min_max_get (vo_driver_t *vo_driver, int property, int *min, int *max);

/* frame pool management + display */
static vo_frame_t *_emotion_frame_alloc (vo_driver_t *vo_driver);
static void _emotion_frame_dispose (vo_frame_t *vo_frame);
static void _emotion_frame_format_update (vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags);
static void _emotion_frame_display (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
static void _emotion_frame_field (vo_frame_t *vo_frame, int which_field);

/* internal frame buffer helpers */
static void _emotion_frame_data_free (Emotion_Frame *fr);
static void _emotion_frame_data_unlock (Emotion_Frame *fr);

/* overlay (subtitle/OSD) blending */
static void _emotion_overlay_begin (vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed);
static void _emotion_overlay_end (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
static void _emotion_overlay_blend (vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay);

static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);

/* YUY2 -> BGRA software converter used at display time */
static void _emotion_yuy2_to_bgra32 (int width, int height, unsigned char *src, unsigned char *dst);
/***************************************************************************/
/* Plugin metadata exported to xine. */
static vo_info_t _emotion_info =
/* NOTE(review): the initializer's braces and priority field are missing
 * from this view of the file */
   XINE_VISUAL_TYPE_NONE /* visual type */

/* Table xine scans on load: one API-version-21 video-out entry, closed by
 * the mandatory PLUGIN_NONE sentinel row. */
plugin_info_t emotion_xine_plugin_info[] =
   { PLUGIN_VIDEO_OUT, 21, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
   { PLUGIN_NONE, 0, "", 0, NULL, NULL }
123 /***************************************************************************/
125 _emotion_class_init(xine_t *xine, void *visual __UNUSED__)
130 cl = (Emotion_Class *) malloc(sizeof(Emotion_Class));
131 if (!cl) return NULL;
132 cl->driver_class.open_plugin = _emotion_open;
133 #if XINE_MAJOR_VERSION < 1 || (XINE_MAJOR_VERSION == 1 && XINE_MINOR_VERSION < 2)
134 cl->driver_class.get_identifier = _emotion_class_identifier_get;
135 cl->driver_class.get_description = _emotion_class_description_get;
137 cl->driver_class.identifier = _emotion_class_identifier_get(NULL);
138 cl->driver_class.description = _emotion_class_description_get(NULL);
140 cl->driver_class.dispose = _emotion_class_dispose;
141 cl->config = xine->config;
148 _emotion_class_dispose(video_driver_class_t *driver_class)
152 cl = (Emotion_Class *)driver_class;
157 _emotion_class_identifier_get(video_driver_class_t *driver_class __UNUSED__)
163 _emotion_class_description_get(video_driver_class_t *driver_class __UNUSED__)
165 return "Emotion xine video output plugin";
168 /***************************************************************************/
/* Factory: create one Emotion_Driver per playback session.  "visual"
 * carries the Emotion_Xine_Video pointer emotion handed to
 * xine_open_video_driver(); the returned embedded vo_driver_t is what
 * xine passes back into every callback below.
 * NOTE(review): this view is missing the opening brace, the local
 * declarations, and possibly further field initialisation between the
 * dv->ev assignment and the DBG line — confirm against full source. */
_emotion_open(video_driver_class_t *driver_class, const void *visual)
   cl = (Emotion_Class *)driver_class;
   /* visual here is the data ptr passed to xine_open_video_driver() */
   dv = (Emotion_Driver *)malloc(sizeof(Emotion_Driver));
   if (!dv) return NULL;

   dv->config = cl->config;
   /* default aspect until the frontend sets VO_PROP_ASPECT_RATIO */
   dv->ratio = XINE_VO_ASPECT_AUTO;
   /* wire every vo_driver_t callback to this file's implementations */
   dv->vo_driver.get_capabilities = _emotion_capabilities_get;
   dv->vo_driver.alloc_frame = _emotion_frame_alloc;
   dv->vo_driver.update_frame_format = _emotion_frame_format_update;
   dv->vo_driver.overlay_begin = _emotion_overlay_begin;
   dv->vo_driver.overlay_blend = _emotion_overlay_blend;
   dv->vo_driver.overlay_end = _emotion_overlay_end;
   dv->vo_driver.display_frame = _emotion_frame_display;
   dv->vo_driver.get_property = _emotion_property_get;
   dv->vo_driver.set_property = _emotion_property_set;
   dv->vo_driver.get_property_min_max = _emotion_property_min_max_get;
   dv->vo_driver.gui_data_exchange = _emotion_gui_data_exchange;
   dv->vo_driver.dispose = _emotion_dispose;
   dv->vo_driver.redraw_needed = _emotion_redraw;
   /* remember the emotion-side object so frames can be routed back */
   dv->ev = (Emotion_Xine_Video *)visual;

   DBG("vo_driver = %p", &dv->vo_driver);
   return &dv->vo_driver;
204 _emotion_dispose(vo_driver_t *vo_driver)
208 dv = (Emotion_Driver *)vo_driver;
210 DBG("vo_driver = %p", dv);
214 /***************************************************************************/
216 _emotion_redraw(vo_driver_t *vo_driver __UNUSED__)
222 /***************************************************************************/
224 _emotion_capabilities_get(vo_driver_t *vo_driver __UNUSED__)
227 return VO_CAP_YV12 | VO_CAP_YUY2;
230 /***************************************************************************/
232 _emotion_gui_data_exchange(vo_driver_t *vo_driver __UNUSED__, int data_type, void *data __UNUSED__)
237 case XINE_GUI_SEND_COMPLETION_EVENT:
239 case XINE_GUI_SEND_DRAWABLE_CHANGED:
241 case XINE_GUI_SEND_EXPOSE_EVENT:
243 case XINE_GUI_SEND_TRANSLATE_GUI_TO_VIDEO:
245 case XINE_GUI_SEND_VIDEOWIN_VISIBLE:
247 case XINE_GUI_SEND_SELECT_VISUAL:
255 /***************************************************************************/
257 _emotion_property_set(vo_driver_t *vo_driver, int property, int value)
261 dv = (Emotion_Driver *)vo_driver;
265 case VO_PROP_ASPECT_RATIO:
266 if (value >= XINE_VO_ASPECT_NUM_RATIOS)
267 value = XINE_VO_ASPECT_AUTO;
268 // DBG("DRIVER RATIO SET %i!", value);
278 _emotion_property_get(vo_driver_t *vo_driver, int property)
282 dv = (Emotion_Driver *)vo_driver;
286 case VO_PROP_ASPECT_RATIO:
296 _emotion_property_min_max_get(vo_driver_t *vo_driver __UNUSED__, int property __UNUSED__, int *min, int *max)
303 /***************************************************************************/
305 _emotion_frame_alloc(vo_driver_t *vo_driver __UNUSED__)
310 fr = (Emotion_Frame *)calloc(1, sizeof(Emotion_Frame));
311 if (!fr) return NULL;
313 fr->vo_frame.base[0] = NULL;
314 fr->vo_frame.base[1] = NULL;
315 fr->vo_frame.base[2] = NULL;
317 fr->vo_frame.proc_slice = NULL;
318 fr->vo_frame.proc_frame = NULL;
319 fr->vo_frame.field = _emotion_frame_field;
320 fr->vo_frame.dispose = _emotion_frame_dispose;
321 fr->vo_frame.driver = vo_driver;
323 return (vo_frame_t *)fr;
327 _emotion_frame_dispose(vo_frame_t *vo_frame)
331 fr = (Emotion_Frame *)vo_frame;
333 _emotion_frame_data_free(fr);
/* xine callback: ensure vo_frame has buffers for width x height pixels
 * in "format" before the decoder writes into it.  Buffers are only
 * reallocated when geometry or format actually changed.
 * NOTE(review): this view is missing several lines (braces, local
 * declarations, the switch header, width/height stores, and the YUY2
 * u/v pointer setup) — the body below is a fragment with the gaps
 * marked; confirm against the full source before editing. */
_emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags __UNUSED__)
   dv = (Emotion_Driver *)vo_driver;
   fr = (Emotion_Frame *)vo_frame;
   /* reallocate only if size/format changed or no buffer exists yet */
   if ((fr->width != (int)width) || (fr->height != (int)height) ||
       (fr->format != format) || (!fr->vo_frame.base[0]))
        /* drop old buffers before allocating at the new geometry */
        _emotion_frame_data_free(fr);

        case XINE_IMGFMT_YV12:
          /* planar YV12: Y pitch padded to a multiple of 8; chroma
           * pitches padded per half-width (the +15/16 rounding) */
          fr->frame.format = EMOTION_FORMAT_YV12;
          fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
          fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
          fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);

          y_size = fr->vo_frame.pitches[0] * height;
          uv_size = fr->vo_frame.pitches[1] * ((height + 1) / 2);

          /* one allocation holds Y followed by both chroma planes */
          fr->vo_frame.base[0] = malloc(y_size + (2 * uv_size));
          /* NOTE(review): base[1] points after base[2]'s plane here —
           * looks like deliberate V-before-U (YV12) layout; confirm */
          fr->vo_frame.base[1] = fr->vo_frame.base[0] + y_size + uv_size;
          fr->vo_frame.base[2] = fr->vo_frame.base[0] + y_size;
          fr->frame.w = fr->width;
          fr->frame.h = fr->height;
          fr->frame.ratio = fr->vo_frame.ratio;
          fr->frame.y = fr->vo_frame.base[0];
          fr->frame.u = fr->vo_frame.base[1];
          fr->frame.v = fr->vo_frame.base[2];
          fr->frame.bgra_data = NULL;   /* unused in planar mode */
          fr->frame.y_stride = fr->vo_frame.pitches[0];
          fr->frame.u_stride = fr->vo_frame.pitches[1];
          fr->frame.v_stride = fr->vo_frame.pitches[2];
          fr->frame.obj = dv->ev->obj;

        case XINE_IMGFMT_YUY2:
          /* packed YUY2: decoded into base[0], converted to BGRA at
           * display time (see _emotion_frame_display) */
          fr->frame.format = EMOTION_FORMAT_BGRA;
          fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4); /* 2 B/px, padded */
          fr->vo_frame.pitches[1] = 0;
          fr->vo_frame.pitches[2] = 0;

          fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
          fr->vo_frame.base[1] = NULL;
          fr->vo_frame.base[2] = NULL;

          fr->frame.w = fr->width;
          fr->frame.h = fr->height;
          fr->frame.ratio = fr->vo_frame.ratio;
          /* destination for the YUY2 -> BGRA conversion, 4 B/px */
          fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
          fr->frame.y_stride = 0;
          fr->frame.u_stride = 0;
          fr->frame.v_stride = 0;
          fr->frame.obj = dv->ev->obj;

   /* any plane failed to allocate -> release everything so the frame is
    * consistently empty rather than half-built */
   if (((format == XINE_IMGFMT_YV12)
        && ((!fr->vo_frame.base[0])
            || (!fr->vo_frame.base[1])
            || (!fr->vo_frame.base[2])))
       || ((format == XINE_IMGFMT_YUY2)
           && ((!fr->vo_frame.base[0])
               || (!fr->frame.bgra_data))))
        _emotion_frame_data_free(fr);

   /* ratio can change without a size change; refresh it every call */
   fr->frame.ratio = fr->vo_frame.ratio;
/* xine callback: a decoded frame is ready for presentation.  Converts
 * YUY2 frames to BGRA, stamps the frame with its presentation time and
 * completion callback, then wakes the emotion thread by writing the
 * frame pointer into a pipe.
 * NOTE(review): this view is missing the lines declaring/assigning
 * "buf" — presumably buf = &fr->frame before the write(); confirm. */
_emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
   dv = (Emotion_Driver *)vo_driver;
   fr = (Emotion_Frame *)vo_frame;
// DBG("fq %i %p", dv->ev->fq, dv->ev);
// if my frame queue is too deep ( > 4 frames) simply block and wait for them
// while (dv->ev->fq > 4) usleep(1);
   /* shutting down: drop frames instead of queueing them */
   if (dv->ev->closing) return;
   if (fr->format == XINE_IMGFMT_YUY2)
     /* software-convert the packed frame into the BGRA buffer */
     _emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
   /* vpts ticks at 90 kHz; convert to seconds */
   fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
   /* emotion calls done_func(done_data) when it is finished reading,
    * which hands the vo_frame back to xine's pool */
   fr->frame.done_func = (done_func_type)_emotion_frame_data_unlock;
   fr->frame.done_data = fr;
// DBG("FRAME FOR %p", dv->ev);
   if (write(dv->ev->fd_write, &buf, sizeof(void *)) < 0) perror("write");
// DBG("-- FRAME DEC %p == %i", fr->frame.obj, ret);
/* hmm - must find a way to sanely copy data out... FIXME problem */
// fr->vo_frame.free(&fr->vo_frame);
465 _emotion_frame_field(vo_frame_t *vo_frame __UNUSED__, int which_field __UNUSED__)
470 /***************************************************************************/
472 _emotion_frame_data_free(Emotion_Frame *fr)
474 if (fr->vo_frame.base[0])
476 free(fr->vo_frame.base[0]);
477 fr->vo_frame.base[0] = NULL;
478 fr->vo_frame.base[1] = NULL;
479 fr->vo_frame.base[2] = NULL;
480 fr->frame.y = fr->vo_frame.base[0];
481 fr->frame.u = fr->vo_frame.base[1];
482 fr->frame.v = fr->vo_frame.base[2];
484 if (fr->frame.bgra_data)
486 free(fr->frame.bgra_data);
487 fr->frame.bgra_data = NULL;
492 _emotion_frame_data_unlock(Emotion_Frame *fr)
497 fr->vo_frame.free(&fr->vo_frame);
502 /***************************************************************************/
504 _emotion_overlay_begin(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__, int changed __UNUSED__)
510 _emotion_overlay_end(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame __UNUSED__)
516 _emotion_overlay_blend(vo_driver_t *vo_driver __UNUSED__, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay __UNUSED__)
520 fr = (Emotion_Frame *)vo_frame;
522 _emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
523 fr->width, fr->height,
524 fr->vo_frame.pitches);
/* Alpha-blend a constant 8-bit value over sz bytes of mem.
 * o is a 4-bit opacity (0..0xf): 0 keeps the destination untouched,
 * 0xf replaces it with val.  The blend math is the file's BLEND_BYTE()
 * macro expanded inline so this helper is self-contained.
 * Fix: the loop and its braces were missing from the fragment — only
 * the single blend statement was present. */
static void _emotion_overlay_mem_blend_8(uint8_t *mem, uint8_t val, uint8_t o, size_t sz)
{
   uint8_t *limit = mem + sz;

   while (mem < limit)
     {
        /* dst' = (val*o + dst*(0xf - o)) / 0xf : 4-bit alpha blend */
        *mem = (uint8_t)(((val * o) + (*mem * (0xf - o))) / 0xf);
        mem++;
     }
}
/* Blend an RLE-encoded xine overlay into planar YUV destination planes.
 * The overlay has two palette/transparency sets: the normal one
 * (color/trans) and a "hili" highlight set used inside the clip
 * rectangle [hili_left, hili_right] x [hili_top, hili_bottom] (DVD menu
 * button highlighting).  Each RLE run is split ("bites") at the clip
 * edges so each piece is drawn with the correct palette.
 * NOTE(review): this view is missing many original lines (local
 * declarations, braces, else branches, loop advance/clamp logic) — the
 * body below is a fragment with gaps marked; confirm against the full
 * source before editing. */
static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3])
   Emotion_Lut *my_clut;
   rle_elem_t *rle_limit;

   /* overlay geometry and RLE stream bounds */
   src_width = img_overl->width;
   src_height = img_overl->height;
   rle = img_overl->rle;
   rle_limit = rle + img_overl->num_rle;
   x_off = img_overl->x;
   y_off = img_overl->y;

   /* plane start pointers at the overlay origin; chroma is subsampled
    * 2x2.  NOTE(review): cr reads base[2] with pitches[1] and cb reads
    * base[1] with pitches[2] (plus a +1 offset) — looks intentional for
    * this YV12 layout but verify against the full source */
   uint8_t *dst_y = dst_base[0] + dst_pitches[0] * y_off + x_off;
   uint8_t *dst_cr = dst_base[2] + (y_off / 2) * dst_pitches[1] + (x_off / 2) + 1;
   uint8_t *dst_cb = dst_base[1] + (y_off / 2) * dst_pitches[2] + (x_off / 2) + 1;
   /* start with the highlight palette; swapped per-bite below */
   my_clut = (Emotion_Lut *) img_overl->hili_color;
   my_trans = img_overl->hili_trans;

   /* avoid wraping overlay if drawing to small image */
   if( (x_off + img_overl->hili_right) < dst_width )
     hili_right = img_overl->hili_right;
   /* NOTE(review): else branch keyword missing from this view */
     hili_right = dst_width - 1 - x_off;

   /* avoid buffer overflow */
   if( (src_height + y_off) >= dst_height )
     src_height = dst_height - 1 - y_off;

   rlelen=rle_remainder=0;
   for (y = 0; y < src_height; y++)
     /* ymask true -> this scanline is outside the highlight band */
     ymask = ((img_overl->hili_top > y) || (img_overl->hili_bottom < y));

     for (x = 0; x < src_width;)
       /* fetch the next RLE run length */
       rle_remainder = rlelen = rle->len;
       if (rle_remainder == 0)
         rle_remainder = rlelen;
       if ((rle_remainder + x) > src_width)
         /* Do something for long rlelengths */
         rle_remainder = src_width - x;

       if (x <= img_overl->hili_left)
         /* Starts outside clip area */
         if ((x + rle_remainder - 1) > img_overl->hili_left )
           /* Cutting needed, starts outside, ends inside */
           rle_this_bite = (img_overl->hili_left - x + 1);
           rle_remainder -= rle_this_bite;
           rlelen -= rle_this_bite;
           my_clut = (Emotion_Lut *) img_overl->color;
           my_trans = img_overl->trans;
           /* no cutting needed, starts outside, ends outside */
           rle_this_bite = rle_remainder;
           rlelen -= rle_this_bite;
           my_clut = (Emotion_Lut *) img_overl->color;
           my_trans = img_overl->trans;
       else if (x < hili_right)
         /* Starts inside clip area */
         if ((x + rle_remainder) > hili_right )
           /* Cutting needed, starts inside, ends outside */
           rle_this_bite = (hili_right - x);
           rle_remainder -= rle_this_bite;
           rlelen -= rle_this_bite;
           my_clut = (Emotion_Lut *) img_overl->hili_color;
           my_trans = img_overl->hili_trans;
           /* no cutting needed, starts inside, ends inside */
           rle_this_bite = rle_remainder;
           rlelen -= rle_this_bite;
           my_clut = (Emotion_Lut *) img_overl->hili_color;
           my_trans = img_overl->hili_trans;
       else if (x >= hili_right)
         /* Starts outside clip area, ends outsite clip area */
         if ((x + rle_remainder ) > src_width )
           /* Cutting needed, starts outside, ends at right edge */
           /* It should never reach here due to the earlier test of src_width */
           rle_this_bite = (src_width - x );
           rle_remainder -= rle_this_bite;
           rlelen -= rle_this_bite;
           my_clut = (Emotion_Lut *) img_overl->color;
           my_trans = img_overl->trans;
           /* no cutting needed, starts outside, ends outside */
           rle_this_bite = rle_remainder;
           rlelen -= rle_this_bite;
           my_clut = (Emotion_Lut *) img_overl->color;
           my_trans = img_overl->trans;

         /* Outside clip are due to y */
         /* no cutting needed, starts outside, ends outside */
         rle_this_bite = rle_remainder;
         rlelen -= rle_this_bite;
         my_clut = (Emotion_Lut *) img_overl->color;
         my_trans = img_overl->trans;

       /* NOTE(review): opacity lookup (o = my_trans[clr]) and the
        * opaque/translucent branch lines are missing from this view */
       /* fully opaque: plain fill of luma, and half-width chroma */
       memset(dst_y + x, my_clut[clr].y, rle_this_bite);
       memset(dst_cr + (x >> 1), my_clut[clr].cr, (rle_this_bite+1) >> 1);
       memset(dst_cb + (x >> 1), my_clut[clr].cb, (rle_this_bite+1) >> 1);
       /* translucent: alpha-blend against the existing pixels */
       _emotion_overlay_mem_blend_8(dst_y + x, my_clut[clr].y, o, rle_this_bite);
       /* Blending cr and cb should use a different function, with pre -128 to each sample */
       _emotion_overlay_mem_blend_8(dst_cr + (x >> 1), my_clut[clr].cr, o, (rle_this_bite+1) >> 1);
       _emotion_overlay_mem_blend_8(dst_cb + (x >> 1), my_clut[clr].cb, o, (rle_this_bite+1) >> 1);

       /* stop when the RLE stream is exhausted */
       if (rle >= rle_limit)
       if (rle >= rle_limit)

     /* advance one luma row; chroma rows advance every other scanline
      * (NOTE(review): the "if (y & 1)" guard appears to be among the
      * missing lines — confirm) */
     dst_y += dst_pitches[0];
     dst_cr += dst_pitches[2];
     dst_cb += dst_pitches[1];
737 //TODO: Really need to improve this converter!
738 #define LIMIT(x) ((x) > 0xff ? 0xff : ((x) < 0 ? 0 : (x)))
741 _emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
744 unsigned char *y, *u, *v;
749 for (i = 0; i < width; i++)
751 for (j = 0; j < height; j++)
753 *dst++ = LIMIT(1.164 * (*y - 16) + 2.018 * (*u - 128));
754 *dst++ = LIMIT(1.164 * (*y - 16) - 0.813 * (*v - 128) - 0.391 * (*u - 128));
755 *dst++ = LIMIT(1.164 * (*y - 16) + 1.596 * (*v - 128));