#define META_TRACK_YEAR 6
#define META_TRACK_DISCID 7
+typedef enum _Emotion_Format Emotion_Format;
typedef struct _Emotion_Video_Module Emotion_Video_Module;
+enum _Emotion_Format
+{
+ EMOTION_YV12,
+ EMOTION_YUY2, /* unused for now since evas does not support yuy2 format */
+ EMOTION_BGRA
+};
+
struct _Emotion_Video_Module
{
unsigned char (*init) (Evas_Object *obj, void **video);
int (*audio_handled) (void *ef);
int (*seekable) (void *ef);
void (*frame_done) (void *ef);
- void (*yuv_size_get) (void *ef, int *w, int *h);
+ Emotion_Format (*format_get) (void *ef);
+ void (*video_data_size_get) (void *ef, int *w, int *h);
int (*yuv_rows_get) (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+ int (*bgra_data_get) (void *ef, unsigned char **bgra_data);
void (*event_feed) (void *ef, int event);
void (*event_mouse_button_feed) (void *ef, int button, int x, int y);
void (*event_mouse_move_feed) (void *ef, int x, int y);
static void
_pixels_get(void *data, Evas_Object *obj)
{
   int iw, ih, w, h;
   Smart_Data *sd;
   Emotion_Format format;

   /* Evas pixel-fetch callback: pull the current decoded frame out of the
    * video module and hand it to the image object, in whatever format the
    * module is producing (planar YV12 or packed BGRA). */
   sd = data;
   evas_object_image_size_get(obj, &iw, &ih);
   sd->module->video_data_size_get(sd->video, &w, &h);
   if ((w != iw) || (h != ih))
     {
	/* frame size changed (or first frame) - resize the image to match */
	evas_object_image_size_set(obj, w, h);
	iw = w;
	ih = h;
     }
   format = sd->module->format_get(sd->video);
   if (format == EMOTION_YV12)
     {
	unsigned char **rows;
	Evas_Pixel_Import_Source ps;

	ps.format = EVAS_PIXEL_FORMAT_YUV420P_601;
	ps.w = iw;
	ps.h = ih;

	/* row-pointer table: h luma rows followed by h/2 + h/2 chroma rows */
	ps.rows = malloc(ps.h * 2 * sizeof(void *));
	if (!ps.rows)
	  {
	     /* still release the frame so the decoder doesn't stall */
	     sd->module->frame_done(sd->video);
	     return;
	  }

	rows = (unsigned char **)ps.rows;

	if (sd->module->yuv_rows_get(sd->video, iw, ih,
				     rows,
				     &rows[ps.h],
				     &rows[ps.h + (ps.h / 2)]))
	  evas_object_image_pixels_import(obj, &ps);
	evas_object_image_pixels_dirty_set(obj, 0);
	free(ps.rows);
     }
   else if (format == EMOTION_BGRA)
     {
	unsigned char *bgra_data;

	/* BUG FIX: the original had a stray ';' terminating this if, so the
	 * braced block ran unconditionally and could hand an uninitialized
	 * pointer to evas_object_image_data_set() when no BGRA data was
	 * available. Only set the data when the module actually provides it. */
	if (sd->module->bgra_data_get(sd->video, &bgra_data))
	  evas_object_image_data_set(obj, bgra_data);
     }

   sd->module->frame_done(sd->video);
}
static double em_ratio_get(void *ef);
static int em_seekable(void *ef);
static void em_frame_done(void *ef);
-static void em_yuv_size_get(void *ef, int *w, int *h);
+static Emotion_Format em_format_get(void *ef);
+static void em_video_data_size_get(void *ef, int *w, int *h);
static int em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+static int em_bgra_data_get(void *ef, unsigned char **bgra_data);
static void em_event_feed(void *ef, int event);
static void em_event_mouse_button_feed(void *ef, int button, int x, int y);
static void em_event_mouse_move_feed(void *ef, int x, int y);
}
}
+static Emotion_Format em_format_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+ Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+
+ if (fr)
+ return fr->format;
+ return EMOTION_YV12;
+}
+
static void
-em_yuv_size_get(void *ef, int *w, int *h)
+em_video_data_size_get(void *ef, int *w, int *h)
{
Emotion_Xine_Video *ev;
Emotion_Xine_Video_Frame *fr;
return 0;
}
+static int
+em_bgra_data_get(void *ef, unsigned char **bgra_data)
+{
+ Emotion_Xine_Video *ev;
+ Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+ if (!fr) return 0;
+ if (fr->bgra_data)
+ {
+ *bgra_data = fr->bgra_data;
+ return 1;
+ }
+ return 0;
+}
+
static void
em_event_feed(void *ef, int event)
{
em_audio_handled, /* audio_handled */
em_seekable, /* seekable */
em_frame_done, /* frame_done */
- em_yuv_size_get, /* yuv_size_get */
+ em_format_get, /* format_get */
+ em_video_data_size_get, /* video_data_size_get */
em_yuv_rows_get, /* yuv_rows_get */
+ em_bgra_data_get, /* bgra_data_get */
em_event_feed, /* event_feed */
em_event_mouse_button_feed, /* event_mouse_button_feed */
em_event_mouse_move_feed, /* event_mouse_move_feed */
{
int w, h;
double ratio;
+ Emotion_Format format;
unsigned char *y, *u, *v;
+ unsigned char *bgra_data;
int y_stride, u_stride, v_stride;
Evas_Object *obj;
double timestamp;
static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
+static void _emotion_yuy2_to_bgra32 (int width, int height, unsigned char *src, unsigned char *dst);
+
/***************************************************************************/
static vo_info_t _emotion_info =
{
dv = (Emotion_Driver *)vo_driver;
// printf("emotion: _emotion_capabilities_get()\n");
- return VO_CAP_YV12;
+ return VO_CAP_YV12 | VO_CAP_YUY2;
}
/***************************************************************************/
{
int y_size, uv_size;
+ fr->frame.format = EMOTION_YV12;
fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);
fr->frame.y = fr->vo_frame.base[0];
fr->frame.u = fr->vo_frame.base[1];
fr->frame.v = fr->vo_frame.base[2];
+ fr->frame.bgra_data = NULL;
fr->frame.y_stride = fr->vo_frame.pitches[0];
fr->frame.u_stride = fr->vo_frame.pitches[1];
fr->frame.v_stride = fr->vo_frame.pitches[2];
fr->frame.obj = dv->ev->obj;
}
break;
+ case XINE_IMGFMT_YUY2:
+ {
+ int y_size, uv_size;
+
+ fr->frame.format = EMOTION_BGRA;
+ fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
+ fr->vo_frame.pitches[1] = 0;
+ fr->vo_frame.pitches[2] = 0;
+
+ fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+
+ fr->frame.w = fr->width;
+ fr->frame.h = fr->height;
+ fr->frame.ratio = fr->vo_frame.ratio;
+ fr->frame.y = NULL;
+ fr->frame.u = NULL;
+ fr->frame.v = NULL;
+ fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
+ fr->frame.y_stride = 0;
+ fr->frame.u_stride = 0;
+ fr->frame.v_stride = 0;
+ fr->frame.obj = dv->ev->obj;
+ }
+ break;
default:
break;
}
if (((format == XINE_IMGFMT_YV12)
&& ((fr->vo_frame.base[0] == NULL)
|| (fr->vo_frame.base[1] == NULL)
- || (fr->vo_frame.base[2] == NULL))))
+ || (fr->vo_frame.base[2] == NULL)))
+ || ((format == XINE_IMGFMT_YUY2)
+ && ((fr->vo_frame.base[0] == NULL)
+ || (fr->frame.bgra_data == NULL))))
{
_emotion_frame_data_free(fr);
}
{
void *buf;
int ret;
+
+ if (fr->format == XINE_IMGFMT_YUY2)
+ {
+ _emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
+ }
buf = &(fr->frame);
fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
fr->frame.u = fr->vo_frame.base[1];
fr->frame.v = fr->vo_frame.base[2];
}
+ if (fr->frame.bgra_data)
+ {
+ free(fr->frame.bgra_data);
+ fr->frame.bgra_data = NULL;
+ }
}
static void
}
}
}

/*MoOm:
* yuy2 to bgra converter taken from vgrabbj (http://vgrabbj.gecius.de)
* This code is under GPLv2. Copyright Jens Gecius.
* If it causes problem with emotion BSD license, tell me, I'll remove it!
* TODO: Really need to improve this converter!
*/
/* Saturate a 16.8-ish fixed-point value to an 8-bit channel:
 * negative (and sub-1.0) values clamp to 0, overflow clamps to 255,
 * otherwise drop the 8 fractional bits. */
#define LIMIT(x) ((x) > 0xffff ? 0xff : ((x) <= 0xff ? 0 : ((x) >> 8 )))

/* Convert a packed YUY2 (Y0 U Y1 V) image to 32-bit BGRA.
 * src holds width*height*2 bytes; dst receives width*height*4 bytes
 * (B, G, R, then a zero filler byte per pixel). Each U/V pair is shared
 * by two horizontally adjacent pixels.
 *
 * BUG FIX vs the original vgrabbj-derived loop: it advanced py/pu/pv and
 * dereferenced them *after* emitting each pixel, so the final pixel of the
 * image caused reads past the end of src (undefined behavior). The loop
 * below reads each pixel's samples first and advances afterwards, keeping
 * every access in bounds; the per-pixel arithmetic is unchanged. The
 * unused `linewidth` computation was dropped. */
static void
_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
{
   int line, col;
   int yy, u, v;
   int vr, ug, vg, ub;
   unsigned char *py, *pu, *pv;

   py = src;       /* luma: every 2nd byte   */
   pu = src + 1;   /* chroma U of the pair   */
   pv = src + 3;   /* chroma V of the pair   */

   for (line = 0; line < height; line++)
     {
	for (col = 0; col < width; col++)
	  {
	     yy = *py << 8;
	     u = *pu - 128;
	     v = *pv - 128;
	     ug = 88 * u;
	     ub = 454 * u;
	     vg = 183 * v;
	     vr = 359 * v;

	     *dst++ = LIMIT(yy + ub);           /* B */
	     *dst++ = LIMIT(yy - ug - vg);      /* G */
	     *dst++ = LIMIT(yy + vr);           /* R */
	     *dst++ = 0;                        /* filler */

	     py += 2;
	     if (col & 1)
	       {
		  pu += 4; /* skip to the next macropixel's chroma */
		  pv += 4;
	       }
	  }
     }
}