Evas_Bool emotion_object_play_get (Evas_Object *obj);
void emotion_object_position_set (Evas_Object *obj, double sec);
double emotion_object_position_get (Evas_Object *obj);
+Evas_Bool emotion_object_video_handled_get (Evas_Object *obj);
+Evas_Bool emotion_object_audio_handled_get (Evas_Object *obj);
Evas_Bool emotion_object_seekable_get (Evas_Object *obj);
double emotion_object_play_length_get (Evas_Object *obj);
void emotion_object_size_get (Evas_Object *obj, int *iw, int *ih);
double (*fps_get) (void *ef);
double (*pos_get) (void *ef);
double (*ratio_get) (void *ef);
+ int (*video_handled) (void *ef);
+ int (*audio_handled) (void *ef);
int (*seekable) (void *ef);
void (*frame_done) (void *ef);
void (*yuv_size_get) (void *ef, int *w, int *h);
void _emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio);
void _emotion_decode_stop(Evas_Object *obj);
void _emotion_playback_finished(Evas_Object *obj);
+void _emotion_audio_level_change(Evas_Object *obj);
void _emotion_channels_change(Evas_Object *obj);
void _emotion_title_set(Evas_Object *obj, char *title);
void _emotion_progress_set(Evas_Object *obj, char *info, double stat);
return sd->module->seekable(sd->video);
}
+/* Query whether the currently opened stream's video track can be handled.
+ * Returns 0 when no decoder module is loaded or no file is open; otherwise
+ * delegates to the module's video_handled() hook (xine backend: stream has
+ * video AND xine reports it as handled).  Non-zero means "playable video". */
+Evas_Bool
+emotion_object_video_handled_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_handled(sd->video);
+}
+
+/* Query whether the currently opened stream's audio track can be handled.
+ * Mirrors emotion_object_video_handled_get(): returns 0 with no module or
+ * no open file, else the decoder module's audio_handled() result. */
+Evas_Bool
+emotion_object_audio_handled_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_handled(sd->video);
+}
+
double
emotion_object_play_length_get(Evas_Object *obj)
{
evas_object_smart_callback_call(obj, "playback_finished", NULL);
}
+/* Backend -> frontend notification: the stream's audio level changed.
+ * Emits the "audio_level_change" smart callback on the emotion object so
+ * applications can react (event payload is NULL; levels must be queried). */
+void
+_emotion_audio_level_change(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, "audio_level_change", NULL);
+}
+
void
_emotion_channels_change(Evas_Object *obj)
{
ev->fd = ev->fd_write;
ev->video = xine_open_video_driver(ev->decoder, "emotion", XINE_VISUAL_TYPE_NONE, ev);
- ev->audio = xine_open_audio_driver(ev->decoder, "oss", ev);
-// ev->audio = xine_open_audio_driver(ev->decoder, "alsa", ev);
+// ev->audio = xine_open_audio_driver(ev->decoder, "oss", ev);
+ ev->audio = xine_open_audio_driver(ev->decoder, "alsa", ev);
// ev->audio = xine_open_audio_driver(ev->decoder, "arts", ev);
// ev->audio = xine_open_audio_driver(ev->decoder, "esd", ev);
ev->stream = xine_stream_new(ev->decoder, ev->audio, ev->video);
}
static int
+/* Module hook: non-zero iff the xine stream both HAS a video track and
+ * xine reports that track as decodable (VIDEO_HANDLED).  ef is the opaque
+ * Emotion_Xine_Video handle passed through the module vtable. */
+em_video_handled(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED));
+}
+
+/* Module hook: audio counterpart of em_video_handled() — non-zero iff the
+ * xine stream has an audio track and xine can decode it (AUDIO_HANDLED). */
+static int
+em_audio_handled(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_AUDIO) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_HANDLED));
+}
+
+static int
em_seekable(void *ef)
{
Emotion_Xine_Video *ev;
xine_audio_level_data_t *e;
e = (xine_audio_level_data_t *)eev->xine_event;
+ _emotion_audio_level_change(ev->obj);
printf("EV: Audio Level [FIXME: break this out to emotion api]\n");
// e->left (0->100)
// e->right
em_fps_get, /* fps_get */
em_pos_get, /* pos_get */
em_ratio_get, /* ratio_get */
+ em_video_handled, /* video_handled */
+ em_audio_handled, /* audio_handled */
em_seekable, /* seekable */
em_frame_done, /* frame_done */
em_yuv_size_get, /* yuv_size_get */