--- /dev/null
+stamp-h1
+missing
+libtool
+install-sh
+emotion.pc
+emotion-config
+depcomp
+configure
+config.status
+config.log
+config.h.in
+config.h
+autom4te.cache
+aclocal.m4
+Makefile.in
+Makefile
+doc
+*.tar.gz
+ltmain.sh
+mkinstalldirs
+config.guess
+config.sub
+emotion.spec
+emotion.c
--- /dev/null
+The Rasterman (Carsten Haitzler) <raster@rasterman.com>
+doursse (Vincent Torri) <torri@maths.univ-evry.fr>
+captainigloo
--- /dev/null
+Copyright (C) 2000 Carsten Haitzler and various contributors (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies of the Software and its Copyright notices. In addition publicly
+documented acknowledgment must be given that this software has been used if no
+source code of this software is made available publicly. This includes
+acknowledgments in either Copyright notices, Manuals, Publicity and Marketing
+documents or any documentation provided with any product containing this
+software. This License does not apply to any software that links to the
+libraries provided by this software (statically or dynamically), but only to
+the software provided.
+
+Please see the COPYING.PLAIN for a plain-english explanation of this notice
+and it's intent.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+Plain English Copyright Notice
+
+This file is not intended to be the actual License. The reason this file
+exists is that we here are programmers and engineers. We aren't lawyers. We
+provide licenses that we THINK say the right things, but we have our own
+intentions at heart. This is a plain-english explanation of what those
+intentions are, and if you follow them you will be within the "spirit" of
+the license.
+
+The intent is for us to enjoy writing software that is useful to us (the
+AUTHORS) and allow others to use it freely and also benefit from the work we
+put into making it. We don't want to restrict others using it. They should
+not *HAVE* to make the source code of the applications they write that
+simply link to these libraries (be that statically or dynamically), or for
+them to be limited as to what license they choose to use (be it open, closed
+or anything else). But we would like to know you are using these libraries.
+We simply would like to know that it has been useful to someone. This is why
+we ask for acknowledgement of some sort.
+
+You can do what you want with the source of this software - it doesn't
+matter. We still have it here for ourselves and it is open and free to use
+and download and play with. It can't be taken away. We don't really mind what
+you do with the source to your software. We would simply like to know that
+you are using it - especially if it makes it to a commercial product. If you
+simply e-mail all the AUTHORS (see COPYING and AUTHORS files) telling us, and
+then make sure you include a paragraph or page in the manual for the product
+with the copyright notice and state that you used this software, we will be
+very happy. If you want to contribute back modifications and fixes you may have
+made we will welcome those too with open arms (generally). If you want help
+with changes needed, ports needed or features to be added, arrangements can
+be easily made with some dialogue.
+
+Carsten Haitzler <raster@rasterman.com>
--- /dev/null
+2007-10-31 Vincent Torri <doursse at users dot sf dot net>
+
+ * configure.in:
+ * src/bin/emotion_test_main.c:
+ * src/lib/Makefile.am:
+ * src/lib/emotion_private.h:
+ * src/lib/emotion_smart.c:
+ * src/modules/Makefile.am:
+ * src/modules/emotion_gstreamer.c:
+ Use ecore_plugin instead of dlfcn
+ The module names are now "xine" and "gstreamer" (it's useless to add
+ the extension, as ecore_plugin does not need it).
+ Look for the modules in path that is pointed by the env variable
+ EMOTION_MODULES_DIR first.
+ Fix a leak in the gstreamer module
+ Remove some trailing spaces
+ Fix bug #223
+
+ add the support of URI playback. Minor fixes.
+ Fix bug #114
+
+2007-09-01 Vincent Torri <doursse at users dot sf dot net>
+
+ * src/modules/emotion_gstreamer.c: (em_file_open):
+ * src/modules/emotion_gstreamer_pipeline.c: (cb_handoff),
+ (emotion_pipeline_dvd_build), (emotion_pipeline_uri_build),
+ (emotion_pipeline_file_build), (_emotion_streams_sinks_get),
+ (_emotion_video_sink_fill), (_emotion_audio_sink_fill):
+ * src/modules/emotion_gstreamer_pipeline.h:
+ add the support of URI playback. Minor fixes.
+ Fix bug #114
+
+Wed Mar 29 20:33:40 2006 Michael Jennings (mej)
+
+Xine plugins seem to no longer be built.
+----------------------------------------------------------------------
--- /dev/null
+PROJECT_NAME = Emotion
+PROJECT_NUMBER =
+OUTPUT_DIRECTORY = doc
+INPUT = emotion.c
+IMAGE_PATH = doc/img
+OUTPUT_LANGUAGE = English
+GENERATE_HTML = YES
+HTML_OUTPUT = html
+HTML_FILE_EXTENSION = .html
+HTML_HEADER = doc/head.html
+HTML_FOOTER = doc/foot.html
+HTML_STYLESHEET = doc/e.css
+HTML_ALIGN_MEMBERS = YES
+ENUM_VALUES_PER_LINE = 1
+GENERATE_HTMLHELP = NO
+CHM_FILE =
+HHC_LOCATION =
+GENERATE_CHI = NO
+BINARY_TOC = NO
+TOC_EXPAND = NO
+DISABLE_INDEX = YES
+EXTRACT_ALL = NO
+EXTRACT_PRIVATE = NO
+EXTRACT_STATIC = NO
+EXTRACT_LOCAL_CLASSES = NO
+HIDE_UNDOC_MEMBERS = YES
+HIDE_UNDOC_CLASSES = YES
+HIDE_FRIEND_COMPOUNDS = YES
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = NO
+STRIP_FROM_PATH =
+INTERNAL_DOCS = NO
+STRIP_CODE_COMMENTS = YES
+CASE_SENSE_NAMES = YES
+SHORT_NAMES = NO
+HIDE_SCOPE_NAMES = NO
+VERBATIM_HEADERS = NO
+SHOW_INCLUDE_FILES = NO
+JAVADOC_AUTOBRIEF = YES
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP = NO
+INHERIT_DOCS = YES
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+DISTRIBUTE_GROUP_DOC = NO
+TAB_SIZE = 2
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ALIASES =
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+OPTIMIZE_OUTPUT_FOR_C = YES
+OPTIMIZE_OUTPUT_JAVA = NO
+SHOW_USED_FILES = NO
+QUIET = NO
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_FORMAT = "$file:$line: $text"
+WARN_LOGFILE =
+FILE_PATTERNS =
+RECURSIVE = NO
+EXCLUDE =
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS =
+EXAMPLE_RECURSIVE = NO
+INPUT_FILTER =
+FILTER_SOURCE_FILES = NO
+SOURCE_BROWSER = NO
+INLINE_SOURCES = NO
+REFERENCED_BY_RELATION = YES
+REFERENCES_RELATION = YES
+ALPHABETICAL_INDEX = YES
+COLS_IN_ALPHA_INDEX = 2
+IGNORE_PREFIX =
+GENERATE_TREEVIEW = NO
+TREEVIEW_WIDTH = 250
+GENERATE_LATEX = YES
+LATEX_OUTPUT = latex
+LATEX_CMD_NAME = latex
+MAKEINDEX_CMD_NAME = makeindex
+COMPACT_LATEX = NO
+PAPER_TYPE = a4wide
+EXTRA_PACKAGES =
+LATEX_HEADER =
+PDF_HYPERLINKS = YES
+USE_PDFLATEX = NO
+LATEX_BATCHMODE = NO
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+GENERATE_MAN = YES
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_LINKS = YES
+GENERATE_XML = NO
+XML_SCHEMA =
+XML_DTD =
+GENERATE_AUTOGEN_DEF = NO
+ENABLE_PREPROCESSING = YES
+MACRO_EXPANSION = NO
+EXPAND_ONLY_PREDEF = NO
+SEARCH_INCLUDES = NO
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS =
+PREDEFINED =
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = YES
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = YES
+PERL_PATH = /usr/bin/perl
+CLASS_DIAGRAMS = NO
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = NO
+CLASS_GRAPH = NO
+COLLABORATION_GRAPH = NO
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = NO
+INCLUDED_BY_GRAPH = NO
+GRAPHICAL_HIERARCHY = NO
+DOT_IMAGE_FORMAT = png
+DOT_PATH =
+DOTFILE_DIRS =
+MAX_DOT_GRAPH_WIDTH = 512
+MAX_DOT_GRAPH_HEIGHT = 512
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
+SEARCHENGINE = NO
--- /dev/null
+COMPILING and INSTALLING:
+
+If you got an official release tar archive do:
+ ./configure
+
+( otherwise if you got this from enlightenment cvs do: ./autogen.sh )
+
+Then to compile:
+ make
+
+To install (run this as root, or the user who handles installs):
+ make install
+
+NOTE: You MUST run "make install" for Emotion to run properly.
--- /dev/null
+## Process this file with automake to produce Makefile.in
+
+ACLOCAL_AMFLAGS = -I m4
+
+SUBDIRS = src data
+
+MAINTAINERCLEANFILES = Makefile.in aclocal.m4 config.guess emotion.spec \
+ config.h.in config.sub configure install-sh \
+ ltconfig ltmain.sh missing mkinstalldirs \
+ stamp-h.in emotion_docs.tar emotion_docs.tar.gz \
+ emotion.c acconfig.h debian/changelog depcomp
+
+bin_SCRIPTS =
+
+EXTRA_DIST = README AUTHORS COPYING COPYING-PLAIN \
+ emotion.spec emotion.spec.in emotion.c.in gendoc Doxyfile \
+ emotion.pc.in debian/changelog debian/control \
+ debian/copyright debian/rules debian/libemotion0.install \
+ debian/libemotion-dev.install debian/libemotion-bin.install \
+ debian/libemotion-gstreamer.install debian/libemotion-xine.install
+
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = emotion.pc
--- /dev/null
+Emotion 0.0.1
--- /dev/null
+
+Emotion smart object
+--------------------
+
+
+Xine module
+-----------
+
+* Fix seek and get_pos threads delete bugs (infinite loop)
+* Add support of visualizations for audio files (Goom...)
+
+
+Gstreamer module
+----------------
+
+* Add gststreamselector to support multiple video and audio streams.
+* Add API for visualisation plugins
+* Add http source (N. Aguirre)
+* Add mms source
+* Add rtsp source
+* Add dvb source
+* Add v4l2 source (no v4l1 source) (N. Aguirre)
+* Add dvd source (waiting for the dvdnav gstreamer source plugin, though...)
+* Add daap source
+* Add upnp source
+* Implement time shifting
+* Add evas sink to gstreamer (in progress: V. Torri)
--- /dev/null
+#!/bin/sh
+
+rm -rf autom4te.cache
+rm -f aclocal.m4 ltmain.sh
+
+touch README
+
+echo "Running aclocal..." ; aclocal $ACLOCAL_FLAGS -I m4 || exit 1
+echo "Running autoheader..." ; autoheader || exit 1
+echo "Running autoconf..." ; autoconf || exit 1
+echo "Running libtoolize..." ; (libtoolize --copy --automake || glibtoolize --automake) || exit 1
+echo "Running automake..." ; automake --add-missing --copy --gnu || exit 1
+
+if [ -z "$NOCONFIGURE" ]; then
+ ./configure "$@"
+fi
--- /dev/null
+dnl Process this file with autoconf to produce a configure script.
+
+# get rid of that stupid cache mechanism
+rm -f config.cache
+
+AC_INIT(emotion, 0.1.0.042, enlightenment-devel@lists.sourceforge.net)
+AC_PREREQ(2.52)
+AC_CONFIG_SRCDIR(configure.in)
+
+AM_INIT_AUTOMAKE(1.6 dist-bzip2)
+AM_CONFIG_HEADER(config.h)
+
+AC_C_BIGENDIAN
+AC_ISC_POSIX
+AC_PROG_CC
+AM_PROG_CC_STDC
+AC_HEADER_STDC
+AC_C_CONST
+
+AC_LIBTOOL_WIN32_DLL
+define([AC_LIBTOOL_LANG_CXX_CONFIG], [:])dnl
+define([AC_LIBTOOL_LANG_F77_CONFIG], [:])dnl
+AC_PROG_LIBTOOL
+
+VMAJ=`echo $PACKAGE_VERSION | awk -F. '{printf("%s", $1);}'`
+VMIN=`echo $PACKAGE_VERSION | awk -F. '{printf("%s", $2);}'`
+VMIC=`echo $PACKAGE_VERSION | awk -F. '{printf("%s", $3);}'`
+SNAP=`echo $PACKAGE_VERSION | awk -F. '{printf("%s", $4);}'`
+version_info=`expr $VMAJ + $VMIN`":$VMIC:$VMIN"
+AC_SUBST(version_info)
+
+PKG_CHECK_MODULES(EVAS, [evas >= 0.9.9])
+PKG_CHECK_MODULES(EDJE, [edje >= 0.5.0])
+PKG_CHECK_MODULES(ECORE, [ecore >= 0.9.9 ecore-evas >= 0.9.9 ecore-job >= 0.9.9])
+
+save_CPPFLAGS=$CPPFLAGS
+CPPFLAGS="$CPPFLAGS $ECORE_CFLAGS"
+AC_CHECK_HEADERS(Ecore_X.h Ecore_Fb.h)
+CPPFLAGS=$save_CPPFLAGS
+
+AC_ARG_ENABLE(xine,
+ AC_HELP_STRING([--disable-xine],[disable xine support @<:@default=auto@:>@]),
+ [enable_xine=$enableval],[enable_xine=auto])
+HAVE_XINE="no"
+if test "$enable_xine" != "no" ; then
+ PKG_CHECK_MODULES(XINE, [libxine >= 1.1.1], [HAVE_XINE="yes"])
+fi
+if test "$enable_xine$HAVE_XINE" = "yesno" ; then
+ AC_MSG_RESULT(xine not found)
+fi
+
+AC_ARG_ENABLE(gstreamer,
+ AC_HELP_STRING([--disable-gstreamer],[disable gstreamer support @<:@default=auto@:>@]),
+ [enable_gstreamer=$enableval],[enable_gstreamer=auto])
+HAVE_GSTREAMER="no"
+HAVE_GSTPLUG="no"
+if test "$enable_gstreamer" != "no" ; then
+
+ dnl Gstreamer version requirement
+ GST_REQS=0.10.2
+ GSTPLUG_REQS=0.10.1
+ GST_MAJORMINOR=0.10
+
+ PKG_CHECK_MODULES(GST, \
+ gstreamer-$GST_MAJORMINOR >= $GST_REQS,
+ HAVE_GSTREAMER="yes",
+ HAVE_GSTREAMER="no")
+
+ PKG_CHECK_MODULES(GSTPLUG, \
+ gstreamer-plugins-base-$GST_MAJORMINOR >= $GSTPLUG_REQS,
+ HAVE_GSTPLUG="yes",
+ HAVE_GSTPLUG="no")
+
+ AM_GST_ELEMENT_CHECK(
+ [ffmpeg],
+ [HAVE_GSTFFMPEG="yes"],
+ [HAVE_GSTFFMPEG="no"])
+
+ AM_GST_ELEMENT_CHECK(
+ [cdiocddasrc],
+ [HAVE_GSTCDIOCDDASRC="yes"],
+ [HAVE_GSTCDIOCDDASRC="no"])
+fi
+if test "$enable_gstreamer$HAVE_GSTREAMER" = "yesno" ; then
+ AC_MSG_RESULT(gstreamer not found)
+fi
+if test "$HAVE_GSTPLUG" = "no"; then
+ if test "$HAVE_GSTREAMER" = "no" ; then
+ GSTPLUG_MSG="no"
+ else
+ AC_MSG_WARN([You should install gstreamer plugins and gst-ffmpeg to properly decode your video and audio files])
+ GSTPLUG_MSG="no (you should install gst-plugins-base and gst-ffmpeg)"
+ fi
+else
+ if test "$HAVE_GSTFFMPEG" = "no"; then
+ if test "$HAVE_GSTREAMER" = "no" ; then
+ GSTPLUG_MSG="no"
+ else
+ AC_MSG_WARN([You should install gstreamer plugins and gst-ffmpeg to properly decode your video and audio files])
+ GSTPLUG_MSG="no (you should install gst-plugins-base and gst-ffmpeg)"
+ fi
+ GSTPLUG_MSG="yes"
+ else
+ GSTPLUG_MSG="yes"
+ fi
+ if test "$HAVE_GSTCDIOCDDASRC" = "no"; then
+ AC_MSG_WARN([You should install gstreamer-plugins-good and make sure that the cdiocddasrc element is installed])
+ GSTCDDA_MSG="no (you should install gst-plugins-good and the cdiocddasrc element)"
+ else
+ GSTCDDA_MSG="yes"
+ fi
+fi
+
+if test "$HAVE_XINE$HAVE_GSTREAMER" = "nono" ; then
+ AC_MSG_ERROR([Xine or Gstreamer must be installed to build emotion])
+fi
+
+AM_CONDITIONAL([BUILD_XINE_MODULE], [test "$HAVE_XINE" = yes])
+AM_CONDITIONAL([BUILD_GSTREAMER_MODULE], [test "$HAVE_GSTREAMER" = yes])
+
+AC_OUTPUT([
+Makefile
+emotion.pc
+emotion.spec
+src/Makefile
+src/lib/Makefile
+src/modules/Makefile
+src/modules/xine/Makefile
+src/modules/gstreamer/Makefile
+src/bin/Makefile
+data/Makefile
+debian/changelog
+])
+
+
+#####################################################################
+## Info
+
+echo
+echo
+echo
+echo "------------------------------------------------------------------------"
+echo "$PACKAGE $VERSION"
+echo "------------------------------------------------------------------------"
+echo
+echo "Configuration Options Summary:"
+echo
+echo " Modules:"
+echo " Xine...............: $HAVE_XINE"
+echo " Gstreamer..........: $HAVE_GSTREAMER"
+echo " Gstreamer plugins..: $GSTPLUG_MSG"
+echo " Gstreamer FFmpeg...: $HAVE_GSTFFMPEG"
+echo " Gstreamer CDDA.....: $GSTCDDA_MSG"
+echo
+echo " Compilation..........: make"
+echo
+echo " Installation.........: make install"
+echo
+echo " prefix.........: $prefix"
+echo
--- /dev/null
+Makefile
+Makefile.in
+*.eet
+theme.edj
--- /dev/null
+
+MAINTAINERCLEANFILES = Makefile.in
+
+EDJE_CC = edje_cc
+EDJE_FLAGS = -v -id $(top_srcdir)/data -fd $(top_srcdir)/data
+
+filesdir = $(datadir)/emotion/data
+files_DATA = theme.edj
+
+EXTRA_DIST = \
+bpause.png \
+bplay.png \
+bstop.png \
+e_logo.png \
+fr1.png \
+fr2.png \
+fr3.png \
+fr4.png \
+fr5.png \
+fr6.png \
+fr7.png \
+h_slider.png \
+knob.png \
+orb.png \
+pnl.png \
+sl.png \
+tiles.png \
+video_frame_bottom.png \
+video_frame_left.png \
+video_frame_right.png \
+video_frame_top.png \
+whb.png \
+window_inner_shadow.png \
+Vera.ttf \
+theme.edc
+
+theme.edj: Makefile
+ $(EDJE_CC) $(EDJE_FLAGS) \
+ $(top_srcdir)/data/theme.edc \
+ $(top_builddir)/data/theme.edj
+
+clean-local:
+ rm -f *.edj
--- /dev/null
+images {
+ image: "tiles.png" COMP;
+ image: "window_inner_shadow.png" LOSSY 70;
+/* image: "e_logo.png" COMP;*/
+
+ image: "h_slider.png" COMP;
+/************/
+ image: "video_frame_left.png" COMP;
+ image: "video_frame_right.png" COMP;
+ image: "video_frame_top.png" COMP;
+ image: "video_frame_bottom.png" COMP;
+
+ image: "knob.png" COMP;
+
+ image: "fr1.png" COMP;
+ image: "fr2.png" COMP;
+ image: "fr3.png" COMP;
+ image: "fr4.png" COMP;
+ image: "fr5.png" COMP;
+ image: "fr6.png" COMP;
+ image: "fr7.png" COMP;
+ image: "sl.png" COMP;
+ image: "orb.png" COMP;
+ image: "whb.png" COMP;
+ image: "bpause.png" COMP;
+ image: "bplay.png" COMP;
+ image: "bstop.png" COMP;
+ image: "pnl.png" COMP;
+}
+
+fonts {
+ font: "Vera.ttf" "Vera";
+}
+
+collections {
+ group {
+ name: "background";
+ parts {
+ part {
+ name: "bg";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ color_class: "background";
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ fill {
+ smooth: 0;
+ origin {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ size {
+ relative: 0.0 0.0;
+ offset: 128 128;
+ }
+ }
+ image {
+ normal: "tiles.png";
+ }
+ }
+ }
+/*
+ part {
+ name: "logo";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ min: 120 140;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 48 48;
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: 140 140;
+ }
+ image {
+ normal: "e_logo.png";
+ }
+ }
+ }
+ */
+ part {
+ name: "shadow";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ normal: "window_inner_shadow.png";
+ }
+ }
+ }
+ }
+ }
+#if 0
+ group {
+ name: "video_controller";
+ parts {
+ // need swallow parts:
+ // "video_swallow"
+ //
+ // need txt parts:
+ // "video_speed_txt"
+ // "video_progress_txt"
+ //
+ // need dragables:
+ // "video_progress" horizontal
+ // "video_speed" vertical
+ part {
+ name: "video_swallow";
+ mouse_events: 0;
+ type: SWALLOW;
+ clip_to: "vclip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ }
+ }
+ part {
+ name: "vclip";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 255 255 255 255;
+ }
+ description {
+ state: "faded" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 255 255 255 180;
+ }
+ description {
+ state: "dim" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 255 255 255 80;
+ }
+ }
+ part {
+ name: "fr_c1";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: 34 34;
+ }
+ image {
+ normal: "fr1.png";
+ }
+ }
+ }
+ part {
+ name: "fr_c2";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: -35 0;
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -1 34;
+ }
+ image {
+ normal: "fr6.png";
+ }
+ }
+ }
+ part {
+ name: "fr_c3";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 1.0;
+ offset: 0 -35;
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: 34 -1;
+ }
+ image {
+ normal: "fr3.png";
+ }
+ }
+ }
+ part {
+ name: "fr_c4";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 1.0;
+ offset: -35 -35;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: 9 9;
+ }
+ image {
+ normal: "fr5.png";
+ }
+ }
+ }
+ part {
+ name: "fr_s1";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 1.0;
+ offset: 0 0;
+ to: "fr_c1";
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -1 -1;
+ to: "fr_c3";
+ }
+ image {
+ normal: "fr2.png";
+ }
+ }
+ }
+ part {
+ name: "fr_s2";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: 0 0;
+ to: "fr_c3";
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: -1 -11;
+ to: "fr_c4";
+ }
+ image {
+ normal: "fr4.png";
+ }
+ }
+ }
+ part {
+ name: "fr_s3";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 1.0;
+ offset: 0 0;
+ to: "fr_c2";
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -11 -1;
+ to: "fr_c4";
+ }
+ image {
+ normal: "fr2.png";
+ }
+ }
+ }
+ part {
+ name: "fr_t";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: 0 0;
+ to: "fr_c1";
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: -1 -1;
+ to: "fr_c2";
+ }
+ image {
+ border: 50 50 0 0;
+ normal: "fr7.png";
+ }
+ }
+ }
+ part {
+ name: "panel_bg";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ to: "panel_clip";
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ to: "panel_clip";
+ }
+ image {
+ normal: "pnl.png";
+ }
+ fill {
+ smooth: 1;
+ origin {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ size {
+ relative: 0.0 1.0;
+ offset: 32 0;
+ }
+ }
+ }
+ }
+ part {
+ name: "prog_container";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 4 4;
+ to: "panel_clip";
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -5 9;
+ to: "panel_clip";
+ }
+ image {
+ border: 2 2 2 2;
+ normal: "whb.png";
+ }
+ }
+ }
+ part {
+ name: "prog_done";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ to: "prog_container";
+ }
+ rel2 {
+ relative: 0.5 1.0;
+ offset: 0 -1;
+ to_x: "video_progress";
+ to_y: "prog_container";
+ }
+ image {
+ border: 2 2 2 2;
+ normal: "orb.png";
+ }
+ }
+ }
+ part {
+ name: "video_progress";
+ type: RECT;
+ clip_to: "panel_clip";
+ mouse_events: 1;
+ dragable {
+ x: 1 1 0;
+ y: 0 0 0;
+ confine: "prog_container";
+ }
+ description {
+ state: "default" 0.0;
+ min: 10 5;
+ align: 0.5 0.5;
+ rel1 {
+ to: "prog_container";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "prog_container";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ color: 0 255 0 50;
+ }
+ }
+ part {
+ name: "video_progress_img";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ min: 9 16;
+ align: 0.5 1.0;
+ fixed: 1 1;
+ rel1 {
+ relative: 0.5 0.0;
+ offset: 0 0;
+ to_x: "video_progress";
+ to_y: "prog_container";
+ }
+ rel2 {
+ relative: 0.5 0.0;
+ offset: 0 0;
+ to_x: "video_progress";
+ to_y: "prog_container";
+ }
+ image {
+ normal: "sl.png";
+ }
+ }
+ }
+ part {
+ name: "b_stop";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 0.0;
+ rel1 {
+ relative: 1.0 1.0;
+ offset: -5 -5;
+ to: "panel_clip";
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -5 -5;
+ to: "panel_clip";
+ }
+ image {
+ normal: "bstop.png";
+ }
+ }
+ }
+ part {
+ name: "b_play";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 1.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ image {
+ normal: "bstop.png";
+ }
+ }
+ description {
+ state: "play" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 1.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ image {
+ normal: "bplay.png";
+ }
+ }
+ description {
+ state: "pause" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 1.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ image {
+ normal: "bpause.png";
+ }
+ }
+ }
+ part {
+ name: "panel_clip";
+ mouse_events: 0;
+ type: RECT;
+ clip_to: "vclip";
+ description {
+ visible: 0;
+ state: "default" 0.0;
+ rel1 {
+ to: "panel";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "panel";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 255 255 0;
+ }
+ description {
+ visible: 1;
+ state: "shown" 0.0;
+ rel1 {
+ to: "panel";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "panel";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 255 255 255;
+ }
+ }
+ part {
+ name: "video_progress_txt";
+ type: TEXT;
+ mouse_events: 0;
+ effect: OUTLINE;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "panel";
+ relative: 0.0 1.0;
+ offset: 0 -10;
+ }
+ rel2 {
+ to: "panel";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 255 255 255;
+ color2: 0 0 0 255;
+ text {
+ text: "Video Progress";
+ font: "Vera";
+ size: 6;
+ align: 0.0 1.0;
+ };
+ }
+ }
+ part {
+ name: "panel";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c3";
+ relative: 0.0 0.0;
+ offset: 20 -46;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "panel2";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c2";
+ relative: 1.0 0.0;
+ offset: -1 -1;
+ to: "panel";
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_resizer";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_mover";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_t";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "fr_t";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ }
+ programs {
+ program {
+ name: "video_move_start";
+ signal: "mouse,down,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "start";
+ }
+ program {
+ name: "video_move_stop";
+ signal: "mouse,up,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "stop";
+ }
+ program {
+ name: "video_resize_start";
+ signal: "mouse,down,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "start";
+ }
+ program {
+ name: "video_resize_stop";
+ signal: "mouse,up,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "stop";
+ }
+ program {
+ name: "panel_show";
+ signal: "mouse,in";
+ source: "panel";
+ action: STATE_SET "shown" 0.0;
+ transition: LINEAR 1.0;
+ target: "panel_clip";
+ }
+ program {
+ name: "panel_hide";
+ signal: "mouse,in";
+ source: "panel2";
+ action: STATE_SET "default" 0.0;
+ transition: LINEAR 1.0;
+ target: "panel_clip";
+ }
+ }
+ }
+#else
+ group {
+ name: "video_controller";
+ parts {
+ // need swallow parts:
+ // "video_swallow"
+ //
+ // need txt parts:
+ // "video_speed_txt"
+ // "video_progress_txt"
+ //
+ // need dragables:
+ // "video_progress" horizontal
+ // "video_speed" vertical
+ part {
+ name: "video_swallow";
+ mouse_events: 0;
+ type: SWALLOW;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: -8 23;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: 7 -25;
+ }
+ }
+ }
+ part {
+ name: "video_frame_left";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: 32 -1;
+ }
+ image {
+ border: 0 0 33 33;
+ normal: "video_frame_left.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_right";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: -32 0;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ border: 0 0 33 33;
+ normal: "video_frame_right.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_top";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 0.0;
+ offset: -1 31;
+ }
+ image {
+ normal: "video_frame_top.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_bottom";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 0 -32;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ normal: "video_frame_bottom.png";
+ }
+ }
+ }
+ part {
+ name: "video_speed_txt";
+ type: TEXT;
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.0;
+ offset: 0 8;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 1.0 1.0;
+ offset: -1 -13;
+ }
+ color: 0 0 0 255;
+ text {
+ text: "Video Speed";
+ font: "Vera";
+ size: 6;
+ align: 1.0 0.5;
+ };
+ }
+ }
+ part {
+ name: "video_progress_confine";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 1 18;
+ align: 0.5 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 0 -25;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: -1 -25;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_progress";
+ type: RECT;
+ mouse_events: 1;
+ dragable {
+ x: 1 1 0;
+ y: 0 0 0;
+ confine: "video_progress_confine";
+ }
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 34 18;
+ rel1 {
+ to: "video_progress_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_progress_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_progress_img";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 42 26;
+ rel1 {
+ to: "video_progress";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_progress";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "h_slider.png";
+ }
+ }
+ }
+ part {
+ name: "video_speed_confine";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: 0 24;
+ }
+ rel2 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 48 -49;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_speed";
+ mouse_events: 1;
+ dragable {
+ x: 0 0 0;
+ y: -1 1 0;
+ confine: "video_speed_confine";
+ }
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_speed_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_speed_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_play";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_pause";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_stop";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 1.0 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 1.0 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_mover";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_top";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_top";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 20 20 20;
+ }
+ }
+ part {
+ name: "video_resizer";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: 0 -31;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 20 20 255 20;
+ }
+ }
+ part {
+ name: "video_progress_txt";
+ type: TEXT;
+ mouse_events: 0;
+ effect: OUTLINE;
+ description {
+ state: "default" 0.0;
+ align: 1.0 1.0;
+ fixed: 1 1;
+ rel1 {
+ relative: 1.0 1.0;
+ offset: -2 -2;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -2 -2;
+ }
+ color: 255 255 255 255;
+ color2: 0 0 0 255;
+ text {
+ text: "XX:XX:XX / XX:XX:XX";
+ font: "Vera";
+ size: 10;
+ align: 1.0 1.0;
+ min: 1 1;
+ };
+ }
+ }
+ }
+ programs {
+ // emit signals:
+ // "video_control" "play"
+ // "video_control" "pause"
+ // "video_control" "stop"
+ // "drag" "video_progress"
+ // "drag" "video_speed"
+ //
+ // get signals:
+ // "video_state" "play"
+ // "video_state" "pause"
+ // "video_state" "stop"
+ program {
+ name: "video_play";
+ signal: "mouse,down,1";
+ source: "video_play";
+ action: SIGNAL_EMIT "video_control" "play";
+ }
+ program {
+ name: "video_pause";
+ signal: "mouse,down,1";
+ source: "video_pause";
+ action: SIGNAL_EMIT "video_control" "pause";
+ }
+ program {
+ name: "video_stop";
+ signal: "mouse,down,1";
+ source: "video_stop";
+ action: SIGNAL_EMIT "video_control" "stop";
+ }
+ program {
+ name: "video_move_start";
+ signal: "mouse,down,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "start";
+ }
+ program {
+ name: "video_move_stop";
+ signal: "mouse,up,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "stop";
+ }
+ program {
+ name: "video_resize_start";
+ signal: "mouse,down,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "start";
+ }
+ program {
+ name: "video_resize_stop";
+ signal: "mouse,up,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "stop";
+ }
+ }
+ }
+#endif
+}
--- /dev/null
+emotion (@VERSION@-1) unstable; urgency=low
+
+ * CVS Release.
+
+ -- Falko Schmidt <falko@alphagemini.org>  Tue, 01 Apr 2008 15:11:22 +0000
+
--- /dev/null
+Source: emotion
+Priority: optional
+Section: libs
+Maintainer: Falko Schmidt <falko@alphagemini.org>
+Build-Depends: cdbs, debhelper (>= 6), pkg-config, libeet-dev, libevas-dev, libedje-dev, libecore-dev, libembryo-dev, libxine-dev (>=1.1.0), libgstreamer0.10-dev, libgstreamer-plugins-base0.10-dev
+Standards-Version: 3.7.3
+Homepage: http://www.enlightenment.org
+
+Package: libemotion0
+Section: libs
+Architecture: any
+Depends: ${shlibs:Depends}
+Provides: libemotion
+Description: Video playback library used in Enlightenment DR0.17
+ This is the emotion library, a wrapper library used in the next-generation
+ Enlightenment suite for video playback.
+ .
+ This package contains the shared libraries.
+
+Package: libemotion-dev
+Section: libdevel
+Architecture: any
+Depends: libemotion0 (= ${binary:Version}), libevas-dev, libecore-dev
+Provides: libemotion0-dev
+Description: Headers, static libraries and documentation for Emotion
+ This package contains development files for the Emotion library, a wrapper
+ library used in the next-generation Enlightenment suite for video playback.
+ .
+ This package contains headers and static libraries for development with
+ libemotion.
+
+Package: libemotion-xine
+Section: libs
+Architecture: any
+Depends: ${shlibs:Depends}, libemotion0, libxine1 (>=1.1.0)
+Provides: libemotion-xine
+Description: Video playback library used in Enlightenment DR0.17
+ This is the emotion library, a wrapper library used in the next-generation
+ Enlightenment suite for video playback.
+ .
+ This package provides the xine module for emotion.
+
+Package: libemotion-gstreamer
+Section: libs
+Architecture: any
+Depends: ${shlibs:Depends}, libemotion0, gstreamer0.10-plugins-good
+Provides: libemotion-gstreamer
+Description: Video playback library used in Enlightenment DR0.17
+ This is the emotion library, a wrapper library used in the next-generation
+ Enlightenment suite for video playback.
+ .
+ This package provides the gstreamer module for emotion.
+
+Package: libemotion-bin
+Section: x11
+Architecture: any
+Depends: ${shlibs:Depends}
+Provides: libemotion0-bin
+Description: Test programs for Emotion
+ This is the emotion library, a wrapper library used in the next-generation
+ Enlightenment suite for video playback.
+ .
+ This package contains test programs for the Emotion library.
--- /dev/null
+This package was debianized by Victor Koeppel <v_koeppel@yahoo.fr> on
+Sat, 19 Jun 2004 20:20:49 +0200.
+
+It was downloaded from the e17/libs/emotion module of the enlightenment cvs
+tree. For more information, see:
+
+ http://www.enlightenment.org/cvs.html
+
+Upstream Author: The Rasterman (Carsten Haitzler) <raster@rasterman.com>
+
+Copyright:
+
+Copyright (C) 2000 Carsten Haitzler and various contributors (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies of the Software and its Copyright notices. In addition publicly
+documented acknowledgment must be given that this software has been used if no
+source code of this software is made available publicly. This includes
+acknowledgments in either Copyright notices, Manuals, Publicity and Marketing
+documents or any documentation provided with any product containing this
+software. This License does not apply to any software that links to the
+libraries provided by this software (statically or dynamically), but only to
+the software provided.
+
+Please see the COPYING.PLAIN for a plain-english explanation of this notice
+and it's intent.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+debian/tmp/usr/bin/emotion_*
+debian/tmp/usr/share/emotion/data/theme.edj
--- /dev/null
+debian/tmp/usr/include/*
+debian/tmp/usr/lib/pkgconfig/*
+debian/tmp/usr/lib/*.a
+debian/tmp/usr/lib/*.so
--- /dev/null
+debian/tmp/usr/lib/emotion/*gstreamer*.so
--- /dev/null
+debian/tmp/usr/lib/emotion/*xine*.so
--- /dev/null
+debian/tmp/usr/lib/libemotion.so.*
--- /dev/null
+#!/usr/bin/make -f
+
+include /usr/share/cdbs/1/rules/debhelper.mk
+include /usr/share/cdbs/1/class/autotools.mk
+
+DEB_MAKE_CLEAN_TARGET := clean
+
+clean::
+ ./autogen.sh --prefix=/usr $(DEB_CONFIGURE_EXTRA_FLAGS)
--- /dev/null
+html
+latex
+man
--- /dev/null
+body {
+ background: url("b.png");
+ background-repeat: repeat-x;
+ background-position: top left;
+ background-color: #f4f4f4;
+ text-align: center;
+ font-family: sans-serif;
+ padding: 0;
+ margin: 0;
+}
+
+div.main {
+ margin: 1em auto;
+ vertical-align: top;
+ font-family: "Bitstream Vera", "Vera", "Trebuchet MS", Trebuchet, Tahoma, sans-serif;
+ color: #444444;
+ font-size: 0.8em;
+ text-align: justify;
+ width: 80%;
+}
+
+td.t { background-image:url("t.gif"); }
+td.t[class] { background-image:url("t.png"); }
+td.tl { background-image:url("tl.gif"); }
+td.tl[class] { background-image:url("tl.png"); }
+
+td.nav, td.lnav, td.rnav {
+ align: middle;
+ text-align: center;
+ vertical-align: middle;
+ width: 100px;
+ height: 25px;
+ font-family: "Bitstream Vera", "Vera", "Trebuchet MS", Trebuchet, Tahoma, sans-serif;
+ color: #000000;
+ font-size: 9px;
+ font-weight: bold;
+ white-space: nowrap;
+}
+
+td.lnav { background-image:url("n.gif"); }
+td.lnav[class] { background-image:url("n.png"); }
+td.rnav { background-image:url("n.gif"); }
+td.rnav[class] { background-image:url("n.png"); }
+
+hr {
+ width: 200px;
+ height: 1px;
+ background: #dddddd;
+ border: 0;
+}
+
+p { color: #444444 ;}
+p.tiny, small {
+ color: #888888;
+ font-size: 0.5em;
+}
+
+h1 {
+ text-align: center;
+ font-size: 1.3em;
+}
+
+h2 { font-size: 1.1em; }
+h3 { font-size: 0.9em; }
+
+span.keyword { color: #008000; }
+span.keywordtype { color: #604020; }
+span.keywordflow { color: #e08000; }
+span.comment { color: #800000; }
+span.preprocessor { color: #806020; }
+span.stringliteral { color: #002080; }
+span.charliteral { color: #008080; }
+
+a:link {
+ color: #445566;
+ text-decoration: underline;
+}
+
+a:visited {
+ color: #667788;
+ text-decoration: underline;
+}
+
+a:active {
+ color: #88cccc;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #112266;
+ text-decoration: underline;
+}
+
+a.nav {
+ text-decoration: none;
+ display: block;
+}
+
+a.nav:link, a.nav:visited { color: #888888; }
+a.nav:active { color: #000000; }
+a.nav:hover { color: #444444; }
+a.code:link, a.code:visited { text-decoration: none; }
+
+div.fragment {
+ font-size: 1em;
+ border: 1px dotted #cccccc;
+ background-color: #ffffff;
+ text-align: left;
+ vertical-align: middle;
+ padding: 2px;
+ margin-left: 25px;
+ margin-right: 25px;
+ overflow: auto;
+}
+
+td.indexkey {
+ font-weight: bold;
+ padding-left: 10px;
+ padding-right: 0;
+ padding-top: 2px;
+ padding-bottom: 0px;
+ margin: 0;
+ margin-top: 2px;
+ margin-bottom: 2px;
+ border: 1px dotted #cccccc;
+ border-right: 0px dotted #cccccc;
+}
+
+td.indexvalue {
+ font-style: italic;
+ padding-right: 10px;
+ padding-left: 0;
+ padding-top: 2px;
+ padding-bottom: 2px;
+ margin: 0;
+ margin-top: 2px;
+ margin-bottom: 2px;
+ border: 1px dotted #cccccc;
+ border-left: 0px dotted #cccccc;
+}
+
+.mdescRight { font-style: italic; }
+.memitem {
+ padding-left: 2px;
+ padding-right: 2px;
+ border: 1px dotted #cccccc;
+ background-color: #ffffff;
+}
+.memname {
+ white-space: nowrap;
+ font-weight: bold;
+}
+.paramname { font-weight: normal; }
+
+div.ah {
+ border: thin solid #888888;
+ font-weight: bold;
+ margin-bottom: 3px;
+ margin-top: 3px;
+}
+
--- /dev/null
+td.md {
+ background-color: #ffffff;
+ font-family: monospace;
+ text-align: left;
+ vertical-align: center;
+ font-size: 10;
+ padding-right : 1px;
+ padding-top : 1px;
+ padding-left : 1px;
+ padding-bottom : 1px;
+ margin-left : 1px;
+ margin-right : 1px;
+ margin-top : 1px;
+ margin-bottom : 1px
+}
+td.mdname {
+ font-family: monospace;
+ text-align: left;
+ vertical-align: center;
+ font-size: 10;
+ padding-right : 1px;
+ padding-top : 1px;
+ padding-left : 1px;
+ padding-bottom : 1px;
+ margin-left : 1px;
+ margin-right : 1px;
+ margin-top : 1px;
+ margin-bottom : 1px
+}
+h1
+{
+ text-align: center;
+ color: #333333
+}
+h2
+{
+ text-align: left;
+ color: #333333
+}
+h3
+{
+ text-align: left;
+ color: #333333
+}
+a:link
+{
+ text-decoration: none;
+ color: #444444;
+ font-weight: bold;
+}
+a:visited
+{
+ text-decoration: none;
+ color: #666666;
+ font-weight: bold;
+}
+a:hover
+{
+ text-decoration: none;
+ color: #000000;
+ font-weight: bold;
+}
+a.nav:link
+{
+ text-decoration: none;
+ color: #444444;
+ font-weight: normal;
+}
+a.nav:visited
+{
+ text-decoration: none;
+ color: #666666;
+ font-weight: normal;
+}
+a.nav:hover
+{
+ text-decoration: none;
+ color: #000000;
+ font-weight: normal;
+}
+a.qindex:link
+{
+ text-decoration: none;
+ color: #444444;
+ font-weight: normal;
+}
+a.qindex:visited
+{
+ text-decoration: none;
+ color: #666666;
+ font-weight: normal;
+}
+a.qindex:hover
+{
+ text-decoration: none;
+ color: #000000;
+ font-weight: normal;
+}
+p
+{
+ color: #000000;
+ font-family: sans-serif;
+ font-size: 10;
+}
+body {
+ background-image: url("hilite.png");
+ background-repeat: no-repeat;
+ background-position: left top;
+ background-color: #dddddd;
+ color: #000000;
+ font-family: sans-serif;
+ padding: 8px;
+ margin: 0;
+}
+div.fragment
+{
+ background-image: url("hilite.png");
+ background-repeat: no-repeat;
+ background-position: left top;
+ border: thin solid #888888;
+ background-color: #eeeeee;
+ padding: 4px;
+ text-align: left;
+ vertical-align: center;
+ font-size: 12;
+}
+hr
+{
+ border: 0;
+ background-color: #000000;
+ width: 80%;
+ height: 1px;
+}
+dl
+{
+ background-image: url("hilite.png");
+ background-repeat: no-repeat;
+ background-position: left top;
+ border: thin solid #aaaaaa;
+ background-color: #eeeeee;
+ padding: 4px;
+ text-align: left;
+ vertical-align: center;
+ font-size: 12;
+}
+em
+{
+ color: #334466;
+ font-family: courier;
+ font-size: 10;
+ font-style: normal;
+}
+
+div.nav
+{
+ border: thin solid #000000;
+ background-color: #ffffff;
+ padding: 1px;
+ text-align: center;
+ vertical-align: center;
+ font-size: 12;
+}
+div.body
+{
+ border: thin solid #000000;
+ background-color: #ffffff;
+ padding: 4px;
+ text-align: left;
+ font-size: 10;
+}
+div.diag
+{
+ border: thin solid #888888;
+ background-color: #eeeeee;
+ padding: 4px;
+ text-align: center;
+ font-size: 8;
+}
--- /dev/null
+ </div>
+ <hr />
+ <p class="tiny">Copyright © Enlightenment.org</p>
+ <p class="tiny">$projectname Documentation Generated: $datetime</p>
+ </body>
+</html>
--- /dev/null
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+<html>
+ <head>
+ <title>$title</title>
+ <link href="e.css" rel="stylesheet" type="text/css" />
+ </head>
+
+<body>
+ <table width="100%" border="0" cellpadding="0" cellspacing="0">
+ <tr>
+ <td class="t" width="50%" valign="top" align="left">
+ <table width="100px" height="100px" border="0" cellpadding="0" cellspacing="0">
+ <tr><td class="lnav"><a class="nav" href="http://web.enlightenment.org/p.php?p=index">Home</a></td></tr>
+ <tr><td class="lnav"><a class="nav" href="http://web.enlightenment.org/p.php?p=news">News</a></td></tr>
+ <tr><td class="lnav"><a class="nav" href="http://web.enlightenment.org/p.php?p=about">About</a></td></tr>
+ <tr><td class="rnav"><a class="nav" href="http://web.enlightenment.org/p.php?p=download">Download</a></td></tr>
+ </table>
+ </td>
+ <td class="tl" width="612" height="250" valign="bottom" align="center">
+ <img src="_.gif" width="612" height="1" />
+ <table border="0" cellpadding="0" cellspacing="4px">
+ <tr>
+ <td class='nav'><a class='nav' href="index.html">Main Page</a></td>
+ <td class="nav"><a class="nav" href="emotion_8c.html">API Reference</a></td>
+ <td class="nav"><a class="nav" href="pages.html">Related Pages</a></td>
+ </tr>
+ </table>
+ <hr />
+ </td>
+ <td class="t" width="50%" valign="top" align="right">
+ <table width="100px" height="100px" border="0" cellpadding="0" cellspacing="0">
+ <tr><td class="rnav"><a class="nav" href="http://web.enlightenment.org/p.php?p=support">Support</a></td></tr>
+ <tr><td class="lnav"><a class="nav" href="http://web.enlightenment.org/p.php?p=contribute">Contribute</a></td></tr>
+ <tr><td class="lnav"><a class="nav" href="http://web.enlightenment.org/p.php?p=contact">Contact</a></td></tr>
+ <tr><td class="lnav"><a class="nav" href="http://trac.enlightenment.org/e">Tracker</a></td></tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+
+ <div class="main">
--- /dev/null
+/**
+@file
+@brief Emotion Media Library
+
+These routines are used for Emotion.
+*/
+
+/**
+
+@mainpage Emotion Library Documentation
+@image html emotion.png
+@version @VERSION@
+@author Carsten Haitzler <raster@rasterman.com>
+@date 2003-2004
+
+@section intro What is Emotion?
+
+A media object library for Evas and Ecore.
+
+@todo Complete documentation of API
+
+*/
--- /dev/null
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: emotion
+Description: Emotion evas based multimedia library
+Requires: evas
+Version: @VERSION@
+Libs: -L${libdir} -lemotion
+Libs.private: @EDJE_LIBS@ @ECORE_LIBS@ @XINE_LIBS@ @GST_LIBS@
+Cflags: -I${includedir}
--- /dev/null
+%define _missing_doc_files_terminate_build 0
+
+Summary: emotion
+Name: @PACKAGE@
+Version: @VERSION@
+Release: 0.%(date '+%Y%m%d')
+License: BSD
+Group: System Environment/Libraries
+URL: http://www.enlightenment.org/
+Source: ftp://ftp.enlightenment.org/pub/emotion/%{name}-%{version}.tar.gz
+Packager: %{?_packager:%{_packager}}%{!?_packager:Michael Jennings <mej@eterm.org>}
+Vendor: %{?_vendorinfo:%{_vendorinfo}}%{!?_vendorinfo:The Enlightenment Project (http://www.enlightenment.org/)}
+Distribution: %{?_distribution:%{_distribution}}%{!?_distribution:%{_vendor}}
+#BuildSuggests: xorg-x11-devel XFree86-devel libX11-devel libxine1-devel xine-lib-devel
+#BuildSuggests: gstreamer-devel gstreamer-plugins-devel gstreamer-plugins-base-devel gstreamer-ffmpeg
+BuildRequires: eet-devel evas-devel edje-devel edje-bin
+BuildRequires: /usr/bin/xine-config
+BuildRoot: %{_tmppath}/%{name}-%{version}-root
+
+%description
+Emotion is a Media Library
+
+%package devel
+Summary: Emotion headers, static libraries, documentation and test programs
+Group: System Environment/Libraries
+Requires: %{name} = %{version}
+Requires: eet-devel evas-devel edje-devel
+
+%description devel
+Headers, static libraries, test programs and documentation for Emotion
+
+%prep
+%setup -q
+
+%build
+%{configure} --prefix=%{_prefix} --enable-static --enable-shared
+%{__make} %{?_smp_mflags} %{?mflags}
+
+%install
+%{__make} %{?mflags_install} DESTDIR=$RPM_BUILD_ROOT install
+test -x `which doxygen` && sh gendoc || :
+
+# Remove useless static modules
+%{__rm} -rf $RPM_BUILD_ROOT%{_libdir}/%{name}/*a
+
+%post
+/sbin/ldconfig || :
+
+%postun
+/sbin/ldconfig || :
+
+%clean
+test "x$RPM_BUILD_ROOT" != "x/" && rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-, root, root)
+%doc AUTHORS COPYING* README
+%dir %{_libdir}/%{name}
+%{_bindir}/%{name}_*
+%{_libdir}/libemotion.so.*
+%{_libdir}/%{name}/*.so
+#%{_libdir}/xine/plugins/*/xineplug_vo_out_emotion.so
+%{_datadir}/%{name}
+
+%files devel
+%defattr(-, root, root)
+%doc doc/html
+%{_includedir}/*.h
+%{_libdir}/libemotion.a
+%{_libdir}/libemotion.la
+%{_libdir}/libemotion.so
+#%{_libdir}/xine/plugins/*/xineplug_vo_out_emotion.a
+#%{_libdir}/xine/plugins/*/xineplug_vo_out_emotion.la
+%{_libdir}/pkgconfig/%{name}.pc
+
+%changelog
--- /dev/null
+#!/bin/sh
+# gendoc - build the Doxygen documentation tarball for emotion.
+# Concatenates the public header(s) onto emotion.c.in so Doxygen has a
+# single annotated input file, regenerates doc/{html,latex,man}, copies
+# the doc images into the HTML output and packs everything into
+# emotion_docs.tar.gz.  Always exits 0.
+cp ./emotion.c.in ./emotion.c
+# append every public header to the doxygen input
+for I in `find ./src/lib -name "Emotion.h" -print`; do
+ cat $I >> ./emotion.c
+done
+# (sources intentionally excluded; API docs live in the header)
+#for I in `find ./src/lib -name "*.c" -print`; do
+# cat $I >> ./emotion.c
+#done
+rm -rf ./doc/html ./doc/latex ./doc/man
+doxygen
+cp doc/img/*.png doc/html/
+cp doc/img/*.gif doc/html/
+rm -f emotion_docs.tar emotion_docs.tar.gz
+tar -cvf emotion_docs.tar doc/html doc/man doc/latex
+gzip -9 emotion_docs.tar
+exit 0
--- /dev/null
+dnl Perform a check for a GStreamer element using gst-inspect
+dnl Thomas Vander Stichele <thomas at apestaart dot org>
+dnl Last modification: 25/01/2005
+
+dnl AM_GST_ELEMENT_CHECK(ELEMENT-NAME, ACTION-IF-FOUND, ACTION-IF-NOT-FOUND)
+
+AC_DEFUN([AM_GST_ELEMENT_CHECK],
+[
+ if test "x$GST_INSPECT" = "x"; then
+ AC_CHECK_PROGS(GST_INSPECT, gst-inspect gst-inspect-0.10, [])
+ fi
+
+ if test "x$GST_INSPECT" != "x"; then
+ AC_MSG_CHECKING(GStreamer element $1)
+ if $GST_INSPECT $1 > /dev/null 2> /dev/null ; then
+ AC_MSG_RESULT(found.)
+ $2
+ else
+ AC_MSG_RESULT(not found.)
+ $3
+ fi
+ fi
+])
--- /dev/null
+Makefile
+Makefile.in
--- /dev/null
+
+MAINTAINERCLEANFILES = Makefile.in
+
+SUBDIRS = lib bin modules
--- /dev/null
+.deps
+.libs
+Makefile
+Makefile.in
+emotion_test
--- /dev/null
+
+MAINTAINERCLEANFILES = Makefile.in
+
+AM_CPPFLAGS = \
+-I$(top_srcdir) \
+-I$(top_srcdir)/src/lib \
+-DPACKAGE_BIN_DIR=\"$(bindir)\" \
+-DPACKAGE_LIB_DIR=\"$(libdir)\" \
+-DPACKAGE_DATA_DIR=\"$(datadir)/$(PACKAGE)\" \
+@EVAS_CFLAGS@ \
+@ECORE_CFLAGS@ \
+@EDJE_CFLAGS@
+
+bin_PROGRAMS = \
+emotion_test
+
+emotion_test_SOURCES = \
+emotion_test_main.c
+
+emotion_test_LDADD = \
+@EVAS_LIBS@ @ECORE_LIBS@ @EDJE_LIBS@ \
+$(top_builddir)/src/lib/libemotion.la
--- /dev/null
+#include "config.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#if defined(HAVE_ECORE_X_H) || defined(HAVE_ECORE_FB_H)
+
+#include <Evas.h>
+#include <Ecore.h>
+#ifndef FB_ONLY
+#include <Ecore_X.h>
+#else
+#include <Ecore_Fb.h>
+#endif
+#include <Ecore_Evas.h>
+#include <Edje.h>
+
+#include "Emotion.h"
+
+typedef struct _Frame_Data Frame_Data;
+
+/* Mouse-drag state for a movable/resizable video frame.
+ * NOTE(review): presumably attached per frame object by code later in
+ * this file (not visible in this chunk) — confirm. */
+struct _Frame_Data
+{
+   unsigned char moving : 1;   /* 1 while the frame is being dragged */
+   unsigned char resizing : 1; /* 1 while the frame is being resized */
+   int           button;       /* mouse button that started the drag */
+   Evas_Coord    x, y;         /* last pointer position */
+};
+
+/* --- forward declarations ------------------------------------------- */
+static int main_start(int argc, char **argv);
+static void main_stop(void);
+static void main_resize(Ecore_Evas *ee);
+static int main_signal_exit(void *data, int ev_type, void *ev);
+static void main_delete_request(Ecore_Evas *ee);
+
+static void bg_setup(void);
+static void bg_resize(Evas_Coord w, Evas_Coord h);
+static void bg_key_down(void *data, Evas * e, Evas_Object * obj, void *event_info);
+
+/* --- file-scope state ----------------------------------------------- */
+static Evas_Object *o_bg = NULL;          /* background edje object */
+
+static double start_time = 0.0;           /* ecore_time_get() at startup */
+static Ecore_Evas *ecore_evas = NULL;     /* the one window/canvas */
+static Evas *evas = NULL;                 /* canvas of ecore_evas */
+static int startw = 800;                  /* initial canvas width */
+static int starth = 600;                  /* initial canvas height */
+
+static Evas_List *video_objs = NULL;      /* all emotion objects created */
+
+/* Parse the command line, create the Ecore_Evas canvas and initialise
+ * Edje.  Options: -g/-geometry/--geometry WxH set the initial size;
+ * -gl, -fb and -xr select the OpenGL-X11, framebuffer and XRender-X11
+ * engines (default: software X11).  Returns 1 on success, -1 on
+ * failure. */
+static int
+main_start(int argc, char **argv)
+{
+   int mode = 0; /* 0=software x11, 1=gl, 2=fb, 3=xrender */
+
+   start_time = ecore_time_get();
+   if (!ecore_init()) return -1;
+   ecore_app_args_set(argc, (const char **)argv);
+   ecore_event_handler_add(ECORE_EVENT_SIGNAL_EXIT, main_signal_exit, NULL);
+   if (!ecore_evas_init()) return -1;
+#ifndef FB_ONLY
+   {
+      int i;
+
+      for (i = 1; i < argc; i++)
+        {
+           /* -g WxH: initial window geometry (needs a following arg) */
+           if (((!strcmp(argv[i], "-g")) ||
+                (!strcmp(argv[i], "-geometry")) ||
+                (!strcmp(argv[i], "--geometry"))) && (i < (argc - 1)))
+             {
+                int n, w, h;
+                char buf[16], buf2[16];
+
+                n = sscanf(argv[i +1], "%10[^x]x%10s", buf, buf2);
+                if (n == 2)
+                  {
+                     w = atoi(buf);
+                     h = atoi(buf2);
+                     startw = w;
+                     starth = h;
+                  }
+                i++;
+             }
+           else if (!strcmp(argv[i], "-gl"))
+             {
+                mode = 1;
+             }
+           else if (!strcmp(argv[i], "-fb"))
+             {
+                mode = 2;
+             }
+           else if (!strcmp(argv[i], "-xr"))
+             {
+                mode = 3;
+             }
+        }
+   }
+#if HAVE_ECORE_EVAS_X
+   if (mode == 0)
+     ecore_evas = ecore_evas_software_x11_new(NULL, 0, 0, 0, startw, starth);
+#endif
+#if HAVE_ECORE_EVAS_X11_GL
+   if (mode == 1)
+     ecore_evas = ecore_evas_gl_x11_new(NULL, 0, 0, 0, startw, starth);
+#endif
+#if HAVE_ECORE_EVAS_FB
+   if (mode == 2)
+     ecore_evas = ecore_evas_fb_new(NULL, 0, startw, starth);
+#endif
+   /* NOTE(review): unlike the engines above, this branch has no
+    * feature #if guard, so builds against an Ecore_Evas without
+    * ecore_evas_xrender_x11_new() fail to link — confirm. */
+   if (mode == 3)
+     ecore_evas = ecore_evas_xrender_x11_new(NULL, 0, 0, 0, startw, starth);
+
+#else
+   /* FB_ONLY build: fixed 240x320 canvas rotated 270 degrees */
+   startw = 240;
+   starth = 320;
+   ecore_evas = ecore_evas_fb_new(NULL, 270, startw, starth);
+#endif
+   if (!ecore_evas) return -1;
+   ecore_evas_callback_delete_request_set(ecore_evas, main_delete_request);
+   ecore_evas_callback_resize_set(ecore_evas, main_resize);
+   ecore_evas_title_set(ecore_evas, "Evas Media Test Program");
+   ecore_evas_name_class_set(ecore_evas, "evas_media_test", "main");
+   ecore_evas_show(ecore_evas);
+   evas = ecore_evas_get(ecore_evas);
+   evas_image_cache_set(evas, 8 * 1024 * 1024);
+   evas_font_cache_set(evas, 1 * 1024 * 1024);
+   evas_font_path_append(evas, PACKAGE_DATA_DIR"/data/fonts");
+
+   edje_init();
+   edje_frametime_set(1.0 / 30.0);
+   return 1;
+}
+
+/* Tear everything down: delete all video objects (via
+ * main_signal_exit) then shut down edje/ecore_evas/ecore in reverse
+ * init order. */
+static void
+main_stop(void)
+{
+   main_signal_exit(NULL, 0, NULL);
+   edje_shutdown();
+   ecore_evas_shutdown();
+   ecore_shutdown();
+}
+
+/* Ecore_Evas resize callback: keep the background sized to the
+ * current viewport. */
+static void
+main_resize(Ecore_Evas *ee)
+{
+   Evas_Coord w, h;
+
+   evas_output_viewport_get(evas, NULL, NULL, &w, &h);
+   bg_resize(w, h);
+}
+
+/* SIGINT/SIGTERM handler (also called directly from main_stop with
+ * NULL arguments): quit the main loop and delete every tracked video
+ * object.  Returns 1 so ecore keeps the handler installed. */
+static int
+main_signal_exit(void *data, int ev_type, void *ev)
+{
+   ecore_main_loop_quit();
+   while (video_objs)
+     {
+        printf("del obj!\n");
+        evas_object_del(video_objs->data);
+        video_objs = evas_list_remove_list(video_objs, video_objs);
+        printf("done\n");
+     }
+   return 1;
+}
+
+/* Window-manager close-button callback: just quit the main loop. */
+static void
+main_delete_request(Ecore_Evas *ee)
+{
+   ecore_main_loop_quit();
+}
+
+/* Create the "background" edje object from the theme, size it to the
+ * initial canvas, push it behind everything, give it keyboard focus
+ * and hook up the key handler.
+ * NOTE(review): the prototype above declares this static but the
+ * definition omits the keyword (linkage still internal per the prior
+ * declaration) — consider adding static here for consistency. */
+void
+bg_setup(void)
+{
+   Evas_Object *o;
+
+   o = edje_object_add(evas);
+   edje_object_file_set(o, PACKAGE_DATA_DIR"/data/theme.edj", "background");
+   evas_object_move(o, 0, 0);
+   evas_object_resize(o, startw, starth);
+   evas_object_layer_set(o, -999);
+   evas_object_show(o);
+   evas_object_focus_set(o, 1);
+   evas_object_event_callback_add(o, EVAS_CALLBACK_KEY_DOWN, bg_key_down, NULL);
+   o_bg = o;
+}
+
+/* Resize the background object (see NOTE on bg_setup re: missing
+ * static on the definition). */
+void
+bg_resize(Evas_Coord w, Evas_Coord h)
+{
+   evas_object_resize(o_bg, w, h);
+}
+
+/* Forward an Emotion input event to every video object we track. */
+static void
+broadcast_event(Emotion_Event ev)
+{
+   Evas_List *item;
+
+   for (item = video_objs; item != NULL; item = item->next)
+     emotion_object_event_simple_send((Evas_Object *)item->data, ev);
+}
+
+/* Keyboard dispatcher attached to the background object — drives the
+ * whole test UI:
+ *   Escape, q        quit (q also deletes all video objects)
+ *   arrows/Return/m/Prior/Next/0-9/-  broadcast Emotion input events
+ *   [ / ]            lower / raise audio volume by 0.1
+ *   v / a            toggle video / audio mute
+ *   i                print channel counts and seekability
+ *   f / d / s / b    toggle fullscreen / avoid-damage / shaped /
+ *                    borderless on the Ecore_Evas */
+static void
+bg_key_down(void *data, Evas * e, Evas_Object * obj, void *event_info)
+{
+   Evas_Event_Key_Down *ev;
+
+   ev = (Evas_Event_Key_Down *)event_info;
+   if (!strcmp(ev->keyname, "Escape"))
+     ecore_main_loop_quit();
+   else if (!strcmp(ev->keyname, "Up"))
+     broadcast_event(EMOTION_EVENT_UP);
+   else if (!strcmp(ev->keyname, "Down"))
+     broadcast_event(EMOTION_EVENT_DOWN);
+   else if (!strcmp(ev->keyname, "Left"))
+     broadcast_event(EMOTION_EVENT_LEFT);
+   else if (!strcmp(ev->keyname, "Right"))
+     broadcast_event(EMOTION_EVENT_RIGHT);
+   else if (!strcmp(ev->keyname, "Return"))
+     broadcast_event(EMOTION_EVENT_SELECT);
+   else if (!strcmp(ev->keyname, "m"))
+     broadcast_event(EMOTION_EVENT_MENU1);
+   else if (!strcmp(ev->keyname, "Prior"))
+     broadcast_event(EMOTION_EVENT_PREV);
+   else if (!strcmp(ev->keyname, "Next"))
+     broadcast_event(EMOTION_EVENT_NEXT);
+   else if (!strcmp(ev->keyname, "0"))
+     broadcast_event(EMOTION_EVENT_0);
+   else if (!strcmp(ev->keyname, "1"))
+     broadcast_event(EMOTION_EVENT_1);
+   else if (!strcmp(ev->keyname, "2"))
+     broadcast_event(EMOTION_EVENT_2);
+   else if (!strcmp(ev->keyname, "3"))
+     broadcast_event(EMOTION_EVENT_3);
+   else if (!strcmp(ev->keyname, "4"))
+     broadcast_event(EMOTION_EVENT_4);
+   else if (!strcmp(ev->keyname, "5"))
+     broadcast_event(EMOTION_EVENT_5);
+   else if (!strcmp(ev->keyname, "6"))
+     broadcast_event(EMOTION_EVENT_6);
+   else if (!strcmp(ev->keyname, "7"))
+     broadcast_event(EMOTION_EVENT_7);
+   else if (!strcmp(ev->keyname, "8"))
+     broadcast_event(EMOTION_EVENT_8);
+   else if (!strcmp(ev->keyname, "9"))
+     broadcast_event(EMOTION_EVENT_9);
+   else if (!strcmp(ev->keyname, "-"))
+     broadcast_event(EMOTION_EVENT_10);
+   else if (!strcmp(ev->keyname, "bracketleft"))
+     {
+        /* volume down on every video object */
+        Evas_List *l;
+
+        for (l = video_objs; l; l = l->next)
+          {
+             Evas_Object *obj;
+
+             obj = l->data;
+             emotion_object_audio_volume_set(obj, emotion_object_audio_volume_get(obj) - 0.1);
+          }
+     }
+   else if (!strcmp(ev->keyname, "bracketright"))
+     {
+        /* volume up on every video object */
+        Evas_List *l;
+
+        for (l = video_objs; l; l = l->next)
+          {
+             Evas_Object *obj;
+
+             obj = l->data;
+             emotion_object_audio_volume_set(obj, emotion_object_audio_volume_get(obj) + 0.1);
+          }
+     }
+   else if (!strcmp(ev->keyname, "v"))
+     {
+        /* toggle video mute */
+        Evas_List *l;
+
+        for (l = video_objs; l; l = l->next)
+          {
+             Evas_Object *obj;
+
+             obj = l->data;
+             if (emotion_object_video_mute_get(obj))
+               emotion_object_video_mute_set(obj, 0);
+             else
+               emotion_object_video_mute_set(obj, 1);
+          }
+     }
+   else if (!strcmp(ev->keyname, "a"))
+     {
+        /* toggle audio mute */
+        Evas_List *l;
+
+        for (l = video_objs; l; l = l->next)
+          {
+             Evas_Object *obj;
+
+             obj = l->data;
+             if (emotion_object_audio_mute_get(obj))
+               {
+                  emotion_object_audio_mute_set(obj, 0);
+                  printf("unmute\n");
+               }
+             else
+               {
+                  emotion_object_audio_mute_set(obj, 1);
+                  printf("mute\n");
+               }
+          }
+     }
+   else if (!strcmp(ev->keyname, "i"))
+     {
+        /* dump stream info for every video object */
+        Evas_List *l;
+
+        for (l = video_objs; l; l = l->next)
+          {
+             Evas_Object *obj;
+
+             obj = l->data;
+             printf("audio channels: %i\n", emotion_object_audio_channel_count(obj));
+             printf("video channels: %i\n", emotion_object_video_channel_count(obj));
+             printf("spu channels: %i\n", emotion_object_spu_channel_count(obj));
+             printf("seekable: %i\n", emotion_object_seekable_get(obj));
+          }
+     }
+   else if (!strcmp(ev->keyname, "f"))
+     {
+        if (!ecore_evas_fullscreen_get(ecore_evas))
+          ecore_evas_fullscreen_set(ecore_evas, 1);
+        else
+          ecore_evas_fullscreen_set(ecore_evas, 0);
+     }
+   else if (!strcmp(ev->keyname, "d"))
+     {
+        if (!ecore_evas_avoid_damage_get(ecore_evas))
+          ecore_evas_avoid_damage_set(ecore_evas, 1);
+        else
+          ecore_evas_avoid_damage_set(ecore_evas, 0);
+     }
+   else if (!strcmp(ev->keyname, "s"))
+     {
+        /* shaped window: hide the background so the shape shows */
+        if (!ecore_evas_shaped_get(ecore_evas))
+          {
+             ecore_evas_shaped_set(ecore_evas, 1);
+             evas_object_hide(o_bg);
+          }
+        else
+          {
+             ecore_evas_shaped_set(ecore_evas, 0);
+             evas_object_show(o_bg);
+          }
+     }
+   else if (!strcmp(ev->keyname, "b"))
+     {
+        if (!ecore_evas_borderless_get(ecore_evas))
+          ecore_evas_borderless_set(ecore_evas, 1);
+        else
+          ecore_evas_borderless_set(ecore_evas, 0);
+     }
+   else if (!strcmp(ev->keyname, "q"))
+     {
+        ecore_main_loop_quit();
+        while (video_objs)
+          {
+             printf("del obj!\n");
+             evas_object_del(video_objs->data);
+             video_objs = evas_list_remove_list(video_objs, video_objs);
+             printf("done\n");
+          }
+     }
+   else
+     {
+        printf("UNHANDLED: %s\n", ev->keyname);
+     }
+}
+
+/* Mouse-in callback on a video object; highlight code is disabled. */
+static void
+video_obj_in_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+// evas_object_color_set(obj, 255, 255, 255, 100);
+}
+
+/* Mouse-out callback on a video object; highlight code is disabled. */
+static void
+video_obj_out_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+// evas_object_color_set(obj, 255, 255, 255, 200);
+}
+
+/* Mouse-down on a video object: tint it and raise it to the top.
+ * (local `e` is assigned but otherwise unused) */
+static void
+video_obj_down_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+   Evas_Event_Mouse_Down *e;
+
+   e = event_info;
+   evas_object_color_set(obj, 200, 50, 40, 200);
+   evas_object_raise(obj);
+}
+
+/* Mouse-up on a video object: restore the translucent grey tint.
+ * (local `e` is assigned but otherwise unused) */
+static void
+video_obj_up_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+   Evas_Event_Mouse_Up *e;
+
+   e = event_info;
+   evas_object_color_set(obj, 100, 100, 100, 100);
+}
+
+/* Mouse-move on a video object: drag with button 1 (mask 0x1) to move
+ * it, drag with button 3 (mask 0x4) to resize it, following the
+ * pointer delta since the previous event. */
+static void
+video_obj_move_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+   Evas_Event_Mouse_Move *e;
+
+   e = event_info;
+   if (e->buttons & 0x1)
+     {
+        Evas_Coord x, y;
+
+        evas_object_geometry_get(obj, &x, &y, NULL, NULL);
+        x += e->cur.canvas.x - e->prev.canvas.x;
+        y += e->cur.canvas.y - e->prev.canvas.y;
+        evas_object_move(obj, x, y);
+     }
+   else if (e->buttons & 0x4)
+     {
+        Evas_Coord w, h;
+
+        evas_object_geometry_get(obj, NULL, NULL, &w, &h);
+        w += e->cur.canvas.x - e->prev.canvas.x;
+        h += e->cur.canvas.y - e->prev.canvas.y;
+        evas_object_resize(obj, w, h);
+     }
+}
+
+/* "frame_decode" smart callback: on every decoded frame, update the
+ * "video_progress" drag position and the "H:MM:SS.hh / H:MM:SS" text
+ * on the edje frame object passed as `data`.
+ * Fix: the seconds terms previously omitted the hours component
+ * (ls = len - lm*60, ps = pos - pm*60), so any position/length past
+ * 59:59 printed garbage seconds and hundredths; also avoid dividing
+ * by a zero length (e.g. live streams). */
+static void
+video_obj_frame_decode_cb(void *data, Evas_Object *obj, void *event_info)
+{
+   Evas_Object *oe;
+   double pos, len;
+   char buf[256];
+   int ph, pm, ps, pf, lh, lm, ls;
+
+   oe = data;
+   pos = emotion_object_position_get(obj);
+   len = emotion_object_play_length_get(obj);
+// printf("%3.3f, %3.3f\n", pos, len);
+   edje_object_part_drag_value_set(oe, "video_progress",
+                                   (len > 0.0) ? (pos / len) : 0.0, 0.0);
+   lh = len / 3600;
+   lm = len / 60 - (lh * 60);
+   ls = len - (lh * 3600) - (lm * 60);
+   ph = pos / 3600;
+   pm = pos / 60 - (ph * 60);
+   ps = pos - (ph * 3600) - (pm * 60);
+   /* hundredths of a second beyond the whole seconds */
+   pf = pos * 100 - (ps * 100) - (pm * 60 * 100) - (ph * 60 * 60 * 100);
+   snprintf(buf, sizeof(buf), "%i:%02i:%02i.%02i / %i:%02i:%02i",
+            ph, pm, ps, pf, lh, lm, ls);
+   edje_object_part_text_set(oe, "video_progress_txt", buf);
+
+   if (0) /* flip to 1 for a crude FPS meter */
+     {
+        double t;
+        static double pt = 0.0;
+        t = ecore_time_get();
+        printf("FPS: %3.3f\n", 1.0 / (t - pt));
+        pt = t;
+     }
+}
+
+/* "frame_resize" smart callback: the video source size/aspect changed.
+ * Derive the width from the height and aspect ratio (when known),
+ * force that as the swallow's min size so the edje frame recalculates
+ * its own min size, resize the frame, then drop the constraint and
+ * re-swallow so the video can scale freely afterwards. */
+static void
+video_obj_frame_resize_cb(void *data, Evas_Object *obj, void *event_info)
+{
+   Evas_Object *oe;
+   int iw, ih;
+   Evas_Coord w, h;
+   double ratio;
+
+   oe = data;
+   emotion_object_size_get(obj, &iw, &ih);
+   ratio = emotion_object_ratio_get(obj);
+   printf("HANDLE %ix%i @ %3.3f\n", iw, ih, ratio);
+   if (ratio > 0.0) iw = (ih * ratio) + 0.5; /* round to nearest pixel */
+   edje_extern_object_min_size_set(obj, iw, ih);
+   edje_object_part_swallow(oe, "video_swallow", obj);
+   edje_object_size_min_calc(oe, &w, &h);
+   evas_object_resize(oe, w, h);
+   edje_extern_object_min_size_set(obj, 0, 0);
+   edje_object_part_swallow(oe, "video_swallow", obj);
+}
+
+/* Smart callback for "length_change": refresh the progress bar and time
+ * text when the stream length becomes known or changes. data is the
+ * edje controller object. */
+static void
+video_obj_length_change_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+ double pos, len;
+ char buf[256];
+ int ph, pm, ps, pf, lh, lm, ls;
+
+ oe = data;
+ pos = emotion_object_position_get(obj);
+ len = emotion_object_play_length_get(obj);
+ /* FIX: guard against division by zero while the length is unknown */
+ if (len > 0.0)
+ edje_object_part_drag_value_set(oe, "video_progress", pos / len, 0.0);
+ lh = len / 3600;
+ lm = len / 60 - (lh * 60);
+ /* FIX: subtract the hour component as well, otherwise the seconds
+ * field is wrong for streams longer than an hour */
+ ls = len - (lh * 3600) - (lm * 60);
+ ph = pos / 3600;
+ pm = pos / 60 - (ph * 60);
+ ps = pos - (ph * 3600) - (pm * 60);
+ pf = pos * 100 - (ps * 100) - (pm * 60 * 100) - (ph * 60 * 60 * 100);
+ snprintf(buf, sizeof(buf), "%i:%02i:%02i.%02i / %i:%02i:%02i",
+ ph, pm, ps, pf, lh, lm, ls);
+ edje_object_part_text_set(oe, "video_progress_txt", buf);
+}
+
+/* Smart callback for "decode_stop": loop playback by rewinding to the
+ * start and playing again. */
+static void
+video_obj_stopped_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+
+ oe = data;
+ printf("video stopped!\n");
+ emotion_object_position_set(obj, 0.0);
+ emotion_object_play_set(obj, 1);
+}
+
+/* "channels_change" callback: log the new audio/video/spu counts. */
+static void
+video_obj_channels_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+
+ oe = data;
+ printf("channels changed: [AUD %i][VID %i][SPU %i]\n",
+ emotion_object_audio_channel_count(obj),
+ emotion_object_video_channel_count(obj),
+ emotion_object_spu_channel_count(obj));
+}
+
+/* "title_change" callback: log the stream's new title. */
+static void
+video_obj_title_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+
+ oe = data;
+ printf("video title to: \"%s\"\n", emotion_object_title_get(obj));
+}
+
+/* "progress_change" callback: log the progress info string and status. */
+static void
+video_obj_progress_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+
+ oe = data;
+ printf("progress: \"%s\" %3.3f\n",
+ emotion_object_progress_info_get(obj),
+ emotion_object_progress_status_get(obj));
+}
+
+/* "ref_change" callback: log the new file reference and number. */
+static void
+video_obj_ref_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+
+ oe = data;
+ printf("video ref to: \"%s\" %i\n",
+ emotion_object_ref_file_get(obj),
+ emotion_object_ref_num_get(obj));
+}
+
+/* "button_num_change" callback: log how many spu buttons exist. */
+static void
+video_obj_button_num_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+
+ oe = data;
+ printf("video spu buttons to: %i\n",
+ emotion_object_spu_button_count_get(obj));
+}
+
+/* "button_change" callback: log the currently selected spu button. */
+static void
+video_obj_button_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+
+ oe = data;
+ printf("video selected spu button: %i\n",
+ emotion_object_spu_button_get(obj));
+}
+
+
+
+/* Edje "video_control"/"play" signal: start playback and reflect the
+ * state back into the controller theme. data is the emotion object,
+ * o the edje controller that emitted the signal. */
+static void
+video_obj_signal_play_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+
+ ov = data;
+ emotion_object_play_set(ov, 1);
+ edje_object_signal_emit(o, "video_state", "play");
+}
+
+/* Edje "video_control"/"pause" signal: pause playback. */
+static void
+video_obj_signal_pause_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+
+ ov = data;
+ emotion_object_play_set(ov, 0);
+ edje_object_signal_emit(o, "video_state", "pause");
+}
+
+/* Edje "video_control"/"stop" signal: stop and rewind to the start. */
+static void
+video_obj_signal_stop_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+
+ ov = data;
+ emotion_object_play_set(ov, 0);
+ emotion_object_position_set(ov, 0);
+ edje_object_signal_emit(o, "video_state", "stop");
+}
+
+/* Edje "drag" on "video_progress": seek to the dragged horizontal
+ * fraction of the total play length. data is the emotion object. */
+static void
+video_obj_signal_jump_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+ double len;
+ double x, y;
+
+ ov = data;
+ edje_object_part_drag_value_get(o, source, &x, &y);
+ len = emotion_object_play_length_get(ov);
+ emotion_object_position_set(ov, x * len);
+}
+
+/* Edje "drag" on "video_speed": despite the name this does not change
+ * playback speed - it maps the vertical drag value to 0..255 and sets
+ * it as the video object's colour and alpha (a fade effect). */
+static void
+video_obj_signal_speed_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+ double spd;
+ double x, y;
+ char buf[256];
+
+ ov = data;
+ edje_object_part_drag_value_get(o, source, &x, &y);
+ spd = 255 * y;
+ evas_object_color_set(ov, spd, spd, spd, spd);
+ snprintf(buf, sizeof(buf), "%.0f", spd);
+ edje_object_part_text_set(o, "video_speed_txt", buf);
+}
+
+/* Edje "frame_move"/"start": begin dragging the controller frame and
+ * remember the grab point in the per-frame state.
+ * NOTE(review): fd is assumed non-NULL (attached in init_video_object)
+ * - verify no other edje group reuses these signal names. */
+static void
+video_obj_signal_frame_move_start_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+ Evas_Coord x, y;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->moving = 1;
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ fd->x = x;
+ fd->y = y;
+ evas_object_raise(o);
+}
+
+/* Edje "frame_move"/"stop": end the frame drag. */
+static void
+video_obj_signal_frame_move_stop_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->moving = 0;
+}
+
+/* Edje "frame_resize"/"start": begin resizing the controller frame. */
+static void
+video_obj_signal_frame_resize_start_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+ Evas_Coord x, y;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->resizing = 1;
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ fd->x = x;
+ fd->y = y;
+ evas_object_raise(o);
+}
+
+/* Edje "frame_resize"/"stop": end the frame resize. */
+static void
+video_obj_signal_frame_resize_stop_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->resizing = 0;
+}
+
+/* Edje "mouse,move" over the frame: while a drag or resize is in
+ * progress, apply the pointer delta to the frame's position or size. */
+static void
+video_obj_signal_frame_move_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+
+ fd = evas_object_data_get(o, "frame_data");
+ if (fd->moving)
+ {
+ Evas_Coord x, y, ox, oy;
+
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ evas_object_geometry_get(o, &ox, &oy, NULL, NULL);
+ evas_object_move(o, ox + (x - fd->x), oy + (y - fd->y));
+ fd->x = x;
+ fd->y = y;
+ }
+ else if (fd->resizing)
+ {
+ Evas_Coord x, y, ow, oh;
+
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ evas_object_geometry_get(o, NULL, NULL, &ow, &oh);
+ evas_object_resize(o, ow + (x - fd->x), oh + (y - fd->y));
+ fd->x = x;
+ fd->y = y;
+ }
+}
+
+
+/* Build one playing video: create an emotion object for 'filename'
+ * using decode module 'module_filename' ("xine" or "gstreamer"),
+ * swallow it into an edje "video_controller" frame and hook up all
+ * smart/edje callbacks. */
+static void
+init_video_object(char *module_filename, char *filename)
+{
+ Evas_Object *o, *oe;
+ int iw, ih;
+ Evas_Coord w, h;
+ Frame_Data *fd;
+
+
+/* basic video object setup */
+ o = emotion_object_add(evas);
+ if (!emotion_object_init(o, module_filename))
+ {
+ /* FIX: don't leak the object if the module cannot be loaded */
+ evas_object_del(o);
+ return;
+ }
+ emotion_object_file_set(o, filename);
+ emotion_object_play_set(o, 1);
+ evas_object_move(o, 0, 0);
+ evas_object_resize(o, 320, 240);
+ emotion_object_smooth_scale_set(o, 1);
+ evas_object_show(o);
+/* end basic video setup. all the rest here is just to be fancy */
+
+
+ /* FIX: allocate (and check) the frame drag/resize state up front so
+ * an allocation failure can bail out before the object is tracked */
+ fd = calloc(1, sizeof(Frame_Data));
+ if (!fd)
+ {
+ evas_object_del(o);
+ return;
+ }
+
+ video_objs = evas_list_append(video_objs, o);
+
+ emotion_object_size_get(o, &iw, &ih);
+ w = iw; h = ih;
+
+ oe = edje_object_add(evas);
+ evas_object_data_set(oe, "frame_data", fd);
+ edje_object_file_set(oe, PACKAGE_DATA_DIR"/data/theme.edj", "video_controller");
+ edje_extern_object_min_size_set(o, w, h);
+ edje_object_part_swallow(oe, "video_swallow", o);
+ edje_object_size_min_calc(oe, &w, &h);
+// evas_object_move(oe, rand() % (int)(startw - w), rand() % (int)(starth - h));
+ evas_object_move(oe, 0, 0);
+ evas_object_resize(oe, w, h);
+ edje_extern_object_min_size_set(o, 0, 0);
+ edje_object_part_swallow(oe, "video_swallow", o);
+
+ evas_object_smart_callback_add(o, "frame_decode", video_obj_frame_decode_cb, oe);
+ evas_object_smart_callback_add(o, "frame_resize", video_obj_frame_resize_cb, oe);
+ evas_object_smart_callback_add(o, "length_change", video_obj_length_change_cb, oe);
+
+ evas_object_smart_callback_add(o, "decode_stop", video_obj_stopped_cb, oe);
+ evas_object_smart_callback_add(o, "channels_change", video_obj_channels_cb, oe);
+ evas_object_smart_callback_add(o, "title_change", video_obj_title_cb, oe);
+ evas_object_smart_callback_add(o, "progress_change", video_obj_progress_cb, oe);
+ evas_object_smart_callback_add(o, "ref_change", video_obj_ref_cb, oe);
+ evas_object_smart_callback_add(o, "button_num_change", video_obj_button_num_cb, oe);
+ evas_object_smart_callback_add(o, "button_change", video_obj_button_cb, oe);
+
+ edje_object_signal_callback_add(oe, "video_control", "play", video_obj_signal_play_cb, o);
+ edje_object_signal_callback_add(oe, "video_control", "pause", video_obj_signal_pause_cb, o);
+ edje_object_signal_callback_add(oe, "video_control", "stop", video_obj_signal_stop_cb, o);
+ edje_object_signal_callback_add(oe, "drag", "video_progress", video_obj_signal_jump_cb, o);
+ edje_object_signal_callback_add(oe, "drag", "video_speed", video_obj_signal_speed_cb, o);
+
+ edje_object_signal_callback_add(oe, "frame_move", "start", video_obj_signal_frame_move_start_cb, oe);
+ edje_object_signal_callback_add(oe, "frame_move", "stop", video_obj_signal_frame_move_stop_cb, oe);
+ edje_object_signal_callback_add(oe, "frame_resize", "start", video_obj_signal_frame_resize_start_cb, oe);
+ edje_object_signal_callback_add(oe, "frame_resize", "stop", video_obj_signal_frame_resize_stop_cb, oe);
+ edje_object_signal_callback_add(oe, "mouse,move", "*", video_obj_signal_frame_move_cb, oe);
+
+ edje_object_part_drag_value_set(oe, "video_speed", 0.0, 1.0);
+ edje_object_part_text_set(oe, "video_speed_txt", "1.0");
+
+ /* FIX: emit the state signal on the edje controller (oe) - emitting
+ * an edje signal on the emotion object (o) has no effect */
+ edje_object_signal_emit(oe, "video_state", "play");
+
+ evas_object_show(oe);
+}
+
+/* Idle-enterer hook kept around for FPS measurement (the printf is
+ * disabled). Returning 1 keeps the idler registered. */
+static int
+enter_idle(void *data)
+{
+ static double first_time = 0.0;
+ static int frame_count = 0;
+ double now;
+
+ now = ecore_time_get();
+ if (frame_count == 0) first_time = now;
+ frame_count++;
+ if (frame_count == 100)
+ {
+// printf("FPS: %3.3f\n", frame_count / (now - first_time));
+ frame_count = 0;
+ }
+ return 1;
+}
+
+/* Entry point: main_start()/bg_setup() (shared test scaffolding) set up
+ * the canvas, then each non-option argument becomes a playing video. */
+int
+main(int argc, char **argv)
+{
+ char *module_filename;
+ int i;
+
+ if (main_start(argc, argv) < 1) return -1;
+ bg_setup();
+
+ /* default decode module; overridden by -xine / -gstreamer below */
+ module_filename = "xine";
+
+ for (i = 1; i < argc; i++)
+ {
+ /* -g takes a WxH value: skip it (presumably consumed by
+ * main_start() - verify) */
+ if (((!strcmp(argv[i], "-g")) ||
+ (!strcmp(argv[i], "-geometry")) ||
+ (!strcmp(argv[i], "--geometry"))) && (i < (argc - 1)))
+ i++;
+ else if (((!strcmp(argv[i], "-h")) ||
+ (!strcmp(argv[i], "-help")) ||
+ (!strcmp(argv[i], "--help"))))
+ {
+ printf("Usage:\n");
+ printf(" %s [-gl] [-g WxH] [-xine] [-gstreamer] filename\n", argv[0]);
+ /* NOTE(review): exits -1 even for an explicit help request;
+ * exit(0) would be conventional - confirm before changing */
+ exit(-1);
+ }
+ /* engine flags accepted but ignored here (handled elsewhere) */
+ else if (!strcmp(argv[i], "-gl"))
+ {
+ }
+ else if (!strcmp(argv[i], "-fb"))
+ {
+ }
+ else if (!strcmp(argv[i], "-xr"))
+ {
+ }
+ else if (!strcmp(argv[i], "-xine"))
+ {
+ module_filename = "xine";
+ }
+ else if (!strcmp(argv[i], "-gstreamer"))
+ {
+ module_filename = "gstreamer";
+ }
+ else
+ {
+ /* any other argument is a media file to play */
+ printf ("module : %s\n", module_filename);
+ init_video_object(module_filename, argv[i]);
+ }
+ }
+
+ ecore_idle_enterer_add(enter_idle, NULL);
+
+ ecore_main_loop_begin();
+ main_stop();
+ return 0;
+}
+
+#else
+/* Fallback build when no usable Ecore output backend header was found
+ * at configure time: the test binary just explains why it is disabled. */
+int main()
+{
+ puts("Could not find Ecore_X.h or Ecore_Fb.h so test is disabled");
+ return 0;
+}
+#endif
--- /dev/null
+.deps
+.libs
+Makefile
+Makefile.in
+*.lo
+*.la
--- /dev/null
+#ifndef EMOTION_H
+#define EMOTION_H
+
+#ifdef EAPI
+#undef EAPI
+#endif
+#ifdef WIN32
+# ifdef BUILDING_DLL
+# define EAPI __declspec(dllexport)
+# else
+# define EAPI __declspec(dllimport)
+# endif
+#else
+# ifdef __GNUC__
+# if __GNUC__ >= 4
+# define EAPI __attribute__ ((visibility("default")))
+# else
+# define EAPI
+# endif
+# else
+# define EAPI
+# endif
+#endif
+
+#include <Evas.h>
+
+/* Selectable playback backends, loaded by module name through
+ * emotion_object_init(). */
+enum _Emotion_Module
+{
+ EMOTION_MODULE_XINE,
+ EMOTION_MODULE_GSTREAMER
+};
+
+/* Simple input events forwarded to the decode module via
+ * emotion_object_event_simple_send() (DVD navigation and digit keys). */
+enum _Emotion_Event
+{
+ EMOTION_EVENT_MENU1, // Escape Menu
+ EMOTION_EVENT_MENU2, // Title Menu
+ EMOTION_EVENT_MENU3, // Root Menu
+ EMOTION_EVENT_MENU4, // Subpicture Menu
+ EMOTION_EVENT_MENU5, // Audio Menu
+ EMOTION_EVENT_MENU6, // Angle Menu
+ EMOTION_EVENT_MENU7, // Part Menu
+ EMOTION_EVENT_UP,
+ EMOTION_EVENT_DOWN,
+ EMOTION_EVENT_LEFT,
+ EMOTION_EVENT_RIGHT,
+ EMOTION_EVENT_SELECT,
+ EMOTION_EVENT_NEXT,
+ EMOTION_EVENT_PREV,
+ EMOTION_EVENT_ANGLE_NEXT,
+ EMOTION_EVENT_ANGLE_PREV,
+ EMOTION_EVENT_FORCE,
+ EMOTION_EVENT_0,
+ EMOTION_EVENT_1,
+ EMOTION_EVENT_2,
+ EMOTION_EVENT_3,
+ EMOTION_EVENT_4,
+ EMOTION_EVENT_5,
+ EMOTION_EVENT_6,
+ EMOTION_EVENT_7,
+ EMOTION_EVENT_8,
+ EMOTION_EVENT_9,
+ EMOTION_EVENT_10
+};
+
+/* Track metadata fields queryable via emotion_object_meta_info_get(). */
+enum _Emotion_Meta_Info
+{
+ EMOTION_META_INFO_TRACK_TITLE,
+ EMOTION_META_INFO_TRACK_ARTIST,
+ EMOTION_META_INFO_TRACK_ALBUM,
+ EMOTION_META_INFO_TRACK_YEAR,
+ EMOTION_META_INFO_TRACK_GENRE,
+ EMOTION_META_INFO_TRACK_COMMENT,
+ EMOTION_META_INFO_TRACK_DISC_ID,
+ EMOTION_META_INFO_TRACK_COUNT
+};
+
+typedef enum _Emotion_Module Emotion_Module;
+typedef enum _Emotion_Event Emotion_Event;
+typedef enum _Emotion_Meta_Info Emotion_Meta_Info;
+
+#define EMOTION_CHANNEL_AUTO -1
+#define EMOTION_CHANNEL_DEFAULT 0
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* api calls available */
+EAPI Evas_Object *emotion_object_add (Evas *evas);
+EAPI void emotion_object_module_option_set (Evas_Object *obj, const char *opt, const char *val);
+EAPI Evas_Bool emotion_object_init (Evas_Object *obj, const char *module_filename);
+EAPI void emotion_object_file_set (Evas_Object *obj, const char *filename);
+EAPI const char *emotion_object_file_get (Evas_Object *obj);
+EAPI void emotion_object_play_set (Evas_Object *obj, Evas_Bool play);
+EAPI Evas_Bool emotion_object_play_get (Evas_Object *obj);
+EAPI void emotion_object_position_set (Evas_Object *obj, double sec);
+EAPI double emotion_object_position_get (Evas_Object *obj);
+EAPI Evas_Bool emotion_object_video_handled_get (Evas_Object *obj);
+EAPI Evas_Bool emotion_object_audio_handled_get (Evas_Object *obj);
+EAPI Evas_Bool emotion_object_seekable_get (Evas_Object *obj);
+EAPI double emotion_object_play_length_get (Evas_Object *obj);
+EAPI void emotion_object_size_get (Evas_Object *obj, int *iw, int *ih);
+EAPI void emotion_object_smooth_scale_set (Evas_Object *obj, Evas_Bool smooth);
+EAPI Evas_Bool emotion_object_smooth_scale_get (Evas_Object *obj);
+EAPI double emotion_object_ratio_get (Evas_Object *obj);
+EAPI void emotion_object_event_simple_send (Evas_Object *obj, Emotion_Event ev);
+EAPI void emotion_object_audio_volume_set (Evas_Object *obj, double vol);
+EAPI double emotion_object_audio_volume_get (Evas_Object *obj);
+EAPI void emotion_object_audio_mute_set (Evas_Object *obj, Evas_Bool mute);
+EAPI Evas_Bool emotion_object_audio_mute_get (Evas_Object *obj);
+EAPI int emotion_object_audio_channel_count (Evas_Object *obj);
+EAPI const char *emotion_object_audio_channel_name_get(Evas_Object *obj, int channel);
+EAPI void emotion_object_audio_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_audio_channel_get (Evas_Object *obj);
+EAPI void emotion_object_video_mute_set (Evas_Object *obj, Evas_Bool mute);
+EAPI Evas_Bool emotion_object_video_mute_get (Evas_Object *obj);
+EAPI int emotion_object_video_channel_count (Evas_Object *obj);
+EAPI const char *emotion_object_video_channel_name_get(Evas_Object *obj, int channel);
+EAPI void emotion_object_video_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_video_channel_get (Evas_Object *obj);
+EAPI void emotion_object_spu_mute_set (Evas_Object *obj, Evas_Bool mute);
+EAPI Evas_Bool emotion_object_spu_mute_get (Evas_Object *obj);
+EAPI int emotion_object_spu_channel_count (Evas_Object *obj);
+EAPI const char *emotion_object_spu_channel_name_get (Evas_Object *obj, int channel);
+EAPI void emotion_object_spu_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_spu_channel_get (Evas_Object *obj);
+EAPI int emotion_object_chapter_count (Evas_Object *obj);
+EAPI void emotion_object_chapter_set (Evas_Object *obj, int chapter);
+EAPI int emotion_object_chapter_get (Evas_Object *obj);
+EAPI const char *emotion_object_chapter_name_get (Evas_Object *obj, int chapter);
+EAPI void emotion_object_play_speed_set (Evas_Object *obj, double speed);
+EAPI double emotion_object_play_speed_get (Evas_Object *obj);
+EAPI void emotion_object_eject (Evas_Object *obj);
+EAPI const char *emotion_object_title_get (Evas_Object *obj);
+EAPI const char *emotion_object_progress_info_get (Evas_Object *obj);
+EAPI double emotion_object_progress_status_get (Evas_Object *obj);
+EAPI const char *emotion_object_ref_file_get (Evas_Object *obj);
+EAPI int emotion_object_ref_num_get (Evas_Object *obj);
+EAPI int emotion_object_spu_button_count_get (Evas_Object *obj);
+EAPI int emotion_object_spu_button_get (Evas_Object *obj);
+EAPI const char *emotion_object_meta_info_get (Evas_Object *obj, Emotion_Meta_Info meta);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+
+MAINTAINERCLEANFILES = Makefile.in
+
+AM_CPPFLAGS = \
+-I$(top_srcdir) \
+-I$(top_srcdir)/src/lib \
+-DPACKAGE_BIN_DIR=\"$(bindir)\" \
+-DPACKAGE_LIB_DIR=\"$(libdir)\" \
+-DPACKAGE_DATA_DIR=\"$(datadir)/$(PACKAGE)\" \
+@EVAS_CFLAGS@ \
+@ECORE_CFLAGS@
+
+lib_LTLIBRARIES = libemotion.la
+include_HEADERS = Emotion.h
+libemotion_la_SOURCES = \
+emotion_smart.c \
+emotion_private.h
+
+libemotion_la_LIBADD = @EVAS_LIBS@ @ECORE_LIBS@
+libemotion_la_DEPENDENCIES = $(top_builddir)/config.h
+libemotion_la_LDFLAGS = -version-info @version_info@
--- /dev/null
+#ifndef EMOTION_PRIVATE_H
+#define EMOTION_PRIVATE_H
+
+#include <Evas.h>
+#include <Ecore.h>
+#include <Ecore_Job.h>
+#include <Ecore_Data.h>
+#include <Ecore_Str.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "config.h"
+
+#define META_TRACK_TITLE 1
+#define META_TRACK_ARTIST 2
+#define META_TRACK_GENRE 3
+#define META_TRACK_COMMENT 4
+#define META_TRACK_ALBUM 5
+#define META_TRACK_YEAR 6
+#define META_TRACK_DISCID 7
+#define META_TRACK_COUNT 8
+
+typedef enum _Emotion_Format Emotion_Format;
+typedef enum _Emotion_Vis Emotion_Vis;
+typedef struct _Emotion_Video_Module Emotion_Video_Module;
+typedef struct _Emotion_Module_Options Emotion_Module_Options;
+
+/* Pixel formats a decode module can hand frames over in. */
+enum _Emotion_Format
+{
+ EMOTION_FORMAT_NONE,
+ EMOTION_FORMAT_I420,
+ EMOTION_FORMAT_YV12,
+ EMOTION_FORMAT_YUY2, /* unused for now since evas does not support yuy2 format */
+ EMOTION_FORMAT_BGRA
+};
+
+/* Audio visualisations (goom plus the libvisual actors). */
+enum _Emotion_Vis
+{
+ EMOTION_VIS_GOOM,
+ EMOTION_VIS_LIBVISUAL_BUMPSCOPE,
+ EMOTION_VIS_LIBVISUAL_CORONA,
+ EMOTION_VIS_LIBVISUAL_DANCING_PARTICLES,
+ EMOTION_VIS_LIBVISUAL_GDKPIXBUF,
+ EMOTION_VIS_LIBVISUAL_G_FORCE,
+ EMOTION_VIS_LIBVISUAL_GOOM,
+ EMOTION_VIS_LIBVISUAL_INFINITE,
+ EMOTION_VIS_LIBVISUAL_JAKDAW,
+ EMOTION_VIS_LIBVISUAL_JESS,
+ EMOTION_VIS_LIBVISUAL_LV_ANALYSER,
+ EMOTION_VIS_LIBVISUAL_LV_FLOWER,
+ EMOTION_VIS_LIBVISUAL_LV_GLTEST,
+ EMOTION_VIS_LIBVISUAL_LV_SCOPE,
+ EMOTION_VIS_LIBVISUAL_MADSPIN,
+ EMOTION_VIS_LIBVISUAL_NEBULUS,
+ EMOTION_VIS_LIBVISUAL_OINKSIE,
+ EMOTION_VIS_LIBVISUAL_PLASMA
+};
+
+/* Per-object options handed to the decode module at open time. */
+struct _Emotion_Module_Options
+{
+ unsigned char no_video : 1;
+ unsigned char no_audio : 1;
+};
+
+/* Vtable a decode module plugin fills in via its module_open() symbol.
+ * 'ef' is the module-private handle the module returned through the
+ * 'video' pointer of init(). */
+struct _Emotion_Video_Module
+{
+ unsigned char (*init) (Evas_Object *obj, void **video, Emotion_Module_Options *opt);
+ int (*shutdown) (void *video);
+ unsigned char (*file_open) (const char *file, Evas_Object *obj, void *video);
+ void (*file_close) (void *ef);
+ void (*play) (void *ef, double pos);
+ void (*stop) (void *ef);
+ void (*size_get) (void *ef, int *w, int *h);
+ void (*pos_set) (void *ef, double pos);
+ void (*vis_set) (void *ef, Emotion_Vis vis);
+ double (*len_get) (void *ef);
+ int (*fps_num_get) (void *ef);
+ int (*fps_den_get) (void *ef);
+ double (*fps_get) (void *ef);
+ double (*pos_get) (void *ef);
+ Emotion_Vis (*vis_get) (void *ef);
+ double (*ratio_get) (void *ef);
+ int (*video_handled) (void *ef);
+ int (*audio_handled) (void *ef);
+ int (*seekable) (void *ef);
+ void (*frame_done) (void *ef);
+ Emotion_Format (*format_get) (void *ef);
+ void (*video_data_size_get) (void *ef, int *w, int *h);
+ int (*yuv_rows_get) (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+ int (*bgra_data_get) (void *ef, unsigned char **bgra_data);
+ void (*event_feed) (void *ef, int event);
+ void (*event_mouse_button_feed) (void *ef, int button, int x, int y);
+ void (*event_mouse_move_feed) (void *ef, int x, int y);
+ int (*video_channel_count) (void *ef);
+ void (*video_channel_set) (void *ef, int channel);
+ int (*video_channel_get) (void *ef);
+ const char * (*video_channel_name_get) (void *ef, int channel);
+ void (*video_channel_mute_set) (void *ef, int mute);
+ int (*video_channel_mute_get) (void *ef);
+ int (*audio_channel_count) (void *ef);
+ void (*audio_channel_set) (void *ef, int channel);
+ int (*audio_channel_get) (void *ef);
+ const char * (*audio_channel_name_get) (void *ef, int channel);
+ void (*audio_channel_mute_set) (void *ef, int mute);
+ int (*audio_channel_mute_get) (void *ef);
+ void (*audio_channel_volume_set) (void *ef, double vol);
+ double (*audio_channel_volume_get) (void *ef);
+ int (*spu_channel_count) (void *ef);
+ void (*spu_channel_set) (void *ef, int channel);
+ int (*spu_channel_get) (void *ef);
+ const char * (*spu_channel_name_get) (void *ef, int channel);
+ void (*spu_channel_mute_set) (void *ef, int mute);
+ int (*spu_channel_mute_get) (void *ef);
+ int (*chapter_count) (void *ef);
+ void (*chapter_set) (void *ef, int chapter);
+ int (*chapter_get) (void *ef);
+ const char * (*chapter_name_get) (void *ef, int chapter);
+ void (*speed_set) (void *ef, double speed);
+ double (*speed_get) (void *ef);
+ int (*eject) (void *ef);
+ const char * (*meta_get) (void *ef, int meta);
+
+ Ecore_Plugin *plugin; /* handle of the loaded module plugin */
+ Ecore_Path_Group *path_group; /* search path the plugin was found in */
+};
+
+EAPI void *_emotion_video_get(Evas_Object *obj);
+EAPI void _emotion_frame_new(Evas_Object *obj);
+EAPI void _emotion_video_pos_update(Evas_Object *obj, double pos, double len);
+EAPI void _emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio);
+EAPI void _emotion_decode_stop(Evas_Object *obj);
+EAPI void _emotion_playback_finished(Evas_Object *obj);
+EAPI void _emotion_audio_level_change(Evas_Object *obj);
+EAPI void _emotion_channels_change(Evas_Object *obj);
+EAPI void _emotion_title_set(Evas_Object *obj, char *title);
+EAPI void _emotion_progress_set(Evas_Object *obj, char *info, double stat);
+EAPI void _emotion_file_ref_set(Evas_Object *obj, char *file, int num);
+EAPI void _emotion_spu_button_num_set(Evas_Object *obj, int num);
+EAPI void _emotion_spu_button_set(Evas_Object *obj, int button);
+
+#endif
--- /dev/null
+#include "Emotion.h"
+#include "emotion_private.h"
+
+/* Fetch the smart data of 'o' into 'smart', returning from the calling
+ * function if 'o' is NULL, has no smart data, or is not of type 'type'. */
+#define E_SMART_OBJ_GET(smart, o, type) \
+ { \
+ char *_e_smart_str; \
+ \
+ if (!o) return; \
+ smart = evas_object_smart_data_get(o); \
+ if (!smart) return; \
+ _e_smart_str = (char *)evas_object_type_get(o); \
+ if (!_e_smart_str) return; \
+ if (strcmp(_e_smart_str, type)) return; \
+ }
+
+/* Same as E_SMART_OBJ_GET but for non-void functions: bail out with
+ * 'ret' instead. */
+#define E_SMART_OBJ_GET_RETURN(smart, o, type, ret) \
+ { \
+ char *_e_smart_str; \
+ \
+ if (!o) return ret; \
+ smart = evas_object_smart_data_get(o); \
+ if (!smart) return ret; \
+ _e_smart_str = (char *)evas_object_type_get(o); \
+ if (!_e_smart_str) return ret; \
+ if (strcmp(_e_smart_str, type)) return ret; \
+ }
+
+/* Smart class / evas object type name of the emotion object. */
+#define E_OBJ_NAME "emotion_object"
+
+typedef struct _Smart_Data Smart_Data;
+
+/* Per-object state of the emotion smart object. */
+struct _Smart_Data
+{
+ Emotion_Video_Module *module; /* decode-module vtable */
+ void *video; /* module-private stream handle */
+
+ char *module_name;
+
+ char *file; /* current media file (strdup'd) */
+ Evas_Object *obj; /* image object frames are shown on */
+ double ratio;
+ double pos; /* last known playback position (s) */
+ double seek_pos; /* pending seek target (s) */
+ double len; /* cached stream length (s) */
+
+ Ecore_Job *job; /* pending deferred-seek job */
+
+ unsigned char play : 1;
+ unsigned char seek : 1; /* a seek to seek_pos is pending */
+
+ char *title;
+ struct {
+ char *info;
+ double stat;
+ } progress;
+ struct {
+ char *file;
+ int num;
+ } ref;
+ struct {
+ int button_num;
+ int button;
+ } spu;
+
+ Emotion_Module_Options module_options;
+};
+
+static void _mouse_move(void *data, Evas *ev, Evas_Object *obj, void *event_info);
+static void _mouse_down(void *data, Evas *ev, Evas_Object *obj, void *event_info);
+static void _pos_set_job(void *data);
+static void _pixels_get(void *data, Evas_Object *obj);
+
+static void _smart_init(void);
+static void _smart_add(Evas_Object * obj);
+static void _smart_del(Evas_Object * obj);
+static void _smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y);
+static void _smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h);
+static void _smart_show(Evas_Object * obj);
+static void _smart_hide(Evas_Object * obj);
+static void _smart_color_set(Evas_Object * obj, int r, int g, int b, int a);
+static void _smart_clip_set(Evas_Object * obj, Evas_Object * clip);
+static void _smart_clip_unset(Evas_Object * obj);
+
+/**********************************/
+/* Globals for the E Video Object */
+/**********************************/
+static Evas_Smart *smart = NULL;
+static Ecore_Path_Group *path_group = NULL;
+
+/* Load decode module 'name' through Ecore_Plugin and let its
+ * module_open() symbol fill in the vtable (*mod) and the private
+ * handle (*video). Returns 1 on success, 0 on failure. */
+static unsigned char
+_emotion_module_open(const char *name, Evas_Object *obj, Emotion_Video_Module **mod, void **video)
+{
+ Ecore_Plugin *plugin;
+ char *tmp = NULL;
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!path_group)
+ path_group = ecore_path_group_new();
+ /* allow overriding the module search directory for development */
+ tmp = getenv("EMOTION_MODULES_DIR");
+ if (tmp)
+ ecore_path_group_add(path_group, tmp);
+ ecore_path_group_add(path_group, PACKAGE_LIB_DIR"/emotion/");
+ plugin = ecore_plugin_load(path_group, name, NULL);
+ if (plugin)
+ {
+ unsigned char (*func_module_open)(Evas_Object *, Emotion_Video_Module **, void **, Emotion_Module_Options *);
+
+ func_module_open = ecore_plugin_symbol_get(plugin, "module_open");
+ if (func_module_open)
+ {
+ if (func_module_open(obj, mod, video, &(sd->module_options)))
+ {
+ if (*mod)
+ {
+ (*mod)->plugin = plugin;
+ (*mod)->path_group = path_group;
+ return 1;
+ }
+ }
+ }
+ ecore_plugin_unload(plugin);
+ }
+ else
+ printf ("Unable to load module %s\n", name);
+
+ /* NOTE(review): on failure the process-wide path_group is destroyed
+ * even though another emotion object may still reference it via its
+ * module's path_group pointer - confirm this is intended */
+ if (path_group)
+ {
+ ecore_path_group_del(path_group);
+ path_group = NULL;
+ }
+
+ return 0;
+}
+
+/* Tear down a module: call its optional module_close() symbol for the
+ * given stream handle, then drop the process-wide plugin path group.
+ * The plugin itself is deliberately never unloaded - see the FIXME. */
+static void
+_emotion_module_close(Emotion_Video_Module *mod, void *video)
+{
+ void (*module_close) (Emotion_Video_Module *module, void *);
+
+ module_close = ecore_plugin_symbol_get(mod->plugin, "module_close");
+ if ((module_close) && (video)) module_close(mod, video);
+ /* FIXME: we can't go dlclosing here as a thread still may be running from
+ * the module - this in theory will leak- but it shouldnt be too bad and
+ * mean that once a module is dlopened() it cant be closed - its refcount
+ * will just keep going up
+ */
+ if (path_group)
+ {
+ ecore_path_group_del(path_group);
+ path_group = NULL;
+ }
+}
+
+/*******************************/
+/* Externally accessible calls */
+/*******************************/
+
+
+
+/* Public constructor: lazily register the smart class, then create an
+ * instance of it on 'evas'. */
+EAPI Evas_Object *
+emotion_object_add(Evas *evas)
+{
+ _smart_init();
+ return evas_object_smart_add(evas, smart);
+}
+
+/* Set a module option before emotion_object_init(): only "video" and
+ * "audio" with value "off" are understood; they set the matching no_*
+ * flag handed to the decode module. Other opt/val pairs are ignored. */
+EAPI void
+emotion_object_module_option_set(Evas_Object *obj, const char *opt, const char *val)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if ((!opt) || (!val)) return;
+ if (!strcmp(opt, "video"))
+ {
+ if (!strcmp(val, "off")) sd->module_options.no_video = 1;
+ }
+ else if (!strcmp(opt, "audio"))
+ {
+ if (!strcmp(val, "off")) sd->module_options.no_audio = 1;
+ }
+}
+
+/* (Re)initialise the object: clear any previous file/title/progress/ref
+ * state and load the decode module 'module_filename' if none is loaded
+ * yet. Returns 1 on success, 0 if the module could not be opened. */
+EAPI Evas_Bool
+emotion_object_init(Evas_Object *obj, const char *module_filename)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+
+ if (sd->file) free(sd->file);
+ sd->file = NULL;
+ if (sd->title) free(sd->title);
+ sd->title = NULL;
+ if (sd->progress.info) free(sd->progress.info);
+ sd->progress.info = NULL;
+ sd->progress.stat = 0.0;
+ if (sd->ref.file) free(sd->ref.file);
+ sd->ref.file = NULL;
+ sd->ref.num = 0;
+ sd->spu.button_num = 0;
+ sd->spu.button = -1;
+ sd->ratio = 1.0;
+ sd->pos = 0;
+ sd->seek_pos = 0;
+ sd->len = 0;
+
+ ecore_job_init();
+
+ /* keep an already-open module; only load when none is present */
+ if ((!sd->module) || (!sd->video))
+ {
+ if (!_emotion_module_open(module_filename, obj,
+ &sd->module, &sd->video))
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Set (or, with NULL/"" clear) the media file. A non-empty path closes
+ * any currently open stream, opens the new one and, if the object was
+ * playing, resumes playback from the start. */
+EAPI void
+emotion_object_file_set(Evas_Object *obj, const char *file)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+ if ((file) && (sd->file) && (!strcmp(file, sd->file))) return;
+ if ((file) && (file[0] != 0))
+ {
+ int w, h;
+
+ if (sd->file) free(sd->file);
+ sd->file = strdup(file);
+ /* FIX: require both module AND stream handle before touching the
+ * module (the close path previously checked only sd->module, and
+ * file_open was called through a possibly-NULL sd->module) */
+ if ((sd->module) && (sd->video))
+ {
+ sd->module->file_close(sd->video);
+ evas_object_image_size_set(sd->obj, 0, 0);
+ }
+ if ((!sd->module) || (!sd->video)) return;
+ if (!sd->module->file_open(sd->file, obj, sd->video))
+ return;
+ sd->module->size_get(sd->video, &w, &h);
+ evas_object_image_size_set(sd->obj, w, h);
+ sd->ratio = sd->module->ratio_get(sd->video);
+ sd->pos = 0.0;
+ if (sd->play) sd->module->play(sd->video, 0.0);
+ }
+ else
+ {
+ if (sd->video && sd->module)
+ {
+ sd->module->file_close(sd->video);
+ evas_object_image_size_set(sd->obj, 0, 0);
+ }
+ if (sd->file) free(sd->file);
+ sd->file = NULL;
+ }
+}
+
+/* Return the currently set file path (NULL if none). */
+EAPI const char *
+emotion_object_file_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->file;
+}
+
+/* Start (play != 0) or stop playback; no-op if the state is unchanged
+ * or no module/stream is loaded. */
+EAPI void
+emotion_object_play_set(Evas_Object *obj, Evas_Bool play)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (((play) && (sd->play)) || ((!play) && (!sd->play))) return;
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->play = play;
+ if (sd->play) sd->module->play(sd->video, sd->pos);
+ else sd->module->stop(sd->video);
+}
+
+/* Return whether the object is currently set to play. */
+EAPI Evas_Bool
+emotion_object_play_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->video) return 0;
+
+ return sd->play;
+}
+
+/* Request a seek to 'sec'; the actual module seek happens in a deferred
+ * ecore job (any previously pending seek job is replaced). */
+EAPI void
+emotion_object_position_set(Evas_Object *obj, double sec)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->seek_pos = sec;
+ sd->seek = 1;
+ sd->pos = sd->seek_pos;
+ if (sd->job) ecore_job_del(sd->job);
+ sd->job = ecore_job_add(_pos_set_job, obj);
+}
+
+/* Return the last known playback position in seconds. */
+EAPI double
+emotion_object_position_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->pos;
+}
+
+/* Ask the module whether the current stream is seekable. */
+EAPI Evas_Bool
+emotion_object_seekable_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->seekable(sd->video);
+}
+
+/* Whether the module handles video for the current stream. */
+EAPI Evas_Bool
+emotion_object_video_handled_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_handled(sd->video);
+}
+
+/* Whether the module handles audio for the current stream. */
+EAPI Evas_Bool
+emotion_object_audio_handled_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_handled(sd->video);
+}
+
+/* Query (and cache) the stream length in seconds from the module. */
+EAPI double
+emotion_object_play_length_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ sd->len = sd->module->len_get(sd->video);
+ return sd->len;
+}
+
+/* Return the native frame size; outputs are zeroed first so they are
+ * defined even when the object is invalid. */
+EAPI void
+emotion_object_size_get(Evas_Object *obj, int *iw, int *ih)
+{
+ Smart_Data *sd;
+
+ if (iw) *iw = 0;
+ if (ih) *ih = 0;
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_size_get(sd->obj, iw, ih);
+}
+
+/* Enable/disable smooth scaling on the underlying image object. */
+EAPI void
+emotion_object_smooth_scale_set(Evas_Object *obj, Evas_Bool smooth)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_smooth_scale_set(sd->obj, smooth);
+}
+
+/* Return the smooth-scaling flag of the underlying image object. */
+EAPI Evas_Bool
+emotion_object_smooth_scale_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return evas_object_image_smooth_scale_get(sd->obj);
+}
+
+/* Return the stream's aspect ratio as cached from the module.
+ * NOTE(review): returns 1.0 for an invalid object but 0.0 when no
+ * module/stream is loaded - confirm this asymmetry is intended. */
+EAPI double
+emotion_object_ratio_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->ratio;
+}
+
+/* Forward a simple navigation/digit event to the decode module. */
+EAPI void
+emotion_object_event_simple_send(Evas_Object *obj, Emotion_Event ev)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->event_feed(sd->video, ev);
+}
+
+/* Set the audio volume (scale is module-defined; presumably 0.0..1.0 -
+ * verify per backend). */
+EAPI void
+emotion_object_audio_volume_set(Evas_Object *obj, double vol)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->audio_channel_volume_set(sd->video, vol);
+}
+
+/* Return the audio volume reported by the module. */
+EAPI double
+emotion_object_audio_volume_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->module->audio_channel_volume_get(sd->video);
+}
+
+/* Mute/unmute the audio channel. */
+EAPI void
+emotion_object_audio_mute_set(Evas_Object *obj, Evas_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->audio_channel_mute_set(sd->video, mute);
+}
+
+/* Return the audio mute state. */
+EAPI Evas_Bool
+emotion_object_audio_mute_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_channel_mute_get(sd->video);
+}
+
+/* Return the number of audio channels in the stream. */
+EAPI int
+emotion_object_audio_channel_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_channel_count(sd->video);
+}
+
+/* Return the module-provided name of audio channel 'channel'. */
+EAPI const char *
+emotion_object_audio_channel_name_get(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->audio_channel_name_get(sd->video, channel);
+}
+
+/* Select the active audio channel. */
+EAPI void
+emotion_object_audio_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->audio_channel_set(sd->video, channel);
+}
+
+/* Return the currently active audio channel. */
+EAPI int
+emotion_object_audio_channel_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_channel_get(sd->video);
+}
+
+/* Mute/unmute the video channel. */
+EAPI void
+emotion_object_video_mute_set(Evas_Object *obj, Evas_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->video_channel_mute_set(sd->video, mute);
+}
+
+EAPI Evas_Bool
+emotion_object_video_mute_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_channel_mute_get(sd->video);
+}
+
+EAPI int
+emotion_object_video_channel_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_channel_count(sd->video);
+}
+
+EAPI const char *
+emotion_object_video_channel_name_get(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->video_channel_name_get(sd->video, channel);
+}
+
+EAPI void
+emotion_object_video_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->video_channel_set(sd->video, channel);
+}
+
+EAPI int
+emotion_object_video_channel_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_channel_get(sd->video);
+}
+
+EAPI void
+emotion_object_spu_mute_set(Evas_Object *obj, Evas_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->spu_channel_mute_set(sd->video, mute);
+}
+
+EAPI Evas_Bool
+emotion_object_spu_mute_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->spu_channel_mute_get(sd->video);
+}
+
+EAPI int
+emotion_object_spu_channel_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->spu_channel_count(sd->video);
+}
+
+EAPI const char *
+emotion_object_spu_channel_name_get(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->spu_channel_name_get(sd->video, channel);
+}
+
+EAPI void
+emotion_object_spu_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->spu_channel_set(sd->video, channel);
+}
+
+EAPI int
+emotion_object_spu_channel_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->spu_channel_get(sd->video);
+}
+
+EAPI int
+emotion_object_chapter_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->chapter_count(sd->video);
+}
+
+EAPI void
+emotion_object_chapter_set(Evas_Object *obj, int chapter)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->chapter_set(sd->video, chapter);
+}
+
+EAPI int
+emotion_object_chapter_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->chapter_get(sd->video);
+}
+
+EAPI const char *
+emotion_object_chapter_name_get(Evas_Object *obj, int chapter)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->chapter_name_get(sd->video, chapter);
+}
+
+EAPI void
+emotion_object_play_speed_set(Evas_Object *obj, double speed)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->speed_set(sd->video, speed);
+}
+
+EAPI double
+emotion_object_play_speed_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->module->speed_get(sd->video);
+}
+
+EAPI void
+emotion_object_eject(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->eject(sd->video);
+}
+
+EAPI const char *
+emotion_object_title_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->title;
+}
+
+EAPI const char *
+emotion_object_progress_info_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->progress.info;
+}
+
+EAPI double
+emotion_object_progress_status_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ return sd->progress.stat;
+}
+
+EAPI const char *
+emotion_object_ref_file_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->ref.file;
+}
+
+EAPI int
+emotion_object_ref_num_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return sd->ref.num;
+}
+
+EAPI int
+emotion_object_spu_button_count_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return sd->spu.button_num;
+}
+
+EAPI int
+emotion_object_spu_button_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return sd->spu.button;
+}
+
+EAPI const char *
+emotion_object_meta_info_get(Evas_Object *obj, Emotion_Meta_Info meta)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ switch (meta)
+ {
+ case EMOTION_META_INFO_TRACK_TITLE:
+ return sd->module->meta_get(sd->video, META_TRACK_TITLE);
+ break;
+ case EMOTION_META_INFO_TRACK_ARTIST:
+ return sd->module->meta_get(sd->video, META_TRACK_ARTIST);
+ break;
+ case EMOTION_META_INFO_TRACK_ALBUM:
+ return sd->module->meta_get(sd->video, META_TRACK_ALBUM);
+ break;
+ case EMOTION_META_INFO_TRACK_YEAR:
+ return sd->module->meta_get(sd->video, META_TRACK_YEAR);
+ break;
+ case EMOTION_META_INFO_TRACK_GENRE:
+ return sd->module->meta_get(sd->video, META_TRACK_GENRE);
+ break;
+ case EMOTION_META_INFO_TRACK_COMMENT:
+ return sd->module->meta_get(sd->video, META_TRACK_COMMENT);
+ break;
+ case EMOTION_META_INFO_TRACK_DISC_ID:
+ return sd->module->meta_get(sd->video, META_TRACK_DISCID);
+ break;
+ default:
+ break;
+ }
+ return NULL;
+}
+
+
+
+
+
+
+/*****************************/
+/* Utility calls for modules */
+/*****************************/
+
+/* Module utility: expose the backend's opaque video handle for OBJ
+ * (NULL when OBJ is not an emotion object). */
+EAPI void *
+_emotion_video_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->video;
+}
+
+/* Module utility: a new frame was decoded — mark the image object's
+ * pixels dirty (so evas calls _pixels_get on next render) and notify
+ * listeners via the "frame_decode" smart callback. */
+EAPI void
+_emotion_frame_new(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+// printf("pix get set 1 %p\n", sd->obj);
+ evas_object_image_pixels_dirty_set(sd->obj, 1);
+ evas_object_smart_callback_call(obj, "frame_decode", NULL);
+}
+
+/* Module utility: cache the current position/length and emit
+ * "position_update" / "length_change" only for the values that actually
+ * changed. The exact floating-point != compare is deliberate here: it is
+ * change detection against the last cached value, not a tolerance test. */
+EAPI void
+_emotion_video_pos_update(Evas_Object *obj, double pos, double len)
+{
+ Smart_Data *sd;
+ int npos = 0, nlen = 0;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (pos != sd->pos) npos = 1;
+ if (len != sd->len) nlen = 1;
+ sd->pos = pos;
+ sd->len = len;
+ if (npos) evas_object_smart_callback_call(obj, "position_update", NULL);
+ if (nlen) evas_object_smart_callback_call(obj, "length_change", NULL);
+}
+
+/* Module utility: adopt a new frame size/aspect ratio coming from the
+ * backend and emit "frame_resize" if anything changed. */
+EAPI void
+_emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio)
+{
+ Smart_Data *sd;
+ int iw, ih;
+ int changed = 0;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_size_get(sd->obj, &iw, &ih);
+ if ((w != iw) || (h != ih))
+ {
+ /* Fall back to the pixel aspect when the backend ratio is not used;
+ * NOTE(review): this value is immediately overwritten below whenever
+ * RATIO differs from it — confirm the double assignment is intended. */
+ if (h > 0) sd->ratio = (double)w / (double)h;
+ else sd->ratio = 1.0;
+ evas_object_image_size_set(sd->obj, w, h);
+ changed = 1;
+ }
+ if (ratio != sd->ratio)
+ {
+ sd->ratio = ratio;
+ changed = 1;
+ }
+ if (changed) evas_object_smart_callback_call(obj, "frame_resize", NULL);
+}
+
+/* Module utility: backend stopped decoding — clear the play flag and
+ * emit "decode_stop" (only once; a second call is a no-op). */
+EAPI void
+_emotion_decode_stop(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (sd->play)
+ {
+ sd->play = 0;
+ evas_object_smart_callback_call(obj, "decode_stop", NULL);
+ }
+}
+
+/* Module utility: end of stream reached. No Smart_Data lookup needed —
+ * this just forwards the smart callback. */
+EAPI void
+_emotion_playback_finished(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, "playback_finished", NULL);
+}
+
+/* Module utility: audio level changed in the backend. */
+EAPI void
+_emotion_audio_level_change(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, "audio_level_change", NULL);
+}
+
+/* Module utility: channel layout changed. The Smart_Data lookup serves
+ * only to validate OBJ before emitting the callback. */
+EAPI void
+_emotion_channels_change(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_smart_callback_call(obj, "channels_change", NULL);
+}
+
+/* Module utility: replace the cached stream title and emit
+ * "title_change". TITLE is copied; the object owns its copy.
+ * Fix: guard strdup(NULL), which is undefined behavior — a backend
+ * may legitimately report "no title" as NULL. */
+EAPI void
+_emotion_title_set(Evas_Object *obj, char *title)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if (sd->title) free(sd->title);
+   sd->title = title ? strdup(title) : NULL;
+   evas_object_smart_callback_call(obj, "title_change", NULL);
+}
+
+/* Module utility: update buffering/progress state (INFO string plus a
+ * STAT fraction) and emit "progress_change". INFO is copied.
+ * Fix: guard strdup(NULL), which is undefined behavior. */
+EAPI void
+_emotion_progress_set(Evas_Object *obj, char *info, double stat)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if (sd->progress.info) free(sd->progress.info);
+   sd->progress.info = info ? strdup(info) : NULL;
+   sd->progress.stat = stat;
+   evas_object_smart_callback_call(obj, "progress_change", NULL);
+}
+
+/* Module utility: store a media reference (FILE plus track/ref NUM, e.g.
+ * from a playlist or DVD menu) and emit "ref_change". FILE is copied.
+ * Fix: guard strdup(NULL), which is undefined behavior. */
+EAPI void
+_emotion_file_ref_set(Evas_Object *obj, char *file, int num)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if (sd->ref.file) free(sd->ref.file);
+   sd->ref.file = file ? strdup(file) : NULL;
+   sd->ref.num = num;
+   evas_object_smart_callback_call(obj, "ref_change", NULL);
+}
+
+/* Module utility: number of SPU (DVD menu) buttons changed. */
+EAPI void
+_emotion_spu_button_num_set(Evas_Object *obj, int num)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->spu.button_num = num;
+ evas_object_smart_callback_call(obj, "button_num_change", NULL);
+}
+
+/* Module utility: currently highlighted SPU button changed
+ * (-1 set in _smart_add means "none"). */
+EAPI void
+_emotion_spu_button_set(Evas_Object *obj, int button)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->spu.button = button;
+ evas_object_smart_callback_call(obj, "button_change", NULL);
+}
+
+
+/****************************/
+/* Internal object routines */
+/****************************/
+
+/* Evas callback: translate a mouse-move on the image object into video
+ * pixel coordinates and feed it to the backend (DVD menus etc.).
+ * DATA is the Smart_Data registered in _smart_add.
+ * Fix: guard against a zero-sized object — the original divided by
+ * ow/oh without checking them, so a 0-width/height object crashed. */
+static void
+_mouse_move(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+   Evas_Event_Mouse_Move *e;
+   Smart_Data *sd;
+   int x, y, iw, ih;
+   Evas_Coord ox, oy, ow, oh;
+
+   e = event_info;
+   sd = data;
+   if (!sd->module) return;
+   if (!sd->video) return;
+   evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+   evas_object_image_size_get(obj, &iw, &ih);
+   if ((iw < 1) || (ih < 1)) return;
+   if ((ow < 1) || (oh < 1)) return; /* avoid division by zero below */
+   x = (((int)e->cur.canvas.x - ox) * iw) / ow;
+   y = (((int)e->cur.canvas.y - oy) * ih) / oh;
+   sd->module->event_mouse_move_feed(sd->video, x, y);
+}
+
+/* Evas callback: translate a mouse-down on the image object into video
+ * pixel coordinates and feed it to the backend. The button number is
+ * hard-coded to 1; NOTE(review): e->button is available if other
+ * buttons should ever be forwarded — kept as-is to preserve behavior.
+ * Fix: guard against a zero-sized object — the original divided by
+ * ow/oh without checking them, so a 0-width/height object crashed. */
+static void
+_mouse_down(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+   Evas_Event_Mouse_Down *e;
+   Smart_Data *sd;
+   int x, y, iw, ih;
+   Evas_Coord ox, oy, ow, oh;
+
+   e = event_info;
+   sd = data;
+   if (!sd->module) return;
+   if (!sd->video) return;
+   evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+   evas_object_image_size_get(obj, &iw, &ih);
+   if ((iw < 1) || (ih < 1)) return;
+   if ((ow < 1) || (oh < 1)) return; /* avoid division by zero below */
+   x = (((int)e->canvas.x - ox) * iw) / ow;
+   y = (((int)e->canvas.y - oy) * ih) / oh;
+   sd->module->event_mouse_button_feed(sd->video, 1, x, y);
+}
+
+/* Ecore job: perform the deferred seek requested while decoding.
+ * DATA is the emotion smart object; sd->job is cleared first so a new
+ * seek can be scheduled from here on.
+ * Fix: the job can fire after the file/backend was closed — guard
+ * sd->module/sd->video before calling into the backend. */
+static void
+_pos_set_job(void *data)
+{
+   Evas_Object *obj;
+   Smart_Data *sd;
+
+   obj = data;
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   sd->job = NULL;
+   if (!sd->seek) return;
+   sd->seek = 0;
+   if ((sd->module) && (sd->video))
+     sd->module->pos_set(sd->video, sd->seek_pos);
+}
+
+/* called by evas when it needs pixels for the image object */
+/* Pull the current frame out of the backend into the image object.
+ * Width/height are rounded down to even values (YUV 4:2:0 chroma planes
+ * are half-size, so odd dimensions are not representable). For YV12/I420
+ * the evas data buffer is treated as an array of row pointers: luma rows
+ * first, then the two half-height chroma planes. */
+static void
+_pixels_get(void *data, Evas_Object *obj)
+{
+ int iw, ih, w, h;
+ Smart_Data *sd;
+ Emotion_Format format;
+ unsigned char *bgra_data;
+
+ sd = data;
+ evas_object_image_size_get(obj, &iw, &ih);
+ sd->module->video_data_size_get(sd->video, &w, &h);
+ /* round down to even — YUV 4:2:0 requires it */
+ w = (w >> 1) << 1;
+ h = (h >> 1) << 1;
+ if ((w != iw) || (h != ih))
+ {
+ evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_YCBCR422P601_PL);
+ evas_object_image_alpha_set(obj, 0);
+ evas_object_image_size_set(obj, w, h);
+ iw = w;
+ ih = h;
+ }
+ if ((iw < 1) || (ih < 1))
+ {
+// printf("pix get set 0 (1)\n");
+ /* nothing to draw: stop evas from asking again until a new frame */
+ evas_object_image_pixels_dirty_set(obj, 0);
+ }
+ else
+ {
+ format = sd->module->format_get(sd->video);
+ if ((format == EMOTION_FORMAT_YV12) || (format == EMOTION_FORMAT_I420))
+ {
+ unsigned char **rows;
+
+ evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_YCBCR422P601_PL);
+ /* rows = writable row-pointer table: [0..ih) luma,
+ * [ih..ih+ih/2) Cb, [ih+ih/2..2*ih) Cr */
+ rows = evas_object_image_data_get(obj, 1);
+ if (rows)
+ {
+ if (sd->module->yuv_rows_get(sd->video, iw, ih,
+ rows,
+ &rows[ih],
+ &rows[ih + (ih / 2)]))
+ evas_object_image_data_update_add(obj, 0, 0, iw, ih);
+ }
+ /* NOTE(review): called even when rows is NULL — presumably a
+ * harmless no-op for evas; confirm. */
+ evas_object_image_data_set(obj, rows);
+// printf("pix get set 0 (2)\n");
+ evas_object_image_pixels_dirty_set(obj, 0);
+ }
+ else if (format == EMOTION_FORMAT_BGRA)
+ {
+ if (sd->module->bgra_data_get(sd->video, &bgra_data))
+ {
+ evas_object_image_data_set(obj, bgra_data);
+// printf("pix get set 0 (3)\n");
+ evas_object_image_pixels_dirty_set(obj, 0);
+ }
+ }
+ }
+// no need for this because on any new frame decode info from the decoder
+// module, the previous "current frame" is released (done) for us anyway
+// sd->module->frame_done(sd->video);
+
+/*
+ evas_object_image_size_get(obj, &iw, &ih);
+ sd->module->video_data_size_get(sd->video, &w, &h);
+ if ((w != iw) || (h != ih))
+ {
+ evas_object_image_size_set(obj, w, h);
+ iw = w;
+ ih = h;
+ }
+ format = sd->module->format_get(sd->video);
+ if ((format == EMOTION_FORMAT_YV12) || (format == EMOTION_FORMAT_I420))
+ {
+ unsigned char **rows;
+ Evas_Pixel_Import_Source ps;
+
+ ps.format = EVAS_PIXEL_FORMAT_YUV420P_601;
+ ps.w = iw;
+ ps.h = ih;
+
+ ps.rows = malloc(ps.h * 2 * sizeof(void *));
+ if (!ps.rows)
+ {
+ sd->module->frame_done(sd->video);
+ return;
+ }
+
+ rows = (unsigned char **)ps.rows;
+
+ if (sd->module->yuv_rows_get(sd->video, iw, ih,
+ rows,
+ &rows[ps.h],
+ &rows[ps.h + (ps.h / 2)]))
+ evas_object_image_pixels_import(obj, &ps);
+ evas_object_image_pixels_dirty_set(obj, 0);
+ free(ps.rows);
+ }
+ else if (format == EMOTION_FORMAT_BGRA)
+ {
+ if (sd->module->bgra_data_get(sd->video, &bgra_data));
+ {
+ evas_object_image_data_set(obj, bgra_data);
+ evas_object_image_pixels_dirty_set(obj, 0);
+ }
+ }
+ sd->module->frame_done(sd->video);
+ */
+}
+
+/*******************************************/
+/* Internal smart object required routines */
+/*******************************************/
+/* Lazily create the shared smart class on first use; subsequent calls
+ * are no-ops. */
+static void
+_smart_init(void)
+{
+   static const Evas_Smart_Class sc =
+     {
+        E_OBJ_NAME,
+        EVAS_SMART_CLASS_VERSION,
+        _smart_add,
+        _smart_del,
+        _smart_move,
+        _smart_resize,
+        _smart_show,
+        _smart_hide,
+        _smart_color_set,
+        _smart_clip_set,
+        _smart_clip_unset,
+        NULL
+     };
+
+   if (smart) return;
+   smart = evas_smart_class_new(&sc);
+}
+
+/* Smart add: build the per-object state — an image object as the actual
+ * render target, mouse callbacks and the pixel-fetch callback, and sane
+ * defaults (ratio 1.0, no highlighted SPU button). The image is primed
+ * with a single opaque black pixel so it renders before any frame. */
+static void
+_smart_add(Evas_Object * obj)
+{
+   Smart_Data *sd;
+   unsigned int *pixel;
+
+   sd = calloc(1, sizeof(Smart_Data));
+   if (!sd) return;
+   sd->obj = evas_object_image_add(evas_object_evas_get(obj));
+   evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_MOVE, _mouse_move, sd);
+   evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_DOWN, _mouse_down, sd);
+   evas_object_image_pixels_get_callback_set(sd->obj, _pixels_get, sd);
+   evas_object_smart_member_add(sd->obj, obj);
+   sd->ratio = 1.0;
+   sd->spu.button = -1;
+   evas_object_image_alpha_set(sd->obj, 0);
+   pixel = evas_object_image_data_get(sd->obj, 1);
+   if (pixel)
+     {
+        *pixel = 0xff000000;
+        /* BUGFIX: the buffer was fetched from the image object sd->obj,
+         * so it must be handed back to sd->obj — the original passed the
+         * smart wrapper `obj` here. */
+        evas_object_image_data_set(sd->obj, pixel);
+     }
+   evas_object_smart_data_set(obj, sd);
+}
+
+/* Smart del: tear down everything _smart_add and the module utilities
+ * allocated. NOTE(review): ecore_job_shutdown() here presumably pairs
+ * with an ecore_job_init() elsewhere in this file — confirm.
+ * Fix: free sd->title, which _emotion_title_set strdup's but the
+ * original destructor never released (memory leak). */
+static void
+_smart_del(Evas_Object * obj)
+{
+   Smart_Data *sd;
+
+   sd = evas_object_smart_data_get(obj);
+   if (!sd) return;
+   if (sd->video) sd->module->file_close(sd->video);
+   _emotion_module_close(sd->module, sd->video);
+   evas_object_del(sd->obj);
+   if (sd->file) free(sd->file);
+   if (sd->job) ecore_job_del(sd->job);
+   if (sd->title) free(sd->title);
+   if (sd->progress.info) free(sd->progress.info);
+   if (sd->ref.file) free(sd->ref.file);
+   free(sd);
+
+   ecore_job_shutdown();
+}
+
+/* The handlers below forward the standard smart-object operations to
+ * the internal image object sd->obj; each is a thin pass-through. */
+static void
+_smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_move(sd->obj, x, y);
+}
+
+/* Resize keeps the image fill matched to the object size so the video
+ * always stretches to the full geometry. */
+static void
+_smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_image_fill_set(sd->obj, 0, 0, w, h);
+ evas_object_resize(sd->obj, w, h);
+}
+
+static void
+_smart_show(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_show(sd->obj);
+
+}
+
+static void
+_smart_hide(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_hide(sd->obj);
+}
+
+static void
+_smart_color_set(Evas_Object * obj, int r, int g, int b, int a)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_color_set(sd->obj, r, g, b, a);
+}
+
+static void
+_smart_clip_set(Evas_Object * obj, Evas_Object * clip)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_clip_set(sd->obj, clip);
+}
+
+static void
+_smart_clip_unset(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_clip_unset(sd->obj);
+}
--- /dev/null
+.deps
+.libs
+Makefile
+Makefile.in
+*.la
+*.lo
--- /dev/null
+
+MAINTAINERCLEANFILES = Makefile.in
+
+SUBDIRS = xine gstreamer
--- /dev/null
+.deps
+.libs
+Makefile
+Makefile.in
+*.la
+*.lo
--- /dev/null
+
+MAINTAINERCLEANFILES = Makefile.in
+
+AM_CPPFLAGS = \
+-I$(top_srcdir) \
+-I$(top_srcdir)/src/lib \
+-DPACKAGE_BIN_DIR=\"$(bindir)\" \
+-DPACKAGE_LIB_DIR=\"$(libdir)\" \
+-DPACKAGE_DATA_DIR=\"$(datadir)/$(PACKAGE)\" \
+@EVAS_CFLAGS@ \
+@ECORE_CFLAGS@ \
+@GST_CFLAGS@
+
+if BUILD_GSTREAMER_MODULE
+
+pkgdir = $(libdir)/emotion
+
+pkg_LTLIBRARIES = gstreamer.la
+gstreamer_la_SOURCES = \
+emotion_gstreamer.c \
+emotion_gstreamer.h \
+emotion_gstreamer_pipeline.c \
+emotion_gstreamer_pipeline.h \
+emotion_gstreamer_pipeline_cdda.c \
+emotion_gstreamer_pipeline_dvd.c \
+emotion_gstreamer_pipeline_file.c \
+emotion_gstreamer_pipeline_uri.c
+gstreamer_la_LIBADD = @EVAS_LIBS@ @ECORE_LIBS@ @GST_LIBS@ $(top_builddir)/src/lib/libemotion.la
+gstreamer_la_LDFLAGS = -module -avoid-version
+gstreamer_la_LIBTOOLFLAGS = --tag=disable-static
+gstreamer_la_DEPENDENCIES = $(top_builddir)/config.h
+
+endif
\ No newline at end of file
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_gstreamer.h"
+#include "emotion_gstreamer_pipeline.h"
+
+
+/* Callbacks to get the eos */
+static int _eos_timer_fct (void *data);
+static int _em_fd_ev_active (void *data, Ecore_Fd_Handler *fdh);
+static void _for_each_tag (GstTagList const* list, gchar const* tag, void *data);
+static void _free_metadata (Emotion_Gstreamer_Metadata *m);
+
+/* Interface */
+
+static unsigned char em_init (Evas_Object *obj,
+ void **emotion_video,
+ Emotion_Module_Options *opt);
+
+static int em_shutdown (void *video);
+
+static unsigned char em_file_open (const char *file,
+ Evas_Object *obj,
+ void *video);
+
+static void em_file_close (void *video);
+
+static void em_play (void *video,
+ double pos);
+
+static void em_stop (void *video);
+
+static void em_size_get (void *video,
+ int *width,
+ int *height);
+
+static void em_pos_set (void *video,
+ double pos);
+
+static void em_vis_set (void *video,
+ Emotion_Vis vis);
+
+static double em_len_get (void *video);
+
+static int em_fps_num_get (void *video);
+
+static int em_fps_den_get (void *video);
+
+static double em_fps_get (void *video);
+
+static double em_pos_get (void *video);
+
+static Emotion_Vis em_vis_get (void *video);
+
+static double em_ratio_get (void *video);
+
+static int em_video_handled (void *video);
+
+static int em_audio_handled (void *video);
+
+static int em_seekable (void *video);
+
+static void em_frame_done (void *video);
+
+static Emotion_Format em_format_get (void *video);
+
+static void em_video_data_size_get (void *video,
+ int *w,
+ int *h);
+
+static int em_yuv_rows_get (void *video,
+ int w,
+ int h,
+ unsigned char **yrows,
+ unsigned char **urows,
+ unsigned char **vrows);
+
+static int em_bgra_data_get (void *video,
+ unsigned char **bgra_data);
+
+static void em_event_feed (void *video,
+ int event);
+
+static void em_event_mouse_button_feed (void *video,
+ int button,
+ int x,
+ int y);
+
+static void em_event_mouse_move_feed (void *video,
+ int x,
+ int y);
+
+static int em_video_channel_count (void *video);
+
+static void em_video_channel_set (void *video,
+ int channel);
+
+static int em_video_channel_get (void *video);
+
+static const char *em_video_channel_name_get (void *video,
+ int channel);
+
+static void em_video_channel_mute_set (void *video,
+ int mute);
+
+static int em_video_channel_mute_get (void *video);
+
+static int em_audio_channel_count (void *video);
+
+static void em_audio_channel_set (void *video,
+ int channel);
+
+static int em_audio_channel_get (void *video);
+
+static const char *em_audio_channel_name_get (void *video,
+ int channel);
+
+static void em_audio_channel_mute_set (void *video,
+ int mute);
+
+static int em_audio_channel_mute_get (void *video);
+
+static void em_audio_channel_volume_set (void *video,
+ double vol);
+
+static double em_audio_channel_volume_get (void *video);
+
+static int em_spu_channel_count (void *video);
+
+static void em_spu_channel_set (void *video,
+ int channel);
+
+static int em_spu_channel_get (void *video);
+
+static const char *em_spu_channel_name_get (void *video,
+ int channel);
+
+static void em_spu_channel_mute_set (void *video,
+ int mute);
+
+static int em_spu_channel_mute_get (void *video);
+
+static int em_chapter_count (void *video);
+
+static void em_chapter_set (void *video,
+ int chapter);
+
+static int em_chapter_get (void *video);
+
+static const char *em_chapter_name_get (void *video,
+ int chapter);
+
+static void em_speed_set (void *video,
+ double speed);
+
+static double em_speed_get (void *video);
+
+static int em_eject (void *video);
+
+static const char *em_meta_get (void *video,
+ int meta);
+
+/* Module interface */
+
+static Emotion_Video_Module em_module =
+{
+ em_init, /* init */
+ em_shutdown, /* shutdown */
+ em_file_open, /* file_open */
+ em_file_close, /* file_close */
+ em_play, /* play */
+ em_stop, /* stop */
+ em_size_get, /* size_get */
+ em_pos_set, /* pos_set */
+ em_vis_set, /* vis_set */
+ em_len_get, /* len_get */
+ em_fps_num_get, /* fps_num_get */
+ em_fps_den_get, /* fps_den_get */
+ em_fps_get, /* fps_get */
+ em_pos_get, /* pos_get */
+ em_vis_get, /* vis_get */
+ em_ratio_get, /* ratio_get */
+ em_video_handled, /* video_handled */
+ em_audio_handled, /* audio_handled */
+ em_seekable, /* seekable */
+ em_frame_done, /* frame_done */
+ em_format_get, /* format_get */
+ em_video_data_size_get, /* video_data_size_get */
+ em_yuv_rows_get, /* yuv_rows_get */
+ em_bgra_data_get, /* bgra_data_get */
+ em_event_feed, /* event_feed */
+ em_event_mouse_button_feed, /* event_mouse_button_feed */
+ em_event_mouse_move_feed, /* event_mouse_move_feed */
+ em_video_channel_count, /* video_channel_count */
+ em_video_channel_set, /* video_channel_set */
+ em_video_channel_get, /* video_channel_get */
+ em_video_channel_name_get, /* video_channel_name_get */
+ em_video_channel_mute_set, /* video_channel_mute_set */
+ em_video_channel_mute_get, /* video_channel_mute_get */
+ em_audio_channel_count, /* audio_channel_count */
+ em_audio_channel_set, /* audio_channel_set */
+ em_audio_channel_get, /* audio_channel_get */
+ em_audio_channel_name_get, /* audio_channel_name_get */
+ em_audio_channel_mute_set, /* audio_channel_mute_set */
+ em_audio_channel_mute_get, /* audio_channel_mute_get */
+ em_audio_channel_volume_set, /* audio_channel_volume_set */
+ em_audio_channel_volume_get, /* audio_channel_volume_get */
+ em_spu_channel_count, /* spu_channel_count */
+ em_spu_channel_set, /* spu_channel_set */
+ em_spu_channel_get, /* spu_channel_get */
+ em_spu_channel_name_get, /* spu_channel_name_get */
+ em_spu_channel_mute_set, /* spu_channel_mute_set */
+ em_spu_channel_mute_get, /* spu_channel_mute_get */
+ em_chapter_count, /* chapter_count */
+ em_chapter_set, /* chapter_set */
+ em_chapter_get, /* chapter_get */
+ em_chapter_name_get, /* chapter_name_get */
+ em_speed_set, /* speed_set */
+ em_speed_get, /* speed_get */
+ em_eject, /* eject */
+ em_meta_get, /* meta_get */
+ NULL /* handle */
+};
+
+/* Backend init: allocate the per-instance state, bring up GStreamer,
+ * create the sink lists and the self-pipe used to marshal events from
+ * streaming threads back to the ecore main loop. Returns 1 on success,
+ * 0 on failure (with everything unwound via the goto chain).
+ * Fixes: GError out-params must start NULL per the GLib contract (the
+ * original passed an uninitialized pointer); *emotion_video is now set
+ * only on success so failure paths never leave a dangling handle. */
+static unsigned char
+em_init(Evas_Object *obj,
+        void **emotion_video,
+        Emotion_Module_Options *opt)
+{
+   Emotion_Gstreamer_Video *ev;
+   GError *error = NULL;
+   int fds[2];
+
+   if (!emotion_video)
+     return 0;
+
+   ev = calloc(1, sizeof(Emotion_Gstreamer_Video));
+   if (!ev) return 0;
+
+   ev->obj = obj;
+   ev->obj_data = NULL;
+
+   /* Initialization of gstreamer */
+   if (!gst_init_check(NULL, NULL, &error))
+     goto failure_gstreamer;
+
+   /* We allocate the sinks lists */
+   ev->video_sinks = ecore_list_new();
+   if (!ev->video_sinks)
+     goto failure_video_sinks;
+   ecore_list_free_cb_set(ev->video_sinks, ECORE_FREE_CB(free));
+   ev->audio_sinks = ecore_list_new();
+   if (!ev->audio_sinks)
+     goto failure_audio_sinks;
+   ecore_list_free_cb_set(ev->audio_sinks, ECORE_FREE_CB(free));
+
+   /* Default values */
+   ev->ratio = 1.0;
+   ev->video_sink_nbr = 0;
+   ev->audio_sink_nbr = 0;
+   ev->vis = EMOTION_VIS_GOOM;
+
+   /* Create the self-pipe: streaming threads write events to fd_ev_write,
+    * the ecore fd handler drains fd_ev_read on the main loop. */
+   if (pipe(fds) == 0)
+     {
+        ev->fd_ev_read = fds[0];
+        ev->fd_ev_write = fds[1];
+        fcntl(ev->fd_ev_read, F_SETFL, O_NONBLOCK);
+        ev->fd_ev_handler = ecore_main_fd_handler_add(ev->fd_ev_read,
+                                                      ECORE_FD_READ,
+                                                      _em_fd_ev_active,
+                                                      ev, NULL, NULL);
+        ecore_main_fd_handler_active_set(ev->fd_ev_handler, ECORE_FD_READ);
+     }
+   else
+     goto failure_pipe;
+
+   /* Publish the handle only once fully constructed. */
+   *emotion_video = ev;
+
+   return 1;
+
+failure_pipe:
+   ecore_list_destroy(ev->audio_sinks);
+failure_audio_sinks:
+   ecore_list_destroy(ev->video_sinks);
+failure_video_sinks:
+failure_gstreamer:
+   free(ev);
+
+   return 0;
+}
+
+/* Backend shutdown: release everything em_init created — the fd
+ * handler, both pipe ends, the sink lists (their elements are freed by
+ * the list free callback set in em_init) and the state struct itself.
+ * Returns 0 when VIDEO is NULL, 1 otherwise. */
+static int
+em_shutdown(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+ if (!ev)
+ return 0;
+
+ ecore_main_fd_handler_del(ev->fd_ev_handler);
+
+ close(ev->fd_ev_write);
+ close(ev->fd_ev_read);
+
+ /* FIXME: and the evas object ? */
+ if (ev->obj_data) free(ev->obj_data);
+
+ ecore_list_destroy(ev->video_sinks);
+ ecore_list_destroy(ev->audio_sinks);
+
+ free(ev);
+
+ return 1;
+}
+
+
+/* Backend file open: create a fresh pipeline, dispatch on the URI
+ * scheme (cdda:// / dvd:// / http:// / plain file or file://) to the
+ * matching pipeline builder, then dump the discovered sink properties
+ * and prime the eos/metadata machinery. Returns 1 on success, 0 on
+ * failure (pipeline unref'd).
+ * NOTE(review): schemes are matched with strstr(), so a scheme string
+ * appearing anywhere in FILE matches — strncmp at position 0 would be
+ * stricter; confirm before changing. */
+static unsigned char
+em_file_open(const char *file,
+ Evas_Object *obj,
+ void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ ev->pipeline = gst_pipeline_new("pipeline");
+ if (!ev->pipeline)
+ return 0;
+
+ ev->eos_bus = gst_pipeline_get_bus(GST_PIPELINE(ev->pipeline));
+ if (!ev->eos_bus)
+ {
+ gst_object_unref(ev->pipeline);
+ return 0;
+ }
+
+ /* Evas Object */
+ ev->obj = obj;
+
+ /* CD Audio: either cdda:///dev/xxx?TRACK (device given, '?' or '#'
+ * separates the track number) or cdda://TRACK (default device). */
+ if (strstr(file, "cdda://"))
+ {
+ const char *device = NULL;
+ unsigned int track = 1;
+
+ device = file + strlen("cdda://");
+ if (device[0] == '/')
+ {
+ char *tmp;
+
+ if ((tmp = strchr(device, '?')) || (tmp = strchr(device, '#')))
+ {
+ sscanf(tmp + 1, "%d", &track);
+ /* NOTE(review): truncates the caller's string in place even
+ * though FILE is const char * — confirm callers tolerate it. */
+ tmp[0] = '\0';
+ }
+ }
+ else
+ {
+ device = NULL;
+ sscanf(file, "cdda://%d", &track);
+ }
+ fprintf(stderr, "[Emotion] [gst] build CD Audio pipeline\n");
+ if (!(emotion_pipeline_cdda_build(ev, device, track)))
+ {
+ fprintf(stderr, "[Emotion] [gst] error while building CD Audio pipeline\n");
+ gst_object_unref(ev->pipeline);
+ return 0;
+ }
+ }
+ /* Dvd */
+ else if (strstr(file, "dvd://"))
+ {
+
+ fprintf(stderr, "[Emotion] [gst] build DVD pipeline\n");
+ if (!(emotion_pipeline_dvd_build(ev, NULL)))
+ {
+ fprintf(stderr, "[Emotion] [gst] error while building DVD pipeline\n");
+ gst_object_unref(ev->pipeline);
+ return 0;
+ }
+ }
+ /* http */
+ else if (strstr(file, "http://"))
+ {
+ fprintf(stderr, "[Emotion] [gst] build URI pipeline\n");
+ if (!(emotion_pipeline_uri_build(ev, file)))
+ {
+ fprintf(stderr, "[Emotion] [gst] error while building URI pipeline\n");
+ gst_object_unref(ev->pipeline);
+ return 0;
+ }
+ }
+ /* Normal media file (optional file:// prefix stripped) */
+ else
+ {
+ const char *filename;
+
+ filename = strstr(file, "file://")
+ ? file + strlen("file://")
+ : file;
+
+ fprintf(stderr, "[Emotion] [gst] build file pipeline\n");
+ if (!(emotion_pipeline_file_build(ev, filename)))
+ {
+ fprintf(stderr, "[Emotion] [gst] error while building File pipeline\n");
+ gst_object_unref(ev->pipeline);
+ return 0;
+ }
+ }
+
+ ev->position = 0.0;
+
+ {
+ /* debug dump of what the pipeline builders discovered */
+ Emotion_Video_Sink *vsink;
+ Emotion_Audio_Sink *asink;
+
+ vsink = (Emotion_Video_Sink *)ecore_list_first_goto(ev->video_sinks);
+ if (vsink)
+ {
+ fprintf(stderr, "video : \n");
+ fprintf(stderr, " size : %dx%d\n", vsink->width, vsink->height);
+ fprintf(stderr, " fps : %d/%d\n", vsink->fps_num, vsink->fps_den);
+ fprintf(stderr, " fourcc : %" GST_FOURCC_FORMAT "\n", GST_FOURCC_ARGS(vsink->fourcc));
+ fprintf(stderr, " length : %" GST_TIME_FORMAT "\n\n",
+ GST_TIME_ARGS((guint64)(vsink->length_time * GST_SECOND)));
+ }
+
+ asink = (Emotion_Audio_Sink *)ecore_list_first_goto(ev->audio_sinks);
+ if (asink)
+ {
+ fprintf(stderr, "audio : \n");
+ fprintf(stderr, " chan : %d\n", asink->channels);
+ fprintf(stderr, " rate : %d\n", asink->samplerate);
+ fprintf(stderr, " length : %" GST_TIME_FORMAT "\n\n",
+ GST_TIME_ARGS((guint64)(asink->length_time * GST_SECOND)));
+ }
+ }
+
+ if (ev->metadata)
+ _free_metadata(ev->metadata);
+ ev->metadata = calloc(1, sizeof(Emotion_Gstreamer_Metadata));
+
+ /* NOTE(review): direct call to the eos timer callback — presumably to
+ * pump bus messages (tags etc.) once right away; confirm. */
+ _eos_timer_fct(ev);
+
+ return 1;
+}
+
+/* Backend file close: undo em_file_open — clear the sink lists, stop
+ * the eos poll timer, drop the bus reference, free metadata and bring
+ * the pipeline to NULL state before unreffing it. Safe to call when no
+ * file is open (every teardown is guarded). */
+static void
+em_file_close(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+ if (!ev)
+ return;
+
+ /* we clear the sink lists */
+ ecore_list_clear(ev->video_sinks);
+ ecore_list_clear(ev->audio_sinks);
+
+ /* shutdown eos */
+ if (ev->eos_timer)
+ {
+ ecore_timer_del(ev->eos_timer);
+ ev->eos_timer = NULL;
+ }
+
+ if (ev->eos_bus)
+ {
+ gst_object_unref(GST_OBJECT(ev->eos_bus));
+ ev->eos_bus = NULL;
+ }
+
+ if (ev->metadata)
+ {
+ _free_metadata(ev->metadata);
+ ev->metadata = NULL;
+ }
+
+ if (ev->pipeline)
+ {
+ /* state must reach NULL before the last unref so all elements stop */
+ gst_element_set_state(ev->pipeline, GST_STATE_NULL);
+ gst_object_unref(ev->pipeline);
+ ev->pipeline = NULL;
+ }
+}
+
+/* Start playback. @pos is accepted but not used here: the pipeline resumes
+ * from its current position; seeking is done separately via em_pos_set(). */
+static void
+em_play(void *video,
+        double pos)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   gst_element_set_state(ev->pipeline, GST_STATE_PLAYING);
+   ev->play = 1;
+
+   /* eos: poll the bus every 100ms for EOS/error/tag messages */
+   ev->eos_timer = ecore_timer_add(0.1, _eos_timer_fct, ev);
+}
+
+/* Stop playback: cancel the EOS poller and pause the pipeline.
+ * Note the pipeline is set to PAUSED (not NULL) so position is kept. */
+static void
+em_stop(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   /* shutdown eos */
+   if (ev->eos_timer)
+     {
+        ecore_timer_del(ev->eos_timer);
+        ev->eos_timer = NULL;
+     }
+
+   gst_element_set_state(ev->pipeline, GST_STATE_PAUSED);
+   ev->play = 0;
+}
+
+/* Report the frame size of the currently selected video sink.
+ * Either out-pointer may be NULL; both are set to 0 when no video sink
+ * exists (audio-only media). */
+static void
+em_size_get(void *video,
+            int *width,
+            int *height)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   if (vsink)
+     {
+        if (width) *width = vsink->width;
+        if (height) *height = vsink->height;
+     }
+   else
+     {
+        if (width) *width = 0;
+        if (height) *height = 0;
+     }
+}
+
+/* Seek both the current video and audio sinks to @pos (seconds).
+ * A repeated request for the same position is ignored to avoid
+ * redundant flushing seeks. */
+static void
+em_pos_set(void *video,
+           double pos)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+   Emotion_Audio_Sink *asink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (ev->seek_to_pos == pos) return;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   /* FIX: the audio sink must be looked up in the audio sink list
+    * (this previously indexed ev->video_sinks with the audio index). */
+   asink = (Emotion_Audio_Sink *)ecore_list_index_goto(ev->audio_sinks, ev->audio_sink_nbr);
+
+   if (vsink)
+     {
+        gst_element_seek(vsink->sink, 1.0,
+                         GST_FORMAT_TIME,
+                         GST_SEEK_FLAG_ACCURATE | GST_SEEK_FLAG_FLUSH,
+                         GST_SEEK_TYPE_SET,
+                         (gint64)(pos * (double)GST_SECOND),
+                         GST_SEEK_TYPE_NONE, -1);
+     }
+   if (asink)
+     {
+        gst_element_seek(asink->sink, 1.0,
+                         GST_FORMAT_TIME,
+                         GST_SEEK_FLAG_ACCURATE | GST_SEEK_FLAG_FLUSH,
+                         GST_SEEK_TYPE_SET,
+                         (gint64)(pos * (double)GST_SECOND),
+                         GST_SEEK_TYPE_NONE, -1);
+     }
+   ev->seek_to_pos = pos;
+}
+
+/* Select the audio visualization to use. Only records the choice;
+ * the visualization bin is built when the audio sink is created. */
+static void
+em_vis_set(void *video,
+           Emotion_Vis vis)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (ev->vis == vis) return;
+   ev->vis = vis;
+}
+
+/* Length (seconds) of the current video sink's stream; 0.0 if none. */
+static double
+em_len_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   if (vsink)
+     return (double)vsink->length_time;
+
+   return 0.0;
+}
+
+/* Frame-rate numerator of the current video sink; 0 if none. */
+static int
+em_fps_num_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   if (vsink)
+     return vsink->fps_num;
+
+   return 0;
+}
+
+/* Frame-rate denominator of the current video sink; 1 (safe divisor)
+ * if none. */
+static int
+em_fps_den_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   if (vsink)
+     return vsink->fps_den;
+
+   return 1;
+}
+
+/* Frame rate (num/den) of the current video sink; 0.0 if none. */
+static double
+em_fps_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   if (vsink)
+     return (double)vsink->fps_num / (double)vsink->fps_den;
+
+   return 0.0;
+}
+
+/* Current playback position (seconds), as updated by cb_handoff(). */
+static double
+em_pos_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->position;
+}
+
+/* Currently selected visualization. */
+static Emotion_Vis
+em_vis_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->vis;
+}
+
+/* Width/height aspect ratio of the video. */
+static double
+em_ratio_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->ratio;
+}
+
+/* Non-zero when the media has at least one video stream. */
+static int
+em_video_handled(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (ecore_list_empty_is(ev->video_sinks))
+     return 0;
+
+   return 1;
+}
+
+/* Non-zero when the media has at least one audio stream. */
+static int
+em_audio_handled(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (ecore_list_empty_is(ev->audio_sinks))
+     return 0;
+
+   return 1;
+}
+
+/* This backend always reports the stream as seekable. */
+static int
+em_seekable(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return 1;
+}
+
+/* Frame-consumed notification from the frontend; nothing to do here. */
+static void
+em_frame_done(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+/* Map the current video sink's fourcc to an Emotion pixel format.
+ * Returns EMOTION_FORMAT_NONE for unknown fourccs or when there is
+ * no video sink. */
+static Emotion_Format
+em_format_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   if (vsink)
+     {
+        switch (vsink->fourcc)
+          {
+           case GST_MAKE_FOURCC('I', '4', '2', '0'):
+              return EMOTION_FORMAT_I420;
+           case GST_MAKE_FOURCC('Y', 'V', '1', '2'):
+              return EMOTION_FORMAT_YV12;
+           case GST_MAKE_FOURCC('Y', 'U', 'Y', '2'):
+              return EMOTION_FORMAT_YUY2;
+           case GST_MAKE_FOURCC('A', 'R', 'G', 'B'):
+              return EMOTION_FORMAT_BGRA;
+           default:
+              return EMOTION_FORMAT_NONE;
+          }
+     }
+   return EMOTION_FORMAT_NONE;
+}
+
+/* Size of the decoded frame data. Unlike em_size_get(), the out
+ * pointers are unconditionally written, so they must not be NULL. */
+static void
+em_video_data_size_get(void *video, int *w, int *h)
+{
+   Emotion_Gstreamer_Video *ev;
+   Emotion_Video_Sink *vsink;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+   if (vsink)
+     {
+        *w = vsink->width;
+        *h = vsink->height;
+     }
+   else
+     {
+        *w = 0;
+        *h = 0;
+     }
+}
+
+/* Fill row-pointer tables for planar YUV frames stored in ev->obj_data.
+ * Layout: Y plane (w*h bytes) followed by two half-resolution chroma
+ * planes; I420 stores U then V, YV12 stores V then U.
+ * NOTE(review): the second chroma plane offset uses h * (w / 4), which
+ * equals the expected h*w/4 only when w is a multiple of 4 — confirm
+ * frame widths are 4-aligned here.
+ * Returns 1 on success, 0 when no frame data or a packed format. */
+static int
+em_yuv_rows_get(void *video,
+                int w,
+                int h,
+                unsigned char **yrows,
+                unsigned char **urows,
+                unsigned char **vrows)
+{
+   Emotion_Gstreamer_Video *ev;
+   int i;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (ev->obj_data)
+     {
+        if (em_format_get(video) == EMOTION_FORMAT_I420)
+          {
+             for (i = 0; i < h; i++)
+               yrows[i] = &ev->obj_data[i * w];
+
+             for (i = 0; i < (h / 2); i++)
+               urows[i] = &ev->obj_data[h * w + i * (w / 2)];
+
+             for (i = 0; i < (h / 2); i++)
+               vrows[i] = &ev->obj_data[h * w + h * (w /4) + i * (w / 2)];
+          }
+        else if (em_format_get(video) == EMOTION_FORMAT_YV12)
+          {
+             for (i = 0; i < h; i++)
+               yrows[i] = &ev->obj_data[i * w];
+
+             for (i = 0; i < (h / 2); i++)
+               vrows[i] = &ev->obj_data[h * w + i * (w / 2)];
+
+             for (i = 0; i < (h / 2); i++)
+               urows[i] = &ev->obj_data[h * w + h * (w /4) + i * (w / 2)];
+          }
+        else
+          return 0;
+
+        return 1;
+     }
+
+   return 0;
+}
+
+/* Expose the raw BGRA frame buffer. Returns 1 only when frame data
+ * exists and the stream format is BGRA. */
+static int
+em_bgra_data_get(void *video, unsigned char **bgra_data)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (ev->obj_data && em_format_get(video) == EMOTION_FORMAT_BGRA)
+     {
+        *bgra_data = ev->obj_data;
+        return 1;
+     }
+   return 0;
+}
+
+/* DVD-style event injection — not supported by this backend (no-op). */
+static void
+em_event_feed(void *video, int event)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+/* Mouse button event injection — not supported by this backend (no-op). */
+static void
+em_event_mouse_button_feed(void *video, int button, int x, int y)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+/* Mouse move event injection — not supported by this backend (no-op). */
+static void
+em_event_mouse_move_feed(void *video, int x, int y)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+/* Video channels */
+
+/* Number of video streams found in the media. */
+static int
+em_video_channel_count(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ecore_list_count(ev->video_sinks);
+}
+
+/* Select a video channel. Channel switching is not implemented yet;
+ * only the argument is clamped. */
+static void
+em_video_channel_set(void *video,
+                     int channel)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (channel < 0) channel = 0;
+   /* FIXME: to be done... */
+}
+
+/* Index of the currently selected video channel. */
+static int
+em_video_channel_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->video_sink_nbr;
+}
+
+/* Video channel names are not available in this backend. */
+static const char *
+em_video_channel_name_get(void *video,
+                          int channel)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return NULL;
+}
+
+/* Enable/disable video output; cb_handoff() skips frame copies
+ * while muted. */
+static void
+em_video_channel_mute_set(void *video,
+                          int mute)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   ev->video_mute = mute;
+}
+
+/* Current video mute state. */
+static int
+em_video_channel_mute_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->video_mute;
+}
+
+/* Audio channels */
+
+/* Number of audio streams found in the media. */
+static int
+em_audio_channel_count(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ecore_list_count(ev->audio_sinks);
+}
+
+/* Select an audio channel. Channel switching is not implemented yet;
+ * only the argument is clamped (-1 means "default"). */
+static void
+em_audio_channel_set(void *video,
+                     int channel)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (channel < -1) channel = -1;
+   /* FIXME: to be done... */
+}
+
+/* Index of the currently selected audio channel. */
+static int
+em_audio_channel_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->audio_sink_nbr;
+}
+
+/* Audio channel names are not available in this backend. */
+static const char *
+em_audio_channel_name_get(void *video,
+                          int channel)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return NULL;
+}
+
+/* Mute/unmute audio by driving the "volume" element in the pipeline
+ * to 0.0, or back to the stored volume when unmuting. */
+static void
+em_audio_channel_mute_set(void *video,
+                          int mute)
+{
+   Emotion_Gstreamer_Video *ev;
+   GstElement *volume;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (ev->audio_mute == mute)
+     return;
+
+   /* the mute flag is recorded even if the volume element is missing */
+   ev->audio_mute = mute;
+   volume = gst_bin_get_by_name(GST_BIN(ev->pipeline), "volume");
+   if (!volume) return;
+
+   if (mute)
+     g_object_set(G_OBJECT(volume), "volume", 0.0, NULL);
+   else
+     /* ev->volume is stored normalized to 0..1; the element takes 0..10 */
+     g_object_set(G_OBJECT(volume), "volume", ev->volume * 10.0, NULL);
+
+   gst_object_unref(volume);
+}
+
+/* Current audio mute state. */
+static int
+em_audio_channel_mute_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->audio_mute;
+}
+
+/* Set audio volume. @vol is clamped to [0.0, 1.0] and scaled by 10
+ * for the GStreamer "volume" element. */
+static void
+em_audio_channel_volume_set(void *video,
+                            double vol)
+{
+   Emotion_Gstreamer_Video *ev;
+   GstElement *volume;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (vol < 0.0)
+     vol = 0.0;
+   if (vol > 1.0)
+     vol = 1.0;
+   ev->volume = vol;
+   volume = gst_bin_get_by_name(GST_BIN(ev->pipeline), "volume");
+   if (!volume) return;
+   g_object_set(G_OBJECT(volume), "volume",
+                vol * 10.0, NULL);
+   gst_object_unref(volume);
+}
+
+/* Current (normalized 0..1) audio volume. */
+static double
+em_audio_channel_volume_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return ev->volume;
+}
+
+/* spu stuff */
+
+/* Subtitle (SPU) support is not implemented in this backend; all of
+ * the functions below are stubs returning fixed values. */
+
+static int
+em_spu_channel_count(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return 0;
+}
+
+static void
+em_spu_channel_set(void *video, int channel)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+static int
+em_spu_channel_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return 1;
+}
+
+static const char *
+em_spu_channel_name_get(void *video, int channel)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   return NULL;
+}
+
+static void
+em_spu_channel_mute_set(void *video, int mute)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+static int
+em_spu_channel_mute_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return 0;
+}
+
+/* Chapter navigation is likewise unsupported; stubs only. */
+
+static int
+em_chapter_count(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   return 0;
+}
+
+static void
+em_chapter_set(void *video, int chapter)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+static int
+em_chapter_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return 0;
+}
+
+static const char *
+em_chapter_name_get(void *video, int chapter)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return NULL;
+}
+
+/* Playback speed control is unsupported; speed is fixed at 1.0. */
+static void
+em_speed_set(void *video, double speed)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+}
+
+static double
+em_speed_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return 1.0;
+}
+
+/* Media ejection is not handled here; report success unconditionally. */
+static int
+em_eject(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   return 1;
+}
+
+/* Return the cached metadata string for @meta (one of the META_TRACK_*
+ * ids), or NULL when no metadata is loaded or the id is unknown.
+ * The returned pointer is owned by the backend — callers must not free it. */
+static const char *
+em_meta_get(void *video, int meta)
+{
+   Emotion_Gstreamer_Video *ev;
+   /* FIX: initialize to NULL — previously 'str' was returned
+    * uninitialized when 'meta' fell through to the default case. */
+   const char *str = NULL;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+
+   if (!ev || !ev->metadata) return NULL;
+   switch (meta)
+     {
+      case META_TRACK_TITLE:
+         str = ev->metadata->title;
+         break;
+      case META_TRACK_ARTIST:
+         str = ev->metadata->artist;
+         break;
+      case META_TRACK_ALBUM:
+         str = ev->metadata->album;
+         break;
+      case META_TRACK_YEAR:
+         str = ev->metadata->year;
+         break;
+      case META_TRACK_GENRE:
+         str = ev->metadata->genre;
+         break;
+      case META_TRACK_COMMENT:
+         str = ev->metadata->comment;
+         break;
+      case META_TRACK_DISCID:
+         str = ev->metadata->disc_id;
+         break;
+      default:
+         break;
+     }
+
+   return str;
+}
+
+/* Module entry point: initialize the backend for @obj and hand back
+ * the module vtable. Returns 1 on success, 0 on failure. */
+unsigned char
+module_open(Evas_Object *obj,
+            Emotion_Video_Module **module,
+            void **video,
+            Emotion_Module_Options *opt)
+{
+   if (!module)
+     return 0;
+
+   if (!em_module.init(obj, video, opt))
+     return 0;
+
+   *module = &em_module;
+   return 1;
+}
+
+/* Module exit point: shut the backend instance down. */
+void
+module_close(Emotion_Video_Module *module,
+             void *video)
+{
+   em_module.shutdown(video);
+}
+
+/* GstTagForeachFunc: copy one recognized tag from @list into
+ * ev->metadata, replacing any previously stored value.
+ * NOTE(review): each branch breaks out of the loop after handling
+ * index 0, so for multi-valued tags only the first value is kept;
+ * 'val' is fetched but never used. */
+static void
+_for_each_tag(GstTagList const* list,
+              gchar const* tag,
+              void *data)
+{
+   Emotion_Gstreamer_Video *ev;
+   const GValue *val;
+   int i;
+   int count;
+
+
+   ev = (Emotion_Gstreamer_Video*)data;
+
+   if (!ev || !ev->metadata) return;
+
+   count = gst_tag_list_get_tag_size(list, tag);
+   val = gst_tag_list_get_value_index(list, tag, 0);
+
+   for (i = 0; i < count; i++)
+     {
+        if (!strcmp(tag, GST_TAG_TITLE))
+          {
+             char *str;
+             if (ev->metadata->title) g_free(ev->metadata->title);
+             if (gst_tag_list_get_string(list, GST_TAG_TITLE, &str))
+               ev->metadata->title = str;
+             else
+               ev->metadata->title = NULL;
+             break;
+          }
+        if (!strcmp(tag, GST_TAG_ALBUM))
+          {
+             gchar *str;
+             if (ev->metadata->album) g_free(ev->metadata->album);
+             if (gst_tag_list_get_string(list, GST_TAG_ALBUM, &str))
+               ev->metadata->album = str;
+             else
+               ev->metadata->album = NULL;
+             break;
+          }
+        if (!strcmp(tag, GST_TAG_ARTIST))
+          {
+             gchar *str;
+             if (ev->metadata->artist) g_free( ev->metadata->artist);
+             if (gst_tag_list_get_string(list, GST_TAG_ARTIST, &str))
+               ev->metadata->artist = str;
+             else
+               ev->metadata->artist = NULL;
+             break;
+          }
+        if (!strcmp(tag, GST_TAG_GENRE))
+          {
+             gchar *str;
+             if (ev->metadata->genre) g_free( ev->metadata->genre);
+             if (gst_tag_list_get_string(list, GST_TAG_GENRE, &str))
+               ev->metadata->genre = str;
+             else
+               ev->metadata->genre = NULL;
+             break;
+          }
+        if (!strcmp(tag, GST_TAG_COMMENT))
+          {
+             gchar *str;
+             if (ev->metadata->comment) g_free(ev->metadata->comment);
+             if (gst_tag_list_get_string(list, GST_TAG_COMMENT, &str))
+               ev->metadata->comment = str;
+             else
+               ev->metadata->comment = NULL;
+             break;
+          }
+        if (!strcmp(tag, GST_TAG_DATE))
+          {
+             gchar *str;
+             const GValue *date;
+             if (ev->metadata->year) g_free(ev->metadata->year);
+             /* dates are GDate values, so stringify via GValue contents */
+             date = gst_tag_list_get_value_index(list, GST_TAG_DATE, 0);
+             if (date)
+               str = g_strdup_value_contents(date);
+             else
+               str = NULL;
+             ev->metadata->year = str;
+             break;
+          }
+
+        if (!strcmp(tag, GST_TAG_TRACK_NUMBER))
+          {
+             gchar *str;
+             const GValue *track;
+             if (ev->metadata->count) g_free( ev->metadata->count);
+             track = gst_tag_list_get_value_index(list, GST_TAG_TRACK_NUMBER, 0);
+             if (track)
+               str = g_strdup_value_contents(track);
+             else
+               str = NULL;
+             ev->metadata->count = str;
+             break;
+          }
+
+#ifdef GST_TAG_CDDA_CDDB_DISCID
+        if (!strcmp(tag, GST_TAG_CDDA_CDDB_DISCID))
+          {
+             gchar *str;
+             const GValue *discid;
+             if (ev->metadata->disc_id) g_free(ev->metadata->disc_id);
+             discid = gst_tag_list_get_value_index(list, GST_TAG_CDDA_CDDB_DISCID, 0);
+             if (discid)
+               str = g_strdup_value_contents(discid);
+             else
+               str = NULL;
+             ev->metadata->disc_id = str;
+             break;
+          }
+#endif
+     }
+
+}
+
+/* Release all strings held by @m (allocated with g_malloc via GStreamer)
+ * and then the struct itself (allocated with calloc/malloc, hence free()). */
+static void
+_free_metadata(Emotion_Gstreamer_Metadata *m)
+{
+   if (!m) return;
+
+   /* g_free() is documented NULL-safe, so no per-field guards needed */
+   g_free(m->title);
+   g_free(m->album);
+   g_free(m->artist);
+   g_free(m->genre);
+   g_free(m->comment);
+   g_free(m->year);
+   g_free(m->count);
+   g_free(m->disc_id);
+
+   free(m);
+}
+
+/* Ecore fd handler: drain frame notifications written by cb_handoff()
+ * on the pipe. Each complete message is a pair of pointers
+ * { frame data, GstBuffer } — on receipt, tell the frontend a new frame
+ * is ready and refresh the position display.
+ * NOTE(review): short reads are silently discarded, and frame_data /
+ * buffer are extracted but not otherwise used here — presumably the
+ * buffer is released elsewhere; verify against the pipeline code.
+ * Returns 1 so the handler stays installed. */
+static int
+_em_fd_ev_active(void *data, Ecore_Fd_Handler *fdh)
+{
+   int fd;
+   int len;
+   void *buf[2];
+   unsigned char *frame_data;
+   Emotion_Gstreamer_Video *ev;
+   GstBuffer *buffer;
+
+   ev = data;
+   fd = ecore_main_fd_handler_fd_get(fdh);
+
+   while ((len = read(fd, buf, sizeof(buf))) > 0)
+     {
+        if (len == sizeof(buf))
+          {
+             Emotion_Video_Sink *vsink;
+
+             frame_data = buf[0];
+             buffer = buf[1];
+             _emotion_frame_new(ev->obj);
+             vsink = (Emotion_Video_Sink *)ecore_list_index_goto(ev->video_sinks, ev->video_sink_nbr);
+             if (vsink) _emotion_video_pos_update(ev->obj, ev->position, vsink->length_time);
+          }
+     }
+   return 1;
+}
+
+/* Periodic (0.1s) bus poller: handle pending ERROR, EOS and TAG
+ * messages. On EOS the poller cancels itself and notifies the frontend
+ * that playback finished; TAG messages feed the metadata cache.
+ * Returns 1 to keep the timer running (the deleted-timer case relies on
+ * the frontend having removed it via ev->eos_timer = NULL). */
+static int
+_eos_timer_fct(void *data)
+{
+   Emotion_Gstreamer_Video *ev;
+   GstMessage *msg;
+
+   ev = (Emotion_Gstreamer_Video *)data;
+   /* timeout 0: drain whatever is queued without blocking */
+   while ((msg = gst_bus_poll(ev->eos_bus, GST_MESSAGE_ERROR | GST_MESSAGE_EOS | GST_MESSAGE_TAG, 0)))
+     {
+        switch (GST_MESSAGE_TYPE(msg))
+          {
+           case GST_MESSAGE_ERROR:
+             {
+                gchar *debug;
+                GError *err;
+
+                gst_message_parse_error(msg, &err, &debug);
+                g_free(debug);
+
+                g_print("Error: %s\n", err->message);
+                g_error_free(err);
+
+                break;
+             }
+           case GST_MESSAGE_EOS:
+              if (ev->eos_timer)
+                {
+                   ecore_timer_del(ev->eos_timer);
+                   ev->eos_timer = NULL;
+                }
+              ev->play = 0;
+              _emotion_decode_stop(ev->obj);
+              _emotion_playback_finished(ev->obj);
+              break;
+           case GST_MESSAGE_TAG:
+             {
+                GstTagList *new_tags;
+                gst_message_parse_tag(msg, &new_tags);
+                if (new_tags)
+                  {
+                     gst_tag_list_foreach(new_tags, (GstTagForeachFunc)_for_each_tag, ev);
+                     gst_tag_list_free(new_tags);
+                  }
+                break;
+             }
+           default:
+              break;
+          }
+        gst_message_unref(msg);
+     }
+   return 1;
+}
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#ifndef __EMOTION_GSTREAMER_H__
+#define __EMOTION_GSTREAMER_H__
+
+
+#include <Evas.h>
+#include <Ecore.h>
+#include <Ecore_Data.h>
+
+/* stream kind tags */
+#define HTTP_STREAM 0
+#define RTSP_STREAM 1
+#include <gst/gst.h>
+
+#include "emotion_private.h"
+
+
+typedef struct _Emotion_Video_Sink Emotion_Video_Sink;
+
+/* Per-video-stream bookkeeping: the fakesink that receives decoded
+ * frames plus the stream's negotiated characteristics. */
+struct _Emotion_Video_Sink
+{
+   GstElement *sink;        /* fakesink delivering frames via handoff */
+   gdouble     length_time; /* stream duration in seconds */
+   gint        width;
+   gint        height;
+   gint        fps_num;     /* framerate as a fraction */
+   gint        fps_den;
+   guint32     fourcc;      /* raw pixel format fourcc (0 = unknown) */
+};
+
+typedef struct _Emotion_Audio_Sink Emotion_Audio_Sink;
+
+/* Per-audio-stream bookkeeping. */
+struct _Emotion_Audio_Sink
+{
+   GstElement *sink;        /* audio sink bin (tee + audio + vis) */
+   gdouble     length_time; /* stream duration in seconds */
+   gint        channels;
+   gint        samplerate;
+};
+
+typedef struct _Emotion_Gstreamer_Metadata Emotion_Gstreamer_Metadata;
+
+/* Cached stream tags; all strings are g_malloc'd (see _for_each_tag). */
+struct _Emotion_Gstreamer_Metadata
+{
+   char *title;
+   char *album;
+   char *artist;
+   char *genre;
+   char *comment;
+   char *year;
+   char *count;   /* track number */
+   char *disc_id;
+};
+
+
+typedef struct _Emotion_Gstreamer_Video Emotion_Gstreamer_Video;
+
+/* Backend instance state: one per Emotion object. */
+struct _Emotion_Gstreamer_Video
+{
+   /* Gstreamer elements */
+   GstElement       *pipeline;
+
+   /* eos */
+   GstBus           *eos_bus;   /* polled by _eos_timer_fct() */
+   Ecore_Timer      *eos_timer;
+
+   /* Sinks */
+   Ecore_List       *video_sinks; /* of Emotion_Video_Sink */
+   Ecore_List       *audio_sinks; /* of Emotion_Audio_Sink */
+
+   int               video_sink_nbr; /* currently selected streams */
+   int               audio_sink_nbr;
+
+   /* Evas object */
+   Evas_Object      *obj;
+   unsigned char    *obj_data; /* copy of the latest decoded frame */
+
+   /* Characteristics of stream */
+   double            position; /* seconds */
+   double            ratio;    /* width/height aspect */
+   double            volume;   /* normalized 0..1 */
+
+   volatile int      seek_to;
+   volatile int      get_poslen;
+   volatile double   seek_to_pos; /* last seek target (see em_pos_set) */
+
+   /* pipe used to hand frames from the streaming thread to the
+    * main loop (written in cb_handoff, read in _em_fd_ev_active) */
+   int               fd_ev_read;
+   int               fd_ev_write;
+   Ecore_Fd_Handler *fd_ev_handler;
+
+   Emotion_Gstreamer_Metadata *metadata;
+
+   Emotion_Vis       vis; /* selected audio visualization */
+
+   unsigned char     play : 1;
+   unsigned char     video_mute : 1;
+   unsigned char     audio_mute : 1;
+};
+
+unsigned char module_open(Evas_Object *obj,
+                          Emotion_Video_Module **module,
+                          void **video, Emotion_Module_Options *opt);
+
+void module_close(Emotion_Video_Module *module,
+                  void *video);
+
+
+#endif /* __EMOTION_GSTREAMER_H__ */
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "emotion_private.h"
+#include "emotion_gstreamer.h"
+#include "emotion_gstreamer_pipeline.h"
+
+
+/* Set @pipeline to PAUSED and block until the state change completes.
+ * Returns 1 on success, 0 when the change fails or cannot finish.
+ * NOTE(review): gst_element_get_state with GST_CLOCK_TIME_NONE blocks
+ * indefinitely for live/async sources. */
+gboolean
+emotion_pipeline_pause(GstElement *pipeline)
+{
+   GstStateChangeReturn res;
+
+   res = gst_element_set_state((pipeline), GST_STATE_PAUSED);
+   if (res == GST_STATE_CHANGE_FAILURE)
+     {
+        g_print("Emotion-Gstreamer ERROR: could not pause\n");
+        return 0;
+     }
+
+   res = gst_element_get_state((pipeline), NULL, NULL, GST_CLOCK_TIME_NONE);
+   if (res != GST_STATE_CHANGE_SUCCESS)
+     {
+        g_print("Emotion-Gstreamer ERROR: could not complete pause\n");
+        return 0;
+     }
+
+   return 1;
+}
+
+/* Send the video frame to the evas object */
+/* fakesink "handoff" callback, invoked from the streaming thread:
+ * copy the decoded frame into ev->obj_data and notify the main loop
+ * through the event pipe; also refresh ev->position from the peer pad.
+ * NOTE(review): the malloc result is unchecked, sizeof(void) relies on
+ * a GCC extension (== 1), the buffer is sized from the FIRST frame only,
+ * the write() return value is ignored, and the pad reference returned
+ * by gst_pad_get_peer() is never unreffed — worth confirming upstream. */
+void
+cb_handoff(GstElement *fakesrc,
+           GstBuffer *buffer,
+           GstPad *pad,
+           gpointer user_data)
+{
+   GstQuery *query;
+   void *buf[2];
+
+   Emotion_Gstreamer_Video *ev = (Emotion_Gstreamer_Video *)user_data;
+   if (!ev)
+     return;
+
+   if (!ev->video_mute)
+     {
+        if (!ev->obj_data)
+          ev->obj_data = malloc(GST_BUFFER_SIZE(buffer) * sizeof(void));
+
+        memcpy(ev->obj_data, GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE(buffer));
+        buf[0] = GST_BUFFER_DATA(buffer);
+        buf[1] = buffer;
+        write(ev->fd_ev_write, buf, sizeof(buf));
+     }
+   else
+     {
+        /* video muted: still push position updates based on audio length */
+        Emotion_Audio_Sink *asink;
+        asink = (Emotion_Audio_Sink *)ecore_list_index_goto(ev->audio_sinks, ev->audio_sink_nbr);
+        _emotion_video_pos_update(ev->obj, ev->position, asink->length_time);
+     }
+
+   query = gst_query_new_position(GST_FORMAT_TIME);
+   if (gst_pad_query(gst_pad_get_peer(pad), query))
+     {
+        gint64 position;
+
+        gst_query_parse_position(query, NULL, &position);
+        ev->position = (double)position / (double)GST_SECOND;
+     }
+   gst_query_unref(query);
+}
+
+/* decodebin "new-decoded-pad" callback: create and link a sink for each
+ * decoded stream. Video pads get a queue + fakesink pair; audio pads get
+ * a full audio sink bin from emotion_audio_sink_create().
+ * NOTE(review): for the first video stream, ev->ratio is computed from
+ * vsink->width/height before those fields are filled in — verify the
+ * intended ordering with emotion_video_sink_fill(). */
+void
+file_new_decoded_pad_cb(GstElement *decodebin,
+                        GstPad *new_pad,
+                        gboolean last,
+                        gpointer user_data)
+{
+   Emotion_Gstreamer_Video *ev;
+   GstCaps *caps;
+   gchar *str;
+
+   ev = (Emotion_Gstreamer_Video *)user_data;
+   caps = gst_pad_get_caps(new_pad);
+   str = gst_caps_to_string(caps);
+   /* video stream */
+   if (g_str_has_prefix(str, "video/"))
+     {
+        Emotion_Video_Sink *vsink;
+        GstElement *queue;
+        GstPad *videopad;
+
+        vsink = (Emotion_Video_Sink *)malloc(sizeof(Emotion_Video_Sink));
+        if (!vsink) goto done;
+        if (!ecore_list_append(ev->video_sinks, vsink))
+          {
+             free(vsink);
+             goto done;
+          }
+
+        queue = gst_element_factory_make("queue", NULL);
+        vsink->sink = gst_element_factory_make("fakesink", "videosink");
+        gst_bin_add_many(GST_BIN(ev->pipeline), queue, vsink->sink, NULL);
+        gst_element_link(queue, vsink->sink);
+        videopad = gst_element_get_pad(queue, "sink");
+        gst_pad_link(new_pad, videopad);
+        gst_object_unref(videopad);
+        if (ecore_list_count(ev->video_sinks) == 1)
+          {
+             ev->ratio = (double)vsink->width / (double)vsink->height;
+          }
+        gst_element_set_state(queue, GST_STATE_PAUSED);
+        gst_element_set_state(vsink->sink, GST_STATE_PAUSED);
+     }
+   /* audio stream */
+   else if (g_str_has_prefix(str, "audio/"))
+     {
+        Emotion_Audio_Sink *asink;
+        GstPad *audiopad;
+
+        asink = (Emotion_Audio_Sink *)malloc(sizeof(Emotion_Audio_Sink));
+        if (!asink) goto done;
+        if (!ecore_list_append(ev->audio_sinks, asink))
+          {
+             free(asink);
+             goto done;
+          }
+
+        asink->sink = emotion_audio_sink_create(ev, ecore_list_index(ev->audio_sinks));
+        gst_bin_add(GST_BIN(ev->pipeline), asink->sink);
+        audiopad = gst_element_get_pad(asink->sink, "sink");
+        gst_pad_link(new_pad, audiopad);
+        gst_element_set_state(asink->sink, GST_STATE_PAUSED);
+     }
+
+done:
+   /* FIX: strings returned by gst_caps_to_string() are allocated with
+    * g_malloc and must be released with g_free(), not free(); the early
+    * returns also leaked both 'str' and the caps reference from
+    * gst_pad_get_caps(), so all paths now funnel through this cleanup. */
+   g_free(str);
+   gst_caps_unref(caps);
+}
+
+/* Allocate a new video sink record and append it to ev->video_sinks.
+ * Returns NULL on allocation or list failure; on success the caller
+ * owns filling in the fields (they start zeroed).
+ * FIX: use calloc instead of malloc so width/height/fps/fourcc are
+ * deterministically zero rather than indeterminate until filled. */
+Emotion_Video_Sink *
+emotion_video_sink_new(Emotion_Gstreamer_Video *ev)
+{
+   Emotion_Video_Sink *vsink;
+
+   if (!ev) return NULL;
+
+   vsink = (Emotion_Video_Sink *)calloc(1, sizeof(Emotion_Video_Sink));
+   if (!vsink) return NULL;
+
+   if (!ecore_list_append(ev->video_sinks, vsink))
+     {
+        free(vsink);
+        return NULL;
+     }
+   return vsink;
+}
+
+/* Remove @vsink from ev's video sink list and release its memory.
+ * Does nothing when either argument is NULL or the sink is not
+ * present in the list. */
+void
+emotion_video_sink_free(Emotion_Gstreamer_Video *ev, Emotion_Video_Sink *vsink)
+{
+   if (!ev) return;
+   if (!vsink) return;
+
+   /* position the list cursor on the sink; bail out if it is absent */
+   if (!ecore_list_goto(ev->video_sinks, vsink)) return;
+
+   ecore_list_remove(ev->video_sinks);
+   free(vsink);
+}
+
+/* Create a pseudo video sink fed by the visualization branch of an
+ * audio sink bin ("vissink1"). The frame geometry is fixed at 320x200
+ * ARGB @ 25fps to match the caps set in emotion_audio_sink_create().
+ * Returns NULL when @ev is NULL, allocation fails, or the audio bin
+ * has no visualization sink. */
+Emotion_Video_Sink *
+emotion_visualization_sink_create(Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink)
+{
+   Emotion_Video_Sink *vsink;
+
+   if (!ev) return NULL;
+
+   vsink = emotion_video_sink_new(ev);
+   if (!vsink) return NULL;
+
+   vsink->sink = gst_bin_get_by_name(GST_BIN(asink->sink), "vissink1");
+   if (!vsink->sink)
+     {
+        emotion_video_sink_free(ev, vsink);
+        return NULL;
+     }
+   vsink->width = 320;
+   vsink->height = 200;
+   ev->ratio = (double)vsink->width / (double)vsink->height;
+   vsink->fps_num = 25;
+   vsink->fps_den = 1;
+   vsink->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
+   vsink->length_time = asink->length_time;
+
+   /* deliver frames in sync with the clock, via the handoff signal */
+   g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
+   g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
+   g_signal_connect(G_OBJECT(vsink->sink),
+                    "handoff",
+                    G_CALLBACK(cb_handoff), ev);
+   return vsink;
+}
+
+/* Scan the pipeline bus for a TAG message carrying GST_TAG_TRACK_COUNT
+ * and return the CD track count, or 0 when none is found. Messages are
+ * drained non-blockingly with gst_bus_pop(). */
+int
+emotion_pipeline_cdda_track_count_get(void *video)
+{
+   Emotion_Gstreamer_Video *ev;
+   GstBus *bus;
+   guint tracks_count = 0;
+   gboolean done;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   if (!ev) return tracks_count;
+
+   done = FALSE;
+   bus = gst_element_get_bus(ev->pipeline);
+   if (!bus) return tracks_count;
+
+   while (!done)
+     {
+        GstMessage *message;
+
+        message = gst_bus_pop(bus);
+        if (message == NULL)
+          /* All messages read, we're done */
+          break;
+
+        switch (GST_MESSAGE_TYPE(message))
+          {
+           case GST_MESSAGE_TAG:
+             {
+                GstTagList *tags;
+
+                gst_message_parse_tag(message, &tags);
+
+                gst_tag_list_get_uint(tags, GST_TAG_TRACK_COUNT, &tracks_count);
+                if (tracks_count) done = TRUE;
+                break;
+             }
+           case GST_MESSAGE_ERROR:
+           default:
+              break;
+          }
+        gst_message_unref(message);
+     }
+
+   gst_object_unref(GST_OBJECT(bus));
+
+   return tracks_count;
+}
+
+/* Build the audio sink bin for stream @index:
+ *
+ *            .-> [ queue -> audioconvert -> audioresample -> volume -> sink ]
+ *   [ tee ] -|
+ *            '-> [ queue -> audioconvert -> vis -> ffmpegcolorspace -> fakesink ]
+ *
+ * Only the first audio stream (index == 1) gets a real autoaudiosink;
+ * additional streams are consumed by a fakesink. The visualization
+ * branch is only added when the configured vis element can be created.
+ * Returns the bin (ghost-padded on the tee's sink), or NULL on failure. */
+GstElement *
+emotion_audio_sink_create(Emotion_Gstreamer_Video *ev, int index)
+{
+   gchar buf[128];
+   GstElement *bin;
+   GstElement *audiobin;
+   GstElement *visbin = NULL;
+   GstElement *tee;
+   GstPad *teepad;
+   GstPad *binpad;
+
+   /* audio sink */
+   bin = gst_bin_new(NULL);
+   if (!bin) return NULL;
+
+   g_snprintf(buf, 128, "tee%d", index);
+   tee = gst_element_factory_make("tee", buf);
+
+   /* audio part */
+   {
+      GstElement *queue;
+      GstElement *conv;
+      GstElement *resample;
+      GstElement *volume;
+      GstElement *sink;
+      GstPad *audiopad;
+      double vol;
+
+      audiobin = gst_bin_new(NULL);
+
+      queue = gst_element_factory_make("queue", NULL);
+      conv = gst_element_factory_make("audioconvert", NULL);
+      resample = gst_element_factory_make("audioresample", NULL);
+      volume = gst_element_factory_make("volume", "volume");
+      /* seed ev->volume from the element's default (0..10 scale -> 0..1) */
+      g_object_get(G_OBJECT(volume), "volume", &vol, NULL);
+      ev->volume = vol / 10.0;
+
+      if (index == 1)
+        sink = gst_element_factory_make("autoaudiosink", NULL);
+      else
+        sink = gst_element_factory_make("fakesink", NULL);
+
+      gst_bin_add_many(GST_BIN(audiobin),
+                       queue, conv, resample, volume, sink, NULL);
+      gst_element_link_many(queue, conv, resample, volume, sink, NULL);
+
+      audiopad = gst_element_get_pad(queue, "sink");
+      gst_element_add_pad(audiobin, gst_ghost_pad_new("sink", audiopad));
+      gst_object_unref(audiopad);
+   }
+
+   /* visualisation part */
+   {
+      GstElement *vis = NULL;
+      char *vis_name;
+
+      /* map the Emotion visualization id to a GStreamer element name */
+      switch (ev->vis)
+        {
+         case EMOTION_VIS_GOOM:
+            vis_name = "goom";
+            break;
+         case EMOTION_VIS_LIBVISUAL_BUMPSCOPE:
+            vis_name = "libvisual_bumpscope";
+            break;
+         case EMOTION_VIS_LIBVISUAL_CORONA:
+            vis_name = "libvisual_corona";
+            break;
+         case EMOTION_VIS_LIBVISUAL_DANCING_PARTICLES:
+            vis_name = "libvisual_dancingparticles";
+            break;
+         case EMOTION_VIS_LIBVISUAL_GDKPIXBUF:
+            vis_name = "libvisual_gdkpixbuf";
+            break;
+         case EMOTION_VIS_LIBVISUAL_G_FORCE:
+            vis_name = "libvisual_G-Force";
+            break;
+         case EMOTION_VIS_LIBVISUAL_GOOM:
+            vis_name = "libvisual_goom";
+            break;
+         case EMOTION_VIS_LIBVISUAL_INFINITE:
+            vis_name = "libvisual_infinite";
+            break;
+         case EMOTION_VIS_LIBVISUAL_JAKDAW:
+            vis_name = "libvisual_jakdaw";
+            break;
+         case EMOTION_VIS_LIBVISUAL_JESS:
+            vis_name = "libvisual_jess";
+            break;
+         case EMOTION_VIS_LIBVISUAL_LV_ANALYSER:
+            vis_name = "libvisual_lv_analyzer";
+            break;
+         case EMOTION_VIS_LIBVISUAL_LV_FLOWER:
+            vis_name = "libvisual_lv_flower";
+            break;
+         case EMOTION_VIS_LIBVISUAL_LV_GLTEST:
+            vis_name = "libvisual_lv_gltest";
+            break;
+         case EMOTION_VIS_LIBVISUAL_LV_SCOPE:
+            vis_name = "libvisual_lv_scope";
+            break;
+         case EMOTION_VIS_LIBVISUAL_MADSPIN:
+            vis_name = "libvisual_madspin";
+            break;
+         case EMOTION_VIS_LIBVISUAL_NEBULUS:
+            vis_name = "libvisual_nebulus";
+            break;
+         case EMOTION_VIS_LIBVISUAL_OINKSIE:
+            vis_name = "libvisual_oinksie";
+            break;
+         case EMOTION_VIS_LIBVISUAL_PLASMA:
+            vis_name = "libvisual_plazma";
+            break;
+         default:
+            vis_name = "goom";
+            break;
+        }
+
+      g_snprintf(buf, 128, "vis%d", index);
+      if ((vis = gst_element_factory_make(vis_name, buf)))
+        {
+           GstElement *queue;
+           GstElement *conv;
+           GstElement *cspace;
+           GstElement *sink;
+           GstPad *vispad;
+           GstCaps *caps;
+
+           g_snprintf(buf, 128, "visbin%d", index);
+           visbin = gst_bin_new(buf);
+
+           queue = gst_element_factory_make("queue", NULL);
+           conv = gst_element_factory_make("audioconvert", NULL);
+           cspace = gst_element_factory_make("ffmpegcolorspace", NULL);
+           g_snprintf(buf, 128, "vissink%d", index);
+           sink = gst_element_factory_make("fakesink", buf);
+
+           gst_bin_add_many(GST_BIN(visbin),
+                            queue, conv, vis, cspace, sink, NULL);
+           gst_element_link_many(queue, conv, vis, cspace, NULL);
+           /* force 320x200 32bpp RGB, matching the geometry assumed in
+            * emotion_visualization_sink_create() */
+           caps = gst_caps_new_simple("video/x-raw-rgb",
+                                      "bpp", G_TYPE_INT, 32,
+                                      "width", G_TYPE_INT, 320,
+                                      "height", G_TYPE_INT, 200,
+                                      NULL);
+           gst_element_link_filtered(cspace, sink, caps);
+
+           vispad = gst_element_get_pad(queue, "sink");
+           gst_element_add_pad(visbin, gst_ghost_pad_new("sink", vispad));
+           gst_object_unref(vispad);
+        }
+   }
+
+   gst_bin_add_many(GST_BIN(bin), tee, audiobin, NULL);
+   if (visbin)
+     gst_bin_add(GST_BIN(bin), visbin);
+
+   /* wire a tee request pad to each branch */
+   binpad = gst_element_get_pad(audiobin, "sink");
+   teepad = gst_element_get_request_pad(tee, "src%d");
+   gst_pad_link(teepad, binpad);
+   gst_object_unref(teepad);
+   gst_object_unref(binpad);
+
+   if (visbin)
+     {
+        binpad = gst_element_get_pad(visbin, "sink");
+        teepad = gst_element_get_request_pad(tee, "src%d");
+        gst_pad_link(teepad, binpad);
+        gst_object_unref(teepad);
+        gst_object_unref(binpad);
+     }
+
+   teepad = gst_element_get_pad(tee, "sink");
+   gst_element_add_pad(bin, gst_ghost_pad_new("sink", teepad));
+   gst_object_unref(teepad);
+
+   return bin;
+}
+
+/* Walk @decoder's src pads and fill in the characteristics (size, fps,
+ * duration, ...) of each already-created sink record, pairing pads with
+ * list entries in order. For audio-only media the first audio stream
+ * gets a visualization video sink; otherwise any visualization branch
+ * for this stream is unlinked.
+ * NOTE(review): ecore_list_next() may return NULL if fewer sinks were
+ * recorded than pads found — the fill calls assume a non-NULL sink. */
+void
+emotion_streams_sinks_get(Emotion_Gstreamer_Video *ev, GstElement *decoder)
+{
+   GstIterator *it;
+   gpointer data;
+
+   ecore_list_first_goto(ev->video_sinks);
+   ecore_list_first_goto(ev->audio_sinks);
+
+   it = gst_element_iterate_src_pads(decoder);
+   while (gst_iterator_next(it, &data) == GST_ITERATOR_OK)
+     {
+        GstPad *pad;
+        GstCaps *caps;
+        gchar *str;
+
+        pad = GST_PAD(data);
+
+        caps = gst_pad_get_caps(pad);
+        str = gst_caps_to_string(caps);
+        g_print("caps !! %s\n", str);
+
+        /* video stream */
+        if (g_str_has_prefix(str, "video/"))
+          {
+             Emotion_Video_Sink *vsink;
+
+             vsink = (Emotion_Video_Sink *)ecore_list_next(ev->video_sinks);
+
+             emotion_video_sink_fill(vsink, pad, caps);
+          }
+        /* audio stream */
+        else if (g_str_has_prefix(str, "audio/"))
+          {
+             Emotion_Audio_Sink *asink;
+             gint index;
+
+             asink = (Emotion_Audio_Sink *)ecore_list_next(ev->audio_sinks);
+
+             emotion_audio_sink_fill(asink, pad, caps);
+
+             index = ecore_list_index(ev->audio_sinks);
+
+             if (ecore_list_count(ev->video_sinks) == 0)
+               {
+                  /* audio-only: visualize the first audio stream */
+                  if (index == 1)
+                    {
+                       Emotion_Video_Sink *vsink;
+
+                       vsink = emotion_visualization_sink_create(ev, asink);
+                       if (!vsink) goto finalize;
+                    }
+               }
+             else
+               {
+                  /* real video present: detach this stream's vis branch */
+                  gchar buf[128];
+                  GstElement *visbin;
+
+                  g_snprintf(buf, 128, "visbin%d", index);
+                  visbin = gst_bin_get_by_name(GST_BIN(ev->pipeline), buf);
+                  if (visbin)
+                    {
+                       GstPad *srcpad;
+                       GstPad *sinkpad;
+
+                       sinkpad = gst_element_get_pad(visbin, "sink");
+                       srcpad = gst_pad_get_peer(sinkpad);
+                       gst_pad_unlink(srcpad, sinkpad);
+
+                       gst_object_unref(srcpad);
+                       gst_object_unref(sinkpad);
+                    }
+               }
+          }
+finalize:
+        gst_caps_unref(caps);
+        g_free(str);
+        gst_object_unref(pad);
+     }
+   gst_iterator_free(it);
+}
+
+void
+emotion_video_sink_fill(Emotion_Video_Sink *vsink, GstPad *pad, GstCaps *caps)
+{
+   GstStructure *structure;
+   GstQuery *query;
+   const GValue *val;
+   gchar *str;
+
+   /* Fill VSINK's geometry, framerate, pixel format and duration from
+    * the pad's caps. */
+   structure = gst_caps_get_structure(caps, 0);
+   str = gst_caps_to_string(caps);
+
+   gst_structure_get_int(structure, "width", &vsink->width);
+   gst_structure_get_int(structure, "height", &vsink->height);
+
+   /* default to 1/1 when the caps carry no framerate */
+   vsink->fps_num = 1;
+   vsink->fps_den = 1;
+   val = gst_structure_get_value(structure, "framerate");
+   if (val)
+     {
+        vsink->fps_num = gst_value_get_fraction_numerator(val);
+        vsink->fps_den = gst_value_get_fraction_denominator(val);
+     }
+   if (g_str_has_prefix(str, "video/x-raw-yuv"))
+     {
+        /* guard: yuv caps without a "format" field used to crash in
+         * gst_value_get_fourcc(NULL) */
+        val = gst_structure_get_value(structure, "format");
+        vsink->fourcc = val ? gst_value_get_fourcc(val) : 0;
+     }
+   else if (g_str_has_prefix(str, "video/x-raw-rgb"))
+     vsink->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
+   else
+     vsink->fourcc = 0;
+
+   /* stream length, if the pad can answer a duration query */
+   query = gst_query_new_duration(GST_FORMAT_TIME);
+   if (gst_pad_query(pad, query))
+     {
+        gint64 time;
+
+        gst_query_parse_duration(query, NULL, &time);
+        vsink->length_time = (double)time / (double)GST_SECOND;
+     }
+   g_free(str);
+   gst_query_unref(query);
+}
+
+void
+emotion_audio_sink_fill(Emotion_Audio_Sink *asink, GstPad *pad, GstCaps *caps)
+{
+   GstStructure *structure = gst_caps_get_structure(caps, 0);
+   GstQuery *query;
+
+   /* basic audio properties, straight from the first caps structure */
+   gst_structure_get_int(structure, "channels", &asink->channels);
+   gst_structure_get_int(structure, "rate", &asink->samplerate);
+
+   /* stream length in seconds, when the pad can answer a duration query */
+   query = gst_query_new_duration(GST_FORMAT_TIME);
+   if (gst_pad_query(pad, query))
+     {
+        gint64 length;
+
+        gst_query_parse_duration(query, NULL, &length);
+        asink->length_time = (double)length / (double)GST_SECOND;
+     }
+   gst_query_unref(query);
+}
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#ifndef __EMOTION_GSTREAMER_PIPELINE_H__
+#define __EMOTION_GSTREAMER_PIPELINE_H__
+
+
+#include <gst/gst.h>
+
+
+/* Shared helpers for the per-source pipeline builders.
+ * NOTE(review): the Emotion_* types below come from emotion_gstreamer.h,
+ * which every includer is expected to pull in first — confirm intended. */
+
+/* Set the pipeline to GST_STATE_PAUSED; returns FALSE on failure. */
+gboolean emotion_pipeline_pause (GstElement *pipeline);
+
+/* Pipeline builders; VIDEO is an Emotion_Gstreamer_Video *.
+ * Each returns 1 on success and 0 on failure. */
+int emotion_pipeline_cdda_build (void *video, const char * device, unsigned int track);
+int emotion_pipeline_file_build (void *video, const char *file);
+int emotion_pipeline_uri_build (void *video, const char *uri);
+int emotion_pipeline_dvd_build (void *video, const char *device);
+int emotion_pipeline_cdda_track_count_get (void *video);
+
+/* Sink construction / destruction helpers. */
+GstElement *emotion_audio_sink_create (Emotion_Gstreamer_Video *ev, int index);
+Emotion_Video_Sink *emotion_video_sink_new (Emotion_Gstreamer_Video *ev);
+void emotion_video_sink_free (Emotion_Gstreamer_Video *ev, Emotion_Video_Sink *vsink);
+Emotion_Video_Sink *emotion_visualization_sink_create (Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink);
+
+/* Iterate DECODER's src pads and fill ev's audio/video sink lists. */
+void emotion_streams_sinks_get (Emotion_Gstreamer_Video *ev, GstElement *decoder);
+
+/* Fill a video sink's metadata (size, fps, fourcc, length) from caps. */
+void emotion_video_sink_fill (Emotion_Video_Sink *vsink, GstPad *pad, GstCaps *caps);
+
+/* Fill an audio sink's metadata (channels, rate, length) from caps. */
+void emotion_audio_sink_fill (Emotion_Audio_Sink *asink, GstPad *pad, GstCaps *caps);
+
+/* fakesink "handoff" callback delivering decoded buffers to Emotion. */
+void cb_handoff (GstElement *fakesrc,
+                 GstBuffer *buffer,
+                 GstPad *pad,
+                 gpointer user_data);
+
+/* decodebin "new-decoded-pad" callback (file and uri pipelines). */
+void file_new_decoded_pad_cb (GstElement *decodebin,
+                              GstPad *new_pad,
+                              gboolean last,
+                              gpointer user_data);
+
+
+#endif /* __EMOTION_GSTREAMER_PIPELINE_H__ */
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#include "emotion_gstreamer.h"
+#include "emotion_gstreamer_pipeline.h"
+
+
+static Emotion_Audio_Sink *_emotion_audio_sink_new (Emotion_Gstreamer_Video *ev);
+static void _emotion_audio_sink_free (Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink);
+
+/* Build an audio-CD pipeline: cdiocddasrc -> audio sink, plus a
+ * visualization sink (a CD has no video).  VIDEO is really an
+ * Emotion_Gstreamer_Video *.  Returns 1 on success, 0 on failure. */
+int
+emotion_pipeline_cdda_build(void *video, const char * device, unsigned int track)
+{
+   GstElement *cdiocddasrc;
+   Emotion_Video_Sink *vsink;
+   Emotion_Audio_Sink *asink;
+   Emotion_Gstreamer_Video *ev;
+   /* GstFormat format; */
+   /* gint64 tracks_count; */
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   if (!ev) return 0;
+
+   /* libcdio-based audio CD source element */
+   cdiocddasrc = gst_element_factory_make("cdiocddasrc", "src");
+   if (!cdiocddasrc)
+     {
+        g_print("cdiocddasrc element missing. Install it.\n");
+        goto failure_cdiocddasrc;
+     }
+
+   if (device)
+     g_object_set(G_OBJECT(cdiocddasrc), "device", device, NULL);
+
+   g_object_set(G_OBJECT(cdiocddasrc), "track", track, NULL);
+
+   /* bookkeeping entry for the audio stream */
+   asink = _emotion_audio_sink_new(ev);
+   if (!asink)
+     goto failure_emotion_sink;
+
+   /* actual GStreamer sink bin for audio stream 1 */
+   asink->sink = emotion_audio_sink_create(ev, 1);
+   if (!asink->sink)
+     goto failure_gstreamer_sink;
+
+   gst_bin_add_many((GST_BIN(ev->pipeline)), cdiocddasrc, asink->sink, NULL);
+
+   if (!gst_element_link(cdiocddasrc, asink->sink))
+     goto failure_link;
+
+   /* no video on a CD: show a visualization instead */
+   vsink = emotion_visualization_sink_create(ev, asink);
+   if (!vsink) goto failure_link;
+
+   if (!emotion_pipeline_pause(ev->pipeline))
+     goto failure_gstreamer_pause;
+
+   {
+      GstQuery *query;
+      GstPad *pad;
+      GstCaps *caps;
+      GstStructure *structure;
+
+      /* should always be found */
+      pad = gst_element_get_pad(cdiocddasrc, "src");
+
+      caps = gst_pad_get_caps(pad);
+      structure = gst_caps_get_structure(GST_CAPS(caps), 0);
+
+      gst_structure_get_int(structure, "channels", &asink->channels);
+      gst_structure_get_int(structure, "rate", &asink->samplerate);
+
+      gst_caps_unref(caps);
+
+      /* track duration, mirrored into the visualization sink */
+      query = gst_query_new_duration(GST_FORMAT_TIME);
+      if (gst_pad_query(pad, query))
+        {
+           gint64 time;
+
+           gst_query_parse_duration(query, NULL, &time);
+           asink->length_time = (double)time / (double)GST_SECOND;
+           vsink->length_time = asink->length_time;
+        }
+      gst_query_unref(query);
+      gst_object_unref(GST_OBJECT(pad));
+   }
+
+   return 1;
+
+   /* unwind in reverse construction order; labels fall through */
+failure_gstreamer_pause:
+   emotion_video_sink_free(ev, vsink);
+failure_link:
+   gst_bin_remove(GST_BIN(ev->pipeline), asink->sink);
+failure_gstreamer_sink:
+   _emotion_audio_sink_free(ev, asink);
+failure_emotion_sink:
+   gst_bin_remove(GST_BIN(ev->pipeline), cdiocddasrc);
+failure_cdiocddasrc:
+
+   return 0;
+}
+
+/* Allocate a zeroed Emotion_Audio_Sink and append it to EV's audio sink
+ * list.  Returns the sink, or NULL on allocation/append failure. */
+static Emotion_Audio_Sink *
+_emotion_audio_sink_new(Emotion_Gstreamer_Video *ev)
+{
+   Emotion_Audio_Sink *asink;
+
+   if (!ev) return NULL;
+
+   /* calloc (was malloc): channels/samplerate/length_time are only set
+    * later when the caps/duration queries succeed, so zero them now
+    * instead of leaving them uninitialized */
+   asink = (Emotion_Audio_Sink *)calloc(1, sizeof(Emotion_Audio_Sink));
+   if (!asink) return NULL;
+
+   if (!ecore_list_append(ev->audio_sinks, asink))
+     {
+        free(asink);
+        return NULL;
+     }
+   return asink;
+}
+
+/* Remove ASINK from EV's audio sink list and release it.  A sink that
+ * is not actually in the list is left untouched. */
+static void
+_emotion_audio_sink_free(Emotion_Gstreamer_Video *ev, Emotion_Audio_Sink *asink)
+{
+   if ((!ev) || (!asink)) return;
+   if (!ecore_list_goto(ev->audio_sinks, asink)) return;
+
+   ecore_list_remove(ev->audio_sinks);
+   free(asink);
+}
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#include "emotion_gstreamer.h"
+#include "emotion_gstreamer_pipeline.h"
+
+
+static void dvd_pad_added_cb (GstElement *dvddemuxer,
+ GObject *new_pad,
+ gpointer user_data);
+
+static void dvd_no_more_pads_cb (GstElement *dvddemuxer,
+ gpointer user_data);
+
+/* Set from dvd_no_more_pads_cb() (GStreamer streaming thread) and
+ * busy-waited on in emotion_pipeline_dvd_build(); volatile so the spin
+ * loop actually re-reads it.  NOTE(review): a condition variable or an
+ * atomic would be a sturdier fix. */
+static volatile int no_more_pads = 0;
+
+
+/* Build a DVD pipeline: dvdreadsrc -> dvddemux, with decoders attached
+ * from dvd_pad_added_cb().  VIDEO is an Emotion_Gstreamer_Video *.
+ * Returns 1 on success, 0 on failure. */
+int
+emotion_pipeline_dvd_build(void *video, const char *device)
+{
+   GstElement *dvdreadsrc;
+   GstElement *dvddemux;
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   if (!ev) return 0;
+
+   dvdreadsrc = gst_element_factory_make("dvdreadsrc", "src");
+   if (!dvdreadsrc)
+     goto failure_dvdreadsrc;
+   if (device)
+     g_object_set(G_OBJECT(dvdreadsrc), "device", device, NULL);
+
+   dvddemux = gst_element_factory_make("dvddemux", "dvddemux");
+   if (!dvddemux)
+     goto failure_dvddemux;
+   g_signal_connect(dvddemux, "pad-added",
+                    G_CALLBACK(dvd_pad_added_cb), ev);
+   g_signal_connect(dvddemux, "no-more-pads",
+                    G_CALLBACK(dvd_no_more_pads_cb), ev);
+
+   gst_bin_add_many(GST_BIN(ev->pipeline), dvdreadsrc, dvddemux, NULL);
+   if (!gst_element_link(dvdreadsrc, dvddemux))
+     goto failure_link;
+
+   if (!emotion_pipeline_pause(ev->pipeline))
+     goto failure_gstreamer_pause;
+
+   /* busy-wait until dvd_no_more_pads_cb() fires from the streaming
+    * thread.  NOTE(review): a condition variable would be cleaner. */
+   while (no_more_pads == 0)
+     {
+        g_print("toto\n");
+     }
+   no_more_pads = 0;
+
+   /* We get the informations of streams */
+   ecore_list_first_goto(ev->video_sinks);
+   ecore_list_first_goto(ev->audio_sinks);
+
+   {
+      GstIterator *it;
+      gpointer data;
+
+      it = gst_element_iterate_src_pads(dvddemux);
+      while (gst_iterator_next(it, &data) == GST_ITERATOR_OK)
+        {
+           GstPad *pad;
+           GstCaps *caps;
+           gchar *str;
+
+           pad = GST_PAD(data);
+
+           caps = gst_pad_get_caps(pad);
+           str = gst_caps_to_string(caps);
+           g_print("caps !! %s\n", str);
+           /* video stream */
+           if (g_str_has_prefix(str, "video/mpeg"))
+             {
+                Emotion_Video_Sink *vsink;
+                GstPad *sink_pad;
+                GstCaps *sink_caps;
+                gchar *sink_str;
+
+                vsink = (Emotion_Video_Sink *)ecore_list_next(ev->video_sinks);
+                sink_pad = gst_element_get_pad(gst_bin_get_by_name(GST_BIN(ev->pipeline), "mpeg2dec"), "src");
+                sink_caps = gst_pad_get_caps(sink_pad);
+                /* separate string: the original overwrote (and leaked)
+                 * the outer str here */
+                sink_str = gst_caps_to_string(sink_caps);
+                g_print(" ** caps v !! %s\n", sink_str);
+                g_free(sink_str);
+
+                emotion_video_sink_fill(vsink, sink_pad, sink_caps);
+
+                gst_caps_unref(sink_caps);
+                gst_object_unref(sink_pad);
+             }
+           /* audio stream */
+           else if (g_str_has_prefix(str, "audio/"))
+             {
+                Emotion_Audio_Sink *asink;
+                GstPad *sink_pad;
+                GstCaps *sink_caps;
+
+                asink = (Emotion_Audio_Sink *)ecore_list_next(ev->audio_sinks);
+                sink_pad = gst_element_get_pad(gst_bin_get_by_name(GST_BIN(ev->pipeline), "a52dec"), "src");
+                sink_caps = gst_pad_get_caps(sink_pad);
+
+                emotion_audio_sink_fill(asink, sink_pad, sink_caps);
+
+                /* release the refs (the video branch already did; the
+                 * original leaked them here) */
+                gst_caps_unref(sink_caps);
+                gst_object_unref(sink_pad);
+             }
+           gst_caps_unref(caps);
+           g_free(str);
+           gst_object_unref(pad);
+        }
+      gst_iterator_free(it);
+   }
+
+   /* The first vsink is a valid Emotion_Video_Sink * */
+   /* If no video stream is found, it's a visualisation sink */
+   {
+      Emotion_Video_Sink *vsink;
+
+      vsink = (Emotion_Video_Sink *)ecore_list_first_goto(ev->video_sinks);
+      if (vsink && vsink->sink)
+        {
+           g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
+           g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
+           g_signal_connect(G_OBJECT(vsink->sink),
+                            "handoff",
+                            G_CALLBACK(cb_handoff), ev);
+        }
+   }
+
+   return 1;
+
+failure_gstreamer_pause:
+failure_link:
+   gst_element_set_state(ev->pipeline, GST_STATE_NULL);
+   gst_bin_remove(GST_BIN(ev->pipeline), dvddemux);
+failure_dvddemux:
+   gst_bin_remove(GST_BIN(ev->pipeline), dvdreadsrc);
+failure_dvdreadsrc:
+
+   return 0;
+}
+
+/* dvddemux "pad-added" callback: attach a decoding branch (video or
+ * audio) to each newly exposed pad and record the matching sink. */
+static void
+dvd_pad_added_cb(GstElement *dvddemuxer,
+                 GObject *new_pad,
+                 gpointer user_data)
+{
+   Emotion_Gstreamer_Video *ev;
+   GstCaps *caps;
+   gchar *str;
+
+   ev = (Emotion_Gstreamer_Video *)user_data;
+   caps = gst_pad_get_caps(GST_PAD(new_pad));
+   str = gst_caps_to_string(caps);
+   /* video stream */
+   if (g_str_has_prefix(str, "video/mpeg"))
+     {
+        Emotion_Video_Sink *vsink;
+        GstElement *queue;
+        GstElement *decoder;
+        GstPad *videopad;
+
+        /* calloc (was malloc) so width/height etc. are defined zeros
+         * until they are actually filled in */
+        vsink = (Emotion_Video_Sink *)calloc(1, sizeof(Emotion_Video_Sink));
+        if (!vsink) goto done;
+        if (!ecore_list_append(ev->video_sinks, vsink))
+          {
+             free(vsink);
+             goto done;
+          }
+
+        queue = gst_element_factory_make("queue", NULL);
+        decoder = gst_element_factory_make("mpeg2dec", "mpeg2dec");
+        vsink->sink = gst_element_factory_make("fakesink", "videosink");
+        gst_bin_add_many(GST_BIN(ev->pipeline), queue, decoder, vsink->sink, NULL);
+        gst_element_link(queue, decoder);
+        gst_element_link(decoder, vsink->sink);
+        videopad = gst_element_get_pad(queue, "sink");
+        gst_pad_link(GST_PAD(new_pad), videopad);
+        gst_object_unref(videopad);
+        if (ecore_list_count(ev->video_sinks) == 1)
+          {
+             /* NOTE(review): width/height are not filled in yet at this
+              * point (zero from calloc), so this ratio is 0/0 — confirm
+              * where the real ratio should be computed */
+             ev->ratio = (double)vsink->width / (double)vsink->height;
+          }
+        gst_element_set_state(queue, GST_STATE_PAUSED);
+        gst_element_set_state(decoder, GST_STATE_PAUSED);
+        gst_element_set_state(vsink->sink, GST_STATE_PAUSED);
+     }
+   /* audio stream */
+   else if (g_str_has_prefix(str, "audio/"))
+     {
+        Emotion_Audio_Sink *asink;
+        GstElement *queue;
+        GstElement *decoder;
+        GstElement *conv;
+        GstElement *resample;
+        GstElement *volume;
+        GstPad *audiopad;
+        double vol;
+
+        asink = (Emotion_Audio_Sink *)calloc(1, sizeof(Emotion_Audio_Sink));
+        if (!asink) goto done;
+        if (!ecore_list_append(ev->audio_sinks, asink))
+          {
+             free(asink);
+             goto done;
+          }
+
+        queue = gst_element_factory_make("queue", NULL);
+        decoder = gst_element_factory_make("a52dec", "a52dec");
+        conv = gst_element_factory_make("audioconvert", NULL);
+        resample = gst_element_factory_make("audioresample", NULL);
+        volume = gst_element_factory_make("volume", "volume");
+        g_object_get(G_OBJECT(volume), "volume", &vol, NULL);
+        ev->volume = vol / 10.0;
+
+        /* FIXME: must manage several audio streams */
+        asink->sink = gst_element_factory_make("fakesink", NULL);
+
+        gst_bin_add_many(GST_BIN(ev->pipeline),
+                         queue, decoder, conv, resample, volume, asink->sink, NULL);
+        gst_element_link_many(queue, decoder, conv, resample, volume, asink->sink, NULL);
+
+        audiopad = gst_element_get_pad(queue, "sink");
+        gst_pad_link(GST_PAD(new_pad), audiopad);
+        gst_object_unref(audiopad);
+
+        gst_element_set_state(queue, GST_STATE_PAUSED);
+        gst_element_set_state(decoder, GST_STATE_PAUSED);
+        gst_element_set_state(conv, GST_STATE_PAUSED);
+        gst_element_set_state(resample, GST_STATE_PAUSED);
+        gst_element_set_state(volume, GST_STATE_PAUSED);
+        gst_element_set_state(asink->sink, GST_STATE_PAUSED);
+     }
+
+done:
+   /* the original leaked both of these on every call */
+   gst_caps_unref(caps);
+   g_free(str);
+}
+
+/* dvddemux "no-more-pads" callback: every stream has been exposed, so
+ * release the wait loop in emotion_pipeline_dvd_build(). */
+static void
+dvd_no_more_pads_cb(GstElement *dvddemuxer,
+                    gpointer user_data)
+{
+   (void)dvddemuxer;
+   (void)user_data;
+
+   no_more_pads = 1;
+}
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#include "emotion_gstreamer.h"
+#include "emotion_gstreamer_pipeline.h"
+
+/* Build a local-file pipeline: filesrc -> decodebin, streams wired up
+ * from file_new_decoded_pad_cb().  VIDEO is an Emotion_Gstreamer_Video *.
+ * Returns 1 on success, 0 on failure. */
+int
+emotion_pipeline_file_build(void *video, const char *file)
+{
+   GstElement *filesrc;
+   GstElement *decodebin;
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   if (!ev) return 0;
+
+   filesrc = gst_element_factory_make("filesrc", "src");
+   if (!filesrc)
+     goto failure_filesrc;
+   g_object_set(G_OBJECT(filesrc), "location", file, NULL);
+
+   decodebin = gst_element_factory_make("decodebin", "decodebin");
+   if (!decodebin)
+     goto failure_decodebin;
+   g_signal_connect(decodebin, "new-decoded-pad",
+                    G_CALLBACK(file_new_decoded_pad_cb), ev);
+
+   gst_bin_add_many(GST_BIN(ev->pipeline), filesrc, decodebin, NULL);
+   if (!gst_element_link(filesrc, decodebin))
+     goto failure_link;
+
+   /* pausing pre-rolls the pipeline so stream info becomes available */
+   if (!emotion_pipeline_pause(ev->pipeline))
+     goto failure_gstreamer_pause;
+
+   emotion_streams_sinks_get(ev, decodebin);
+
+   /* The first vsink is a valid Emotion_Video_Sink * */
+   /* If no video stream is found, it's a visualisation sink */
+   {
+      Emotion_Video_Sink *vsink;
+
+      vsink = (Emotion_Video_Sink *)ecore_list_first_goto(ev->video_sinks);
+      if (vsink && vsink->sink)
+        {
+           /* deliver decoded frames to Emotion via the fakesink handoff */
+           g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
+           g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
+           g_signal_connect(G_OBJECT(vsink->sink),
+                            "handoff",
+                            G_CALLBACK(cb_handoff), ev);
+        }
+   }
+
+   return 1;
+
+   /* unwind in reverse construction order; labels fall through */
+failure_gstreamer_pause:
+failure_link:
+   gst_element_set_state(ev->pipeline, GST_STATE_NULL);
+   gst_bin_remove(GST_BIN(ev->pipeline), decodebin);
+failure_decodebin:
+   gst_bin_remove(GST_BIN(ev->pipeline), filesrc);
+failure_filesrc:
+
+   return 0;
+}
--- /dev/null
+/*
+ * vim:ts=8:sw=3:sts=8:noexpandtab:cino=>5n-3f0^-2{2
+ */
+#include "emotion_gstreamer.h"
+#include "emotion_gstreamer_pipeline.h"
+
+/* Build a URI pipeline: an auto-selected source element -> decodebin.
+ * VIDEO is an Emotion_Gstreamer_Video *.  Returns 1 on success, 0 on
+ * failure. */
+int
+emotion_pipeline_uri_build(void *video, const char *uri)
+{
+   GstElement *src;
+   GstElement *decodebin;
+   Emotion_Gstreamer_Video *ev;
+
+   ev = (Emotion_Gstreamer_Video *)video;
+   if (!ev) return 0;
+
+   /* bail out only when NO source element can handle this URI scheme
+    * (the original test was inverted and rejected supported URIs) */
+   if (!gst_uri_protocol_is_supported(GST_URI_SRC, uri))
+     goto failure_src;
+   src = gst_element_make_from_uri(GST_URI_SRC, uri, "src");
+   if (!src)
+     goto failure_src;
+   /* NOTE(review): gst_element_make_from_uri() already configures the
+    * URI; not every source has a "location" property — confirm needed */
+   g_object_set(G_OBJECT(src), "location", uri, NULL);
+
+   decodebin = gst_element_factory_make("decodebin", "decodebin");
+   if (!decodebin)
+     goto failure_decodebin;
+   g_signal_connect(decodebin, "new-decoded-pad",
+                    G_CALLBACK(file_new_decoded_pad_cb), ev);
+
+   gst_bin_add_many(GST_BIN(ev->pipeline), src, decodebin, NULL);
+   if (!gst_element_link(src, decodebin))
+     goto failure_link;
+
+   /* pausing pre-rolls the pipeline so stream info becomes available */
+   if (!emotion_pipeline_pause(ev->pipeline))
+     goto failure_gstreamer_pause;
+
+   emotion_streams_sinks_get(ev, decodebin);
+
+   /* The first vsink is a valid Emotion_Video_Sink * */
+   /* If no video stream is found, it's a visualisation sink */
+   {
+      Emotion_Video_Sink *vsink;
+
+      vsink = (Emotion_Video_Sink *)ecore_list_first_goto(ev->video_sinks);
+      if (vsink && vsink->sink)
+        {
+           g_object_set(G_OBJECT(vsink->sink), "sync", TRUE, NULL);
+           g_object_set(G_OBJECT(vsink->sink), "signal-handoffs", TRUE, NULL);
+           g_signal_connect(G_OBJECT(vsink->sink),
+                            "handoff",
+                            G_CALLBACK(cb_handoff), ev);
+        }
+   }
+
+   return 1;
+
+failure_gstreamer_pause:
+failure_link:
+   gst_element_set_state(ev->pipeline, GST_STATE_NULL);
+   gst_bin_remove(GST_BIN(ev->pipeline), decodebin);
+failure_decodebin:
+   gst_bin_remove(GST_BIN(ev->pipeline), src);
+failure_src:
+
+   return 0;
+}
--- /dev/null
+.deps
+.libs
+Makefile
+Makefile.in
+*.la
+*.lo
--- /dev/null
+
+MAINTAINERCLEANFILES = Makefile.in
+
+AM_CPPFLAGS = \
+-I$(top_srcdir) \
+-I$(top_srcdir)/src/lib \
+-I$(top_srcdir)/src/modules \
+-DPACKAGE_BIN_DIR=\"$(bindir)\" \
+-DPACKAGE_LIB_DIR=\"$(libdir)\" \
+-DPACKAGE_DATA_DIR=\"$(datadir)/$(PACKAGE)\" \
+@EVAS_CFLAGS@ \
+@ECORE_CFLAGS@ \
+@XINE_CFLAGS@
+
+if BUILD_XINE_MODULE
+
+pkgdir = $(libdir)/emotion
+
+pkg_LTLIBRARIES = xine.la
+
+xine_la_SOURCES = \
+emotion_xine.c \
+emotion_xine.h \
+emotion_xine_vo_out.c
+xine_la_LIBADD = @EVAS_LIBS@ @ECORE_LIBS@ @XINE_LIBS@ $(top_builddir)/src/lib/libemotion.la -lpthread
+xine_la_LDFLAGS = -module -avoid-version
+xine_la_LIBTOOLFLAGS = --tag=disable-static
+xine_la_DEPENDENCIES = $(top_builddir)/config.h
+
+endif
--- /dev/null
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_xine.h"
+
+/* module api */
+static unsigned char em_init (Evas_Object *obj, void **emotion_video, Emotion_Module_Options *opt);
+static int em_shutdown (void *ef);
+static unsigned char em_file_open (const char *file, Evas_Object *obj, void *ef);
+static void em_file_close (void *ef);
+static void em_play (void *ef, double pos);
+static void em_stop (void *ef);
+static void em_size_get (void *ef, int *w, int *h);
+static void em_pos_set (void *ef, double pos);
+static void em_vis_set (void *ef, Emotion_Vis vis);
+static double em_len_get (void *ef);
+static int em_fps_num_get (void *ef);
+static int em_fps_den_get (void *ef);
+static double em_fps_get (void *ef);
+static double em_pos_get (void *ef);
+static Emotion_Vis em_vis_get (void *ef);
+static double em_ratio_get (void *ef);
+static int em_seekable (void *ef);
+static void em_frame_done (void *ef);
+static Emotion_Format em_format_get (void *ef);
+static void em_video_data_size_get (void *ef, int *w, int *h);
+static int em_yuv_rows_get (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+static int em_bgra_data_get (void *ef, unsigned char **bgra_data);
+static void em_event_feed (void *ef, int event);
+static void em_event_mouse_button_feed (void *ef, int button, int x, int y);
+static void em_event_mouse_move_feed (void *ef, int x, int y);
+static int em_video_channel_count (void *ef);
+static void em_video_channel_set (void *ef, int channel);
+static int em_video_channel_get (void *ef);
+static const char *em_video_channel_name_get (void *ef, int channel);
+static void em_video_channel_mute_set (void *ef, int mute);
+static int em_video_channel_mute_get (void *ef);
+static int em_audio_channel_count (void *ef);
+static void em_audio_channel_set (void *ef, int channel);
+static int em_audio_channel_get (void *ef);
+static const char *em_audio_channel_name_get (void *ef, int channel);
+static void em_audio_channel_mute_set (void *ef, int mute);
+static int em_audio_channel_mute_get (void *ef);
+static void em_audio_channel_volume_set(void *ef, double vol);
+static double em_audio_channel_volume_get(void *ef);
+static int em_spu_channel_count (void *ef);
+static void em_spu_channel_set (void *ef, int channel);
+static int em_spu_channel_get (void *ef);
+static const char *em_spu_channel_name_get (void *ef, int channel);
+static void em_spu_channel_mute_set (void *ef, int mute);
+static int em_spu_channel_mute_get (void *ef);
+static int em_chapter_count (void *ef);
+static void em_chapter_set (void *ef, int chapter);
+static int em_chapter_get (void *ef);
+static const char *em_chapter_name_get (void *ef, int chapter);
+static void em_speed_set (void *ef, double speed);
+static double em_speed_get (void *ef);
+static int em_eject (void *ef);
+static const char *em_meta_get (void *ef, int meta);
+
+/* internal util calls */
+static void *_em_slave (void *par);
+static void _em_slave_event (void *data, int type, void *arg);
+static int _em_fd_active (void *data, Ecore_Fd_Handler *fdh);
+static void _em_event (void *data, const xine_event_t *event);
+static void _em_module_event (void *data, int type);
+static int _em_fd_ev_active (void *data, Ecore_Fd_Handler *fdh);
+//static int _em_timer (void *data);
+static void *_em_get_pos_len_th(void *par);
+static void _em_get_pos_len (Emotion_Xine_Video *ev);
+
+extern plugin_info_t emotion_xine_plugin_info[];
+
+/* this is a slave controller thread for the xine module - libxine loves
+ * to deadlock, internally stall and otherwise have unpredictable behavior
+ * if we use the main process thread for many things - so a lot will be
+ * farmed off to this slave. its job is to handle opening, closing, file
+ * opening, recoder init etc. and all sorts of things can that often block.
+ * anything this thread needs to return, it will return via the event pipe.
+ */
+static void *
+_em_slave(void *par)
+{
+   Emotion_Xine_Video *ev;
+   void *buf[2];
+   int len;
+
+   ev = (Emotion_Xine_Video *)par;
+   /* each message on the slave pipe is exactly two pointers: the video
+    * object and a heap-allocated Emotion_Xine_Event (_em_slave_event()) */
+   while ((len = read(ev->fd_slave_read, buf, sizeof(buf))) > 0)
+     {
+        if (len == sizeof(buf))
+          {
+             Emotion_Xine_Event *eev;
+
+             ev = buf[0];
+             eev = buf[1];
+             switch (eev->mtype)
+               {
+                case 0: /* noop */
+                  break;
+                case 1: /* init: create the xine engine, drivers, stream */
+                    {
+                       ev->decoder = xine_new();
+                       xine_init(ev->decoder);
+                       xine_register_plugins(ev->decoder, emotion_xine_plugin_info);
+                       if (1)
+                         {
+                            xine_cfg_entry_t cf;
+                            if (xine_config_lookup_entry(ev->decoder, "input.dvd_use_readahead", &cf))
+                              {
+                                 cf.num_value = 1; // 0 or 1
+                                 xine_config_update_entry(ev->decoder, &cf);
+                              }
+                         }
+                       printf("OPEN VIDEO PLUGIN...\n");
+                       if (!ev->opt_no_video)
+                         ev->video = xine_open_video_driver(ev->decoder, "emotion",
+                                                            XINE_VISUAL_TYPE_NONE, ev);
+                       printf("RESULT: xine_open_video_driver() = %p\n", ev->video);
+                       // Let xine autodetect the best audio output driver
+                       if (!ev->opt_no_audio)
+                         ev->audio = xine_open_audio_driver(ev->decoder, NULL, ev);
+                       // ev->audio = xine_open_audio_driver(ev->decoder, "oss", ev);
+                       // dont use alsa - alsa has oss emulation.
+                       // ev->audio = xine_open_audio_driver(ev->decoder, "alsa", ev);
+                       // ev->audio = xine_open_audio_driver(ev->decoder, "arts", ev);
+                       // ev->audio = xine_open_audio_driver(ev->decoder, "esd", ev);
+                       ev->stream = xine_stream_new(ev->decoder, ev->audio, ev->video);
+                       ev->queue = xine_event_new_queue(ev->stream);
+                       xine_event_create_listener_thread(ev->queue, _em_event, ev);
+                       ev->opening = 0;
+                       ev->play_ok = 1;
+                       _em_module_event(ev, 1); /* event - open done */
+                    }
+                  break;
+                case 3: /* shutdown: tear everything down, free ev, exit thread */
+                    {
+                       _em_module_event(ev, 3);
+                       printf("EX shutdown stop\n");
+                       xine_stop(ev->stream);
+                       //   pthread_mutex_lock(&(ev->get_pos_len_mutex));
+                       if (!ev->get_pos_thread_deleted)
+                         {
+                            printf("closing get_pos thread, %p\n", ev);
+                            /* wake the get_pos/len thread so it can notice
+                             * the shutdown, then spin until it is idle */
+                            pthread_mutex_lock(&(ev->get_pos_len_mutex));
+                            pthread_cond_broadcast(&(ev->get_pos_len_cond));
+                            pthread_mutex_unlock(&(ev->get_pos_len_mutex));
+                            while (ev->get_poslen);
+                         }
+                       printf("EX dispose %p\n", ev);
+                       xine_dispose(ev->stream);
+                       printf("EX dispose evq %p\n", ev);
+                       xine_event_dispose_queue(ev->queue);
+                       printf("EX close video drv %p\n", ev);
+                       if (ev->video) xine_close_video_driver(ev->decoder, ev->video);
+                       printf("EX wait for vo to go\n");
+                       /* busy-wait for the video output to release itself */
+                       while (ev->have_vo);
+                       printf("EX vo gone\n");
+                       printf("EX close audio drv %p\n", ev);
+                       if (ev->audio) xine_close_audio_driver(ev->decoder, ev->audio);
+                       printf("EX xine exit %p\n", ev);
+                       xine_exit(ev->decoder);
+                       printf("EX DONE %p\n", ev);
+                       close(ev->fd_write);
+                       close(ev->fd_read);
+                       close(ev->fd_ev_write);
+                       close(ev->fd_ev_read);
+                       close(ev->fd_slave_write);
+                       close(ev->fd_slave_read);
+                       ev->closing = 0;
+                       if (eev->xine_event) free(eev->xine_event);
+                       free(eev);
+                       free(ev);
+                       return NULL;
+                    }
+                  break;
+                case 2: /* file open: xine_event holds the file path */
+                    {
+                       int pos_stream = 0;
+                       int pos_time = 0;
+                       int length_time = 0;
+                       uint32_t v;
+                       char *file;
+
+                       file = eev->xine_event;
+                       printf("OPN STREAM %s\n", file);
+                       if (xine_open(ev->stream, file))
+                         {
+                            if (xine_get_pos_length(ev->stream, &pos_stream, &pos_time, &length_time))
+                              {
+                                 if (length_time == 0)
+                                   {
+                                      /* no time info: fall back to the
+                                       * 0..65535 stream position scale */
+                                      ev->pos = (double)pos_stream / 65535;
+                                      ev->len = 1.0;
+                                      ev->no_time = 1;
+                                   }
+                                 else
+                                   {
+                                      ev->pos = 0.0;
+                                      ev->len = (double)length_time / 1000.0;
+                                   }
+                              }
+                            else
+                              {
+                                 ev->pos = 0.0;
+                                 ev->len = 1.0;
+                              }
+                            /* frame duration is in 1/90000s units */
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_FRAME_DURATION);
+                            if (v > 0) ev->fps = 90000.0 / (double)v;
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_WIDTH);
+                            ev->w = v;
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HEIGHT);
+                            ev->h = v;
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_RATIO);
+                            ev->ratio = (double)v / 10000.0;
+                            ev->just_loaded = 1;
+                            ev->get_poslen = 0;
+                         }
+                       _em_module_event(ev, 2); /* event - open done */
+                    }
+                  break;
+                case 11: /* file close */
+                    {
+                       printf("EX done %p\n", ev);
+                       em_frame_done(ev);
+                       printf("EX stop %p\n", ev);
+                       xine_stop(ev->stream);
+                       printf("EX close %p\n", ev);
+                       xine_close(ev->stream);
+                       printf("EX close done %p\n", ev);
+                       _em_module_event(ev, 11);
+                    }
+                  break;
+                case 4: /* play: xine_event holds the start position */
+                    {
+                       double pos;
+                       int pos_stream, pos_time, length_time;
+
+                       pos = *((double *)eev->xine_event);
+                       /* resuming at the same spot: just unpause */
+                       if ((xine_get_param(ev->stream, XINE_PARAM_SPEED) == XINE_SPEED_PAUSE) &&
+                           (pos == ev->pos) &&
+                           (!ev->just_loaded))
+                         {
+                            xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_NORMAL);
+                         }
+                       else
+                         {
+                            if (ev->no_time)
+                              xine_play(ev->stream, pos * 65535, 0);
+                            else
+                              xine_play(ev->stream, 0, pos * 1000);
+                         }
+                       ev->just_loaded = 0;
+
+                       if (xine_get_pos_length(ev->stream,
+                                               &pos_stream,
+                                               &pos_time,
+                                               &length_time))
+                         {
+                            if (length_time == 0)
+                              {
+                                 ev->pos = (double)pos_stream / 65535;
+                                 ev->len = 1.0;
+                                 ev->no_time = 1;
+                              }
+                            else
+                              {
+                                 ev->pos = (double)pos_time / 1000.0;
+                                 ev->len = (double)length_time / 1000.0;
+                              }
+                         }
+                       _em_module_event(ev, 4);
+                    }
+                  break;
+                case 5: /* stop */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE);
+                       _em_module_event(ev, 5);
+                    }
+                  break;
+                case 6: /* seek: xine_event holds the target position */
+                    {
+                       double pos;
+
+                       pos = *((double *)eev->xine_event);
+                       if (ev->seeked_pos != ev->seek_to_pos)
+                         {
+                            if (ev->no_time)
+                              xine_play(ev->stream, pos * 65535, 0);
+                            else
+                              xine_play(ev->stream, 0, pos * 1000);
+                            /* stay paused if playback was not requested */
+                            if (!ev->play)
+                              xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE);
+                            ev->seeked_pos = ev->seek_to_pos;
+                         }
+                       _em_module_event(ev, 6);
+                    }
+                  break;
+                case 7: /* eject */
+                    {
+                       xine_eject(ev->stream);
+                       _em_module_event(ev, 7);
+                    }
+                  break;
+                case 8: /* spu mute */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_IGNORE_SPU, ev->spu_mute);
+                       _em_module_event(ev, 8);
+                    }
+                  break;
+                case 9: /* spu channel */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_SPU_CHANNEL, ev->spu_channel);
+                       _em_module_event(ev, 9);
+                    }
+                  break;
+                case 10: /* vol: xine_event holds a 0.0..1.0 volume */
+                    {
+                       double vol;
+
+                       vol = *((double *)eev->xine_event);
+                       if (vol < 0.0) vol = 0.0;
+                       if (vol > 1.0) vol = 1.0;
+                       xine_set_param(ev->stream, XINE_PARAM_AUDIO_VOLUME, vol * 100);
+                       _em_module_event(ev, 10);
+                    }
+                  break;
+                case 12: /* audio mute */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_AUDIO_MUTE, ev->audio_mute);
+                    }
+                  break;
+                case 13: /* audio channel (comment previously said "audio mute") */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL, ev->audio_channel);
+                    }
+                  break;
+                case 14: /* video channel (comment previously said "audio mute") */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL, ev->video_channel);
+                    }
+                  break;
+                default:
+                  break;
+               }
+             /* the slave owns the event and its payload after delivery */
+             if (eev->xine_event) free(eev->xine_event);
+             free(eev);
+          }
+     }
+   return NULL;
+}
+/* Queue an event for the slave thread.  DATA is the Emotion_Xine_Video,
+ * TYPE the message code (see _em_slave()), ARG an optional heap payload
+ * whose ownership transfers to the slave on success. */
+static void
+_em_slave_event(void *data, int type, void *arg)
+{
+   void *buf[2];
+   Emotion_Xine_Event *new_ev;
+   Emotion_Xine_Video *ev;
+
+   ev = data;
+   new_ev = calloc(1, sizeof(Emotion_Xine_Event));
+   if (!new_ev)
+     {
+        free(arg); /* payload would otherwise leak */
+        return;
+     }
+   new_ev->mtype = type;
+   new_ev->type = -1;
+   new_ev->xine_event = arg;
+   buf[0] = data;
+   buf[1] = new_ev;
+   /* the original ignored the result of write(); if the pipe is full or
+    * closed the slave never sees the event, so free it here */
+   if (write(ev->fd_slave_write, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
+     {
+        free(new_ev->xine_event);
+        free(new_ev);
+     }
+}
+
+/* Module init: allocate the per-video state, create the three pipes used
+ * to talk between the decoder/slave threads and the main loop, and spawn
+ * the slave and get_pos/len threads.  Returns 1 on success, 0 on failure.
+ * NOTE(review): pipe()/fcntl()/pthread_create() failures are silently
+ * ignored here — confirm whether that is acceptable. */
+static unsigned char
+em_init(Evas_Object *obj, void **emotion_video, Emotion_Module_Options *opt)
+{
+   Emotion_Xine_Video *ev;
+   int fds[2];
+
+   if (!emotion_video) return 0;
+
+   ev = calloc(1, sizeof(Emotion_Xine_Video));
+   if (!ev) return 0;
+   ev->obj = obj;
+
+   /* pipe 1: decoded frames -> main loop (read end watched by ecore) */
+   if (pipe(fds) == 0)
+     {
+        ev->fd_read = fds[0];
+        ev->fd_write = fds[1];
+        fcntl(ev->fd_read, F_SETFL, O_NONBLOCK);
+        ev->fd_handler = ecore_main_fd_handler_add(ev->fd_read, ECORE_FD_READ,
+                                                   _em_fd_active, ev,
+                                                   NULL, NULL);
+        ecore_main_fd_handler_active_set(ev->fd_handler, ECORE_FD_READ);
+     }
+   /* pipe 2: xine events -> main loop */
+   if (pipe(fds) == 0)
+     {
+        ev->fd_ev_read = fds[0];
+        ev->fd_ev_write = fds[1];
+        fcntl(ev->fd_ev_read, F_SETFL, O_NONBLOCK);
+        ev->fd_ev_handler = ecore_main_fd_handler_add(ev->fd_ev_read,
+                                                      ECORE_FD_READ, _em_fd_ev_active, ev, NULL, NULL);
+        ecore_main_fd_handler_active_set(ev->fd_ev_handler, ECORE_FD_READ);
+     }
+   /* pipe 3: commands -> slave thread (write end non-blocking) */
+   if (pipe(fds) == 0)
+     {
+        ev->fd_slave_read = fds[0];
+        ev->fd_slave_write = fds[1];
+        fcntl(ev->fd_slave_write, F_SETFL, O_NONBLOCK);
+     }
+   ev->delete_me = 0;
+   ev->get_pos_thread_deleted = 0;
+   ev->opening = 1;
+   ev->play_ok = 0;
+
+   if (opt)
+     {
+        ev->opt_no_audio = opt->no_audio;
+        ev->opt_no_video = opt->no_video;
+     }
+
+   /* helper thread that answers position/length queries */
+   pthread_cond_init(&(ev->get_pos_len_cond), NULL);
+   pthread_mutex_init(&(ev->get_pos_len_mutex), NULL);
+   pthread_create(&ev->get_pos_len_th, NULL, _em_get_pos_len_th, ev);
+
+   /* slave thread that does all blocking xine work (see _em_slave()) */
+   pthread_create(&ev->slave_th, NULL, _em_slave, ev);
+   pthread_detach(ev->slave_th);
+   _em_slave_event(ev, 1, NULL); /* queue the init message */
+
+   *emotion_video = ev;
+   return 1;
+}
+
+/* Module shutdown: detach the fd handlers and ask the slave thread to
+ * tear down xine; the slave frees EV itself.  Returns 1. */
+static int
+em_shutdown(void *ef)
+{
+   Emotion_Xine_Video *ev;
+
+   ev = (Emotion_Xine_Video *)ef;
+   if (!ev) return 0; /* guard, matching the other module entry points */
+   ev->closing = 1;   /* was assigned twice; once is enough */
+   ev->delete_me = 1;
+   printf("EXM del fds %p\n", ev);
+   ecore_main_fd_handler_del(ev->fd_handler);
+   ecore_main_fd_handler_del(ev->fd_ev_handler);
+
+   _em_slave_event(ev, 3, NULL);
+   printf("EXM done %p\n", ev);
+   return 1;
+}
+
+/* Ask the slave thread to open FILE.  OBJ is unused here.  Returns 1 if
+ * the request was queued, 0 on bad arguments. */
+static unsigned char
+em_file_open(const char *file, Evas_Object *obj, void *ef)
+{
+   Emotion_Xine_Video *ev;
+
+   ev = (Emotion_Xine_Video *)ef;
+   /* also reject a NULL path: strdup(NULL) is undefined behavior */
+   if ((!ev) || (!file)) return 0;
+   _em_slave_event(ev, 2, strdup(file));
+   return 1;
+}
+
+/* Ask the slave thread to stop and close the current stream. */
+static void
+em_file_close(void *ef)
+{
+   Emotion_Xine_Video *ev = (Emotion_Xine_Video *)ef;
+
+   if (ev) _em_slave_event(ev, 11, NULL);
+}
+
+/* Ask the slave thread to start playback at POS (seconds). */
+static void
+em_play(void *ef, double pos)
+{
+   Emotion_Xine_Video *ev;
+   double *ppos;
+
+   ev = (Emotion_Xine_Video *)ef;
+   ppos = malloc(sizeof(double));
+   if (!ppos) return; /* original dereferenced an unchecked malloc() */
+   *ppos = pos;
+   ev->play = 1;
+   ev->play_ok = 0;
+   _em_slave_event(ev, 4, ppos); /* slave frees ppos */
+}
+
+/* Clear the play flags and ask the slave thread to pause the stream. */
+static void
+em_stop(void *ef)
+{
+   Emotion_Xine_Video *ev = (Emotion_Xine_Video *)ef;
+
+   ev->play = 0;
+   ev->play_ok = 0;
+   _em_slave_event(ev, 5, NULL);
+}
+
+/* Report the last known frame size; either out-pointer may be NULL. */
+static void
+em_size_get(void *ef, int *w, int *h)
+{
+   Emotion_Xine_Video *ev = (Emotion_Xine_Video *)ef;
+
+   if (w) *w = ev->w;
+   if (h) *h = ev->h;
+}
+
+/* Ask the slave thread to seek to POS (seconds).  A request for the
+ * position already pending is dropped. */
+static void
+em_pos_set(void *ef, double pos)
+{
+   Emotion_Xine_Video *ev;
+   double *ppos;
+
+   ev = (Emotion_Xine_Video *)ef;
+   /* NOTE(review): exact double compare — assumes the caller passes back
+    * the very value it last set; confirm intended */
+   if (pos == ev->seek_to_pos) return;
+   ppos = malloc(sizeof(double));
+   if (!ppos) return; /* original dereferenced an unchecked malloc() */
+   *ppos = pos;
+   ev->seek_to_pos = pos;
+   _em_slave_event(ev, 6, ppos); /* slave frees ppos */
+}
+
+/* Remember the requested visualization mode. */
+static void
+em_vis_set(void *ef,
+           Emotion_Vis vis)
+{
+   Emotion_Xine_Video *ev = (Emotion_Xine_Video *)ef;
+
+   if (ev->vis != vis) ev->vis = vis;
+}
+
+/* Stream length in seconds (1.0 when the stream has no time info). */
+static double
+em_len_get(void *ef)
+{
+   return ((Emotion_Xine_Video *)ef)->len;
+}
+
+static int
+em_fps_num_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return (int)(ev->fps * 10000.0);
+}
+
static int
em_fps_den_get(void *ef)
{
   /* Fixed frame-rate denominator matching em_fps_num_get()'s scaling.
    * (Removed an unused local that only triggered compiler warnings.) */
   (void)ef;
   return 10000;
}
+
+static double
+em_fps_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->fps;
+}
+
+static double
+em_pos_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->pos;
+}
+
+static Emotion_Vis
+em_vis_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+
+ return ev->vis;
+}
+
+static double
+em_ratio_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->ratio;
+}
+
+static int
+em_video_handled(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED));
+}
+
+static int
+em_audio_handled(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_AUDIO) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_HANDLED));
+}
+
+static int
+em_seekable(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return xine_get_stream_info(ev->stream, XINE_STREAM_INFO_SEEKABLE);
+}
+
+static void
+em_frame_done(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->cur_frame)
+ {
+ ev->fq--;
+ if (ev->cur_frame->done_func)
+ ev->cur_frame->done_func(ev->cur_frame->done_data);
+ ev->cur_frame = NULL;
+ }
+}
+
+static Emotion_Format
+em_format_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+ volatile Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+ if (fr) return fr->format;
+ return EMOTION_FORMAT_YV12;
+}
+
+static void
+em_video_data_size_get(void *ef, int *w, int *h)
+{
+ Emotion_Xine_Video *ev;
+ volatile Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+ if (!fr)
+ {
+ if (w) *w = 0;
+ if (h) *h = 0;
+ return;
+ }
+ if (w) *w = fr->w;
+ if (h) *h = fr->h;
+}
+
+static int
+em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows)
+{
+ Emotion_Xine_Video *ev;
+ volatile Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+ if (!fr) return 0;
+ if (fr->y)
+ {
+ int i;
+
+ for (i = 0; i < h; i++) yrows[i] = fr->y + (i * fr->y_stride);
+ for (i = 0; i < (h / 2); i++) urows[i] = fr->u + (i * fr->u_stride);
+ for (i = 0; i < (h / 2); i++) vrows[i] = fr->v + (i * fr->v_stride);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+em_bgra_data_get(void *ef, unsigned char **bgra_data)
+{
+ Emotion_Xine_Video *ev;
+ volatile Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+ if (!fr) return 0;
+ if (fr->bgra_data)
+ {
+ *bgra_data = fr->bgra_data;
+ return 1;
+ }
+ return 0;
+}
+
static void
em_event_feed(void *ef, int event)
{
   /* Translate an EMOTION_EVENT_* code into the corresponding
    * XINE_EVENT_INPUT_* event and send it to the stream (DVD menus,
    * navigation, number keys). Unknown codes are ignored. */
   Emotion_Xine_Video *ev;
   xine_event_t xine_event;

   ev = (Emotion_Xine_Video *)ef;
   /* Not safe to poke the stream while it is still opening or busy. */
   if ((ev->opening) || (!ev->play_ok)) return;
   xine_event.data_length = 0;
   xine_event.data = NULL;
   xine_event.stream = ev->stream;
   gettimeofday(&xine_event.tv, NULL);
   switch (event)
     {
      case EMOTION_EVENT_MENU1:
	xine_event.type = XINE_EVENT_INPUT_MENU1;
	break;
      case EMOTION_EVENT_MENU2:
	xine_event.type = XINE_EVENT_INPUT_MENU2;
	break;
      case EMOTION_EVENT_MENU3:
	xine_event.type = XINE_EVENT_INPUT_MENU3;
	break;
      case EMOTION_EVENT_MENU4:
	xine_event.type = XINE_EVENT_INPUT_MENU4;
	break;
      case EMOTION_EVENT_MENU5:
	xine_event.type = XINE_EVENT_INPUT_MENU5;
	break;
      case EMOTION_EVENT_MENU6:
	xine_event.type = XINE_EVENT_INPUT_MENU6;
	break;
      case EMOTION_EVENT_MENU7:
	xine_event.type = XINE_EVENT_INPUT_MENU7;
	break;
      case EMOTION_EVENT_UP:
	xine_event.type = XINE_EVENT_INPUT_UP;
	break;
      case EMOTION_EVENT_DOWN:
	xine_event.type = XINE_EVENT_INPUT_DOWN;
	break;
      case EMOTION_EVENT_LEFT:
	xine_event.type = XINE_EVENT_INPUT_LEFT;
	break;
      case EMOTION_EVENT_RIGHT:
	xine_event.type = XINE_EVENT_INPUT_RIGHT;
	break;
      case EMOTION_EVENT_SELECT:
	xine_event.type = XINE_EVENT_INPUT_SELECT;
	break;
      case EMOTION_EVENT_NEXT:
	xine_event.type = XINE_EVENT_INPUT_NEXT;
	break;
      case EMOTION_EVENT_PREV:
	xine_event.type = XINE_EVENT_INPUT_PREVIOUS;
	break;
      case EMOTION_EVENT_ANGLE_NEXT:
	xine_event.type = XINE_EVENT_INPUT_ANGLE_NEXT;
	break;
      case EMOTION_EVENT_ANGLE_PREV:
	xine_event.type = XINE_EVENT_INPUT_ANGLE_PREVIOUS;
	break;
      case EMOTION_EVENT_FORCE:
	xine_event.type = XINE_EVENT_INPUT_BUTTON_FORCE;
	break;
      case EMOTION_EVENT_0:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_0;
	break;
      case EMOTION_EVENT_1:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_1;
	break;
      case EMOTION_EVENT_2:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_2;
	break;
      case EMOTION_EVENT_3:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_3;
	break;
      case EMOTION_EVENT_4:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_4;
	break;
      case EMOTION_EVENT_5:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_5;
	break;
      case EMOTION_EVENT_6:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_6;
	break;
      case EMOTION_EVENT_7:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_7;
	break;
      case EMOTION_EVENT_8:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_8;
	break;
      case EMOTION_EVENT_9:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_9;
	break;
      case EMOTION_EVENT_10:
	xine_event.type = XINE_EVENT_INPUT_NUMBER_10_ADD;
	break;
      default:
	/* unknown emotion event: nothing sensible to send */
	return;
	break;
     }
   xine_event_send(ev->stream, &xine_event);
}
+
+static void
+em_event_mouse_button_feed(void *ef, int button, int x, int y)
+{
+ Emotion_Xine_Video *ev;
+ xine_event_t xine_event;
+ xine_input_data_t xine_input;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if ((ev->opening) || (!ev->play_ok)) return;
+ xine_event.stream = ev->stream;
+ gettimeofday(&xine_event.tv, NULL);
+ xine_event.type = XINE_EVENT_INPUT_MOUSE_BUTTON;
+ xine_input.button = 1;
+ xine_input.x = x;
+ xine_input.y = y;
+ xine_event.data = &xine_input;
+ xine_event.data_length = sizeof(xine_input);
+ xine_event_send(ev->stream, &xine_event);
+}
+
+static void
+em_event_mouse_move_feed(void *ef, int x, int y)
+{
+ Emotion_Xine_Video *ev;
+ xine_event_t xine_event;
+ xine_input_data_t xine_input;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if ((ev->opening) || (!ev->play_ok)) return;
+ xine_event.stream = ev->stream;
+ gettimeofday(&xine_event.tv, NULL);
+ xine_event.type = XINE_EVENT_INPUT_MOUSE_MOVE;
+ xine_input.button = 0;
+ xine_input.x = x;
+ xine_input.y = y;
+ xine_event.data = &xine_input;
+ xine_event.data_length = sizeof(xine_input);
+ xine_event_send(ev->stream, &xine_event);
+}
+
+static int
+em_video_channel_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+ int v;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_CHANNELS);
+ if ((v < 1) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) return 1;
+ return v;
+}
+
+static void
+em_video_channel_set(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (channel < 0) channel = 0;
+ ev->video_channel = channel;
+ _em_slave_event(ev, 14, NULL);
+}
+
+static int
+em_video_channel_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return xine_get_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL);
+}
+
static const char *
em_video_channel_name_get(void *ef, int channel)
{
   /* Video channels have no names in the xine backend.
    * (Removed an unused local that only triggered compiler warnings.) */
   (void)ef;
   (void)channel;
   return NULL;
}
+
+static void
+em_video_channel_mute_set(void *ef, int mute)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->video_mute = mute;
+}
+
+static int
+em_video_channel_mute_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->video_mute;
+}
+
+static int
+em_audio_channel_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_AUDIO_CHANNEL);
+}
+
+static void
+em_audio_channel_set(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (channel < -1) channel = -1;
+ ev->audio_channel = channel;
+ _em_slave_event(ev, 13, NULL);
+}
+
+static int
+em_audio_channel_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return xine_get_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL);
+}
+
+static const char *
+em_audio_channel_name_get(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+ static char lang[XINE_LANG_MAX + 1];
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening) return NULL;
+ lang[0] = 0;
+ if (xine_get_audio_lang(ev->stream, channel, lang)) return lang;
+ return NULL;
+}
+
+static void
+em_audio_channel_mute_set(void *ef, int mute)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->audio_mute = mute;
+ _em_slave_event(ev, 12, NULL);
+}
+
+static int
+em_audio_channel_mute_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->audio_mute;
+}
+
+static void
+em_audio_channel_volume_set(void *ef, double vol)
+{
+ Emotion_Xine_Video *ev;
+ double *ppos;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ppos = malloc(sizeof(double));
+ *ppos = vol;
+ _em_slave_event(ev, 10, ppos);
+}
+
+static double
+em_audio_channel_volume_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return (double)xine_get_param(ev->stream, XINE_PARAM_AUDIO_VOLUME) / 100.0;
+}
+
+static int
+em_spu_channel_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_SPU_CHANNEL);
+}
+
+static void
+em_spu_channel_set(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (channel < 0) channel = 0;
+ ev->spu_channel = channel;
+ _em_slave_event(ev, 9, NULL);
+}
+
+static int
+em_spu_channel_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ return xine_get_param(ev->stream, XINE_PARAM_SPU_CHANNEL);
+}
+
+static const char *
+em_spu_channel_name_get(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+ static char lang[XINE_LANG_MAX + 1];
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening) return NULL;
+ lang[0] = 0;
+ if (xine_get_spu_lang(ev->stream, channel, lang)) return lang;
+ return NULL;
+}
+
+static void
+em_spu_channel_mute_set(void *ef, int mute)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->spu_mute = mute;
+ _em_slave_event(ev, 8, NULL);
+}
+
+static int
+em_spu_channel_mute_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->spu_mute;
+}
+
+static int
+em_chapter_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->opening || (!ev->play_ok)) return 0;
+ if (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_CHAPTERS))
+ return 99;
+ return 0;
+}
+
static void
em_chapter_set(void *ef, int chapter)
{
   /* Chapter seeking is not implemented in the xine backend.
    * (Removed an unused local that only triggered compiler warnings.) */
   (void)ef;
   (void)chapter;
}
+
static int
em_chapter_get(void *ef)
{
   /* Chapter tracking is not implemented; always report chapter 0.
    * (Removed an unused local that only triggered compiler warnings.) */
   (void)ef;
   return 0;
}
+
static const char *
em_chapter_name_get(void *ef, int chapter)
{
   /* Chapter names are not available from this backend.
    * (Removed an unused local that only triggered compiler warnings.) */
   (void)ef;
   (void)chapter;
   return NULL;
}
+
static void
em_speed_set(void *ef, double speed)
{
   /* Variable playback speed is not implemented in the xine backend.
    * (Removed an unused local that only triggered compiler warnings.) */
   (void)ef;
   (void)speed;
}
+
static double
em_speed_get(void *ef)
{
   /* Playback speed is fixed at 1.0 in the xine backend.
    * (Removed an unused local that only triggered compiler warnings.) */
   (void)ef;
   return 1.0;
}
+
+static int
+em_eject(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ _em_slave_event(ev, 7, NULL);
+ return 1;
+}
+
static const char *
em_meta_get(void *ef, int meta)
{
   /* Map an emotion META_TRACK_* request onto xine's meta-info query.
    * Returns NULL while the stream is not ready or for unknown keys.
    * The returned string is owned by xine; do not free it. */
   Emotion_Xine_Video *ev;

   ev = (Emotion_Xine_Video *)ef;
   if ((ev->opening) || (!ev->play_ok)) return NULL;
   switch (meta)
     {
      case META_TRACK_TITLE:
	return xine_get_meta_info(ev->stream, XINE_META_INFO_TITLE);
	break;
      case META_TRACK_ARTIST:
	return xine_get_meta_info(ev->stream, XINE_META_INFO_ARTIST);
	break;
      case META_TRACK_GENRE:
	return xine_get_meta_info(ev->stream, XINE_META_INFO_GENRE);
	break;
      case META_TRACK_COMMENT:
	return xine_get_meta_info(ev->stream, XINE_META_INFO_COMMENT);
	break;
      case META_TRACK_ALBUM:
	return xine_get_meta_info(ev->stream, XINE_META_INFO_ALBUM);
	break;
      case META_TRACK_YEAR:
	return xine_get_meta_info(ev->stream, XINE_META_INFO_YEAR);
	break;
      case META_TRACK_DISCID:
	return xine_get_meta_info(ev->stream, XINE_META_INFO_CDINDEX_DISCID);
	break;
      default:
	break;
     }
   return NULL;
}
+
static int
_em_fd_active(void *data, Ecore_Fd_Handler *fdh)
{
   /* Main-loop callback for the frame pipe: the decoder side writes one
    * Emotion_Xine_Video_Frame pointer per decoded frame. Drain the pipe
    * and push each frame to the emotion frontend. */
   void *buf;
   int fd, len;
   Emotion_Xine_Video_Frame *fr;
   Emotion_Xine_Video *ev;

   ev = data;
   fd = ecore_main_fd_handler_fd_get(fdh);
   while ((len = read(fd, &buf, sizeof(buf))) > 0)
     {
	if (len == sizeof(buf))  /* only a complete pointer payload counts */
	  {
	     fr = buf;
	     /* look the video handle up from the frame's evas object;
	      * NOTE(review): this intentionally replaces the ev derived
	      * from data above */
	     ev = _emotion_video_get(fr->obj);
	     if (ev)
	       {
		  em_frame_done(ev);    /* release the previously held frame */
		  ev->cur_frame = fr;
		  _em_get_pos_len(ev);  /* kick an async pos/len refresh */
		  if ((xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) &&
		      (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED)))
		    {
		       /* muted video: drop the frame immediately */
		       if (ev->video_mute) em_frame_done(ev);
		       else _emotion_frame_new(fr->obj);
		    }
		  _emotion_frame_resize(fr->obj, fr->w, fr->h, fr->ratio);
		  _emotion_video_pos_update(fr->obj, ev->pos, ev->len);
	       }
	  }
     }
   return 1;  /* keep the fd handler installed */
}
+
+static void
+_em_event(void *data, const xine_event_t *event)
+{
+ void *buf[2];
+ Emotion_Xine_Event *new_ev;
+ Emotion_Xine_Video *ev;
+
+ ev = data;
+ new_ev = calloc(1, sizeof(Emotion_Xine_Event));
+ if (!new_ev) return;
+ new_ev->mtype = 0;
+ new_ev->type = event->type;
+ if (event->data)
+ {
+ new_ev->xine_event = malloc(event->data_length);
+ if (!new_ev->xine_event)
+ {
+ free(new_ev);
+ return;
+ }
+ memcpy(new_ev->xine_event, event->data, event->data_length);
+ }
+ buf[0] = data;
+ buf[1] = new_ev;
+ write(ev->fd_ev_write, buf, sizeof(buf));
+}
+
+static void
+_em_module_event(void *data, int type)
+{
+ void *buf[2];
+ Emotion_Xine_Event *new_ev;
+ Emotion_Xine_Video *ev;
+
+ ev = data;
+ new_ev = calloc(1, sizeof(Emotion_Xine_Event));
+ if (!new_ev) return;
+ new_ev->mtype = type;
+ new_ev->type = -1;
+ buf[0] = data;
+ buf[1] = new_ev;
+ write(ev->fd_ev_write, buf, sizeof(buf));
+}
+
+static int
+_em_fd_ev_active(void *data, Ecore_Fd_Handler *fdh)
+{
+ Emotion_Xine_Video *ev;
+ int fd, len;
+ void *buf[2];
+
+ ev = data;
+ fd = ecore_main_fd_handler_fd_get(fdh);
+ while ((len = read(fd, buf, sizeof(buf))) > 0)
+ {
+ if (len == sizeof(buf))
+ {
+ Emotion_Xine_Event *eev;
+
+ ev = buf[0];
+ eev = buf[1];
+ if (eev->mtype != 0)
+ {
+ switch (eev->mtype)
+ {
+ case 1: /* init done */
+ ev->play_ok = 1;
+ break;
+ case 2: /* open done */
+ ev->play_ok = 1;
+ break;
+ case 3: /* shutdown done */
+ ev->play_ok = 1;
+ break;
+ case 4: /* play done */
+ ev->play_ok = 1;
+ break;
+ case 5: /* stop done */
+ ev->play_ok = 1;
+ break;
+ case 6: /* seek done */
+ ev->play_ok = 1;
+ break;
+ case 7: /* eject done */
+ ev->play_ok = 1;
+ break;
+ case 8: /* spu mute done */
+ ev->play_ok = 1;
+ break;
+ case 9: /* channel done */
+ ev->play_ok = 1;
+ break;
+ case 10: /* volume done */
+ ev->play_ok = 1;
+ break;
+ case 11: /* close done */
+ ev->play_ok = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ else
+ {
+ switch (eev->type)
+ {
+ case XINE_EVENT_UI_PLAYBACK_FINISHED:
+ {
+ ev->play = 0;
+ _emotion_decode_stop(ev->obj);
+ _emotion_playback_finished(ev->obj);
+ }
+ break;
+ case XINE_EVENT_UI_CHANNELS_CHANGED:
+ {
+ _emotion_channels_change(ev->obj);
+ }
+ break;
+ case XINE_EVENT_UI_SET_TITLE:
+ {
+ xine_ui_data_t *e;
+
+ e = (xine_ui_data_t *)eev->xine_event;
+ _emotion_title_set(ev->obj, e->str);
+ }
+ break;
+ case XINE_EVENT_FRAME_FORMAT_CHANGE:
+ {
+ xine_format_change_data_t *e;
+
+ e = (xine_format_change_data_t *)eev->xine_event;
+ }
+ break;
+ case XINE_EVENT_UI_MESSAGE:
+ {
+ xine_ui_message_data_t *e;
+
+ e = (xine_ui_message_data_t *)eev->xine_event;
+ printf("EV: UI Message [FIXME: break this out to emotion api]\n");
+ // e->type = error type(XINE_MSG_NO_ERROR, XINE_MSG_GENERAL_WARNING, XINE_MSG_UNKNOWN_HOST etc.)
+ // e->messages is a list of messages DOUBLE null terminated
+ }
+ break;
+ case XINE_EVENT_AUDIO_LEVEL:
+ {
+ xine_audio_level_data_t *e;
+
+ e = (xine_audio_level_data_t *)eev->xine_event;
+ _emotion_audio_level_change(ev->obj);
+ printf("EV: Audio Level [FIXME: break this out to emotion api]\n");
+ // e->left (0->100)
+ // e->right
+ // e->mute
+ }
+ break;
+ case XINE_EVENT_PROGRESS:
+ {
+ xine_progress_data_t *e;
+
+ e = (xine_progress_data_t *)eev->xine_event;
+ printf("PROGRESS: %i\n", e->percent);
+ _emotion_progress_set(ev->obj, (char *)e->description, (double)e->percent / 100.0);
+ }
+ break;
+ case XINE_EVENT_MRL_REFERENCE:
+ {
+ xine_mrl_reference_data_t *e;
+
+ e = (xine_mrl_reference_data_t *)eev->xine_event;
+ _emotion_file_ref_set(ev->obj, e->mrl, e->alternative);
+ }
+ break;
+ case XINE_EVENT_UI_NUM_BUTTONS:
+ {
+ xine_ui_data_t *e;
+
+ e = (xine_ui_data_t *)eev->xine_event;
+ _emotion_spu_button_num_set(ev->obj, e->num_buttons);
+ }
+ break;
+ case XINE_EVENT_SPU_BUTTON:
+ {
+ xine_spu_button_t *e;
+
+ e = (xine_spu_button_t *)eev->xine_event;
+ if (e->direction == 1)
+ _emotion_spu_button_set(ev->obj, e->button);
+ else
+ _emotion_spu_button_set(ev->obj, -1);
+ }
+ break;
+ case XINE_EVENT_DROPPED_FRAMES:
+ {
+ xine_dropped_frames_t *e;
+
+ e = (xine_dropped_frames_t *)eev->xine_event;
+ printf("EV: Dropped Frames (skipped %i) (discarded %i) [FIXME: break this out to the emotion api]\n", e->skipped_frames, e->discarded_frames);
+ // e->skipped_frames = % frames skipped * 10
+ // e->discarded_frames = % frames skipped * 10
+ }
+ break;
+ default:
+ // printf("EV: unknown event type %i\n", eev->type);
+ break;
+ }
+ }
+ if (eev->xine_event) free(eev->xine_event);
+ free(eev);
+ }
+ }
+ return 1;
+}
+
static void *
_em_get_pos_len_th(void *par)
{
   /* Worker thread: waits on a condition variable and, when woken with
    * ev->get_poslen set, queries xine for the stream position/length.
    * The query can block, which is why it is kept off the main loop. */
   Emotion_Xine_Video *ev;

   ev = (Emotion_Xine_Video *)par;

   for (;;)
     {
	/* NOTE(review): spurious wakeups are harmless here because the
	 * work is gated on ev->get_poslen below. */
	pthread_mutex_lock(&(ev->get_pos_len_mutex));
	pthread_cond_wait(&(ev->get_pos_len_cond), &(ev->get_pos_len_mutex));
	pthread_mutex_unlock(&(ev->get_pos_len_mutex));
	if (ev->get_poslen)
	  {
	     int pos_stream = 0;
	     int pos_time = 0;
	     int length_time = 0;

	     if (xine_get_pos_length(ev->stream, &pos_stream, &pos_time, &length_time))
	       {
		  if (length_time == 0)
		    {
		       /* no time-based length (e.g. live stream): fall back
			* to xine's 0..65535 stream position and a nominal
			* length of 1.0 */
		       ev->pos = (double)pos_stream / 65535;
		       ev->len = 1.0;
		       ev->no_time = 1;
		    }
		  else
		    {
		       /* xine reports milliseconds; emotion uses seconds */
		       ev->pos = (double)pos_time / 1000.0;
		       ev->len = (double)length_time / 1000.0;
		       ev->no_time = 0;
		    }
	       }
	     ev->get_poslen = 0;
	     //printf("get pos %3.3f\n", ev->pos);
	  }
	if (ev->delete_me)
	  {
	     /* owner is tearing down: acknowledge and exit the thread */
	     ev->get_pos_thread_deleted = 1;
	     return NULL;
	  }
     }
   return NULL;
}
+
static void
_em_get_pos_len(Emotion_Xine_Video *ev)
{
   /* Ask the pos/len thread to refresh ev->pos / ev->len asynchronously.
    * A request already in flight is not re-signalled. */
   if (ev->get_poslen) return;
   ev->get_poslen = 1;
   /* Take the mutex around the broadcast so the wakeup cannot race the
    * worker entering pthread_cond_wait() (missed-signal guard). */
   pthread_mutex_lock(&(ev->get_pos_len_mutex));
   pthread_cond_broadcast(&(ev->get_pos_len_cond));
   pthread_mutex_unlock(&(ev->get_pos_len_mutex));
}
+
/* Backend vtable handed to the emotion frontend by module_open(). Entries
 * are positional: they must stay in the exact order the Emotion_Video_Module
 * struct declares its members. */
static Emotion_Video_Module em_module =
{
   em_init, /* init */
   em_shutdown, /* shutdown */
   em_file_open, /* file_open */
   em_file_close, /* file_close */
   em_play, /* play */
   em_stop, /* stop */
   em_size_get, /* size_get */
   em_pos_set, /* pos_set */
   em_vis_set, /* vis_set */
   em_len_get, /* len_get */
   em_fps_num_get, /* fps_num_get */
   em_fps_den_get, /* fps_den_get */
   em_fps_get, /* fps_get */
   em_pos_get, /* pos_get */
   em_vis_get, /* vis_get */
   em_ratio_get, /* ratio_get */
   em_video_handled, /* video_handled */
   em_audio_handled, /* audio_handled */
   em_seekable, /* seekable */
   em_frame_done, /* frame_done */
   em_format_get, /* format_get */
   em_video_data_size_get, /* video_data_size_get */
   em_yuv_rows_get, /* yuv_rows_get */
   em_bgra_data_get, /* bgra_data_get */
   em_event_feed, /* event_feed */
   em_event_mouse_button_feed, /* event_mouse_button_feed */
   em_event_mouse_move_feed, /* event_mouse_move_feed */
   em_video_channel_count, /* video_channel_count */
   em_video_channel_set, /* video_channel_set */
   em_video_channel_get, /* video_channel_get */
   em_video_channel_name_get, /* video_channel_name_get */
   em_video_channel_mute_set, /* video_channel_mute_set */
   em_video_channel_mute_get, /* video_channel_mute_get */
   em_audio_channel_count, /* audio_channel_count */
   em_audio_channel_set, /* audio_channel_set */
   em_audio_channel_get, /* audio_channel_get */
   em_audio_channel_name_get, /* audio_channel_name_get */
   em_audio_channel_mute_set, /* audio_channel_mute_set */
   em_audio_channel_mute_get, /* audio_channel_mute_get */
   em_audio_channel_volume_set, /* audio_channel_volume_set */
   em_audio_channel_volume_get, /* audio_channel_volume_get */
   em_spu_channel_count, /* spu_channel_count */
   em_spu_channel_set, /* spu_channel_set */
   em_spu_channel_get, /* spu_channel_get */
   em_spu_channel_name_get, /* spu_channel_name_get */
   em_spu_channel_mute_set, /* spu_channel_mute_set */
   em_spu_channel_mute_get, /* spu_channel_mute_get */
   em_chapter_count, /* chapter_count */
   em_chapter_set, /* chapter_set */
   em_chapter_get, /* chapter_get */
   em_chapter_name_get, /* chapter_name_get */
   em_speed_set, /* speed_set */
   em_speed_get, /* speed_get */
   em_eject, /* eject */
   em_meta_get, /* meta_get */

   NULL /* handle */
};
+
+unsigned char
+module_open(Evas_Object *obj, const Emotion_Video_Module **module, void **video, Emotion_Module_Options *opt)
+{
+ if (!module)
+ return 0;
+
+ if (!em_module.init(obj, video, opt))
+ return 0;
+
+ *module = &em_module;
+ return 1;
+}
+
void
module_close(Emotion_Video_Module *module, void *video)
{
   /* Plugin exit point: tear down the backend instance.
    * NOTE(review): the module argument is unused; shutdown always goes
    * through the static em_module vtable. */
   em_module.shutdown(video);
}
+
#if 0
/* Disabled debug helper: dumps every stream/meta/param value xine exposes
 * for a video handle to stdout. Compiled out; kept for manual debugging.
 * NOTE(review): xine_get_meta_info() returns const char * — these char *
 * locals would need const qualifiers if this is ever re-enabled. */
void
em_debug(Emotion_Xine_Video *ev)
{
   int has_chapters = 0;
   int max_spu = 0;
   int max_audio = 0;
   int video_channels = 0;
   int video_streams = 0;
   int video_seekable = 0;
   char *title;
   char *comment;
   char *artist;
   char *genre;
   char *album;
   char *year;
   char *cdindex_discid;
   int video_channel = 0;
   int audio_channel = 0;
   int spu_channel = 0;
   int video_ratio = 0;
   int audio_mode = 0;

// return;
   has_chapters = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_CHAPTERS);
   max_spu = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_SPU_CHANNEL);
   max_audio = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_AUDIO_CHANNEL);
   video_channels = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_CHANNELS);
   video_streams = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_STREAMS);
   video_seekable = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_SEEKABLE);
   title = xine_get_meta_info(ev->stream, XINE_META_INFO_TITLE);
   comment = xine_get_meta_info(ev->stream, XINE_META_INFO_COMMENT);
   artist = xine_get_meta_info(ev->stream, XINE_META_INFO_ARTIST);
   genre = xine_get_meta_info(ev->stream, XINE_META_INFO_GENRE);
   album = xine_get_meta_info(ev->stream, XINE_META_INFO_ALBUM);
   year = xine_get_meta_info(ev->stream, XINE_META_INFO_YEAR);
   cdindex_discid = xine_get_meta_info(ev->stream, XINE_META_INFO_CDINDEX_DISCID);
   video_channel = xine_get_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL);
   audio_channel = xine_get_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL);
   spu_channel = xine_get_param(ev->stream, XINE_PARAM_SPU_CHANNEL);
   video_ratio = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_RATIO);
   audio_mode = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_MODE);
   printf("has_chapters = %i\n", has_chapters);
   printf("max_spu = %i\n", max_spu);
   printf("max_audio = %i\n", max_audio);
   printf("video_channels = %i\n", video_channels);
   printf("video_streams = %i\n", video_streams);
   printf("video_seekable = %i\n", video_seekable);
   printf("title = %s\n", title);
   printf("comment = %s\n", comment);
   printf("artist = %s\n", artist);
   printf("genre = %s\n", genre);
   printf("album = %s\n", album);
   printf("year = %s\n", year);
   printf("cdindex_discid = %s\n", cdindex_discid);
   printf("video_channel = %i\n", video_channel);
   printf("audio_channel = %i\n", audio_channel);
   printf("spu_channels = %i\n", spu_channel);
   printf("video_ratio = %i\n", video_ratio);
   printf("audio_mode = %i\n", audio_mode);
   {
      int i;

      /* enumerate the language of every audio and subtitle channel */
      for (i = 0; i <= max_audio; i++)
	{
	   char lang[XINE_LANG_MAX + 1];

	   lang[0] = 0;
	   printf("  AUDIO %i = ", i);
	   if (xine_get_audio_lang(ev->stream, i, lang))
	     printf("%s\n", lang);
	   else
	     printf("NONE\n");
	}
      for (i = 0; i <= max_spu; i++)
	{
	   char lang[XINE_LANG_MAX + 1];

	   lang[0] = 0;
	   printf("  SPU %i = ", i);
	   if (xine_get_spu_lang(ev->stream, i, lang))
	     printf("%s\n", lang);
	   else
	     printf("NONE\n");
	}
   }
}
#endif
--- /dev/null
+#ifndef EMOTION_XINE_H
+#define EMOTION_XINE_H
+
+#include <xine.h>
+#include <xine/xine_plugin.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+typedef struct _Emotion_Xine_Video Emotion_Xine_Video;
+typedef struct _Emotion_Xine_Video_Frame Emotion_Xine_Video_Frame;
+typedef struct _Emotion_Xine_Event Emotion_Xine_Event;
+
/* Per-video-object state shared between the ecore main loop, the slave
 * thread, the pos/len thread and xine's own threads. Fields touched from
 * more than one thread are marked volatile. */
struct _Emotion_Xine_Video
{
   xine_t *decoder;              /* xine engine instance */
   xine_video_port_t *video;     /* xine video output port */
   xine_audio_port_t *audio;     /* xine audio output port */
   xine_stream_t *stream;        /* the open media stream */
   xine_event_queue_t *queue;    /* event queue feeding _em_event() */
   volatile double len;          /* cached stream length (seconds) */
   volatile double pos;          /* cached playback position (seconds) */
   double fps;                   /* frames per second */
   double ratio;                 /* display aspect ratio */
   int w, h;                     /* video dimensions */
   Evas_Object *obj;             /* owning emotion evas object */
   volatile Emotion_Xine_Video_Frame *cur_frame; /* frame held by frontend */
   volatile int get_poslen;      /* pos/len refresh requested flag */
   volatile int spu_channel;     /* requested subtitle channel */
   volatile int audio_channel;   /* requested audio channel */
   volatile int video_channel;   /* requested video channel */
   volatile double seek_to_pos;  /* last requested seek target */
   volatile double seeked_pos;   /* last completed seek position */
   volatile int fq;              /* frames currently queued/in flight */
   Emotion_Vis vis;              /* selected audio visualization */
   int fd_read;                  /* frame pipe: main-loop end */
   int fd_write;                 /* frame pipe: decoder end */
   Ecore_Fd_Handler *fd_handler; /* main-loop watcher for the frame pipe */
   int fd_ev_read;               /* event pipe: main-loop end */
   int fd_ev_write;              /* event pipe: xine-thread end */
   Ecore_Fd_Handler *fd_ev_handler; /* main-loop watcher for the event pipe */
   unsigned char play : 1;       /* playback requested */
   unsigned char just_loaded : 1;/* file opened but not yet played */
   unsigned char video_mute : 1; /* drop video frames when set */
   unsigned char audio_mute : 1; /* mute audio when set */
   unsigned char spu_mute : 1;   /* hide subtitles when set */
   unsigned char opt_no_video : 1; /* option: disable video output */
   unsigned char opt_no_audio : 1; /* option: disable audio output */
   volatile unsigned char delete_me : 1; /* instance being torn down */
   volatile unsigned char no_time : 1;   /* stream has no time-based length */
   volatile unsigned char opening : 1;   /* open still in progress */
   volatile unsigned char closing : 1;   /* close/shutdown in progress */
   volatile unsigned char have_vo : 1;   /* video output port is live */
   volatile unsigned char play_ok : 1;   /* slave finished last request */

   /* pos/len worker thread and its wakeup primitives */
   pthread_t get_pos_len_th;
   pthread_cond_t get_pos_len_cond;
   pthread_mutex_t get_pos_len_mutex;

   /* slave thread handling open/play/seek/... requests via its own pipe */
   pthread_t slave_th;
   int fd_slave_read;
   int fd_slave_write;

   unsigned char get_pos_thread_deleted : 1; /* pos/len thread has exited */
};
+
/* One decoded video frame as passed from the xine video-out plugin to the
 * main loop over the frame pipe. */
struct _Emotion_Xine_Video_Frame
{
   int w, h;                 /* frame dimensions in pixels */
   double ratio;             /* display aspect ratio */
   Emotion_Format format;    /* pixel format (planar YUV or BGRA) */
   unsigned char *y, *u, *v; /* planar YUV plane pointers (or NULL) */
   unsigned char *bgra_data; /* packed BGRA buffer (or NULL) */
   int y_stride, u_stride, v_stride; /* per-plane row strides in bytes */
   Evas_Object *obj;         /* emotion object this frame belongs to */
   double timestamp;         /* presentation time */
   void (*done_func)(void *data); /* called when the frontend releases it */
   void *done_data;          /* argument for done_func */
   void *frame;              /* backing xine vo_frame */
};
+
/* One event queued over the event pipe for the main loop. */
struct _Emotion_Xine_Event
{
   int type;         /* xine event type, or -1 for a module event */
   void *xine_event; /* deep copy of the xine event payload (or NULL) */
   int mtype;        /* module event code; 0 for a real xine event */
};
+
+unsigned char module_open(Evas_Object *obj, const Emotion_Video_Module **module, void **video, Emotion_Module_Options *opt);
+void module_close(Emotion_Video_Module *module, void *video);
+
+#endif
--- /dev/null
+/***************************************************************************/
+/*** emotion xine display engine ***/
+/***************************************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "emotion_private.h"
+#include "emotion_xine.h"
+
+#include <xine.h>
+#include <xine/video_out.h>
+#include <xine/xine_internal.h>
+#include <xine/xineutils.h>
+#include <xine/vo_scale.h>
+
+#define BLEND_BYTE(dst, src, o) (((src)*o + ((dst)*(0xf-o)))/0xf)
+
+/***************************************************************************/
+typedef struct _Emotion_Frame Emotion_Frame;
+typedef struct _Emotion_Driver Emotion_Driver;
+typedef struct _Emotion_Class Emotion_Class;
+typedef struct _Emotion_Lut Emotion_Lut;
+
/* A xine video-out frame wrapping the Emotion_Xine_Video_Frame handed to
 * the emotion frontend. */
struct _Emotion_Frame
{
   vo_frame_t vo_frame;  /* base xine frame; must stay the first member */
   int width;
   int height;
   double ratio;
   int format;           /* xine pixel format of this frame */
   xine_t *xine;

   Emotion_Xine_Video_Frame frame; /* payload sent over the frame pipe */
   unsigned char in_use : 1;       /* frontend currently holds this frame */
};
+
/* Per-instance state of the emotion video-out driver. */
struct _Emotion_Driver
{
   vo_driver_t vo_driver; /* base xine driver; must stay the first member */
   config_values_t *config;
   int ratio;             /* requested aspect handling (XINE_VO_ASPECT_*) */
   xine_t *xine;
   Emotion_Xine_Video *ev; /* backend instance receiving the frames */
};
+
/* Driver class object created once per xine engine at plugin load. */
struct _Emotion_Class
{
   video_driver_class_t driver_class; /* base class; must stay first */
   config_values_t *config;
   xine_t *xine;
};
+
/* One packed CLUT (color lookup table) entry used for overlay blending. */
struct _Emotion_Lut
{
   uint8_t cb : 8;  /* chroma blue */
   uint8_t cr : 8;  /* chroma red */
   uint8_t y : 8;   /* luma */
   uint8_t foo : 8; /* padding/alpha slot */
} __attribute__ ((packed));
+
+/***************************************************************************/
+static void *_emotion_class_init (xine_t *xine, void *visual);
+static void _emotion_class_dispose (video_driver_class_t *driver_class);
+static char *_emotion_class_identifier_get (video_driver_class_t *driver_class);
+static char *_emotion_class_description_get (video_driver_class_t *driver_class);
+
+static vo_driver_t *_emotion_open (video_driver_class_t *driver_class, const void *visual);
+static void _emotion_dispose (vo_driver_t *vo_driver);
+
+static int _emotion_redraw (vo_driver_t *vo_driver);
+
+static uint32_t _emotion_capabilities_get (vo_driver_t *vo_driver);
+static int _emotion_gui_data_exchange (vo_driver_t *vo_driver, int data_type, void *data);
+
+static int _emotion_property_set (vo_driver_t *vo_driver, int property, int value);
+static int _emotion_property_get (vo_driver_t *vo_driver, int property);
+static void _emotion_property_min_max_get (vo_driver_t *vo_driver, int property, int *min, int *max);
+
+static vo_frame_t *_emotion_frame_alloc (vo_driver_t *vo_driver);
+static void _emotion_frame_dispose (vo_frame_t *vo_frame);
+static void _emotion_frame_format_update (vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags);
+static void _emotion_frame_display (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
+static void _emotion_frame_field (vo_frame_t *vo_frame, int which_field);
+
+static void _emotion_frame_data_free (Emotion_Frame *fr);
+static void _emotion_frame_data_unlock (Emotion_Frame *fr);
+
+static void _emotion_overlay_begin (vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed);
+static void _emotion_overlay_end (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
+static void _emotion_overlay_blend (vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay);
+
+static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
+static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
+
+static void _emotion_yuy2_to_bgra32 (int width, int height, unsigned char *src, unsigned char *dst);
+
+/***************************************************************************/
/* Static description of this video-out plugin: frames are delivered in
 * memory, so no windowing-system visual is required. */
static vo_info_t _emotion_info =
{
   1,                    /* priority */
   XINE_VISUAL_TYPE_NONE /* visual type */
};
+
/* Plugin catalog entry xine scans for; terminated by a PLUGIN_NONE row. */
plugin_info_t emotion_xine_plugin_info[] =
{
   { PLUGIN_VIDEO_OUT, 21, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
   { PLUGIN_NONE, 0, "", 0, NULL, NULL }
};
+
+/***************************************************************************/
+static void *
+_emotion_class_init(xine_t *xine, void *visual)
+{
+ Emotion_Class *cl;
+
+// printf("emotion: _emotion_class_init()\n");
+ cl = (Emotion_Class *) xine_xmalloc(sizeof(Emotion_Class));
+ if (!cl) return NULL;
+ cl->driver_class.open_plugin = _emotion_open;
+ cl->driver_class.get_identifier = _emotion_class_identifier_get;
+ cl->driver_class.get_description = _emotion_class_description_get;
+ cl->driver_class.dispose = _emotion_class_dispose;
+ cl->config = xine->config;
+ cl->xine = xine;
+
+ return cl;
+}
+
+static void
+_emotion_class_dispose(video_driver_class_t *driver_class)
+{
+ Emotion_Class *cl;
+
+ cl = (Emotion_Class *)driver_class;
+ free(cl);
+}
+
+static char *
+_emotion_class_identifier_get(video_driver_class_t *driver_class)
+{
+ Emotion_Class *cl;
+
+ cl = (Emotion_Class *)driver_class;
+ return "emotion";
+}
+
+static char *
+_emotion_class_description_get(video_driver_class_t *driver_class)
+{
+ Emotion_Class *cl;
+
+ cl = (Emotion_Class *)driver_class;
+ return "Emotion xine video output plugin";
+}
+
+/***************************************************************************/
+static vo_driver_t *
+_emotion_open(video_driver_class_t *driver_class, const void *visual)
+{
+ Emotion_Class *cl;
+ Emotion_Driver *dv;
+
+ cl = (Emotion_Class *)driver_class;
+ /* visual here is the data ptr passed to xine_open_video_driver() */
+// printf("emotion: _emotion_open()\n");
+ dv = (Emotion_Driver *)xine_xmalloc(sizeof(Emotion_Driver));
+ if (!dv) return NULL;
+
+ dv->config = cl->config;
+ dv->xine = cl->xine;
+ dv->ratio = XINE_VO_ASPECT_AUTO;
+ dv->vo_driver.get_capabilities = _emotion_capabilities_get;
+ dv->vo_driver.alloc_frame = _emotion_frame_alloc;
+ dv->vo_driver.update_frame_format = _emotion_frame_format_update;
+ dv->vo_driver.overlay_begin = _emotion_overlay_begin;
+ dv->vo_driver.overlay_blend = _emotion_overlay_blend;
+ dv->vo_driver.overlay_end = _emotion_overlay_end;
+ dv->vo_driver.display_frame = _emotion_frame_display;
+ dv->vo_driver.get_property = _emotion_property_get;
+ dv->vo_driver.set_property = _emotion_property_set;
+ dv->vo_driver.get_property_min_max = _emotion_property_min_max_get;
+ dv->vo_driver.gui_data_exchange = _emotion_gui_data_exchange;
+ dv->vo_driver.dispose = _emotion_dispose;
+ dv->vo_driver.redraw_needed = _emotion_redraw;
+ dv->ev = (Emotion_Xine_Video *)visual;
+ dv->ev->have_vo = 1;
+ printf("emotion: _emotion_open = %p\n", &dv->vo_driver);
+ return &dv->vo_driver;
+}
+
+static void
+_emotion_dispose(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+ dv->ev->have_vo = 0;
+ printf("emotion: _emotion_dispose(%p)\n", dv);
+ free(dv);
+}
+
+/***************************************************************************/
+static int
+_emotion_redraw(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_redraw()\n");
+ return 0;
+}
+
+/***************************************************************************/
+static uint32_t
+_emotion_capabilities_get(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_capabilities_get()\n");
+ return VO_CAP_YV12 | VO_CAP_YUY2;
+}
+
+/***************************************************************************/
+static int
+_emotion_gui_data_exchange(vo_driver_t *vo_driver, int data_type, void *data)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_gui_data_exchange()\n");
+ switch (data_type)
+ {
+ case XINE_GUI_SEND_COMPLETION_EVENT:
+ break;
+ case XINE_GUI_SEND_DRAWABLE_CHANGED:
+ break;
+ case XINE_GUI_SEND_EXPOSE_EVENT:
+ break;
+ case XINE_GUI_SEND_TRANSLATE_GUI_TO_VIDEO:
+ break;
+ case XINE_GUI_SEND_VIDEOWIN_VISIBLE:
+ break;
+ case XINE_GUI_SEND_SELECT_VISUAL:
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/***************************************************************************/
+static int
+_emotion_property_set(vo_driver_t *vo_driver, int property, int value)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_property_set()\n");
+ switch (property)
+ {
+ case VO_PROP_ASPECT_RATIO:
+ if (value >= XINE_VO_ASPECT_NUM_RATIOS)
+ value = XINE_VO_ASPECT_AUTO;
+// printf("DRIVER RATIO SET %i!\n", value);
+ dv->ratio = value;
+ break;
+ default:
+ break;
+ }
+ return value;
+}
+
+static int
+_emotion_property_get(vo_driver_t *vo_driver, int property)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_property_get()\n");
+ switch (property)
+ {
+ case VO_PROP_ASPECT_RATIO:
+ return dv->ratio;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void
+_emotion_property_min_max_get(vo_driver_t *vo_driver, int property, int *min, int *max)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_property_min_max_get()\n");
+ *min = 0;
+ *max = 0;
+}
+
+/***************************************************************************/
+static vo_frame_t *
+_emotion_frame_alloc(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_frame_alloc()\n");
+ fr = (Emotion_Frame *)xine_xmalloc(sizeof(Emotion_Frame));
+ if (!fr) return NULL;
+
+ fr->vo_frame.base[0] = NULL;
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+
+ fr->vo_frame.proc_slice = NULL;
+ fr->vo_frame.proc_frame = NULL;
+ fr->vo_frame.field = _emotion_frame_field;
+ fr->vo_frame.dispose = _emotion_frame_dispose;
+ fr->vo_frame.driver = vo_driver;
+
+ return (vo_frame_t *)fr;
+}
+
+static void
+_emotion_frame_dispose(vo_frame_t *vo_frame)
+{
+ Emotion_Frame *fr;
+
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_frame_dispose()\n");
+ _emotion_frame_data_free(fr);
+ free(fr);
+}
+
+static void
+_emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+
+ if ((fr->width != width) || (fr->height != height) ||
+ (fr->format != format) || (!fr->vo_frame.base[0]))
+ {
+// printf("emotion: _emotion_frame_format_update()\n");
+ _emotion_frame_data_free(fr);
+
+ fr->width = width;
+ fr->height = height;
+ fr->format = format;
+
+ switch (format)
+ {
+ case XINE_IMGFMT_YV12:
+ {
+ int y_size, uv_size;
+
+ fr->frame.format = EMOTION_FORMAT_YV12;
+ fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
+ fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
+ fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);
+
+ y_size = fr->vo_frame.pitches[0] * height;
+ uv_size = fr->vo_frame.pitches[1] * ((height + 1) / 2);
+
+ fr->vo_frame.base[0] = malloc(y_size + (2 * uv_size));
+ fr->vo_frame.base[1] = fr->vo_frame.base[0] + y_size + uv_size;
+ fr->vo_frame.base[2] = fr->vo_frame.base[0] + y_size;
+ fr->frame.w = fr->width;
+ fr->frame.h = fr->height;
+ fr->frame.ratio = fr->vo_frame.ratio;
+ fr->frame.y = fr->vo_frame.base[0];
+ fr->frame.u = fr->vo_frame.base[1];
+ fr->frame.v = fr->vo_frame.base[2];
+ fr->frame.bgra_data = NULL;
+ fr->frame.y_stride = fr->vo_frame.pitches[0];
+ fr->frame.u_stride = fr->vo_frame.pitches[1];
+ fr->frame.v_stride = fr->vo_frame.pitches[2];
+ fr->frame.obj = dv->ev->obj;
+ }
+ break;
+ case XINE_IMGFMT_YUY2:
+ {
+ fr->frame.format = EMOTION_FORMAT_BGRA;
+ fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
+ fr->vo_frame.pitches[1] = 0;
+ fr->vo_frame.pitches[2] = 0;
+
+ fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+
+ fr->frame.w = fr->width;
+ fr->frame.h = fr->height;
+ fr->frame.ratio = fr->vo_frame.ratio;
+ fr->frame.y = NULL;
+ fr->frame.u = NULL;
+ fr->frame.v = NULL;
+ fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
+ fr->frame.y_stride = 0;
+ fr->frame.u_stride = 0;
+ fr->frame.v_stride = 0;
+ fr->frame.obj = dv->ev->obj;
+ }
+ break;
+ default:
+ break;
+ }
+ if (((format == XINE_IMGFMT_YV12)
+ && ((fr->vo_frame.base[0] == NULL)
+ || (fr->vo_frame.base[1] == NULL)
+ || (fr->vo_frame.base[2] == NULL)))
+ || ((format == XINE_IMGFMT_YUY2)
+ && ((fr->vo_frame.base[0] == NULL)
+ || (fr->frame.bgra_data == NULL))))
+ {
+ _emotion_frame_data_free(fr);
+ }
+ }
+ fr->frame.ratio = fr->vo_frame.ratio;
+ fr->ratio = ratio;
+}
+
/* xine callback: a decoded frame is ready for display.
 * Converts YUY2 frames to BGRA in-place into the frame's scratch buffer,
 * stamps the presentation time, then hands a pointer to the Emotion
 * frame over to the wrapper by writing it into ev->fd_write (presumably
 * a pipe drained by the Ecore side — confirm against the wrapper code).
 * The xine frame is NOT released here; _emotion_frame_data_unlock()
 * frees it once the consumer is done (done_func/done_data). */
static void
_emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
{
   Emotion_Driver *dv;
   Emotion_Frame *fr;

   dv = (Emotion_Driver *)vo_driver;
   fr = (Emotion_Frame *)vo_frame;
// printf("emotion: _emotion_frame_display()\n");
// printf("EX VO: fq %i %p\n", dv->ev->fq, dv->ev);
// if my frame queue is too deep ( > 4 frames) simply block and wait for them
// to drain
// while (dv->ev->fq > 4) usleep(1);
   if (dv->ev)
     {
        void *buf;
        int ret;

        /* session is shutting down: drop the frame on the floor */
        if (dv->ev->closing) return;
        if (fr->format == XINE_IMGFMT_YUY2)
          {
             /* packed YUY2 was kept raw; convert now for the Evas side */
             _emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
          }

        buf = &(fr->frame);
        /* vpts is in 1/90000 s ticks; expose it as seconds */
        fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
        fr->frame.done_func = _emotion_frame_data_unlock;
        fr->frame.done_data = fr;
// printf("FRAME FOR %p\n", dv->ev);
        ret = write(dv->ev->fd_write, &buf, sizeof(void *));
        /* NOTE(review): ret is not checked — a short or failed write
         * would leak the frame (in_use stays set); confirm the fd is a
         * blocking pipe where a sizeof(void*) write cannot be partial */
// printf("-- FRAME DEC %p == %i\n", fr->frame.obj, ret);
        fr->in_use = 1;
        dv->ev->fq++;
     }
   /* hmm - must find a way to sanely copy data out... FIXME problem */
// fr->vo_frame.free(&fr->vo_frame);
}
+
+static void
+_emotion_frame_field(vo_frame_t *vo_frame, int which_field)
+{
+ Emotion_Frame *fr;
+
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_frame_field()\n");
+}
+
+/***************************************************************************/
+static void
+_emotion_frame_data_free(Emotion_Frame *fr)
+{
+ if (fr->vo_frame.base[0])
+ {
+ free(fr->vo_frame.base[0]);
+ fr->vo_frame.base[0] = NULL;
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+ fr->frame.y = fr->vo_frame.base[0];
+ fr->frame.u = fr->vo_frame.base[1];
+ fr->frame.v = fr->vo_frame.base[2];
+ }
+ if (fr->frame.bgra_data)
+ {
+ free(fr->frame.bgra_data);
+ fr->frame.bgra_data = NULL;
+ }
+}
+
+static void
+_emotion_frame_data_unlock(Emotion_Frame *fr)
+{
+// printf("emotion: _emotion_frame_data_unlock()\n");
+ if (fr->in_use)
+ {
+ fr->vo_frame.free(&fr->vo_frame);
+ fr->in_use = 0;
+ }
+}
+
+/***************************************************************************/
+static void
+_emotion_overlay_begin(vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_overlay_begin()\n");
+}
+
+static void
+_emotion_overlay_end(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_overlay_end()\n");
+}
+
+static void
+_emotion_overlay_blend(vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_overlay_blend()\n");
+ _emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
+ fr->width, fr->height,
+ fr->vo_frame.pitches);
+}
+
+static void _emotion_overlay_mem_blend_8(uint8_t *mem, uint8_t val, uint8_t o, size_t sz)
+{
+ uint8_t *limit = mem + sz;
+ while (mem < limit)
+ {
+ *mem = BLEND_BYTE(*mem, val, o);
+ mem++;
+ }
+}
+
/* Blend one RLE-encoded overlay (subtitle / DVD OSD button) into planar
 * YV12 destination planes.
 *
 * dst_base[0..2] hold the Y/U/V plane pointers with per-plane strides in
 * dst_pitches[]. img_overl carries two palettes: the normal color/trans
 * tables, and the hili_* tables used inside the overlay's highlight
 * (button) rectangle. The RLE stream is walked once; each run is cut
 * into "bites" at the highlight-rectangle boundaries and blended with
 * whichever palette applies to that span. Chroma is written only on odd
 * lines, since the planes are 4:2:0 subsampled. */
static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3])
{
   Emotion_Lut *my_clut;
   uint8_t *my_trans;
   int src_width;
   int src_height;
   rle_elem_t *rle;
   rle_elem_t *rle_limit;
   int x_off;
   int y_off;
   int ymask, xmask;
   int rle_this_bite;
   int rle_remainder;
   int rlelen;
   int x, y;
   int hili_right;
   uint8_t clr = 0;

   src_width = img_overl->width;
   src_height = img_overl->height;
   rle = img_overl->rle;
   rle_limit = rle + img_overl->num_rle;
   x_off = img_overl->x;
   y_off = img_overl->y;

   if (!rle) return;

   uint8_t *dst_y = dst_base[0] + dst_pitches[0] * y_off + x_off;
   /* NOTE(review): dst_cr is derived from base[2] but offset with
    * pitches[1], and dst_cb the other way round (and advanced with the
    * swapped indices below). Harmless while both chroma pitches are
    * equal — which is how this driver allocates them — but confirm. */
   uint8_t *dst_cr = dst_base[2] + (y_off / 2) * dst_pitches[1] + (x_off / 2) + 1;
   uint8_t *dst_cb = dst_base[1] + (y_off / 2) * dst_pitches[2] + (x_off / 2) + 1;
   my_clut = (Emotion_Lut *) img_overl->hili_color;
   my_trans = img_overl->hili_trans;

   /* avoid wrapping the overlay when drawing into a smaller image */
   if( (x_off + img_overl->hili_right) < dst_width )
     hili_right = img_overl->hili_right;
   else
     hili_right = dst_width - 1 - x_off;

   /* avoid buffer overflow past the bottom of the destination */
   if( (src_height + y_off) >= dst_height )
     src_height = dst_height - 1 - y_off;

   rlelen=rle_remainder=0;
   for (y = 0; y < src_height; y++)
     {
        /* ymask != 0: this scanline lies outside the highlight rows */
        ymask = ((img_overl->hili_top > y) || (img_overl->hili_bottom < y));
        xmask = 0;

        for (x = 0; x < src_width;)
          {
             uint16_t o;

             if (rlelen == 0)
               {
                  /* fetch the next run from the RLE stream */
                  rle_remainder = rlelen = rle->len;
                  clr = rle->color;
                  rle++;
               }
             if (rle_remainder == 0)
               {
                  rle_remainder = rlelen;
               }
             if ((rle_remainder + x) > src_width)
               {
                  /* clip runs longer than the remaining scanline */
                  rle_remainder = src_width - x;
               }

             if (ymask == 0)
               {
                  if (x <= img_overl->hili_left)
                    {
                       /* Starts outside clip area */
                       if ((x + rle_remainder - 1) > img_overl->hili_left )
                         {
                            /* Cutting needed, starts outside, ends inside */
                            rle_this_bite = (img_overl->hili_left - x + 1);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
                  else if (x < hili_right)
                    {
                       /* Starts inside clip area */
                       if ((x + rle_remainder) > hili_right )
                         {
                            /* Cutting needed, starts inside, ends outside */
                            rle_this_bite = (hili_right - x);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                       else
                         {
                            /* no cutting needed, starts inside, ends inside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                    }
                  else if (x >= hili_right)
                    {
                       /* Starts outside clip area, ends outside clip area */
                       if ((x + rle_remainder ) > src_width )
                         {
                            /* Cutting needed, starts outside, ends at right edge */
                            /* It should never reach here due to the earlier test of src_width */
                            rle_this_bite = (src_width - x );
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
               }
             else
               {
                  /* Outside clip area due to y */
                  /* no cutting needed, starts outside, ends outside */
                  rle_this_bite = rle_remainder;
                  rle_remainder = 0;
                  rlelen -= rle_this_bite;
                  my_clut = (Emotion_Lut *) img_overl->color;
                  my_trans = img_overl->trans;
                  xmask = 0;
               }
             o = my_trans[clr];
             if (o)
               {
                  if (o >= 15)
                    {
                       /* fully opaque: plain memset is cheaper than blending */
                       memset(dst_y + x, my_clut[clr].y, rle_this_bite);
                       if (y & 1)
                         {
                            memset(dst_cr + (x >> 1), my_clut[clr].cr, (rle_this_bite+1) >> 1);
                            memset(dst_cb + (x >> 1), my_clut[clr].cb, (rle_this_bite+1) >> 1);
                         }
                    }
                  else
                    {
                       _emotion_overlay_mem_blend_8(dst_y + x, my_clut[clr].y, o, rle_this_bite);
                       if (y & 1)
                         {
                            /* Blending cr and cb should use a different function, with pre -128 to each sample */
                            _emotion_overlay_mem_blend_8(dst_cr + (x >> 1), my_clut[clr].cr, o, (rle_this_bite+1) >> 1);
                            _emotion_overlay_mem_blend_8(dst_cb + (x >> 1), my_clut[clr].cb, o, (rle_this_bite+1) >> 1);
                         }
                    }
               }
             x += rle_this_bite;
             if (rle >= rle_limit)
               {
                  break;
               }
          }
        if (rle >= rle_limit)
          {
             break;
          }

        dst_y += dst_pitches[0];

        /* chroma rows advance at half the luma rate (4:2:0) */
        if (y & 1)
          {
             dst_cr += dst_pitches[2];
             dst_cb += dst_pitches[1];
          }
     }
}
+
//TODO: Really need to improve this converter!
/* Clamp an arithmetic expression to the 0..255 byte range. */
#define LIMIT(x) ((x) > 0xff ? 0xff : ((x) < 0 ? 0 : (x)))

/* Convert a packed YUY2 (Y0 U Y1 V) image to 32bpp BGRA using the
 * BT.601 coefficients (1.164/2.018/0.813/0.391/1.596).
 *
 * Fix: the original nested the loops as width-outer / height-inner while
 * advancing the source pointers sequentially, so the U/V pair alignment
 * depended on `height` being even instead of tracking pixel pairs within
 * a scanline. The loops now walk row-major (rows outer, pixels inner)
 * and share one U/V sample between each horizontal pixel pair, exactly
 * as YUY2 defines. Output for even-sized images is unchanged.
 *
 * NOTE(review): the alpha byte is written as 0, as in the original —
 * confirm the consumer ignores alpha. */
static void
_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
{
   int row, col;
   unsigned char *y, *u, *v;

   y = src;
   u = src + 1;
   v = src + 3;
   for (row = 0; row < height; row++)
     {
        for (col = 0; col < width; col++)
          {
             *dst++ = LIMIT(1.164 * (*y - 16) + 2.018 * (*u - 128));                      /* B */
             *dst++ = LIMIT(1.164 * (*y - 16) - 0.813 * (*v - 128) - 0.391 * (*u - 128)); /* G */
             *dst++ = LIMIT(1.164 * (*y - 16) + 1.596 * (*v - 128));                      /* R */
             *dst++ = 0;                                                                  /* A */

             y += 2;
             /* advance chroma after every second luma sample (one YUY2 pair) */
             if (col % 2 == 1)
               {
                  u += 4;
                  v += 4;
               }
          }
     }
}