--- /dev/null
+The Rasterman (Carsten Haitzler) <raster@rasterman.com>
+
--- /dev/null
+Copyright (C) 2000 Carsten Haitzler and various contributors (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies of the Software and its Copyright notices. In addition publicly
+documented acknowledgment must be given that this software has been used if no
+source code of this software is made available publicly. This includes
+acknowledgments in either Copyright notices, Manuals, Publicity and Marketing
+documents or any documentation provided with any product containing this
+software. This License does not apply to any software that links to the
+libraries provided by this software (statically or dynamically), but only to
+the software provided.
+
+Please see COPYING-PLAIN for a plain-English explanation of this notice
+and its intent.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- /dev/null
+Plain English Copyright Notice
+
+This file is not intended to be the actual License. The reason this file
+exists is that we here are programmers and engineers. We aren't lawyers. We
+provide licenses that we THINK say the right things, but we have our own
+intentions at heart. This is a plain-english explanation of what those
+intentions are, and if you follow them you will be within the "spirit" of
+the license.
+
+The intent is for us to enjoy writing software that is useful to us (the
+AUTHORS) and allow others to use it freely and also benefit from the work we
+put into making it. We don't want to restrict others using it. They should
+not *HAVE* to make the source code of the applications they write that
+simply link to these libraries (be that statically or dynamically), or for
+them to be limited as to what license they choose to use (be it open, closed
+or anything else). But we would like to know you are using these libraries.
+We simply would like to know that it has been useful to someone. This is why
+we ask for acknowledgement of some sort.
+
+You can do what you want with the source of this software - it doesn't
+matter. We still have it here for ourselves and it is open and free to use
+and download and play with. It can't be taken away. We don't really mind what
+you do with the source to your software. We would simply like to know that
+you are using it - especially if it makes it into a commercial product. If you
+simply e-mail all the AUTHORS (see COPYING and AUTHORS files) telling us, and
+then make sure you include a paragraph or page in the manual for the product
+with the copyright notice and state that you used this software, we will be
+very happy. If you want to contribute back modifications and fixes you may have
+made, we will welcome those too with open arms (generally). If you want help
+with changes needed, ports needed or features to be added, arrangements can
+be easily made with some dialogue.
+
+Carsten Haitzler <raster@rasterman.com>
--- /dev/null
+PROJECT_NAME = Emotion
+PROJECT_NUMBER =
+OUTPUT_DIRECTORY = doc
+INPUT = emotion.c
+IMAGE_PATH = doc/img
+OUTPUT_LANGUAGE = English
+GENERATE_HTML = YES
+HTML_OUTPUT = html
+HTML_FILE_EXTENSION = .html
+HTML_HEADER = doc/head.html
+HTML_FOOTER = doc/foot.html
+HTML_STYLESHEET = doc/emotion.css
+HTML_ALIGN_MEMBERS = YES
+ENUM_VALUES_PER_LINE = 1
+GENERATE_HTMLHELP = NO
+CHM_FILE =
+HHC_LOCATION =
+GENERATE_CHI = NO
+BINARY_TOC = NO
+TOC_EXPAND = NO
+DISABLE_INDEX = NO
+EXTRACT_ALL = NO
+EXTRACT_PRIVATE = NO
+EXTRACT_STATIC = NO
+EXTRACT_LOCAL_CLASSES = NO
+HIDE_UNDOC_MEMBERS = YES
+HIDE_UNDOC_CLASSES = YES
+HIDE_FRIEND_COMPOUNDS = YES
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = NO
+STRIP_FROM_PATH =
+INTERNAL_DOCS = NO
+STRIP_CODE_COMMENTS = YES
+CASE_SENSE_NAMES = YES
+SHORT_NAMES = NO
+HIDE_SCOPE_NAMES = NO
+VERBATIM_HEADERS = NO
+SHOW_INCLUDE_FILES = NO
+JAVADOC_AUTOBRIEF = YES
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP = NO
+INHERIT_DOCS = YES
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+DISTRIBUTE_GROUP_DOC = NO
+TAB_SIZE = 2
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ALIASES =
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+OPTIMIZE_OUTPUT_FOR_C = YES
+OPTIMIZE_OUTPUT_JAVA = NO
+SHOW_USED_FILES = NO
+QUIET = NO
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_FORMAT = "$file:$line: $text"
+WARN_LOGFILE =
+FILE_PATTERNS =
+RECURSIVE = NO
+EXCLUDE =
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS =
+EXAMPLE_RECURSIVE = NO
+INPUT_FILTER =
+FILTER_SOURCE_FILES = NO
+SOURCE_BROWSER = NO
+INLINE_SOURCES = NO
+REFERENCED_BY_RELATION = YES
+REFERENCES_RELATION = YES
+ALPHABETICAL_INDEX = YES
+COLS_IN_ALPHA_INDEX = 2
+IGNORE_PREFIX =
+GENERATE_TREEVIEW = NO
+TREEVIEW_WIDTH = 250
+GENERATE_LATEX = YES
+LATEX_OUTPUT = latex
+LATEX_CMD_NAME = latex
+MAKEINDEX_CMD_NAME = makeindex
+COMPACT_LATEX = NO
+PAPER_TYPE = a4wide
+EXTRA_PACKAGES =
+LATEX_HEADER =
+PDF_HYPERLINKS = YES
+USE_PDFLATEX = NO
+LATEX_BATCHMODE = NO
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+GENERATE_MAN = YES
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_LINKS = YES
+GENERATE_XML = NO
+XML_SCHEMA =
+XML_DTD =
+GENERATE_AUTOGEN_DEF = NO
+ENABLE_PREPROCESSING = YES
+MACRO_EXPANSION = NO
+EXPAND_ONLY_PREDEF = NO
+SEARCH_INCLUDES = NO
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS =
+PREDEFINED =
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = YES
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = YES
+PERL_PATH = /usr/bin/perl
+CLASS_DIAGRAMS = NO
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = NO
+CLASS_GRAPH = NO
+COLLABORATION_GRAPH = NO
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = NO
+INCLUDED_BY_GRAPH = NO
+GRAPHICAL_HIERARCHY = NO
+DOT_IMAGE_FORMAT = png
+DOT_PATH =
+DOTFILE_DIRS =
+MAX_DOT_GRAPH_WIDTH = 512
+MAX_DOT_GRAPH_HEIGHT = 512
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
+SEARCHENGINE = NO
+CGI_NAME = search.cgi
+CGI_URL =
+DOC_URL =
+DOC_ABSPATH =
+BIN_ABSPATH = /usr/bin/
+EXT_DOC_PATHS =
--- /dev/null
+COMPILING and INSTALLING:
+
+If you got an official release tar archive, do:
+ ./configure
+
+(otherwise, if you got this from Enlightenment CVS, run: ./autogen.sh)
+
+Then to compile:
+ make
+
+To install (run this as root, or the user who handles installs):
+ make install
+
+NOTE: You MUST run "make install" for Emotion to run properly, as it loads
+its theme and decoder module from the install location.
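+
+For example, to build and install under the /usr/local prefix (any prefix
+of your choosing works the same way):
+ ./configure --prefix=/usr/local
+ make
+ make install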
--- /dev/null
+## Process this file with automake to produce Makefile.in
+
+SUBDIRS = src data
+
+MAINTAINERCLEANFILES = Makefile.in aclocal.m4 config.guess \
+ config.h.in config.sub configure install-sh \
+ ltconfig ltmain.sh missing mkinstalldirs \
+ stamp-h.in emotion_docs.tar emotion_docs.tar.gz \
+ emotion.c acconfig.h
+
+bin_SCRIPTS = emotion-config
+
+EXTRA_DIST = README AUTHORS COPYING COPYING-PLAIN emotion.spec emotion.c.in gendoc Doxyfile emotion_docs.tar.gz
--- /dev/null
+Emotion 0.0.1
--- /dev/null
+#!/bin/sh
+# Run this to generate all the initial makefiles, etc.
+
+srcdir=`dirname $0`
+PKG_NAME="the package."
+
+DIE=0
+
+(autoconf --version) < /dev/null > /dev/null 2>&1 || {
+ echo
+  echo "**Error**: You must have \`autoconf' installed."
+ echo "Download the appropriate package for your distribution,"
+ echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
+ DIE=1
+}
+
+(grep "^AM_PROG_LIBTOOL" $srcdir/configure.in >/dev/null) && {
+ (libtool --version) < /dev/null > /dev/null 2>&1 || {
+ echo
+ echo "**Error**: You must have \`libtool' installed."
+ echo "Get ftp://ftp.gnu.org/pub/gnu/libtool-1.2d.tar.gz"
+ echo "(or a newer version if it is available)"
+ DIE=1
+ }
+}
+
+grep "^AM_GNU_GETTEXT" $srcdir/configure.in >/dev/null && {
+ grep "sed.*POTFILES" $srcdir/configure.in >/dev/null || \
+ (gettext --version) < /dev/null > /dev/null 2>&1 || {
+ echo
+ echo "**Error**: You must have \`gettext' installed."
+ echo "Get ftp://alpha.gnu.org/gnu/gettext-0.10.35.tar.gz"
+ echo "(or a newer version if it is available)"
+ DIE=1
+ }
+}
+
+(automake --version) < /dev/null > /dev/null 2>&1 || {
+ echo
+ echo "**Error**: You must have \`automake' installed."
+ echo "Get ftp://ftp.gnu.org/pub/gnu/automake-1.3.tar.gz"
+ echo "(or a newer version if it is available)"
+ DIE=1
+ NO_AUTOMAKE=yes
+}
+
+
+# if no automake, don't bother testing for aclocal
+test -n "$NO_AUTOMAKE" || (aclocal --version) < /dev/null > /dev/null 2>&1 || {
+ echo
+ echo "**Error**: Missing \`aclocal'. The version of \`automake'"
+ echo "installed doesn't appear recent enough."
+ echo "Get ftp://ftp.gnu.org/pub/gnu/automake-1.3.tar.gz"
+ echo "(or a newer version if it is available)"
+ DIE=1
+}
+
+if test "$DIE" -eq 1; then
+ exit 1
+fi
+
+if test -z "$*"; then
+ echo "**Warning**: I am going to run \`configure' with no arguments."
+ echo "If you wish to pass any to it, please specify them on the"
+ echo \`$0\'" command line."
+ echo
+fi
+
+case $CC in
+xlc )
+ am_opt=--include-deps;;
+esac
+
+for coin in `find $srcdir -name configure.in -print`
+do
+ dr=`dirname $coin`
+ if test -f $dr/NO-AUTO-GEN; then
+ echo skipping $dr -- flagged as no auto-gen
+ else
+ echo processing $dr
+ macrodirs=`sed -n -e 's,AM_ACLOCAL_INCLUDE(\(.*\)),\1,gp' < $coin`
+ ( cd $dr
+ aclocalinclude="$ACLOCAL_FLAGS"
+ for k in $macrodirs; do
+ if test -d $k; then
+ aclocalinclude="$aclocalinclude -I $k"
+ ##else
+ ## echo "**Warning**: No such directory \`$k'. Ignored."
+ fi
+ done
+ if grep "^AM_GNU_GETTEXT" configure.in >/dev/null; then
+ if grep "sed.*POTFILES" configure.in >/dev/null; then
+ : do nothing -- we still have an old unmodified configure.in
+ else
+ echo "Creating $dr/aclocal.m4 ..."
+ test -r $dr/aclocal.m4 || touch $dr/aclocal.m4
+ echo "Running gettextize... Ignore non-fatal messages."
+ echo "no" | gettextize --force --copy
+ echo "Making $dr/aclocal.m4 writable ..."
+ test -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4
+ fi
+ fi
+ if grep "^AM_GNOME_GETTEXT" configure.in >/dev/null; then
+ echo "Creating $dr/aclocal.m4 ..."
+ test -r $dr/aclocal.m4 || touch $dr/aclocal.m4
+ echo "Running gettextize... Ignore non-fatal messages."
+ echo "no" | gettextize --force --copy
+ echo "Making $dr/aclocal.m4 writable ..."
+ test -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4
+ fi
+ if grep "^AM_PROG_LIBTOOL" configure.in >/dev/null; then
+ echo "Running libtoolize..."
+ libtoolize --force --copy
+ fi
+ echo "Running aclocal $aclocalinclude ..."
+ aclocal $aclocalinclude
+ if grep "^AM_CONFIG_HEADER" configure.in >/dev/null; then
+ echo "Running autoheader..."
+ autoheader
+ fi
+ echo "Running automake --gnu $am_opt ..."
+ automake --add-missing --gnu $am_opt
+ echo "Running autoconf ..."
+ autoconf
+ )
+ fi
+done
+
+#conf_flags="--enable-maintainer-mode --enable-compile-warnings" #--enable-iso-c
+
+if test x$NOCONFIGURE = x; then
+ echo Running $srcdir/configure $conf_flags "$@" ...
+ $srcdir/configure $conf_flags "$@" \
+ && echo Now type \`make\' to compile $PKG_NAME
+else
+ echo Skipping configure process.
+fi
--- /dev/null
+dnl Process this file with autoconf to produce a configure script.
+
+# get rid of that stupid cache mechanism
+rm -f config.cache
+
+AC_INIT(configure.in)
+AM_INIT_AUTOMAKE(emotion, 0.0.1)
+AM_CONFIG_HEADER(config.h)
+
+AC_C_BIGENDIAN
+AC_ISC_POSIX
+AC_PROG_CC
+AM_PROG_CC_STDC
+AC_HEADER_STDC
+AC_C_CONST
+AM_ENABLE_SHARED
+AM_PROG_LIBTOOL
+
+if test "x${exec_prefix}" = "xNONE"; then
+ if test "x${prefix}" = "xNONE"; then
+ bindir="${ac_default_prefix}/bin";
+ else
+ bindir="${prefix}/bin";
+ fi
+else
+ if test "x${prefix}" = "xNONE"; then
+ bindir="${ac_default_prefix}/bin";
+ else
+ bindir="${prefix}/bin";
+ fi
+fi
+
+if test "x${exec_prefix}" = "xNONE"; then
+ if test "x${prefix}" = "xNONE"; then
+ libdir="${ac_default_prefix}/lib";
+ else
+ libdir="${prefix}/lib";
+ fi
+else
+ if test "x${prefix}" = "xNONE"; then
+ libdir="${ac_default_prefix}/lib";
+ else
+ libdir="${prefix}/lib";
+ fi
+fi
+
+dnl Set PACKAGE_DATA_DIR in config.h.
+if test "x${datadir}" = 'x${prefix}/share'; then
+ if test "x${prefix}" = "xNONE"; then
+ AC_DEFINE_UNQUOTED(PACKAGE_DATA_DIR, "${ac_default_prefix}/share/${PACKAGE}", [Package Data Directory])
+ else
+ AC_DEFINE_UNQUOTED(PACKAGE_DATA_DIR, "${prefix}/share/${PACKAGE}", [Package Data Directory])
+ fi
+else
+ AC_DEFINE_UNQUOTED(PACKAGE_DATA_DIR, "${datadir}/${PACKAGE}", [Package Data Directory])
+fi
+
+dnl Set PACKAGE_BIN_DIR in config.h.
+if test "x${bindir}" = 'xNONE'; then
+ if test "x${prefix}" = "xNONE"; then
+ AC_DEFINE_UNQUOTED(PACKAGE_BIN_DIR, "${ac_default_prefix}/bin", [Install Location for Executables])
+ else
+ AC_DEFINE_UNQUOTED(PACKAGE_BIN_DIR, "${prefix}/bin", [Install Location for Executables])
+ fi
+else
+  AC_DEFINE_UNQUOTED(PACKAGE_BIN_DIR, "${bindir}", [Install Location for Executables])
+fi
+
+dnl Set PACKAGE_LIB_DIR in config.h.
+if test "x${libdir}" = 'xNONE'; then
+ if test "x${prefix}" = "xNONE"; then
+ AC_DEFINE_UNQUOTED(PACKAGE_LIB_DIR, "${ac_default_prefix}/lib", [Install Location for Libraries])
+ else
+ AC_DEFINE_UNQUOTED(PACKAGE_LIB_DIR, "${prefix}/lib", [Install Location for Libraries])
+ fi
+else
+ AC_DEFINE_UNQUOTED(PACKAGE_LIB_DIR, "${libdir}", [Install Location for Libraries])
+fi
+
+dnl Set PACKAGE_SOURCE_DIR in config.h.
+packagesrcdir=`cd $srcdir && pwd`
+AC_DEFINE_UNQUOTED(PACKAGE_SOURCE_DIR, "${packagesrcdir}", [Source Directory])
+
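+
+# vser converts a version string such as "1.0.0_pre7" into a single integer
+# (major*1000000 + minor*10000 + micro*100 + the "preN" number) so that two
+# versions can be compared numerically by the checks below. The weighting is
+# just a convention local to this script.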
+vser()
+{
+ v=$1
+ VSTART=`echo $v | awk -F_ '{printf("%s", $1);}'`
+ V1=`echo $VSTART | awk -F\. '{printf("%s", $1);}'`
+ V2=`echo $VSTART | awk -F\. '{printf("%s", $2);}'`
+ V3=`echo $VSTART | awk -F\. '{printf("%s", $3);}'`
+ V4="0"
+ VEND=`echo $v | awk -F_ '{printf("%s", $2);}'`
+ if test -n "$VEND"; then
+ V4=`echo $VEND | sed s/pre//`
+ fi
+ V3=$(( $V3 * 100 ));
+ V2=$(( $V2 * 10000 ));
+ V1=$(( $V1 * 1000000 ));
+ V=$(( $V4 + $V3 + $V2 + $V1 ));
+ echo $V
+}
+
+if test -z "$EET_CONFIG"; then EET_CONFIG="eet-config"; fi
+if test -z "$EVAS_CONFIG"; then EVAS_CONFIG="evas-config"; fi
+if test -z "$EDJE_CONFIG"; then EDJE_CONFIG="edje-config"; fi
+if test -z "$ECORE_CONFIG"; then ECORE_CONFIG="ecore-config"; fi
+if test -z "$EMBRYO_CONFIG"; then EMBRYO_CONFIG="embryo-config"; fi
+if test -z "$XINE_CONFIG"; then XINE_CONFIG="xine-config"; fi
+
+V=`$ECORE_CONFIG --version`
+VV=`vser $V`
+VM="1.0.0_pre7"
+VVM=`vser $VM`
+if test $VV -lt $VVM; then
+ echo "Error. Ecore is not at least "$VM". It is "$V". Abort."
+ exit -1
+fi
+
+V=`$EVAS_CONFIG --version`
+VV=`vser $V`
+VM="1.0.0_pre13"
+VVM=`vser $VM`
+if test $VV -lt $VVM; then
+ echo "Error. Evas is not at least "$VM". It is "$V". Abort."
+ exit -1
+fi
+
+V=`$EDJE_CONFIG --version`
+VV=`vser $V`
+VM="0.5.0"
+VVM=`vser $VM`
+if test $VV -lt $VVM; then
+ echo "Error. Edje is not at least "$VM". It is "$V". Abort."
+ exit -1
+fi
+
+V=`$XINE_CONFIG --version`
+VV=`vser $V`
+VM="1.0.0"
+VVM=`vser $VM`
+if test $VV -lt $VVM; then
+ echo "Error. Xine is not at least "$VM". It is "$V". Abort."
+ exit -1
+fi
+
+my_cflags=`$EVAS_CONFIG --cflags`" "`$ECORE_CONFIG --cflags`
+my_libs=`$EVAS_CONFIG --libs`" "`$ECORE_CONFIG --libs`
+AC_SUBST(my_cflags)
+AC_SUBST(my_libs)
+
+edje_cflags=`$EDJE_CONFIG --cflags`
+edje_libs=`$EDJE_CONFIG --libs`
+AC_SUBST(edje_cflags)
+AC_SUBST(edje_libs)
+
+xine_cflags=`$XINE_CONFIG --cflags`
+xine_libs=`$XINE_CONFIG --libs`
+xine_plugins=`$XINE_CONFIG --plugindir`
+
+AC_SUBST(xine_cflags)
+AC_SUBST(xine_libs)
+AC_SUBST(xine_plugins)
+
+AC_OUTPUT([
+Makefile
+src/Makefile
+src/lib/Makefile
+src/modules/Makefile
+src/modules/xine/Makefile
+src/bin/Makefile
+data/Makefile
+emotion-config
+],[
+chmod +x emotion-config
+])
--- /dev/null
+filesdir = $(datadir)/emotion/data
+files_DATA = theme.eet
+
+EXTRA_DIST = $(files_DATA)
--- /dev/null
+td.md {
+ background-color: #ffffff;
+ font-family: monospace;
+ text-align: left;
+ vertical-align: middle;
+ font-size: 10px;
+ padding-right : 1px;
+ padding-top : 1px;
+ padding-left : 1px;
+ padding-bottom : 1px;
+ margin-left : 1px;
+ margin-right : 1px;
+ margin-top : 1px;
+ margin-bottom : 1px
+}
+td.mdname {
+ font-family: monospace;
+ text-align: left;
+ vertical-align: middle;
+ font-size: 10px;
+ padding-right : 1px;
+ padding-top : 1px;
+ padding-left : 1px;
+ padding-bottom : 1px;
+ margin-left : 1px;
+ margin-right : 1px;
+ margin-top : 1px;
+ margin-bottom : 1px
+}
+h1
+{
+ text-align: center;
+ color: #333333
+}
+h2
+{
+ text-align: left;
+ color: #333333
+}
+h3
+{
+ text-align: left;
+ color: #333333
+}
+a:link
+{
+ text-decoration: none;
+ color: #444444;
+ font-weight: bold;
+}
+a:visited
+{
+ text-decoration: none;
+ color: #666666;
+ font-weight: bold;
+}
+a:hover
+{
+ text-decoration: none;
+ color: #000000;
+ font-weight: bold;
+}
+a.nav:link
+{
+ text-decoration: none;
+ color: #444444;
+ font-weight: normal;
+}
+a.nav:visited
+{
+ text-decoration: none;
+ color: #666666;
+ font-weight: normal;
+}
+a.nav:hover
+{
+ text-decoration: none;
+ color: #000000;
+ font-weight: normal;
+}
+a.qindex:link
+{
+ text-decoration: none;
+ color: #444444;
+ font-weight: normal;
+}
+a.qindex:visited
+{
+ text-decoration: none;
+ color: #666666;
+ font-weight: normal;
+}
+a.qindex:hover
+{
+ text-decoration: none;
+ color: #000000;
+ font-weight: normal;
+}
+p
+{
+ color: #000000;
+ font-family: sans-serif;
+ font-size: 10px;
+}
+body {
+ background-image: url("hilite.png");
+ background-repeat: no-repeat;
+ background-position: left top;
+ background-color: #dddddd;
+ color: #000000;
+ font-family: sans-serif;
+ padding: 8px;
+ margin: 0;
+}
+div.fragment
+{
+ background-image: url("hilite.png");
+ background-repeat: no-repeat;
+ background-position: left top;
+ border: thin solid #888888;
+ background-color: #eeeeee;
+ padding: 4px;
+ text-align: left;
+ vertical-align: middle;
+ font-size: 12px;
+}
+hr
+{
+ border: 0;
+ background-color: #000000;
+ width: 80%;
+ height: 1px;
+}
+dl
+{
+ background-image: url("hilite.png");
+ background-repeat: no-repeat;
+ background-position: left top;
+ border: thin solid #aaaaaa;
+ background-color: #eeeeee;
+ padding: 4px;
+ text-align: left;
+ vertical-align: middle;
+ font-size: 12px;
+}
+em
+{
+ color: #334466;
+ font-family: courier;
+ font-size: 10px;
+ font-style: normal;
+}
+
+div.nav
+{
+ border: thin solid #000000;
+ background-color: #ffffff;
+ padding: 1px;
+ text-align: center;
+ vertical-align: middle;
+ font-size: 12px;
+}
+div.body
+{
+ border: thin solid #000000;
+ background-color: #ffffff;
+ padding: 4px;
+ text-align: left;
+ font-size: 10px;
+}
+div.diag
+{
+ border: thin solid #888888;
+ background-color: #eeeeee;
+ padding: 4px;
+ text-align: center;
+ font-size: 8px;
+}
--- /dev/null
+</body>
+</html>
--- /dev/null
+<html>
+
+<head>
+
+<title>$title</title>
+<link href=emotion.css rel=stylesheet type=text/css>
+
+</head>
+
+<body>
+<div class=nav>
+<table border=0 align=center><tr>
+<td width=16><img src=emotion_mini.png width=16 height=16 alt=E></td>
+<td width=100% align=center>
+E : M : O : T : I : O : N
+</td>
+<td width=16><img src=emotion_mini.png width=16 height=16 alt=E></td>
+</tr></table>
+</div>
--- /dev/null
+#!/bin/sh
+
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+exec_prefix_set=no
+
+usage="\
+Usage: emotion-config [--prefix[=DIR]] [--exec-prefix[=DIR]] [--version] [--libs] [--cflags]"
+
+if test $# -eq 0; then
+ echo "${usage}" 1>&2
+ exit 1
+fi
+
+while test $# -gt 0; do
+ case "$1" in
+ -*=*) optarg=`echo "$1" | sed 's/[-_a-zA-Z0-9]*=//'` ;;
+ *) optarg= ;;
+ esac
+
+ case $1 in
+ --prefix=*)
+ prefix=$optarg
+ if test $exec_prefix_set = no ; then
+ exec_prefix=$optarg
+ fi
+ ;;
+ --prefix)
+ echo $prefix
+ ;;
+ --exec-prefix=*)
+ exec_prefix=$optarg
+ exec_prefix_set=yes
+ ;;
+ --exec-prefix)
+ echo $exec_prefix
+ ;;
+ --version)
+ echo @VERSION@
+ ;;
+ --cflags)
+ if test @includedir@ != /usr/include ; then
+ includes=-I@includedir@
+ fi
+ echo $includes
+ ;;
+ --libs)
+ libdirs=-L@libdir@
+ echo $libdirs -lemotion @my_libs@ -ldl
+ ;;
+ *)
+ echo "${usage}" 1>&2
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+exit 0
--- /dev/null
+/**
+@file
+@brief Emotion Media Library
+
+These routines are used for Emotion.
+*/
+
+/**
+
+@mainpage Emotion Library Documentation
+@image html emotion.png
+@version 0.0.1
+@author Carsten Haitzler <raster@rasterman.com>
+@date 2003-2004
+
+@section intro What is Emotion?
+
+A media object library for Evas and Ecore.
+
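+@section example A minimal example
+
+A short sketch of the intended usage, assuming an Evas canvas (called
+@c evas here) has already been created, e.g. via Ecore_Evas; "video.avi" is
+only a placeholder file name and error handling is omitted:
+
+@code
+Evas_Object *o;
+
+o = emotion_object_add(evas);
+emotion_object_file_set(o, "video.avi");
+evas_object_move(o, 0, 0);
+evas_object_resize(o, 320, 240);
+emotion_object_play_set(o, 1);
+evas_object_show(o);
+@endcode
+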
+@todo Complete documentation of API
+
+*/
--- /dev/null
+# Note that this is NOT a relocatable package
+%define ver 0.0.1
+%define rel 1
+%define prefix /usr
+
+Summary: Emotion Media Library
+Name: emotion
+Version: %ver
+Release: %rel
+Copyright: BSD
+Group: System Environment/Libraries
+Source: ftp://ftp.enlightenment.org/pub/emotion/emotion-%{ver}.tar.gz
+BuildRoot: /var/tmp/emotion-root
+Packager: The Rasterman <raster@rasterman.com>
+URL: http://www.enlightenment.org/
+BuildRequires: libjpeg-devel
+BuildRequires: zlib-devel
+Requires: libjpeg
+Requires: zlib
+
+Docdir: %{prefix}/doc
+
+%description
+
+Emotion is a media object library for Evas and Ecore.
+
+%package devel
+Summary: Emotion headers, static libraries, documentation and test programs
+Group: System Environment/Libraries
+Requires: %{name} = %{version}
+
+%description devel
+Headers, static libraries, test programs and documentation for Emotion
+
+%prep
+rm -rf $RPM_BUILD_ROOT
+
+%setup -q
+
+%build
+./configure --prefix=%prefix
+
+if [ "$SMP" != "" ]; then
+ (make "MAKE=make -k -j $SMP"; exit 0)
+ make
+else
+ make
+fi
+###########################################################################
+
+%install
+make DESTDIR=$RPM_BUILD_ROOT install
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%post
+/sbin/ldconfig
+
+%postun
+/sbin/ldconfig
+
+%files
+%defattr(-,root,root)
+%attr(755,root,root) %{prefix}/lib/libemotion.so*
+%attr(755,root,root) %{prefix}/lib/libemotion.la
+
+%files devel
+%attr(755,root,root) %{prefix}/lib/libemotion.a
+%attr(755,root,root) %{prefix}/bin/emotion*
+%{prefix}/include/Emotion*
+%{_datadir}/emotion
+%doc AUTHORS
+%doc COPYING
+%doc README
+%doc emotion_docs.tar.gz
+
+%changelog
+* Sat Jun 23 2001 The Rasterman <raster@rasterman.com>
+- Created spec file
--- /dev/null
+#!/bin/sh
+cp ./emotion.c.in ./emotion.c
+for I in `find ./src/lib -name "Emotion.h" -print`; do
+ cat $I >> ./emotion.c
+done
+#for I in `find ./src/lib -name "*.c" -print`; do
+# cat $I >> ./emotion.c
+#done
+rm -rf ./doc/html ./doc/latex ./doc/man
+doxygen
+cp doc/img/*.png doc/html/
+rm -f emotion_docs.tar emotion_docs.tar.gz
+tar -cvf emotion_docs.tar doc/html doc/man doc/latex
+gzip -9 emotion_docs.tar
+exit 0
--- /dev/null
+## Process this file with automake to produce Makefile.in
+
+SUBDIRS = lib bin modules
--- /dev/null
+## Process this file with automake to produce Makefile.in
+
+INCLUDES = \
+-I$(top_srcdir) \
+-I$(top_srcdir)/src/lib \
+@my_cflags@ @edje_cflags@
+
+bin_PROGRAMS = \
+emotion_test
+
+emotion_test_SOURCES = \
+emotion_test_main.c
+
+emotion_test_LDADD = \
+@my_libs@ @edje_libs@ \
+$(top_builddir)/src/lib/libemotion.la
+
+
--- /dev/null
+#include <Evas.h>
+#include <Ecore.h>
+#ifndef FB_ONLY
+#include <Ecore_X.h>
+#else
+#include <Ecore_Fb.h>
+#endif
+#include <Ecore_Evas.h>
+#include <Edje.h>
+
+#include "Emotion.h"
+
+#include "config.h"
+
+typedef struct _Frame_Data Frame_Data;
+
+struct _Frame_Data
+{
+ char moving : 1;
+ char resizing : 1;
+ int button;
+ Evas_Coord x, y;
+};
+
+static int main_start(int argc, char **argv);
+static void main_stop(void);
+static void main_resize(Ecore_Evas *ee);
+static int main_signal_exit(void *data, int ev_type, void *ev);
+static void main_delete_request(Ecore_Evas *ee);
+
+void bg_setup(void);
+void bg_resize(Evas_Coord w, Evas_Coord h);
+static void bg_key_down(void *data, Evas * e, Evas_Object * obj, void *event_info);
+
+static Evas_Object *o_bg = NULL;
+
+double start_time = 0.0;
+Ecore_Evas *ecore_evas = NULL;
+Evas *evas = NULL;
+int startw = 800;
+int starth = 600;
+
+Evas_List *video_objs = NULL;
+
+static int
+main_start(int argc, char **argv)
+{
+ int mode = 0;
+
+ start_time = ecore_time_get();
+ if (!ecore_init()) return -1;
+ ecore_app_args_set(argc, (const char **)argv);
+ ecore_event_handler_add(ECORE_EVENT_SIGNAL_EXIT, main_signal_exit, NULL);
+ if (!ecore_evas_init()) return -1;
+#ifndef FB_ONLY
+ {
+ int i;
+
+ for (i = 1; i < argc; i++)
+ {
+ if (((!strcmp(argv[i], "-g")) ||
+ (!strcmp(argv[i], "-geometry")) ||
+ (!strcmp(argv[i], "--geometry"))) && (i < (argc - 1)))
+ {
+ int n, w, h;
+ char buf[16], buf2[16];
+
+ n = sscanf(argv[i +1], "%10[^x]x%10s", buf, buf2);
+ if (n == 2)
+ {
+ w = atoi(buf);
+ h = atoi(buf2);
+ startw = w;
+ starth = h;
+ }
+ i++;
+ }
+ else if (!strcmp(argv[i], "-gl"))
+ {
+ mode = 1;
+ }
+ else if (!strcmp(argv[i], "-fb"))
+ {
+ mode = 2;
+ }
+ }
+ }
+ if (mode == 0)
+ ecore_evas = ecore_evas_software_x11_new(NULL, 0, 0, 0, startw, starth);
+ else if (mode == 1)
+ ecore_evas = ecore_evas_gl_x11_new(NULL, 0, 0, 0, startw, starth);
+ else if (mode == 2)
+ ecore_evas = ecore_evas_fb_new(NULL, 0, startw, starth);
+#else
+ startw = 240;
+ starth = 320;
+ ecore_evas = ecore_evas_fb_new(NULL, 270, startw, starth);
+#endif
+ if (!ecore_evas) return -1;
+ ecore_evas_callback_delete_request_set(ecore_evas, main_delete_request);
+ ecore_evas_callback_resize_set(ecore_evas, main_resize);
+ ecore_evas_title_set(ecore_evas, "Evas Media Test Program");
+ ecore_evas_name_class_set(ecore_evas, "evas_media_test", "main");
+ ecore_evas_show(ecore_evas);
+ evas = ecore_evas_get(ecore_evas);
+ evas_image_cache_set(evas, 8 * 1024 * 1024);
+ evas_font_cache_set(evas, 1 * 1024 * 1024);
+ evas_font_path_append(evas, PACKAGE_DATA_DIR"/data/fonts");
+
+ edje_init();
+ edje_frametime_set(1.0 / 30.0);
+ return 1;
+}
+
+static void
+main_stop(void)
+{
+ ecore_evas_shutdown();
+ ecore_shutdown();
+}
+
+static void
+main_resize(Ecore_Evas *ee)
+{
+ Evas_Coord w, h;
+
+ evas_output_viewport_get(evas, NULL, NULL, &w, &h);
+ bg_resize(w, h);
+}
+
+static int
+main_signal_exit(void *data, int ev_type, void *ev)
+{
+ ecore_main_loop_quit();
+ return 1;
+}
+
+static void
+main_delete_request(Ecore_Evas *ee)
+{
+ ecore_main_loop_quit();
+}
+
+void
+bg_setup(void)
+{
+ Evas_Object *o;
+
+ o = edje_object_add(evas);
+ edje_object_file_set(o, PACKAGE_DATA_DIR"/data/theme.eet", "background");
+ evas_object_move(o, 0, 0);
+ evas_object_resize(o, startw, starth);
+ evas_object_layer_set(o, -999);
+ evas_object_show(o);
+ evas_object_focus_set(o, 1);
+ evas_object_event_callback_add(o, EVAS_CALLBACK_KEY_DOWN, bg_key_down, NULL);
+ o_bg = o;
+}
+
+void
+bg_resize(Evas_Coord w, Evas_Coord h)
+{
+ evas_object_resize(o_bg, w, h);
+}
+
+static void
+broadcast_event(Emotion_Event ev)
+{
+ Evas_List *l;
+
+ for (l = video_objs; l; l = l->next)
+ {
+ Evas_Object *obj;
+
+ obj = l->data;
+ emotion_object_event_simple_send(obj, ev);
+ }
+}
+
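+/* Keyboard controls for the test program (dispatched below): Escape quits;
+ * the arrow keys, Return, "m", Prior/Next, the digits and "-" send DVD-style
+ * navigation events to every video object; "[" and "]" lower and raise the
+ * audio volume; "a" and "v" toggle audio and video mute; "i" prints channel
+ * counts and seekability; "f", "d", "s" and "b" toggle fullscreen,
+ * avoid-damage, shaped and borderless modes of the window. */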
+static void
+bg_key_down(void *data, Evas * e, Evas_Object * obj, void *event_info)
+{
+ Evas_Event_Key_Down *ev;
+
+ ev = (Evas_Event_Key_Down *)event_info;
+ if (!strcmp(ev->keyname, "Escape"))
+ ecore_main_loop_quit();
+ else if (!strcmp(ev->keyname, "Up"))
+ broadcast_event(EMOTION_EVENT_UP);
+ else if (!strcmp(ev->keyname, "Down"))
+ broadcast_event(EMOTION_EVENT_DOWN);
+ else if (!strcmp(ev->keyname, "Left"))
+ broadcast_event(EMOTION_EVENT_LEFT);
+ else if (!strcmp(ev->keyname, "Right"))
+ broadcast_event(EMOTION_EVENT_RIGHT);
+ else if (!strcmp(ev->keyname, "Return"))
+ broadcast_event(EMOTION_EVENT_SELECT);
+ else if (!strcmp(ev->keyname, "m"))
+ broadcast_event(EMOTION_EVENT_MENU1);
+ else if (!strcmp(ev->keyname, "Prior"))
+ broadcast_event(EMOTION_EVENT_PREV);
+ else if (!strcmp(ev->keyname, "Next"))
+ broadcast_event(EMOTION_EVENT_NEXT);
+ else if (!strcmp(ev->keyname, "0"))
+ broadcast_event(EMOTION_EVENT_0);
+ else if (!strcmp(ev->keyname, "1"))
+ broadcast_event(EMOTION_EVENT_1);
+ else if (!strcmp(ev->keyname, "2"))
+ broadcast_event(EMOTION_EVENT_2);
+ else if (!strcmp(ev->keyname, "3"))
+ broadcast_event(EMOTION_EVENT_3);
+ else if (!strcmp(ev->keyname, "4"))
+ broadcast_event(EMOTION_EVENT_4);
+ else if (!strcmp(ev->keyname, "5"))
+ broadcast_event(EMOTION_EVENT_5);
+ else if (!strcmp(ev->keyname, "6"))
+ broadcast_event(EMOTION_EVENT_6);
+ else if (!strcmp(ev->keyname, "7"))
+ broadcast_event(EMOTION_EVENT_7);
+ else if (!strcmp(ev->keyname, "8"))
+ broadcast_event(EMOTION_EVENT_8);
+ else if (!strcmp(ev->keyname, "9"))
+ broadcast_event(EMOTION_EVENT_9);
+ else if (!strcmp(ev->keyname, "-"))
+ broadcast_event(EMOTION_EVENT_10);
+ else if (!strcmp(ev->keyname, "bracketleft"))
+ {
+ Evas_List *l;
+
+ for (l = video_objs; l; l = l->next)
+ {
+ Evas_Object *obj;
+
+ obj = l->data;
+ emotion_object_audio_volume_set(obj, emotion_object_audio_volume_get(obj) - 0.1);
+ }
+ }
+ else if (!strcmp(ev->keyname, "bracketright"))
+ {
+ Evas_List *l;
+
+ for (l = video_objs; l; l = l->next)
+ {
+ Evas_Object *obj;
+
+ obj = l->data;
+ emotion_object_audio_volume_set(obj, emotion_object_audio_volume_get(obj) + 0.1);
+ }
+ }
+ else if (!strcmp(ev->keyname, "v"))
+ {
+ Evas_List *l;
+
+ for (l = video_objs; l; l = l->next)
+ {
+ Evas_Object *obj;
+
+ obj = l->data;
+ if (emotion_object_video_mute_get(obj))
+ emotion_object_video_mute_set(obj, 0);
+ else
+ emotion_object_video_mute_set(obj, 1);
+ }
+ }
+ else if (!strcmp(ev->keyname, "a"))
+ {
+ Evas_List *l;
+
+ for (l = video_objs; l; l = l->next)
+ {
+ Evas_Object *obj;
+
+ obj = l->data;
+ if (emotion_object_audio_mute_get(obj))
+ {
+ emotion_object_audio_mute_set(obj, 0);
+ printf("unmute\n");
+ }
+ else
+ {
+ emotion_object_audio_mute_set(obj, 1);
+ printf("mute\n");
+ }
+ }
+ }
+ else if (!strcmp(ev->keyname, "i"))
+ {
+ Evas_List *l;
+
+ for (l = video_objs; l; l = l->next)
+ {
+ Evas_Object *obj;
+
+ obj = l->data;
+ printf("audio channels: %i\n", emotion_object_audio_channel_count(obj));
+ printf("video channels: %i\n", emotion_object_video_channel_count(obj));
+ printf("spu channels: %i\n", emotion_object_spu_channel_count(obj));
+ printf("seekable: %i\n", emotion_object_seekable_get(obj));
+ }
+ }
+ else if (!strcmp(ev->keyname, "f"))
+ {
+ if (!ecore_evas_fullscreen_get(ecore_evas))
+ ecore_evas_fullscreen_set(ecore_evas, 1);
+ else
+ ecore_evas_fullscreen_set(ecore_evas, 0);
+ }
+ else if (!strcmp(ev->keyname, "d"))
+ {
+ if (!ecore_evas_avoid_damage_get(ecore_evas))
+ ecore_evas_avoid_damage_set(ecore_evas, 1);
+ else
+ ecore_evas_avoid_damage_set(ecore_evas, 0);
+ }
+ else if (!strcmp(ev->keyname, "s"))
+ {
+ if (!ecore_evas_shaped_get(ecore_evas))
+ {
+ ecore_evas_shaped_set(ecore_evas, 1);
+ evas_object_hide(o_bg);
+ }
+ else
+ {
+ ecore_evas_shaped_set(ecore_evas, 0);
+ evas_object_show(o_bg);
+ }
+ }
+ else if (!strcmp(ev->keyname, "b"))
+ {
+ if (!ecore_evas_borderless_get(ecore_evas))
+ ecore_evas_borderless_set(ecore_evas, 1);
+ else
+ ecore_evas_borderless_set(ecore_evas, 0);
+ }
+ else
+ {
+ printf("UNHANDLED: %s\n", ev->keyname);
+ }
+}
+
+static void
+video_obj_in_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+// evas_object_color_set(obj, 255, 255, 255, 100);
+}
+
+static void
+video_obj_out_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+// evas_object_color_set(obj, 255, 255, 255, 200);
+}
+
+static void
+video_obj_down_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Down *e;
+
+ e = event_info;
+ evas_object_color_set(obj, 255, 50, 40, 200);
+ evas_object_raise(obj);
+}
+
+static void
+video_obj_up_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Up *e;
+
+ e = event_info;
+ evas_object_color_set(obj, 255, 255, 255, 100);
+}
+
+static void
+video_obj_move_cb(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Move *e;
+
+ e = event_info;
+ if (e->buttons & 0x1)
+ {
+ Evas_Coord x, y;
+
+ evas_object_geometry_get(obj, &x, &y, NULL, NULL);
+ x += e->cur.canvas.x - e->prev.canvas.x;
+ y += e->cur.canvas.y - e->prev.canvas.y;
+ evas_object_move(obj, x, y);
+ }
+ else if (e->buttons & 0x4)
+ {
+ Evas_Coord w, h;
+
+ evas_object_geometry_get(obj, NULL, NULL, &w, &h);
+ w += e->cur.canvas.x - e->prev.canvas.x;
+ h += e->cur.canvas.y - e->prev.canvas.y;
+ evas_object_resize(obj, w, h);
+ }
+}
+
+static void
+video_obj_frame_decode_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+ double pos, len;
+ char buf[256];
+ int ph, pm, ps, pf, lh, lm, ls;
+
+ oe = data;
+ pos = emotion_object_position_get(obj);
+ len = emotion_object_play_length_get(obj);
+// printf("%3.3f, %3.3f\n", pos, len);
+ edje_object_part_drag_value_set(oe, "video_progress", pos / len, 0.0);
+ lh = len / 3600;
+ lm = len / 60 - (lh * 60);
+ ls = len - (lm * 60);
+ ph = pos / 3600;
+ pm = pos / 60 - (ph * 60);
+ ps = pos - (pm * 60);
+ pf = pos * 100 - (ps * 100) - (pm * 60 * 100) - (ph * 60 * 60 * 100);
+ snprintf(buf, sizeof(buf), "%i:%02i:%02i.%02i / %i:%02i:%02i",
+ ph, pm, ps, pf, lh, lm, ls);
+ edje_object_part_text_set(oe, "video_progress_txt", buf);
+
+ if (0)
+ {
+ double t;
+ static double pt = 0.0;
+ t = ecore_time_get();
+ printf("FPS: %3.3f\n", 1.0 / (t - pt));
+ pt = t;
+ }
+}
+
+static void
+video_obj_frame_resize_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+ int iw, ih;
+ Evas_Coord w, h;
+ double ratio;
+
+ oe = data;
+ emotion_object_size_get(obj, &iw, &ih);
+ ratio = emotion_object_ratio_get(obj);
+ printf("HANDLE %ix%i @ %3.3f\n", iw, ih, ratio);
+ if (ratio > 0.0) iw = ih * ratio;
+ edje_extern_object_min_size_set(obj, iw, ih);
+ edje_object_part_swallow(oe, "video_swallow", obj);
+ edje_object_size_min_calc(oe, &w, &h);
+ evas_object_resize(oe, w, h);
+ edje_extern_object_min_size_set(obj, 0, 0);
+ edje_object_part_swallow(oe, "video_swallow", obj);
+}
+
+static void
+video_obj_length_change_cb(void *data, Evas_Object *obj, void *event_info)
+{
+ Evas_Object *oe;
+ double pos, len;
+ char buf[256];
+ int ph, pm, ps, pf, lh, lm, ls;
+
+ oe = data;
+ pos = emotion_object_position_get(obj);
+ len = emotion_object_play_length_get(obj);
+ edje_object_part_drag_value_set(oe, "video_progress", pos / len, 0.0);
+ lh = len / 3600;
+ lm = len / 60 - (lh * 60);
+ ls = len - (lm * 60);
+ ph = pos / 3600;
+ pm = pos / 60 - (ph * 60);
+ ps = pos - (pm * 60);
+ pf = pos * 100 - (ps * 100) - (pm * 60 * 100) - (ph * 60 * 60 * 100);
+ snprintf(buf, sizeof(buf), "%i:%02i:%02i.%02i / %i:%02i:%02i",
+ ph, pm, ps, pf, lh, lm, ls);
+ edje_object_part_text_set(oe, "video_progress_txt", buf);
+}
+
+
+
+
+static void
+video_obj_signal_play_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+
+ ov = data;
+ emotion_object_play_set(ov, 1);
+ edje_object_signal_emit(o, "video_state", "play");
+}
+
+static void
+video_obj_signal_pause_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+
+ ov = data;
+ emotion_object_play_set(ov, 0);
+ edje_object_signal_emit(o, "video_state", "pause");
+}
+
+static void
+video_obj_signal_stop_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+
+ ov = data;
+ emotion_object_play_set(ov, 0);
+ emotion_object_position_set(ov, 0);
+ edje_object_signal_emit(o, "video_state", "stop");
+}
+
+static void
+video_obj_signal_jump_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+ double pos, len;
+ double x, y;
+
+ ov = data;
+ edje_object_part_drag_value_get(o, source, &x, &y);
+ len = emotion_object_play_length_get(ov);
+ emotion_object_position_set(ov, x * len);
+}
+
+static void
+video_obj_signal_speed_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Evas_Object *ov;
+ double spd;
+ double x, y;
+ char buf[256];
+
+ ov = data;
+ edje_object_part_drag_value_get(o, source, &x, &y);
+ spd = (y * 20.0) - 10.0;
+ emotion_object_play_speed_set(ov, spd);
+ snprintf(buf, sizeof(buf), "%1.1f", spd);
+ edje_object_part_text_set(o, "video_speed_txt", buf);
+}
+
+static void
+video_obj_signal_frame_move_start_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+ Evas_Coord x, y;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->moving = 1;
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ fd->x = x;
+ fd->y = y;
+ evas_object_raise(o);
+}
+
+static void
+video_obj_signal_frame_move_stop_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->moving = 0;
+}
+
+static void
+video_obj_signal_frame_resize_start_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+ Evas_Coord x, y;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->resizing = 1;
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ fd->x = x;
+ fd->y = y;
+ evas_object_raise(o);
+}
+
+static void
+video_obj_signal_frame_resize_stop_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+
+ fd = evas_object_data_get(o, "frame_data");
+ fd->resizing = 0;
+}
+
+static void
+video_obj_signal_frame_move_cb(void *data, Evas_Object *o, const char *emission, const char *source)
+{
+ Frame_Data *fd;
+
+ fd = evas_object_data_get(o, "frame_data");
+ if (fd->moving)
+ {
+ Evas_Coord x, y, ox, oy;
+
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ evas_object_geometry_get(o, &ox, &oy, NULL, NULL);
+ evas_object_move(o, ox + (x - fd->x), oy + (y - fd->y));
+ fd->x = x;
+ fd->y = y;
+ }
+ else if (fd->resizing)
+ {
+ Evas_Coord x, y, ow, oh;
+
+ evas_pointer_canvas_xy_get(evas_object_evas_get(o), &x, &y);
+ evas_object_geometry_get(o, NULL, NULL, &ow, &oh);
+ evas_object_resize(o, ow + (x - fd->x), oh + (y - fd->y));
+ fd->x = x;
+ fd->y = y;
+ }
+}
+
+
+static void
+init_video_object(char *file)
+{
+ Evas_Object *o, *oe;
+ int iw, ih;
+ Evas_Coord w, h;
+ Frame_Data *fd;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/* basic video object setup */
+ o = emotion_object_add(evas);
+ emotion_object_file_set(o, file);
+ emotion_object_play_set(o, 1);
+ evas_object_move(o, 0, 0);
+ evas_object_resize(o, 320, 240);
+ emotion_object_smooth_scale_set(o, 1);
+ evas_object_show(o);
+/* end basic video setup. all the rest here is just to be fancy */
+
+
+
+
+
+
+
+
+
+
+
+
+
+ video_objs = evas_list_append(video_objs, o);
+
+ emotion_object_size_get(o, &iw, &ih);
+ w = iw; h = ih;
+
+ fd = calloc(1, sizeof(Frame_Data));
+
+ oe = edje_object_add(evas);
+ evas_object_data_set(oe, "frame_data", fd);
+ edje_object_file_set(oe, PACKAGE_DATA_DIR"/data/theme.eet", "video_controller");
+ edje_extern_object_min_size_set(o, w, h);
+ edje_object_part_swallow(oe, "video_swallow", o);
+ edje_object_size_min_calc(oe, &w, &h);
+// evas_object_move(oe, rand() % (int)(startw - w), rand() % (int)(starth - h));
+ evas_object_move(oe, 0, 0);
+ evas_object_resize(oe, w, h);
+ edje_extern_object_min_size_set(o, 0, 0);
+ edje_object_part_swallow(oe, "video_swallow", o);
+
+ evas_object_smart_callback_add(o, "frame_decode", video_obj_frame_decode_cb, oe);
+ evas_object_smart_callback_add(o, "frame_resize", video_obj_frame_resize_cb, oe);
+ evas_object_smart_callback_add(o, "length_change", video_obj_length_change_cb, oe);
+
+ edje_object_signal_callback_add(oe, "video_control", "play", video_obj_signal_play_cb, o);
+ edje_object_signal_callback_add(oe, "video_control", "pause", video_obj_signal_pause_cb, o);
+ edje_object_signal_callback_add(oe, "video_control", "stop", video_obj_signal_stop_cb, o);
+ edje_object_signal_callback_add(oe, "drag", "video_progress", video_obj_signal_jump_cb, o);
+ edje_object_signal_callback_add(oe, "drag", "video_speed", video_obj_signal_speed_cb, o);
+
+ edje_object_signal_callback_add(oe, "frame_move", "start", video_obj_signal_frame_move_start_cb, oe);
+ edje_object_signal_callback_add(oe, "frame_move", "stop", video_obj_signal_frame_move_stop_cb, oe);
+ edje_object_signal_callback_add(oe, "frame_resize", "start", video_obj_signal_frame_resize_start_cb, oe);
+ edje_object_signal_callback_add(oe, "frame_resize", "stop", video_obj_signal_frame_resize_stop_cb, oe);
+ edje_object_signal_callback_add(oe, "mouse,move", "*", video_obj_signal_frame_move_cb, oe);
+
+ edje_object_part_drag_value_set(oe, "video_speed", 0.0, 11.0 / 20.0);
+ edje_object_part_text_set(oe, "video_speed_txt", "1.0");
+
+   edje_object_signal_emit(oe, "video_state", "play");
+
+ evas_object_show(oe);
+}
+
+static int
+enter_idle(void *data)
+{
+ double t;
+ static double pt = 0.0;
+ static int frames = 0;
+
+ t = ecore_time_get();
+ if (frames == 0) pt = t;
+ frames++;
+ if (frames == 100)
+ {
+// printf("FPS: %3.3f\n", frames / (t - pt));
+ frames = 0;
+ }
+ return 1;
+}
+
+int
+main(int argc, char **argv)
+{
+ char *file;
+ int i;
+
+ if (main_start(argc, argv) < 1) return -1;
+ bg_setup();
+
+ for (i = 1; i < argc; i++)
+ {
+ if (((!strcmp(argv[i], "-g")) ||
+ (!strcmp(argv[i], "-geometry")) ||
+ (!strcmp(argv[i], "--geometry"))) && (i < (argc - 1)))
+ i++;
+ else if (((!strcmp(argv[i], "-h")) ||
+ (!strcmp(argv[i], "-help")) ||
+ (!strcmp(argv[i], "--help"))))
+ {
+ printf("Usage:\n");
+ printf(" %s [-gl] [-g WxH] \n", argv[0]);
+ exit(-1);
+ }
+ else if (!strcmp(argv[i], "-gl"))
+ {
+ }
+ else if (!strcmp(argv[i], "-fb"))
+ {
+ }
+ else
+ {
+ init_video_object(argv[i]);
+ }
+ }
+
+ ecore_idle_enterer_add(enter_idle, NULL);
+
+ ecore_main_loop_begin();
+ main_stop();
+ return 0;
+}
+
--- /dev/null
+#ifndef EMOTION_H
+#define EMOTION_H
+
+#include <Evas.h>
+#include <Ecore.h>
+
+enum _Emotion_Event
+{
+ EMOTION_EVENT_MENU1,
+ EMOTION_EVENT_MENU2,
+ EMOTION_EVENT_MENU3,
+ EMOTION_EVENT_MENU4,
+ EMOTION_EVENT_MENU5,
+ EMOTION_EVENT_MENU6,
+ EMOTION_EVENT_MENU7,
+ EMOTION_EVENT_UP,
+ EMOTION_EVENT_DOWN,
+ EMOTION_EVENT_LEFT,
+ EMOTION_EVENT_RIGHT,
+ EMOTION_EVENT_SELECT,
+ EMOTION_EVENT_NEXT,
+ EMOTION_EVENT_PREV,
+ EMOTION_EVENT_ANGLE_NEXT,
+ EMOTION_EVENT_ANGLE_PREV,
+ EMOTION_EVENT_FORCE,
+ EMOTION_EVENT_0,
+ EMOTION_EVENT_1,
+ EMOTION_EVENT_2,
+ EMOTION_EVENT_3,
+ EMOTION_EVENT_4,
+ EMOTION_EVENT_5,
+ EMOTION_EVENT_6,
+ EMOTION_EVENT_7,
+ EMOTION_EVENT_8,
+ EMOTION_EVENT_9,
+ EMOTION_EVENT_10
+};
+
+typedef enum _Emotion_Event Emotion_Event;
+
+#define EMOTION_CHANNEL_AUTO -1
+#define EMOTION_CHANNEL_DEFAULT 0
+
+/* api calls available */
+Evas_Object *emotion_object_add (Evas *evas);
+void emotion_object_file_set (Evas_Object *obj, const char *file);
+const char *emotion_object_file_get (Evas_Object *obj);
+void emotion_object_play_set (Evas_Object *obj, Evas_Bool play);
+Evas_Bool emotion_object_play_get (Evas_Object *obj);
+void emotion_object_position_set (Evas_Object *obj, double sec);
+double emotion_object_position_get (Evas_Object *obj);
+Evas_Bool emotion_object_seekable_get (Evas_Object *obj);
+double emotion_object_play_length_get (Evas_Object *obj);
+void emotion_object_size_get (Evas_Object *obj, int *iw, int *ih);
+void emotion_object_smooth_scale_set (Evas_Object *obj, Evas_Bool smooth);
+Evas_Bool emotion_object_smooth_scale_get (Evas_Object *obj);
+double emotion_object_ratio_get (Evas_Object *obj);
+void emotion_object_event_simple_send (Evas_Object *obj, Emotion_Event ev);
+void emotion_object_audio_volume_set (Evas_Object *obj, double vol);
+double emotion_object_audio_volume_get (Evas_Object *obj);
+void emotion_object_audio_mute_set (Evas_Object *obj, Evas_Bool mute);
+Evas_Bool emotion_object_audio_mute_get (Evas_Object *obj);
+int emotion_object_audio_channel_count (Evas_Object *obj);
+const char *emotion_object_audio_channel_name_get(Evas_Object *obj, int channel);
+void emotion_object_audio_channel_set (Evas_Object *obj, int channel);
+int emotion_object_audio_channel_get (Evas_Object *obj);
+void emotion_object_video_mute_set (Evas_Object *obj, Evas_Bool mute);
+Evas_Bool emotion_object_video_mute_get (Evas_Object *obj);
+int emotion_object_video_channel_count (Evas_Object *obj);
+const char *emotion_object_video_channel_name_get(Evas_Object *obj, int channel);
+void emotion_object_video_channel_set (Evas_Object *obj, int channel);
+int emotion_object_video_channel_get (Evas_Object *obj);
+void emotion_object_spu_mute_set (Evas_Object *obj, Evas_Bool mute);
+Evas_Bool emotion_object_spu_mute_get (Evas_Object *obj);
+int emotion_object_spu_channel_count (Evas_Object *obj);
+const char *emotion_object_spu_channel_name_get(Evas_Object *obj, int channel);
+void emotion_object_spu_channel_set (Evas_Object *obj, int channel);
+int emotion_object_spu_channel_get (Evas_Object *obj);
+int emotion_object_chapter_count (Evas_Object *obj);
+void emotion_object_chapter_set (Evas_Object *obj, int chapter);
+int emotion_object_chapter_get (Evas_Object *obj);
+const char *emotion_object_chapter_name_get (Evas_Object *obj, int chapter);
+void emotion_object_play_speed_set (Evas_Object *obj, double speed);
+double emotion_object_play_speed_get (Evas_Object *obj);
+void emotion_object_eject (Evas_Object *obj);
+
+#endif
--- /dev/null
+## Process this file with automake to produce Makefile.in
+
+AUTOMAKE_OPTIONS = 1.4 foreign
+
+MAINTAINERCLEANFILES = Makefile.in
+
+LDFLAGS =
+INCLUDES = -I$(includedir) \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/src/lib \
+ @my_cflags@
+
+lib_LTLIBRARIES = libemotion.la
+include_HEADERS = Emotion.h
+libemotion_la_SOURCES = \
+emotion_smart.c \
+emotion_private.h
+
+libemotion_la_LIBADD = $(LDFLAGS) @my_libs@ -ldl
+libemotion_la_DEPENDENCIES = $(top_builddir)/config.h
+libemotion_la_LDFLAGS = -version-info 0:1:0
--- /dev/null
+#ifndef EMOTION_PRIVATE_H
+#define EMOTION_PRIVATE_H
+
+#include <Evas.h>
+#include <Ecore.h>
+#include <Ecore_Job.h>
+
+#include "config.h"
+
+typedef struct _Emotion_Video_Module Emotion_Video_Module;
+
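+/* Function table that a video decoder backend fills in. A backend is a
+ * shared object installed in the emotion module directory which exports
+ * module_open(), returning one of these structures (module_close() is
+ * looked up too and called if present); see _emotion_module_open() in
+ * emotion_smart.c. */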
+struct _Emotion_Video_Module
+{
+ int (*init) (void);
+ int (*shutdown) (void);
+ void * (*file_open) (const char *file, Evas_Object *obj);
+ void (*file_close) (void *ef);
+ void (*play) (void *ef, double pos);
+ void (*stop) (void *ef);
+ void (*size_get) (void *ef, int *w, int *h);
+ void (*pos_set) (void *ef, double pos);
+ double (*len_get) (void *ef);
+ double (*fps_get) (void *ef);
+ double (*pos_get) (void *ef);
+ double (*ratio_get) (void *ef);
+ int (*seekable) (void *ef);
+ void (*frame_done) (void *ef);
+ void (*yuv_size_get) (void *ef, int *w, int *h);
+ int (*yuv_rows_get) (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+ void (*event_feed) (void *ef, int event);
+ void (*event_mouse_button_feed) (void *ef, int button, int x, int y);
+ void (*event_mouse_move_feed) (void *ef, int x, int y);
+ int (*video_channel_count) (void *ef);
+ void (*video_channel_set) (void *ef, int channel);
+ int (*video_channel_get) (void *ef);
+ const char * (*video_channel_name_get) (void *ef, int channel);
+ void (*video_channel_mute_set) (void *ef, int mute);
+ int (*video_channel_mute_get) (void *ef);
+ int (*audio_channel_count) (void *ef);
+ void (*audio_channel_set) (void *ef, int channel);
+ int (*audio_channel_get) (void *ef);
+ const char * (*audio_channel_name_get) (void *ef, int channel);
+ void (*audio_channel_mute_set) (void *ef, int mute);
+ int (*audio_channel_mute_get) (void *ef);
+ void (*audio_channel_volume_set) (void *ef, double vol);
+ double (*audio_channel_volume_get) (void *ef);
+ int (*spu_channel_count) (void *ef);
+ void (*spu_channel_set) (void *ef, int channel);
+ int (*spu_channel_get) (void *ef);
+ const char * (*spu_channel_name_get) (void *ef, int channel);
+ void (*spu_channel_mute_set) (void *ef, int mute);
+ int (*spu_channel_mute_get) (void *ef);
+ int (*chapter_count) (void *ef);
+ void (*chapter_set) (void *ef, int chapter);
+ int (*chapter_get) (void *ef);
+ const char * (*chapter_name_get) (void *ef, int chapter);
+ void (*speed_set) (void *ef, double speed);
+ double (*speed_get) (void *ef);
+ int (*eject) (void *ef);
+
+ void *handle;
+};
+
+void *_emotion_video_get(Evas_Object *obj);
+void _emotion_frame_new(Evas_Object *obj);
+void _emotion_video_pos_update(Evas_Object *obj, double pos, double len);
+void _emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio);
+
+#endif
--- /dev/null
+#include "Emotion.h"
+#include "emotion_private.h"
+#include <dlfcn.h>
+
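+/* Helper macros: fetch the smart data for an object and quietly return
+ * (optionally with a value) if the object is NULL, has no smart data or is
+ * not an emotion object. Used at the top of every public call below. */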
+#define E_SMART_OBJ_GET(smart, o, type) \
+ { \
+ char *_e_smart_str; \
+ \
+ if (!o) return; \
+ smart = evas_object_smart_data_get(o); \
+ if (!smart) return; \
+ _e_smart_str = (char *)evas_object_type_get(o); \
+ if (!_e_smart_str) return; \
+ if (strcmp(_e_smart_str, type)) return; \
+ }
+
+#define E_SMART_OBJ_GET_RETURN(smart, o, type, ret) \
+ { \
+ char *_e_smart_str; \
+ \
+ if (!o) return ret; \
+ smart = evas_object_smart_data_get(o); \
+ if (!smart) return ret; \
+ _e_smart_str = (char *)evas_object_type_get(o); \
+ if (!_e_smart_str) return ret; \
+ if (strcmp(_e_smart_str, type)) return ret; \
+ }
+
+#define E_OBJ_NAME "emotion_object"
+
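+/* Per-object private state: the loaded decoder module and its opaque video
+ * handle, the current file name, the Evas image object used for display,
+ * aspect ratio, playback position/length, and a deferred seek job plus
+ * play/seek flags. */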
+typedef struct _Smart_Data Smart_Data;
+
+struct _Smart_Data
+{
+ Emotion_Video_Module *module;
+ void *video;
+
+ char *file;
+ Evas_Object *obj;
+ double ratio;
+ double pos;
+ double seek_pos;
+ double len;
+
+ Ecore_Job *job;
+
+ unsigned char play : 1;
+ unsigned char seek : 1;
+};
+
+static void _mouse_move(void *data, Evas *ev, Evas_Object *obj, void *event_info);
+static void _mouse_down(void *data, Evas *ev, Evas_Object *obj, void *event_info);
+static void _pos_set_job(void *data);
+static void _pixels_get(void *data, Evas_Object *obj);
+
+static void _smart_init(void);
+static void _smart_add(Evas_Object * obj);
+static void _smart_del(Evas_Object * obj);
+static void _smart_layer_set(Evas_Object * obj, int layer);
+static void _smart_raise(Evas_Object * obj);
+static void _smart_lower(Evas_Object * obj);
+static void _smart_stack_above(Evas_Object * obj, Evas_Object * above);
+static void _smart_stack_below(Evas_Object * obj, Evas_Object * below);
+static void _smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y);
+static void _smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h);
+static void _smart_show(Evas_Object * obj);
+static void _smart_hide(Evas_Object * obj);
+static void _smart_color_set(Evas_Object * obj, int r, int g, int b, int a);
+static void _smart_clip_set(Evas_Object * obj, Evas_Object * clip);
+static void _smart_clip_unset(Evas_Object * obj);
+
+/**********************************/
+/* Globals for the E Video Object */
+/**********************************/
+static Evas_Smart *smart = NULL;
+
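+/* Load a decoder backend: dlopen() the named module from the installed
+ * emotion module directory and call its module_open() to obtain the
+ * function table. Returns NULL if the module or the symbol is missing. */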
+static Emotion_Video_Module *
+_emotion_module_open(const char *name)
+{
+ void *handle;
+ char buf[4096];
+
+ snprintf(buf, sizeof(buf), "%s/%s", PACKAGE_LIB_DIR"/emotion/", name);
+ handle = dlopen(buf, RTLD_NOW | RTLD_GLOBAL);
+ if (handle)
+ {
+ Emotion_Video_Module *(*func_module_open) (void);
+
+ func_module_open = dlsym(handle, "module_open");
+ if (func_module_open)
+ {
+ Emotion_Video_Module *mod;
+
+ mod = func_module_open();
+ if (mod)
+ {
+ mod->handle = handle;
+ return mod;
+ }
+ }
+ dlclose(handle);
+ }
+ return NULL;
+}
+
+static void
+_emotion_module_close(Emotion_Video_Module *mod)
+{
+ void *handle;
+ void (*module_close) (Emotion_Video_Module *module);
+
+ handle = mod->handle;
+ module_close = dlsym(handle, "module_close");
+ if (module_close) module_close(mod);
+ dlclose(handle);
+}
+
+/*******************************/
+/* Externally accessible calls */
+/*******************************/
+Evas_Object *
+emotion_object_add(Evas *evas)
+{
+ _smart_init();
+ return evas_object_smart_add(evas, smart);
+}
+
+void
+emotion_object_file_set(Evas_Object *obj, const char *file)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+ if ((file) && (sd->file) && (!strcmp(file, sd->file))) return;
+ if (sd->file) free(sd->file);
+ sd->file = NULL;
+
+ if (file)
+ {
+ int w, h;
+
+ sd->file = strdup(file);
+ if (sd->video) sd->module->file_close(sd->video);
+ if (sd->module) _emotion_module_close(sd->module);
+ sd->module = _emotion_module_open("emotion_decoder_xine.so");
+ if (!sd->module) return;
+ sd->video = sd->module->file_open(sd->file, obj);
+ if (sd->video)
+ {
+ sd->module->size_get(sd->video, &w, &h);
+ evas_object_image_size_set(sd->obj, w, h);
+ sd->ratio = sd->module->ratio_get(sd->video);
+ sd->pos = 0.0;
+ if (sd->play) sd->module->play(sd->video, 0.0);
+ }
+ }
+ else
+ {
+ if (sd->video)
+ {
+ sd->module->file_close(sd->video);
+ evas_object_image_size_set(sd->obj, 0, 0);
+ }
+ if (sd->module) _emotion_module_close(sd->module);
+ sd->module = NULL;
+ }
+}
+
+const char *
+emotion_object_file_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->file;
+}
+
+void
+emotion_object_play_set(Evas_Object *obj, Evas_Bool play)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (((play) && (sd->play)) || ((!play) && (!sd->play))) return;
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->play = play;
+ if (sd->play) sd->module->play(sd->video, sd->pos);
+ else sd->module->stop(sd->video);
+}
+
+Evas_Bool
+emotion_object_play_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->video) return 0;
+ return sd->play;
+}
+
+void
+emotion_object_position_set(Evas_Object *obj, double sec)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->seek_pos = sec;
+ sd->seek = 1;
+ sd->pos = sd->seek_pos;
+ if (sd->job) ecore_job_del(sd->job);
+ sd->job = ecore_job_add(_pos_set_job, obj);
+}
+
+double
+emotion_object_position_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->pos;
+}
+
+Evas_Bool
+emotion_object_seekable_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->seekable(sd->video);
+}
+
+double
+emotion_object_play_length_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ sd->len = sd->module->len_get(sd->video);
+ return sd->len;
+}
+
+void
+emotion_object_size_get(Evas_Object *obj, int *iw, int *ih)
+{
+ Smart_Data *sd;
+
+ if (iw) *iw = 0;
+ if (ih) *ih = 0;
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_size_get(sd->obj, iw, ih);
+}
+
+void
+emotion_object_smooth_scale_set(Evas_Object *obj, Evas_Bool smooth)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_smooth_scale_set(sd->obj, smooth);
+}
+
+Evas_Bool
+emotion_object_smooth_scale_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return evas_object_image_smooth_scale_get(sd->obj);
+}
+
+double
+emotion_object_ratio_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->ratio;
+}
+
+void
+emotion_object_event_simple_send(Evas_Object *obj, Emotion_Event ev)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->event_feed(sd->video, ev);
+}
+
+void
+emotion_object_audio_volume_set(Evas_Object *obj, double vol)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->audio_channel_volume_set(sd->video, vol);
+}
+
+double
+emotion_object_audio_volume_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->module->audio_channel_volume_get(sd->video);
+}
+
+void
+emotion_object_audio_mute_set(Evas_Object *obj, Evas_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->audio_channel_mute_set(sd->video, mute);
+}
+
+Evas_Bool
+emotion_object_audio_mute_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_channel_mute_get(sd->video);
+}
+
+int
+emotion_object_audio_channel_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_channel_count(sd->video);
+}
+
+const char *
+emotion_object_audio_channel_name_get(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->audio_channel_name_get(sd->video, channel);
+}
+
+void
+emotion_object_audio_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->audio_channel_set(sd->video, channel);
+}
+
+int
+emotion_object_audio_channel_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->audio_channel_get(sd->video);
+}
+
+void
+emotion_object_video_mute_set(Evas_Object *obj, Evas_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->video_channel_mute_set(sd->video, mute);
+}
+
+Evas_Bool
+emotion_object_video_mute_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_channel_mute_get(sd->video);
+}
+
+int
+emotion_object_video_channel_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_channel_count(sd->video);
+}
+
+const char *
+emotion_object_video_channel_name_get(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->video_channel_name_get(sd->video, channel);
+}
+
+void
+emotion_object_video_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->video_channel_set(sd->video, channel);
+}
+
+int
+emotion_object_video_channel_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->video_channel_get(sd->video);
+}
+
+void
+emotion_object_spu_mute_set(Evas_Object *obj, Evas_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->spu_channel_mute_set(sd->video, mute);
+}
+
+Evas_Bool
+emotion_object_spu_mute_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->spu_channel_mute_get(sd->video);
+}
+
+int
+emotion_object_spu_channel_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->spu_channel_count(sd->video);
+}
+
+const char *
+emotion_object_spu_channel_name_get(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->spu_channel_name_get(sd->video, channel);
+}
+
+void
+emotion_object_spu_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->spu_channel_set(sd->video, channel);
+}
+
+int
+emotion_object_spu_channel_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->spu_channel_get(sd->video);
+}
+
+int
+emotion_object_chapter_count(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->chapter_count(sd->video);
+}
+
+void
+emotion_object_chapter_set(Evas_Object *obj, int chapter)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->chapter_set(sd->video, chapter);
+}
+
+int
+emotion_object_chapter_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video) return 0;
+ return sd->module->chapter_get(sd->video);
+}
+
+const char *
+emotion_object_chapter_name_get(Evas_Object *obj, int chapter)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video) return NULL;
+ return sd->module->chapter_name_get(sd->video, chapter);
+}
+
+void
+emotion_object_play_speed_set(Evas_Object *obj, double speed)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->speed_set(sd->video, speed);
+}
+
+double
+emotion_object_play_speed_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video) return 0.0;
+ return sd->module->speed_get(sd->video);
+}
+
+void
+emotion_object_eject(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video) return;
+ sd->module->eject(sd->video);
+}
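+
+/* A minimal usage sketch (illustrative only - assumes emotion_object_add()
+ * and emotion_object_file_set() declared earlier in this library, and an
+ * existing Evas canvas `evas`):
+ *
+ *   Evas_Object *o;
+ *
+ *   o = emotion_object_add(evas);
+ *   emotion_object_file_set(o, "/path/to/movie.avi");
+ *   evas_object_move(o, 0, 0);
+ *   evas_object_resize(o, 320, 240);
+ *   evas_object_show(o);
+ *   emotion_object_play_set(o, 1);
+ */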
+
+
+
+
+
+/*****************************/
+/* Utility calls for modules */
+/*****************************/
+
+void *
+_emotion_video_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->video;
+}
+
+void
+_emotion_frame_new(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_pixels_dirty_set(sd->obj, 1);
+}
+
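+/* called by a module whenever the playback position and/or length may have
+ * changed; the "frame_decode" and "length_change" smart callbacks are only
+ * emitted when the cached values actually change */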
+void
+_emotion_video_pos_update(Evas_Object *obj, double pos, double len)
+{
+ Smart_Data *sd;
+ int npos = 0, nlen = 0;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (pos != sd->pos) npos = 1;
+ if (len != sd->len) nlen = 1;
+ sd->pos = pos;
+ sd->len = len;
+ if (npos) evas_object_smart_callback_call(obj, "frame_decode", NULL);
+ if (nlen) evas_object_smart_callback_call(obj, "length_change", NULL);
+}
+
+void
+_emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio)
+{
+ Smart_Data *sd;
+ int iw, ih;
+ int changed = 0;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_size_get(sd->obj, &iw, &ih);
+ if ((w != iw) || (h != ih))
+ {
+ evas_object_image_size_set(sd->obj, w, h);
+ changed = 1;
+ }
+ if (ratio != sd->ratio)
+ {
+ sd->ratio = ratio;
+ changed = 1;
+ }
+ if (changed) evas_object_smart_callback_call(obj, "frame_resize", NULL);
+}
+
+
+/****************************/
+/* Internal object routines */
+/****************************/
+
+static void
+_mouse_move(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Move *e;
+ Smart_Data *sd;
+ int x, y, iw, ih;
+ Evas_Coord ox, oy, ow, oh;
+
+ e = event_info;
+ sd = data;
+ if (!sd->module) return;
+ if (!sd->video) return;
+ evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+ evas_object_image_size_get(obj, &iw, &ih);
+ if ((iw < 1) || (ih < 1)) return;
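+   /* map canvas coordinates to video pixel coordinates before feeding the
+    * event to the module (used e.g. for DVD menu navigation) */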
+ x = (((int)e->cur.canvas.x - ox) * iw) / ow;
+ y = (((int)e->cur.canvas.y - oy) * ih) / oh;
+ sd->module->event_mouse_move_feed(sd->video, x, y);
+}
+
+static void
+_mouse_down(void *data, Evas *ev, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Down *e;
+ Smart_Data *sd;
+ int x, y, iw, ih;
+ Evas_Coord ox, oy, ow, oh;
+
+ e = event_info;
+ sd = data;
+ if (!sd->module) return;
+ if (!sd->video) return;
+ evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+ evas_object_image_size_get(obj, &iw, &ih);
+ if ((iw < 1) || (ih < 1)) return;
+ x = (((int)e->canvas.x - ox) * iw) / ow;
+ y = (((int)e->canvas.y - oy) * ih) / oh;
+ sd->module->event_mouse_button_feed(sd->video, 1, x, y);
+}
+
+static void
+_pos_set_job(void *data)
+{
+ Evas_Object *obj;
+ Smart_Data *sd;
+
+ obj = data;
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->job = NULL;
+ if (sd->seek)
+ {
+ sd->module->pos_set(sd->video, sd->seek_pos);
+ sd->seek = 0;
+ }
+}
+
+/* called by evas when it needs pixels for the image object */
+static void
+_pixels_get(void *data, Evas_Object *obj)
+{
+ Evas_Pixel_Import_Source ps;
+ int iw, ih, w, h;
+ unsigned char **rows;
+ Smart_Data *sd;
+
+ sd = data;
+ evas_object_image_size_get(obj, &iw, &ih);
+ sd->module->yuv_size_get(sd->video, &w, &h);
+ if ((w != iw) || (h != ih))
+ {
+ evas_object_image_size_set(obj, w, h);
+ iw = w;
+ ih = h;
+ }
+ ps.format = EVAS_PIXEL_FORMAT_YUV420P_601;
+ ps.w = iw;
+ ps.h = ih;
+
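+   /* ps.rows holds h Y-row pointers followed by h/2 U-row and h/2 V-row
+    * pointers (2 * h pointers in total for planar YUV 4:2:0 data) */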
+ ps.rows = malloc(ps.h * 2 * sizeof(void *));
+ if (!ps.rows)
+ {
+ sd->module->frame_done(sd->video);
+ return;
+ }
+
+
+ rows = (unsigned char **)ps.rows;
+
+ if (sd->module->yuv_rows_get(sd->video, iw, ih,
+ rows,
+ &rows[ps.h],
+ &rows[ps.h + (ps.h / 2)]))
+ evas_object_image_pixels_import(obj, &ps);
+ evas_object_image_pixels_dirty_set(obj, 0);
+ free(ps.rows);
+ sd->module->frame_done(sd->video);
+}
+
+/*******************************************/
+/* Internal smart object required routines */
+/*******************************************/
+static void
+_smart_init(void)
+{
+ if (smart) return;
+ smart = evas_smart_new(E_OBJ_NAME,
+ _smart_add,
+ _smart_del,
+ _smart_layer_set,
+ _smart_raise,
+ _smart_lower,
+ _smart_stack_above,
+ _smart_stack_below,
+ _smart_move,
+ _smart_resize,
+ _smart_show,
+ _smart_hide,
+ _smart_color_set,
+ _smart_clip_set,
+ _smart_clip_unset,
+ NULL);
+}
+
+static void
+_smart_add(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = calloc(1, sizeof(Smart_Data));
+ if (!sd) return;
+ sd->obj = evas_object_image_add(evas_object_evas_get(obj));
+ evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_MOVE, _mouse_move, sd);
+ evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_DOWN, _mouse_down, sd);
+ evas_object_image_pixels_get_callback_set(sd->obj, _pixels_get, sd);
+ evas_object_smart_member_add(sd->obj, obj);
+ sd->ratio = 1.0;
+ evas_object_image_alpha_set(sd->obj, 0);
+ evas_object_smart_data_set(obj, sd);
+}
+
+static void
+_smart_del(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ if (sd->video) sd->module->file_close(sd->video);
+ if (sd->module) _emotion_module_close(sd->module);
+ evas_object_del(sd->obj);
+ if (sd->file) free(sd->file);
+ if (sd->job) ecore_job_del(sd->job);
+ free(sd);
+}
+
+static void
+_smart_layer_set(Evas_Object * obj, int layer)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_layer_set(sd->obj, layer);
+}
+
+static void
+_smart_raise(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_raise(sd->obj);
+}
+
+static void
+_smart_lower(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+   sd = evas_object_smart_data_get(obj);
+   if (!sd) return;
+   evas_object_lower(sd->obj);
+}
+
+static void
+_smart_stack_above(Evas_Object * obj, Evas_Object * above)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_stack_above(sd->obj, above);
+}
+
+static void
+_smart_stack_below(Evas_Object * obj, Evas_Object * below)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_stack_below(sd->obj, below);
+}
+
+static void
+_smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_move(sd->obj, x, y);
+}
+
+static void
+_smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_image_fill_set(sd->obj, 0, 0, w, h);
+ evas_object_resize(sd->obj, w, h);
+}
+
+static void
+_smart_show(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_show(sd->obj);
+}
+
+static void
+_smart_hide(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_hide(sd->obj);
+}
+
+static void
+_smart_color_set(Evas_Object * obj, int r, int g, int b, int a)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_color_set(sd->obj, r, g, b, a);
+}
+
+static void
+_smart_clip_set(Evas_Object * obj, Evas_Object * clip)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_clip_set(sd->obj, clip);
+}
+
+static void
+_smart_clip_unset(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_clip_unset(sd->obj);
+}
--- /dev/null
+## Process this file with automake to produce Makefile.in
+
+SUBDIRS = xine
+
+AUTOMAKE_OPTIONS = 1.4 foreign
+
+MAINTAINERCLEANFILES = Makefile.in
+
+LDFLAGS =
+INCLUDES = -I$(includedir) \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/src/lib \
+ -I$(top_srcdir)/src/modules \
+ @my_cflags@ @xine_cflags@
+
+pkgdir = $(libdir)/emotion
+
+pkg_LTLIBRARIES = emotion_decoder_xine.la
+emotion_decoder_xine_la_SOURCES = \
+emotion_xine.c \
+emotion_xine.h
+emotion_decoder_xine_la_LIBADD = @my_libs@ @xine_libs@
+emotion_decoder_xine_la_LDFLAGS = \
+$(LDFLAGS) \
+-no-undefined -module -avoid-version \
+-L$(top_builddir)/src/lib -L$(top_builddir)/src/lib/.libs
+emotion_decoder_xine_la_DEPENDENCIES = $(top_builddir)/config.h
--- /dev/null
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_xine.h"
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+static int init_count = 0;
+static int fd_read = -1;
+static int fd_write = -1;
+static Ecore_Fd_Handler *fd_handler = NULL;
+static xine_t *decoder = NULL;
+
+static int em_init(void);
+static int em_shutdown(void);
+static void *em_file_open(const char *file, Evas_Object *obj);
+static void em_file_close(void *ef);
+static void em_play(void *ef, double pos);
+static void em_stop(void *ef);
+static void em_size_get(void *ef, int *w, int *h);
+static void em_pos_set(void *ef, double pos);
+static double em_len_get(void *ef);
+static double em_fps_get(void *ef);
+static double em_pos_get(void *ef);
+static double em_ratio_get(void *ef);
+static int em_seekable(void *ef);
+static void em_frame_done(void *ef);
+static void em_yuv_size_get(void *ef, int *w, int *h);
+static int em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+static void em_event_feed(void *ef, int event);
+static void em_event_mouse_button_feed(void *ef, int button, int x, int y);
+static void em_event_mouse_move_feed(void *ef, int x, int y);
+static int em_video_channel_count(void *ef);
+static void em_video_channel_set(void *ef, int channel);
+static int em_video_channel_get(void *ef);
+static const char *em_video_channel_name_get(void *ef, int channel);
+static void em_video_channel_mute_set(void *ef, int mute);
+static int em_video_channel_mute_get(void *ef);
+static int em_audio_channel_count(void *ef);
+static void em_audio_channel_set(void *ef, int channel);
+static int em_audio_channel_get(void *ef);
+static const char *em_audio_channel_name_get(void *ef, int channel);
+static void em_audio_channel_mute_set(void *ef, int mute);
+static int em_audio_channel_mute_get(void *ef);
+static void em_audio_channel_volume_set(void *ef, double vol);
+static double em_audio_channel_volume_get(void *ef);
+static int em_spu_channel_count(void *ef);
+static void em_spu_channel_set(void *ef, int channel);
+static int em_spu_channel_get(void *ef);
+static const char *em_spu_channel_name_get(void *ef, int channel);
+static void em_spu_channel_mute_set(void *ef, int mute);
+static int em_spu_channel_mute_get(void *ef);
+static int em_chapter_count(void *ef);
+static void em_chapter_set(void *ef, int chapter);
+static int em_chapter_get(void *ef);
+static const char *em_chapter_name_get(void *ef, int chapter);
+static void em_speed_set(void *ef, double speed);
+static double em_speed_get(void *ef);
+static int em_eject(void *ef);
+
+static void *_em_seek (void *par);
+static int _em_fd_active(void *data, Ecore_Fd_Handler *fdh);
+static int _em_timer (void *data);
+
+static int
+em_init(void)
+{
+ init_count++;
+ if (init_count > 1) return init_count;
+ decoder = xine_new();
+ if (decoder)
+ {
+ /* load config... */
+ if (0)
+ {
+ xine_cfg_entry_t entry;
+
+ if (xine_config_lookup_entry(decoder, "video.num_buffers", &entry))
+ {
+ entry.num_value = 1;
+ xine_config_update_entry(decoder, &entry);
+ }
+ }
+ xine_init(decoder);
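+   /* create a pipe: the xine video output thread (via the "emotion" vo
+    * plugin) writes Emotion_Xine_Video_Frame pointers to fd_write, and
+    * _em_fd_active() reads them back in the ecore main loop through this
+    * fd handler */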
+ {
+ int fds[2];
+
+ if (pipe(fds) == 0)
+ {
+ fd_read = fds[0];
+ fd_write = fds[1];
+ fcntl(fd_read, F_SETFL, O_NONBLOCK);
+ fd_handler = ecore_main_fd_handler_add(fd_read,
+ ECORE_FD_READ,
+ _em_fd_active,
+ decoder,
+ NULL,
+ NULL);
+ ecore_main_fd_handler_active_set(fd_handler, ECORE_FD_READ);
+ }
+ }
+ }
+ return init_count;
+}
+
+static int
+em_shutdown(void)
+{
+ init_count--;
+ if (init_count > 0) return init_count;
+ if (decoder)
+ {
+ xine_exit(decoder);
+ ecore_main_fd_handler_del(fd_handler);
+ close(fd_write);
+ close(fd_read);
+
+ decoder = NULL;
+ fd_handler = NULL;
+ fd_read = -1;
+ fd_write = -1;
+ }
+ return 0;
+}
+
+static void *
+em_file_open(const char *file, Evas_Object *obj)
+{
+ Emotion_Xine_Video *ev;
+ int pos_stream = 0;
+ int pos_time = 0;
+ int length_time = 0;
+ uint32_t v;
+
+ ev = calloc(1, sizeof(Emotion_Xine_Video));
+ if (!ev) return NULL;
+ ev->obj = obj;
+ ev->fd = fd_write;
+
+ /* some notes on parameters we could swizzle for certain inputs */
+ if (0)
+ {
+ xine_cfg_entry_t cf;
+
+ if (xine_config_lookup_entry(decoder, "input.dvd_device", &cf))
+ {
+ cf.str_value = "/dev/dvd";
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "input.css_decryption_method", &cf))
+ {
+ cf.str_value = "key"; // "key" "disk" "title"
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "input.dvd_region", &cf))
+ {
+ cf.num_value = 0; // 0 ... 1 - 8
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "input.dvd_use_readahead", &cf))
+ {
+ cf.num_value = 0; // 0 or 1
+ xine_config_update_entry(decoder, &cf);
+ }
+	// these are read at runtime - so changing them affects all DVDs
+ if (xine_config_lookup_entry(decoder, "input.dvd_skip_behaviour", &cf))
+ {
+ cf.str_value = "skip program"; // "skip program" "skip part" "skip title"
+ xine_config_update_entry(decoder, &cf);
+ }
+	// these are read at runtime - so changing them affects all DVDs
+ if (xine_config_lookup_entry(decoder, "input.dvd_seek_behaviour", &cf))
+ {
+ cf.str_value = "seek in program chain"; // "seek in program chain" "seek in program"
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "input.v4l_video_device_path", &cf))
+ {
+ cf.str_value = "/dev/video0";
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "input.cdda_use_cddb", &cf))
+ {
+ cf.num_value = 0; // 0 or 1
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "input.cdda_device", &cf))
+ {
+ cf.str_value = "/dev/cdrom";
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "audio.oss_device_name", &cf))
+ {
+ cf.str_value = "/dev/dsp";
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "audio.oss_device_number", &cf))
+ {
+ cf.num_value = -1; // -1 or 0 1 2 ...
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "audio.alsa_mmap_enable", &cf))
+ {
+ cf.num_value = 1; // 0 or 1
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "codec.a52_surround_downmix", &cf))
+ {
+ cf.num_value = 1; // 0 or 1
+ xine_config_update_entry(decoder, &cf);
+ }
+ if (xine_config_lookup_entry(decoder, "vcd.default_device", &cf))
+ {
+ cf.str_value = "/dev/cdrom";
+ xine_config_update_entry(decoder, &cf);
+ }
+ }
+ if (1)
+ {
+ char **auto_play_mrls;
+ int auto_play_num;
+
+ auto_play_mrls = xine_get_autoplay_mrls(decoder, "cdda", &auto_play_num);
+ printf("auto_play_mrls = %p\n", auto_play_mrls);
+ if (auto_play_mrls)
+ {
+ int i;
+
+ for (i = 0; i < auto_play_num; i++)
+ printf("MRL: %s\n", auto_play_mrls[i]);
+ }
+ }
+ ev->video = xine_open_video_driver(decoder, "emotion", XINE_VISUAL_TYPE_NONE, ev);
+ ev->audio = xine_open_audio_driver(decoder, "oss", ev);
+ ev->stream = xine_stream_new(decoder, ev->audio, ev->video);
+ if (!xine_open(ev->stream, file))
+ {
+ xine_dispose(ev->stream);
+ if (ev->video) xine_close_video_driver(decoder, ev->video);
+ if (ev->audio) xine_close_audio_driver(decoder, ev->audio);
+ free(ev);
+ return NULL;
+ }
+
+ if (xine_get_pos_length(ev->stream, &pos_stream, &pos_time, &length_time))
+ {
+ ev->pos = 0.0;
+ ev->len = (double)length_time / 1000.0;
+ }
+ v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_FRAME_DURATION);
+ if (v > 0) ev->fps = 90000.0 / (double)v;
+ v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_WIDTH);
+ ev->w = v;
+ v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HEIGHT);
+ ev->h = v;
+ v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_RATIO);
+ ev->ratio = (double)v / 10000.0;
+ ev->just_loaded = 1;
+ return ev;
+}
+
+static void
+em_file_close(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
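+   /* busy-wait for any in-flight seek thread to finish before tearing the
+    * stream down (ev->seek_to is decremented by _em_seek()) */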
+ while (ev->seek_to);
+ xine_stop(ev->stream);
+ xine_close(ev->stream);
+ xine_dispose(ev->stream);
+ if (ev->video) xine_close_video_driver(decoder, ev->video);
+ if (ev->audio) xine_close_audio_driver(decoder, ev->audio);
+ if (ev->timer) ecore_timer_del(ev->timer);
+ free(ev);
+}
+
+static void
+em_play(void *ef, double pos)
+{
+ Emotion_Xine_Video *ev;
+ int pos_stream = 0;
+ int pos_time = 0;
+ int length_time = 0;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->play = 1;
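+   /* if we are merely resuming at the current position, just unpause;
+    * otherwise force a seek to the requested position via em_pos_set() */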
+ if ((xine_get_param(ev->stream, XINE_PARAM_SPEED) == XINE_SPEED_PAUSE) &&
+ (pos == ev->pos) &&
+ (!ev->just_loaded))
+ {
+ xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_NORMAL);
+ }
+ else
+ {
+ ev->seek_to_pos = -0.1;
+ em_pos_set(ef, pos);
+ }
+ ev->just_loaded = 0;
+
+ if (xine_get_pos_length(ev->stream,
+ &pos_stream,
+ &pos_time,
+ &length_time))
+ {
+ ev->pos = (double)pos_time / 1000.0;
+ ev->len = (double)length_time / 1000.0;
+ }
+ if ((xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) &&
+ (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED)))
+ _emotion_frame_new(ev->obj);
+ _emotion_video_pos_update(ev->obj, ev->pos, ev->len);
+ if ((!xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) ||
+ (!xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED)))
+ {
+ if (!ev->timer)
+ ev->timer = ecore_timer_add(1.0 / 30.0, _em_timer, ev);
+ }
+}
+
+static void
+em_stop(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->play = 0;
+ xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE);
+ if (ev->timer)
+ {
+ ecore_timer_del(ev->timer);
+ ev->timer = NULL;
+ }
+}
+
+static void
+em_size_get(void *ef, int *w, int *h)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (w) *w = ev->w;
+ if (h) *h = ev->h;
+}
+
+static void
+em_pos_set(void *ef, double pos)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+
+ if (ev->seek_to_pos == pos) return;
+ ev->seek_to_pos = pos;
+ ev->seek_to++;
+ if (ev->seek_to == 1)
+ {
+ pthread_t th;
+
+ if (pthread_create(&th, NULL, _em_seek, ev) != 0)
+ ev->seek_to--;
+ }
+}
+
+static double
+em_len_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->len;
+}
+
+static double
+em_fps_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->fps;
+}
+
+static double
+em_pos_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->pos;
+}
+
+static double
+em_ratio_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->ratio;
+}
+
+static int
+em_seekable(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return xine_get_stream_info(ev->stream, XINE_STREAM_INFO_SEEKABLE);
+}
+
+static void
+em_frame_done(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (ev->cur_frame)
+ {
+ if (ev->cur_frame->done_func)
+ ev->cur_frame->done_func(ev->cur_frame->done_data);
+ ev->cur_frame = NULL;
+ }
+}
+
+static void
+em_yuv_size_get(void *ef, int *w, int *h)
+{
+ Emotion_Xine_Video *ev;
+ Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+ if (!fr)
+ {
+ if (w) *w = 0;
+ if (h) *h = 0;
+ return;
+ }
+ if (w) *w = fr->w;
+ if (h) *h = fr->h;
+}
+
+static int
+em_yuv_rows_get(void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows)
+{
+ Emotion_Xine_Video *ev;
+ Emotion_Xine_Video_Frame *fr;
+
+ ev = (Emotion_Xine_Video *)ef;
+ fr = ev->cur_frame;
+ if (!fr) return 0;
+ if (fr->y)
+ {
+ int i;
+
+ for (i = 0; i < h; i++) yrows[i] = fr->y + (i * fr->y_stride);
+ for (i = 0; i < (h / 2); i++) urows[i] = fr->u + (i * fr->u_stride);
+ for (i = 0; i < (h / 2); i++) vrows[i] = fr->v + (i * fr->v_stride);
+ return 1;
+ }
+ return 0;
+}
+
+static void
+em_event_feed(void *ef, int event)
+{
+ Emotion_Xine_Video *ev;
+ xine_event_t xine_event;
+
+ ev = (Emotion_Xine_Video *)ef;
+ xine_event.data_length = 0;
+ xine_event.data = NULL;
+ xine_event.stream = ev->stream;
+ gettimeofday(&xine_event.tv, NULL);
+ switch (event)
+ {
+ case EMOTION_EVENT_MENU1:
+ xine_event.type = XINE_EVENT_INPUT_MENU1;
+ break;
+ case EMOTION_EVENT_MENU2:
+ xine_event.type = XINE_EVENT_INPUT_MENU2;
+ break;
+ case EMOTION_EVENT_MENU3:
+ xine_event.type = XINE_EVENT_INPUT_MENU3;
+ break;
+ case EMOTION_EVENT_MENU4:
+ xine_event.type = XINE_EVENT_INPUT_MENU4;
+ break;
+ case EMOTION_EVENT_MENU5:
+ xine_event.type = XINE_EVENT_INPUT_MENU5;
+ break;
+ case EMOTION_EVENT_MENU6:
+ xine_event.type = XINE_EVENT_INPUT_MENU6;
+ break;
+ case EMOTION_EVENT_MENU7:
+ xine_event.type = XINE_EVENT_INPUT_MENU7;
+ break;
+ case EMOTION_EVENT_UP:
+ xine_event.type = XINE_EVENT_INPUT_UP;
+ break;
+ case EMOTION_EVENT_DOWN:
+ xine_event.type = XINE_EVENT_INPUT_DOWN;
+ break;
+ case EMOTION_EVENT_LEFT:
+ xine_event.type = XINE_EVENT_INPUT_LEFT;
+ break;
+ case EMOTION_EVENT_RIGHT:
+ xine_event.type = XINE_EVENT_INPUT_RIGHT;
+ break;
+ case EMOTION_EVENT_SELECT:
+ xine_event.type = XINE_EVENT_INPUT_SELECT;
+ break;
+ case EMOTION_EVENT_NEXT:
+ xine_event.type = XINE_EVENT_INPUT_NEXT;
+ break;
+ case EMOTION_EVENT_PREV:
+ xine_event.type = XINE_EVENT_INPUT_PREVIOUS;
+ break;
+ case EMOTION_EVENT_ANGLE_NEXT:
+ xine_event.type = XINE_EVENT_INPUT_ANGLE_NEXT;
+ break;
+ case EMOTION_EVENT_ANGLE_PREV:
+ xine_event.type = XINE_EVENT_INPUT_ANGLE_PREVIOUS;
+ break;
+ case EMOTION_EVENT_FORCE:
+ xine_event.type = XINE_EVENT_INPUT_BUTTON_FORCE;
+ break;
+ case EMOTION_EVENT_0:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_0;
+ break;
+ case EMOTION_EVENT_1:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_1;
+ break;
+ case EMOTION_EVENT_2:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_2;
+ break;
+ case EMOTION_EVENT_3:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_3;
+ break;
+ case EMOTION_EVENT_4:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_4;
+ break;
+ case EMOTION_EVENT_5:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_5;
+ break;
+ case EMOTION_EVENT_6:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_6;
+ break;
+ case EMOTION_EVENT_7:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_7;
+ break;
+ case EMOTION_EVENT_8:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_8;
+ break;
+ case EMOTION_EVENT_9:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_9;
+ break;
+ case EMOTION_EVENT_10:
+ xine_event.type = XINE_EVENT_INPUT_NUMBER_10_ADD;
+ break;
+      default:
+	return;
+ }
+ xine_event_send(ev->stream, &xine_event);
+}
+
+static void
+em_event_mouse_button_feed(void *ef, int button, int x, int y)
+{
+ Emotion_Xine_Video *ev;
+ xine_event_t xine_event;
+ xine_input_data_t xine_input;
+
+ ev = (Emotion_Xine_Video *)ef;
+ xine_event.stream = ev->stream;
+ gettimeofday(&xine_event.tv, NULL);
+ xine_event.type = XINE_EVENT_INPUT_MOUSE_BUTTON;
+   xine_input.button = button;
+ xine_input.x = x;
+ xine_input.y = y;
+ xine_event.data = &xine_input;
+ xine_event.data_length = sizeof(xine_input);
+ xine_event_send(ev->stream, &xine_event);
+}
+
+static void
+em_event_mouse_move_feed(void *ef, int x, int y)
+{
+ Emotion_Xine_Video *ev;
+ xine_event_t xine_event;
+ xine_input_data_t xine_input;
+
+ ev = (Emotion_Xine_Video *)ef;
+ xine_event.stream = ev->stream;
+ gettimeofday(&xine_event.tv, NULL);
+ xine_event.type = XINE_EVENT_INPUT_MOUSE_MOVE;
+ xine_input.button = 0;
+ xine_input.x = x;
+ xine_input.y = y;
+ xine_event.data = &xine_input;
+ xine_event.data_length = sizeof(xine_input);
+ xine_event_send(ev->stream, &xine_event);
+}
+
+static int
+em_video_channel_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+ int v;
+
+ ev = (Emotion_Xine_Video *)ef;
+ v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_CHANNELS);
+ if ((v < 1) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) return 1;
+ return v;
+}
+
+static void
+em_video_channel_set(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (channel < 0) channel = 0;
+ xine_set_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL, channel);
+}
+
+static int
+em_video_channel_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return xine_get_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL);
+}
+
+static const char *
+em_video_channel_name_get(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return NULL;
+}
+
+static void
+em_video_channel_mute_set(void *ef, int mute)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->video_mute = mute;
+}
+
+static int
+em_video_channel_mute_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->video_mute;
+}
+
+static int
+em_audio_channel_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_AUDIO_CHANNEL);
+}
+
+static void
+em_audio_channel_set(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (channel < -1) channel = -1;
+ xine_set_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL, channel);
+}
+
+static int
+em_audio_channel_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return xine_get_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL);
+}
+
+static const char *
+em_audio_channel_name_get(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+ static char lang[XINE_LANG_MAX + 1];
+
+ ev = (Emotion_Xine_Video *)ef;
+ lang[0] = 0;
+ if (xine_get_audio_lang(ev->stream, channel, lang)) return lang;
+ return NULL;
+}
+
+static void
+em_audio_channel_mute_set(void *ef, int mute)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->audio_mute = mute;
+ xine_set_param(ev->stream, XINE_PARAM_AUDIO_MUTE, ev->audio_mute);
+}
+
+static int
+em_audio_channel_mute_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->audio_mute;
+}
+
+static void
+em_audio_channel_volume_set(void *ef, double vol)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (vol < 0.0) vol = 0.0;
+ if (vol > 1.0) vol = 1.0;
+ xine_set_param(ev->stream, XINE_PARAM_AUDIO_VOLUME, vol * 100);
+}
+
+static double
+em_audio_channel_volume_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return (double)xine_get_param(ev->stream, XINE_PARAM_AUDIO_VOLUME) / 100.0;
+}
+
+static int
+em_spu_channel_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_SPU_CHANNEL);
+}
+
+static void
+em_spu_channel_set(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (channel < 0) channel = 0;
+ xine_set_param(ev->stream, XINE_PARAM_SPU_CHANNEL, channel);
+}
+
+static int
+em_spu_channel_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return xine_get_param(ev->stream, XINE_PARAM_SPU_CHANNEL);
+}
+
+static const char *
+em_spu_channel_name_get(void *ef, int channel)
+{
+ Emotion_Xine_Video *ev;
+ static char lang[XINE_LANG_MAX + 1];
+
+ ev = (Emotion_Xine_Video *)ef;
+ lang[0] = 0;
+ if (xine_get_spu_lang(ev->stream, channel, lang)) return lang;
+ return NULL;
+}
+
+static void
+em_spu_channel_mute_set(void *ef, int mute)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ ev->spu_mute = mute;
+ xine_set_param(ev->stream, XINE_PARAM_IGNORE_SPU, ev->spu_mute);
+}
+
+static int
+em_spu_channel_mute_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return ev->spu_mute;
+}
+
+static int
+em_chapter_count(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ if (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_CHAPTERS))
+ return 99;
+ return 0;
+}
+
+static void
+em_chapter_set(void *ef, int chapter)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+}
+
+static int
+em_chapter_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return 0;
+}
+
+static const char *
+em_chapter_name_get(void *ef, int chapter)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return NULL;
+}
+
+static void
+em_speed_set(void *ef, double speed)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+}
+
+static double
+em_speed_get(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+ return 1.0;
+}
+
+static int
+em_eject(void *ef)
+{
+ Emotion_Xine_Video *ev;
+
+ ev = (Emotion_Xine_Video *)ef;
+   return xine_eject(ev->stream);
+}
+
+
+
+
+
+
+
+
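+/* runs in a short-lived pthread spawned by em_pos_set(): keeps issuing
+ * xine_play() seeks while new target positions arrive (ev->seek_to acts as a
+ * pending-seek counter), then restores the paused state if playback is off */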
+static void *
+_em_seek(void *par)
+{
+ Emotion_Xine_Video *ev;
+ double ppos = -1.0;
+
+ ev = (Emotion_Xine_Video *)par;
+ while (ev->seek_to > 0)
+ {
+ if (ppos != ev->seek_to_pos)
+ {
+ ppos = ev->seek_to_pos;
+ xine_play(ev->stream, 0, ev->seek_to_pos * 1000);
+ }
+ ev->seek_to--;
+ }
+ if (!ev->play)
+ {
+ xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE);
+ }
+ return NULL;
+}
+
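+/* ecore fd handler for the frame pipe: each read delivers a pointer to an
+ * Emotion_Xine_Video_Frame queued by the video output plugin; the previous
+ * frame is released via its done_func, position/length are refreshed and the
+ * emotion smart object is told a new frame (and possibly new size) exists */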
+static int
+_em_fd_active(void *data, Ecore_Fd_Handler *fdh)
+{
+ void *buf;
+ int fd, len;
+ Emotion_Xine_Video_Frame *fr;
+ Emotion_Xine_Video *ev;
+ int pos_stream = 0;
+ int pos_time = 0;
+ int length_time = 0;
+
+ fd = ecore_main_fd_handler_fd_get(fdh);
+ while ((len = read(fd, &buf, sizeof(void *))) > 0)
+ {
+ if (len == sizeof(void *))
+ {
+ fr = buf;
+ ev = _emotion_video_get(fr->obj);
+ if (ev)
+ {
+ if (ev->cur_frame)
+ {
+ if (ev->cur_frame->done_func)
+ ev->cur_frame->done_func(ev->cur_frame->done_data);
+ }
+ ev->cur_frame = fr;
+ if (xine_get_pos_length(ev->stream,
+ &pos_stream,
+ &pos_time,
+ &length_time))
+ {
+
+ ev->pos = (double)pos_time / 1000.0;
+ ev->len = (double)length_time / 1000.0;
+ }
+ if ((xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) &&
+ (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED)))
+ {
+ if (ev->video_mute)
+ {
+ if (ev->cur_frame->done_func)
+ ev->cur_frame->done_func(ev->cur_frame->done_data);
+ ev->cur_frame = NULL;
+ }
+ else
+ _emotion_frame_new(fr->obj);
+ }
+ _emotion_frame_resize(fr->obj, fr->w, fr->h, fr->ratio);
+ _emotion_video_pos_update(fr->obj, ev->pos, ev->len);
+ }
+ }
+ }
+   return 1;
+}
+
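+/* fallback poller (~30Hz) used when the stream has no usable video: since no
+ * frames arrive over the pipe, position/length are polled here instead */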
+static int
+_em_timer(void *data)
+{
+ Emotion_Xine_Video *ev;
+ int pos_stream = 0;
+ int pos_time = 0;
+ int length_time = 0;
+
+ ev = data;
+ if (xine_get_pos_length(ev->stream,
+ &pos_stream,
+ &pos_time,
+ &length_time))
+ {
+
+ ev->pos = (double)pos_time / 1000.0;
+ ev->len = (double)length_time / 1000.0;
+ }
+ if ((xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) &&
+ (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED)))
+ _emotion_frame_new(ev->obj);
+ _emotion_video_pos_update(ev->obj, ev->pos, ev->len);
+ return 1;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+static Emotion_Video_Module em_module =
+{
+ em_init, /* init */
+ em_shutdown, /* shutdown */
+ em_file_open, /* file_open */
+ em_file_close, /* file_close */
+ em_play, /* play */
+ em_stop, /* stop */
+ em_size_get, /* size_get */
+ em_pos_set, /* pos_set */
+ em_len_get, /* len_get */
+ em_fps_get, /* fps_get */
+ em_pos_get, /* pos_get */
+ em_ratio_get, /* ratio_get */
+ em_seekable, /* seekable */
+ em_frame_done, /* frame_done */
+ em_yuv_size_get, /* yuv_size_get */
+ em_yuv_rows_get, /* yuv_rows_get */
+ em_event_feed, /* event_feed */
+ em_event_mouse_button_feed, /* event_mouse_button_feed */
+ em_event_mouse_move_feed, /* event_mouse_move_feed */
+ em_video_channel_count, /* video_channel_count */
+ em_video_channel_set, /* video_channel_set */
+ em_video_channel_get, /* video_channel_get */
+ em_video_channel_name_get, /* video_channel_name_get */
+ em_video_channel_mute_set, /* video_channel_mute_set */
+ em_video_channel_mute_get, /* video_channel_mute_get */
+ em_audio_channel_count, /* audio_channel_count */
+ em_audio_channel_set, /* audio_channel_set */
+ em_audio_channel_get, /* audio_channel_get */
+ em_audio_channel_name_get, /* audio_channel_name_get */
+ em_audio_channel_mute_set, /* audio_channel_mute_set */
+ em_audio_channel_mute_get, /* audio_channel_mute_get */
+ em_audio_channel_volume_set, /* audio_channel_volume_set */
+ em_audio_channel_volume_get, /* audio_channel_volume_get */
+ em_spu_channel_count, /* spu_channel_count */
+ em_spu_channel_set, /* spu_channel_set */
+ em_spu_channel_get, /* spu_channel_get */
+ em_spu_channel_name_get, /* spu_channel_name_get */
+ em_spu_channel_mute_set, /* spu_channel_mute_set */
+ em_spu_channel_mute_get, /* spu_channel_mute_get */
+ em_chapter_count, /* chapter_count */
+ em_chapter_set, /* chapter_set */
+ em_chapter_get, /* chapter_get */
+ em_chapter_name_get, /* chapter_name_get */
+ em_speed_set, /* speed_set */
+ em_speed_get, /* speed_get */
+ em_eject /* eject */
+};
+
+Emotion_Video_Module *
+module_open(void)
+{
+ em_module.init();
+ return &em_module;
+}
+
+void
+module_close(Emotion_Video_Module *module)
+{
+ em_module.shutdown();
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#if 0
+void
+em_debug(Emotion_Xine_Video *ev)
+{
+ int has_chapters = 0;
+ int max_spu = 0;
+ int max_audio = 0;
+ int video_channels = 0;
+ int video_streams = 0;
+ int video_seekable = 0;
+ char *title;
+ char *comment;
+ char *artist;
+ char *genre;
+ char *album;
+ char *year;
+ char *cdindex_discid;
+ int video_channel = 0;
+ int audio_channel = 0;
+ int spu_channel = 0;
+ int video_ratio = 0;
+ int audio_mode = 0;
+
+// return;
+ has_chapters = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_CHAPTERS);
+ max_spu = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_SPU_CHANNEL);
+ max_audio = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_AUDIO_CHANNEL);
+ video_channels = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_CHANNELS);
+ video_streams = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_STREAMS);
+ video_seekable = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_SEEKABLE);
+ title = xine_get_meta_info(ev->stream, XINE_META_INFO_TITLE);
+ comment = xine_get_meta_info(ev->stream, XINE_META_INFO_COMMENT);
+ artist = xine_get_meta_info(ev->stream, XINE_META_INFO_ARTIST);
+ genre = xine_get_meta_info(ev->stream, XINE_META_INFO_GENRE);
+ album = xine_get_meta_info(ev->stream, XINE_META_INFO_ALBUM);
+ year = xine_get_meta_info(ev->stream, XINE_META_INFO_YEAR);
+ cdindex_discid = xine_get_meta_info(ev->stream, XINE_META_INFO_CDINDEX_DISCID);
+ video_channel = xine_get_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL);
+ audio_channel = xine_get_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL);
+ spu_channel = xine_get_param(ev->stream, XINE_PARAM_SPU_CHANNEL);
+ video_ratio = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_RATIO);
+ audio_mode = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_MODE);
+ printf("has_chapters = %i\n", has_chapters);
+ printf("max_spu = %i\n", max_spu);
+ printf("max_audio = %i\n", max_audio);
+ printf("video_channels = %i\n", video_channels);
+ printf("video_streams = %i\n", video_streams);
+ printf("video_seekable = %i\n", video_seekable);
+// printf("title = %s\n", title);
+// printf("comment = %s\n", comment);
+// printf("artist = %s\n", artist);
+// printf("genre = %s\n", genre);
+// printf("album = %s\n", album);
+// printf("year = %s\n", year);
+// printf("cdindex_discid = %s\n", cdindex_discid);
+ printf("video_channel = %i\n", video_channel);
+ printf("audio_channel = %i\n", audio_channel);
+ printf("spu_channels = %i\n", spu_channel);
+ printf("video_ratio = %i\n", video_ratio);
+ printf("audio_mode = %i\n", audio_mode);
+ {
+ int i;
+
+ for (i = 0; i <= max_audio; i++)
+ {
+ char lang[XINE_LANG_MAX + 1];
+
+ lang[0] = 0;
+ printf(" AUDIO %i = ", i);
+ if (xine_get_audio_lang(ev->stream, i, lang))
+ printf("%s\n", lang);
+ else
+ printf("NONE\n");
+ }
+ for (i = 0; i <= max_spu; i++)
+ {
+ char lang[XINE_LANG_MAX + 1];
+
+ lang[0] = 0;
+ printf(" SPU %i = ", i);
+ if (xine_get_spu_lang(ev->stream, i, lang))
+ printf("%s\n", lang);
+ else
+ printf("NONE\n");
+ }
+ }
+}
+#endif
--- /dev/null
+#ifndef EMOTION_XINE_H
+#define EMOTION_XINE_H
+
+#include <xine.h>
+
+typedef struct _Emotion_Xine_Video Emotion_Xine_Video;
+typedef struct _Emotion_Xine_Video_Frame Emotion_Xine_Video_Frame;
+
+struct _Emotion_Xine_Video
+{
+ xine_video_port_t *video;
+ xine_audio_port_t *audio;
+ xine_stream_t *stream;
+ int fd;
+ double len;
+ double pos;
+ double fps;
+ double ratio;
+ int w, h;
+ Evas_Object *obj;
+ Emotion_Xine_Video_Frame *cur_frame;
+ int seek_to;
+ double seek_to_pos;
+ Ecore_Timer *timer;
+ unsigned char play : 1;
+ unsigned char just_loaded : 1;
+ unsigned char video_mute : 1;
+ unsigned char audio_mute : 1;
+ unsigned char spu_mute : 1;
+};
+
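+/* one decoded frame as handed from the xine video output plugin to the
+ * emotion module over the frame pipe; done_func(done_data) releases the
+ * underlying xine vo_frame once emotion is finished with the pixels */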
+struct _Emotion_Xine_Video_Frame
+{
+ int w, h;
+ double ratio;
+ unsigned char *y, *u, *v;
+ int y_stride, u_stride, v_stride;
+ Evas_Object *obj;
+ double timestamp;
+ void (*done_func)(void *data);
+ void *done_data;
+ void *frame;
+};
+
+Emotion_Video_Module *module_open (void);
+void module_close(Emotion_Video_Module *module);
+
+#endif
--- /dev/null
+## Process this file with automake to produce Makefile.in
+
+AUTOMAKE_OPTIONS = 1.4 foreign
+
+MAINTAINERCLEANFILES = Makefile.in
+
+LDFLAGS =
+INCLUDES = -I$(includedir) \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/src/lib \
+ -I$(top_srcdir)/src/modules \
+ -I$(top_srcdir)/src/modules/xine \
+ @my_cflags@ @xine_cflags@
+
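+# install this video output plugin into xine's own plugin directory
+# (@xine_plugins@ as substituted by configure)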
+pkgdir = @xine_plugins@
+
+pkg_LTLIBRARIES = xineplug_vo_out_emotion.la
+xineplug_vo_out_emotion_la_SOURCES = \
+emotion_xine_vo_out.c
+xineplug_vo_out_emotion_la_LIBADD = @xine_libs@
+xineplug_vo_out_emotion_la_LDFLAGS = \
+$(LDFLAGS) \
+-no-undefined -module -avoid-version \
+-L$(top_builddir)/src/lib -L$(top_builddir)/src/lib/.libs
+xineplug_vo_out_emotion_la_DEPENDENCIES = $(top_builddir)/config.h
--- /dev/null
+/***************************************************************************/
+/*** emotion xine display engine ***/
+/***************************************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "emotion_private.h"
+#include "emotion_xine.h"
+
+#include <xine.h>
+#include <xine/video_out.h>
+#include <xine/xine_internal.h>
+#include <xine/xineutils.h>
+#include <xine/vo_scale.h>
+
+#define BLEND_BYTE(dst, src, o) (((src)*o + ((dst)*(0xf-o)))/0xf)
+
+/***************************************************************************/
+typedef struct _Emotion_Frame Emotion_Frame;
+typedef struct _Emotion_Driver Emotion_Driver;
+typedef struct _Emotion_Class Emotion_Class;
+typedef struct _Emotion_Lut Emotion_Lut;
+
+struct _Emotion_Frame
+{
+ vo_frame_t vo_frame;
+ int width;
+ int height;
+ double ratio;
+ int format;
+ xine_t *xine;
+
+ Emotion_Xine_Video_Frame frame;
+ unsigned char in_use : 1;
+};
+
+struct _Emotion_Driver
+{
+ vo_driver_t vo_driver;
+ config_values_t *config;
+ int ratio;
+ xine_t *xine;
+ Emotion_Xine_Video *ev;
+};
+
+struct _Emotion_Class
+{
+ video_driver_class_t driver_class;
+ config_values_t *config;
+ xine_t *xine;
+};
+
+struct _Emotion_Lut
+{
+ uint8_t cb : 8;
+ uint8_t cr : 8;
+ uint8_t y : 8;
+ uint8_t foo : 8;
+} __attribute__ ((packed));
+
+/***************************************************************************/
+static void *_emotion_class_init (xine_t *xine, void *visual);
+static void _emotion_class_dispose (video_driver_class_t *driver_class);
+static char *_emotion_class_identifier_get (video_driver_class_t *driver_class);
+static char *_emotion_class_description_get (video_driver_class_t *driver_class);
+
+static vo_driver_t *_emotion_open (video_driver_class_t *driver_class, const void *visual);
+static void _emotion_dispose (vo_driver_t *vo_driver);
+
+static int _emotion_redraw (vo_driver_t *vo_driver);
+
+static uint32_t _emotion_capabilities_get (vo_driver_t *vo_driver);
+static int _emotion_gui_data_exchange (vo_driver_t *vo_driver, int data_type, void *data);
+
+static int _emotion_property_set (vo_driver_t *vo_driver, int property, int value);
+static int _emotion_property_get (vo_driver_t *vo_driver, int property);
+static void _emotion_property_min_max_get (vo_driver_t *vo_driver, int property, int *min, int *max);
+
+static vo_frame_t *_emotion_frame_alloc (vo_driver_t *vo_driver);
+static void _emotion_frame_dispose (vo_frame_t *vo_frame);
+static void _emotion_frame_format_update (vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags);
+static void _emotion_frame_display (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
+static void _emotion_frame_field (vo_frame_t *vo_frame, int which_field);
+
+static void _emotion_frame_data_free (Emotion_Frame *fr);
+static void _emotion_frame_data_unlock (Emotion_Frame *fr);
+
+static void _emotion_overlay_begin (vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed);
+static void _emotion_overlay_end (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
+static void _emotion_overlay_blend (vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay);
+
+static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
+static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
+
+/***************************************************************************/
+static vo_info_t _emotion_info =
+{
+ 1, /* priority */
+ XINE_VISUAL_TYPE_NONE /* visual type */
+};
+
+plugin_info_t xine_plugin_info[] =
+{
+ { PLUGIN_VIDEO_OUT, 19, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
+ { PLUGIN_NONE, 0, "", 0, NULL, NULL }
+};
+
+/***************************************************************************/
+static void *
+_emotion_class_init(xine_t *xine, void *visual)
+{
+ Emotion_Class *cl;
+
+// printf("emotion: _emotion_class_init()\n");
+ cl = (Emotion_Class *) xine_xmalloc(sizeof(Emotion_Class));
+ if (!cl) return NULL;
+ cl->driver_class.open_plugin = _emotion_open;
+ cl->driver_class.get_identifier = _emotion_class_identifier_get;
+ cl->driver_class.get_description = _emotion_class_description_get;
+ cl->driver_class.dispose = _emotion_class_dispose;
+ cl->config = xine->config;
+ cl->xine = xine;
+
+ return cl;
+}
+
+static void
+_emotion_class_dispose(video_driver_class_t *driver_class)
+{
+ Emotion_Class *cl;
+
+ cl = (Emotion_Class *)driver_class;
+ free(cl);
+}
+
+static char *
+_emotion_class_identifier_get(video_driver_class_t *driver_class)
+{
+ Emotion_Class *cl;
+
+ cl = (Emotion_Class *)driver_class;
+ return "emotion";
+}
+
+static char *
+_emotion_class_description_get(video_driver_class_t *driver_class)
+{
+ Emotion_Class *cl;
+
+ cl = (Emotion_Class *)driver_class;
+ return "Emotion xine video output plugin";
+}
+
+/***************************************************************************/
+static vo_driver_t *
+_emotion_open(video_driver_class_t *driver_class, const void *visual)
+{
+ Emotion_Class *cl;
+ Emotion_Driver *dv;
+
+ cl = (Emotion_Class *)driver_class;
+ /* visual here is the data ptr passed to xine_open_video_driver() */
+// printf("emotion: _emotion_open()\n");
+ dv = (Emotion_Driver *)xine_xmalloc(sizeof(Emotion_Driver));
+ if (!dv) return NULL;
+
+ dv->config = cl->config;
+ dv->xine = cl->xine;
+ dv->ratio = XINE_VO_ASPECT_AUTO;
+ dv->vo_driver.get_capabilities = _emotion_capabilities_get;
+ dv->vo_driver.alloc_frame = _emotion_frame_alloc;
+ dv->vo_driver.update_frame_format = _emotion_frame_format_update;
+ dv->vo_driver.overlay_begin = _emotion_overlay_begin;
+ dv->vo_driver.overlay_blend = _emotion_overlay_blend;
+ dv->vo_driver.overlay_end = _emotion_overlay_end;
+ dv->vo_driver.display_frame = _emotion_frame_display;
+ dv->vo_driver.get_property = _emotion_property_get;
+ dv->vo_driver.set_property = _emotion_property_set;
+ dv->vo_driver.get_property_min_max = _emotion_property_min_max_get;
+ dv->vo_driver.gui_data_exchange = _emotion_gui_data_exchange;
+ dv->vo_driver.dispose = _emotion_dispose;
+ dv->vo_driver.redraw_needed = _emotion_redraw;
+ dv->ev = (Emotion_Xine_Video *)visual;
+ return &dv->vo_driver;
+}
+
+static void
+_emotion_dispose(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_dispose()\n");
+ free(dv);
+}
+
+/***************************************************************************/
+static int
+_emotion_redraw(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_redraw()\n");
+ return 0;
+}
+
+/***************************************************************************/
+static uint32_t
+_emotion_capabilities_get(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_capabilities_get()\n");
+ return VO_CAP_YV12;
+}
+
+/***************************************************************************/
+static int
+_emotion_gui_data_exchange(vo_driver_t *vo_driver, int data_type, void *data)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_gui_data_exchange()\n");
+ switch (data_type)
+ {
+ case XINE_GUI_SEND_COMPLETION_EVENT:
+ break;
+ case XINE_GUI_SEND_DRAWABLE_CHANGED:
+ break;
+ case XINE_GUI_SEND_EXPOSE_EVENT:
+ break;
+ case XINE_GUI_SEND_TRANSLATE_GUI_TO_VIDEO:
+ break;
+ case XINE_GUI_SEND_VIDEOWIN_VISIBLE:
+ break;
+ case XINE_GUI_SEND_SELECT_VISUAL:
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/***************************************************************************/
+static int
+_emotion_property_set(vo_driver_t *vo_driver, int property, int value)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_property_set()\n");
+ switch (property)
+ {
+ case VO_PROP_ASPECT_RATIO:
+ if (value >= XINE_VO_ASPECT_NUM_RATIOS)
+ value = XINE_VO_ASPECT_AUTO;
+ printf("DRIVER RATIO SET %i!\n", value);
+ dv->ratio = value;
+ break;
+ default:
+ break;
+ }
+ return value;
+}
+
+static int
+_emotion_property_get(vo_driver_t *vo_driver, int property)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_property_get()\n");
+ switch (property)
+ {
+ case VO_PROP_ASPECT_RATIO:
+ return dv->ratio;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void
+_emotion_property_min_max_get(vo_driver_t *vo_driver, int property, int *min, int *max)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_property_min_max_get()\n");
+ *min = 0;
+ *max = 0;
+}
+
+/***************************************************************************/
+static vo_frame_t *
+_emotion_frame_alloc(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+// printf("emotion: _emotion_frame_alloc()\n");
+ fr = (Emotion_Frame *)xine_xmalloc(sizeof(Emotion_Frame));
+ if (!fr) return NULL;
+
+ fr->vo_frame.base[0] = NULL;
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+
+ fr->vo_frame.proc_slice = NULL;
+ fr->vo_frame.proc_frame = NULL;
+ fr->vo_frame.field = _emotion_frame_field;
+ fr->vo_frame.dispose = _emotion_frame_dispose;
+ fr->vo_frame.driver = vo_driver;
+
+ return (vo_frame_t *)fr;
+}
+
+static void
+_emotion_frame_dispose(vo_frame_t *vo_frame)
+{
+ Emotion_Frame *fr;
+
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_frame_dispose()\n");
+ _emotion_frame_data_free(fr);
+ free(fr);
+}
+
+static void
+_emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+
+ if ((fr->width != width) || (fr->height != height) ||
+ (fr->format != format) || (!fr->vo_frame.base[0]))
+ {
+// printf("emotion: _emotion_frame_format_update()\n");
+ _emotion_frame_data_free(fr);
+
+ fr->width = width;
+ fr->height = height;
+ fr->format = format;
+
+ switch (format)
+ {
+ case XINE_IMGFMT_YV12:
+ {
+ int y_size, uv_size;
+
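+	      /* YV12: one contiguous allocation - Y plane first, then the V
+	       * plane (base[2]) and finally the U plane (base[1]); pitches
+	       * are rounded up to multiples of 8 */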
+ fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
+ fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
+ fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);
+
+ y_size = fr->vo_frame.pitches[0] * height;
+ uv_size = fr->vo_frame.pitches[1] * ((height + 1) / 2);
+
+ fr->vo_frame.base[0] = malloc(y_size + (2 * uv_size));
+ fr->vo_frame.base[1] = fr->vo_frame.base[0] + y_size + uv_size;
+ fr->vo_frame.base[2] = fr->vo_frame.base[0] + y_size;
+ fr->frame.w = fr->width;
+ fr->frame.h = fr->height;
+ fr->frame.ratio = fr->vo_frame.ratio;
+ fr->frame.y = fr->vo_frame.base[0];
+ fr->frame.u = fr->vo_frame.base[1];
+ fr->frame.v = fr->vo_frame.base[2];
+ fr->frame.y_stride = fr->vo_frame.pitches[0];
+ fr->frame.u_stride = fr->vo_frame.pitches[1];
+ fr->frame.v_stride = fr->vo_frame.pitches[2];
+ fr->frame.obj = dv->ev->obj;
+ }
+ break;
+ default:
+ break;
+ }
+ if (((format == XINE_IMGFMT_YV12)
+ && ((fr->vo_frame.base[0] == NULL)
+ || (fr->vo_frame.base[1] == NULL)
+ || (fr->vo_frame.base[2] == NULL))))
+ {
+ _emotion_frame_data_free(fr);
+ }
+ }
+ fr->frame.ratio = fr->vo_frame.ratio;
+ fr->ratio = ratio;
+}
+
+static void
+_emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_frame_display()\n");
+ if (dv->ev)
+ {
+ void *buf;
+ int ret;
+
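+	/* hand the decoded frame to emotion by writing its pointer to the
+	 * pipe created in em_init(); _em_fd_active() on the main loop side
+	 * picks it up, and done_func releases the xine frame once it has
+	 * been consumed or superseded */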
+ buf = &(fr->frame);
+ fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
+ fr->frame.done_func = _emotion_frame_data_unlock;
+ fr->frame.done_data = fr;
+ ret = write(dv->ev->fd, &buf, sizeof(void *));
+// printf("-- FRAME DEC %p == %i\n", fr->frame.obj, ret);
+ fr->in_use = 1;
+ }
+ /* hmm - must find a way to sanely copy data out... FIXME problem */
+// fr->vo_frame.free(&fr->vo_frame);
+}
+
+static void
+_emotion_frame_field(vo_frame_t *vo_frame, int which_field)
+{
+ Emotion_Frame *fr;
+
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_frame_field()\n");
+}
+
+/***************************************************************************/
+static void
+_emotion_frame_data_free(Emotion_Frame *fr)
+{
+ if (fr->vo_frame.base[0])
+ {
+ free(fr->vo_frame.base[0]);
+ fr->vo_frame.base[0] = NULL;
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+ fr->frame.y = fr->vo_frame.base[0];
+ fr->frame.u = fr->vo_frame.base[1];
+ fr->frame.v = fr->vo_frame.base[2];
+ }
+}
+
+static void
+_emotion_frame_data_unlock(Emotion_Frame *fr)
+{
+// printf("emotion: _emotion_frame_data_unlock()\n");
+ if (fr->in_use)
+ {
+ fr->vo_frame.free(&fr->vo_frame);
+ fr->in_use = 0;
+ }
+}
+
+/***************************************************************************/
+static void
+_emotion_overlay_begin(vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_overlay_begin()\n");
+}
+
+static void
+_emotion_overlay_end(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_overlay_end()\n");
+}
+
+static void
+_emotion_overlay_blend(vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay)
+{
+ Emotion_Driver *dv;
+ Emotion_Frame *fr;
+
+ dv = (Emotion_Driver *)vo_driver;
+ fr = (Emotion_Frame *)vo_frame;
+// printf("emotion: _emotion_overlay_blend()\n");
+ _emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
+ fr->width, fr->height,
+ fr->vo_frame.pitches);
+}
+
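+/* blend a run of sz bytes toward val with opacity o using BLEND_BYTE() */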
+static void _emotion_overlay_mem_blend_8(uint8_t *mem, uint8_t val, uint8_t o, size_t sz)
+{
+ uint8_t *limit = mem + sz;
+ while (mem < limit) {
+ *mem = BLEND_BYTE(*mem, val, o);
+ mem++;
+ }
+}
+
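+/* decode the overlay's RLE data and blend it onto the destination YV12
+ * planes, switching between the clip and normal palettes according to the
+ * overlay's clip rectangle */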
+static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3])
+{
+ Emotion_Lut *my_clut;
+ uint8_t *my_trans;
+
+ int src_width = img_overl->width;
+ int src_height = img_overl->height;
+ rle_elem_t *rle = img_overl->rle;
+ rle_elem_t *rle_limit = rle + img_overl->num_rle;
+ int x_off = img_overl->x;
+ int y_off = img_overl->y;
+ int ymask,xmask;
+ int rle_this_bite;
+ int rle_remainder;
+ int rlelen;
+ int x, y;
+ int clip_right;
+ uint8_t clr=0;
+
+ uint8_t *dst_y = dst_base[0] + dst_pitches[0] * y_off + x_off;
+ uint8_t *dst_cr = dst_base[2] + (y_off / 2) * dst_pitches[1] + (x_off / 2) + 1;
+ uint8_t *dst_cb = dst_base[1] + (y_off / 2) * dst_pitches[2] + (x_off / 2) + 1;
+ my_clut = (Emotion_Lut *) img_overl->clip_color;
+ my_trans = img_overl->clip_trans;
+
+ /* avoid wrapping the overlay if drawing to a too-small image */
+ if( (x_off + img_overl->clip_right) < dst_width )
+ clip_right = img_overl->clip_right;
+ else
+ clip_right = dst_width - 1 - x_off;
+
+ /* avoid buffer overflow */
+ if( (src_height + y_off) >= dst_height )
+ src_height = dst_height - 1 - y_off;
+
+ rlelen=rle_remainder=0;
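+ /* walk the RLE stream row by row; rle_remainder tracks how much of the
+  * current run is still left to draw on the current row */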
+ for (y = 0; y < src_height; y++) {
+ ymask = ((img_overl->clip_top > y) || (img_overl->clip_bottom < y));
+ xmask = 0;
+
+ for (x = 0; x < src_width;) {
+ uint16_t o;
+
+ if (rlelen == 0) {
+ rle_remainder = rlelen = rle->len;
+ clr = rle->color;
+ rle++;
+ }
+ if (rle_remainder == 0) {
+ rle_remainder = rlelen;
+ }
+ if ((rle_remainder + x) > src_width) {
+ /* clamp overly long RLE runs to the remaining row width */
+ rle_remainder = src_width - x;
+ }
+
+ if (ymask == 0) {
+ if (x <= img_overl->clip_left) {
+ /* Starts outside clip area */
+ if ((x + rle_remainder - 1) > img_overl->clip_left ) {
+ /* Cutting needed, starts outside, ends inside */
+ rle_this_bite = (img_overl->clip_left - x + 1);
+ rle_remainder -= rle_this_bite;
+ rlelen -= rle_this_bite;
+ my_clut = (Emotion_Lut *) img_overl->color;
+ my_trans = img_overl->trans;
+ xmask = 0;
+ } else {
+ /* no cutting needed, starts outside, ends outside */
+ rle_this_bite = rle_remainder;
+ rle_remainder = 0;
+ rlelen -= rle_this_bite;
+ my_clut = (Emotion_Lut *) img_overl->color;
+ my_trans = img_overl->trans;
+ xmask = 0;
+ }
+ } else if (x < clip_right) {
+ /* Starts inside clip area */
+ if ((x + rle_remainder) > clip_right ) {
+ /* Cutting needed, starts inside, ends outside */
+ rle_this_bite = (clip_right - x);
+ rle_remainder -= rle_this_bite;
+ rlelen -= rle_this_bite;
+ my_clut = (Emotion_Lut *) img_overl->clip_color;
+ my_trans = img_overl->clip_trans;
+ xmask++;
+ } else {
+ /* no cutting needed, starts inside, ends inside */
+ rle_this_bite = rle_remainder;
+ rle_remainder = 0;
+ rlelen -= rle_this_bite;
+ my_clut = (Emotion_Lut *) img_overl->clip_color;
+ my_trans = img_overl->clip_trans;
+ xmask++;
+ }
+ } else if (x >= clip_right) {
+ /* Starts outside clip area, ends outside clip area */
+ if ((x + rle_remainder ) > src_width ) {
+ /* Cutting needed, starts outside, ends at right edge */
+ /* It should never reach here due to the earlier test of src_width */
+ rle_this_bite = (src_width - x );
+ rle_remainder -= rle_this_bite;
+ rlelen -= rle_this_bite;
+ my_clut = (Emotion_Lut *) img_overl->color;
+ my_trans = img_overl->trans;
+ xmask = 0;
+ } else {
+ /* no cutting needed, starts outside, ends outside */
+ rle_this_bite = rle_remainder;
+ rle_remainder = 0;
+ rlelen -= rle_this_bite;
+ my_clut = (Emotion_Lut *) img_overl->color;
+ my_trans = img_overl->trans;
+ xmask = 0;
+ }
+ }
+ } else {
+ /* Outside clip area due to y */
+ /* no cutting needed, starts outside, ends outside */
+ rle_this_bite = rle_remainder;
+ rle_remainder = 0;
+ rlelen -= rle_this_bite;
+ my_clut = (Emotion_Lut *) img_overl->color;
+ my_trans = img_overl->trans;
+ xmask = 0;
+ }
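+ /* opacity >= 15 means fully opaque (plain memset); lower values blend.
+  * chroma is only written on odd rows (4:2:0 vertical subsampling) */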
+ o = my_trans[clr];
+ if (o) {
+ if(o >= 15) {
+ memset(dst_y + x, my_clut[clr].y, rle_this_bite);
+ if (y & 1) {
+ memset(dst_cr + (x >> 1), my_clut[clr].cr, (rle_this_bite+1) >> 1);
+ memset(dst_cb + (x >> 1), my_clut[clr].cb, (rle_this_bite+1) >> 1);
+ }
+ } else {
+ _emotion_overlay_mem_blend_8(dst_y + x, my_clut[clr].y, o, rle_this_bite);
+ if (y & 1) {
+ /* blending cr and cb should use a separate function that pre-subtracts the 128 bias from each sample */
+ _emotion_overlay_mem_blend_8(dst_cr + (x >> 1), my_clut[clr].cr, o, (rle_this_bite+1) >> 1);
+ _emotion_overlay_mem_blend_8(dst_cb + (x >> 1), my_clut[clr].cb, o, (rle_this_bite+1) >> 1);
+ }
+ }
+
+ }
+ x += rle_this_bite;
+ if (rle >= rle_limit) {
+ break;
+ }
+ }
+ if (rle >= rle_limit) {
+ break;
+ }
+
+ dst_y += dst_pitches[0];
+
+ if (y & 1) {
+ dst_cr += dst_pitches[2];
+ dst_cb += dst_pitches[1];
+ }
+ }
+}