Executing the tests
-------------------
+To see a list of all of the options:
+
+ ./execute.sh -h
+
To execute tests, cd into automated-tests and run
./execute.sh
This will execute the dali-toolkit and dali-toolkit-internal test sets. Note that the output summary for the first will be printed before running the second.
-By default the tests execute in parallel, which is faster but does not produce any test case output. Use this to execute the tests in series:
+By default the tests execute in parallel, which is faster but does not produce any test case output files. Use this to execute the tests in series and log test output to stdout/stderr:
- ./execute.sh -s
+ ./execute.sh -S
-To see the results, copy the style folder from web-tct_2.2.1_r1/tools/tct-mgr/style into automated-tests and run
+To use Testkit-Lite (which is very slow), run
- firefox --new-window summary.xml
+ ./execute.sh -s
-To see a list of all of the options:
+To see the Testkit-Lite results, copy the style folder from web-tct_2.2.1_r1/tools/tct-mgr/style into automated-tests and run
- ./execute.sh -h
+ firefox --new-window summary.xml
To execute a subset of tests, you can run individual test sets, e.g.
- ./execute.sh dali
+ ./execute.sh dali-toolkit
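+
+To execute a single test case, pass its name and execute.sh will find and
+run it, e.g.
+
+  ./execute.sh UtcDaliControlBackgroundProperties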
-To get coverage output, run
+To get coverage output (you must first build the dali libraries with
+--coverage), run
./coverage.sh
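+
+If you have not yet built with coverage enabled, one way to do so (a sketch,
+assuming the GNU autotools build under build/tizen) is to pass --coverage to
+the compiler and linker when configuring:
+
+  cd ../build/tizen
+  CXXFLAGS="--coverage" LDFLAGS="--coverage" ./configure
+  make install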
On desktop, you can debug the tests by running gdb on the test program:
$ cd automated-tests
- $ gdb build/src/dali/tct-dali-core
+ $ gdb build/src/dali-toolkit/tct-dali-toolkit-core
gdb> r <TestCase>
Replace `<TestCase>` with the name of the failing test case.
-For example, using testcase UtcDaliNinePatch01 from the dali-core test suite:
+For example, using testcase UtcDaliControlBackgroundProperties from the dali-toolkit test suite:
- $ gdb build/src/dali/tct-dali-core
- gdb> r UtcDaliNinePatch01
+ $ gdb build/src/dali-toolkit/tct-dali-toolkit-core
+ gdb> r UtcDaliControlBackgroundProperties
On target, you can re-install the test RPM and associated debug RPMs manually.
After installing the RPM and its debug RPMs, you can find the executable in /opt/usr/bin/tct-dali-toolkit-core. First ensure you have smack permissions set:
chsmack -e "^" /usr/bin/gdb
- chsmack -e "^" /opt/usr/bin/tct-dali-core/tct-dali-core
+ chsmack -e "^" /opt/usr/bin/tct-dali-toolkit-core/tct-dali-toolkit-core
then run it under gdb as above.
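+For example:
+
+  gdb /opt/usr/bin/tct-dali-toolkit-core/tct-dali-toolkit-core
+  gdb> r UtcDaliControlBackgroundProperties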
fi
done
fi
+
+echo "Build succeeded"
+exit 0
#!/bin/bash
-TEMP=`getopt -o hsr --long help,serial,rerun -n 'execute.sh' -- "$@"`
+TEMP=`getopt -o hsSm --long help,serial,tct,modules -n 'execute.sh' -- "$@"`
if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
function usage
{
- echo -e "Usage: execute.sh\t\tExecute test cases from all modules in parallel"
- echo -e " execute.sh <testmodule>\tExecute test cases from the given module in parallel"
- echo -e " execute.sh -s\t\tExecute test cases in serial using Testkit-Lite"
- echo -e " execute.sh -r\t\tExecute test cases in parallel, re-running failed test cases in serial afterwards"
+  echo -e "Usage: execute.sh [-s|-S|-m] [module|testcase]"
+ echo -e " execute.sh\t\tExecute test cases from all modules in parallel"
+ echo -e " execute.sh [module]\tExecute test cases from the given module in parallel"
+ echo -e " execute.sh -s [module]\t\tExecute test cases in serial using Testkit-Lite"
+  echo -e "       execute.sh -S [module]\t\tExecute test cases in serial"
+  echo -e "       execute.sh -m\t\tList the test modules"
echo -e " execute.sh <testcase>\tFind and execute the given test case"
exit 2
}
-opt_serial=0
-opt_rerun=""
+opt_tct=0
+opt_serial=""
+opt_modules=0
while true ; do
case "$1" in
-h|--help) usage ;;
- -s|--serial) opt_serial=1 ; shift ;;
- -r|--rerun) opt_rerun="-r" ; shift ;;
+ -s|--tct) opt_tct=1 ; shift ;;
+ -S|--serial) opt_serial="-s" ; shift ;;
+ -m|--modules) opt_modules=1 ; shift ;;
--) shift; break;;
*) echo "Internal error $1!" ; exit 1 ;;
esac
done
-function execute
+function execute_tct
{
scripts/tctestsgen.sh $1 `pwd` desktop $2
testkit-lite -f `pwd`/tests.xml -o tct-${1}-core-tests.xml -A --comm localhost
scripts/add_style.pl $1
}
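+# Write the summary.xml header; summary_end appends the closing tag once all
+# test sets have run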
+function summary_start
+{
+ start=`date +"%Y-%m-%d_%H_%M_%S"`
+ cat > summary.xml <<EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="./style/summary.xsl"?>
+<result_summary plan_name="Core">
+ <other xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="xs:string" />
+ <summary test_plan_name="Dali">
+ <start_at>$start</start_at>
+ <end_at>$start</end_at>
+ </summary>
+EOF
+}
+
+function summary_end
+{
+ cat >> summary.xml <<EOF
+</result_summary>
+EOF
+}
+if [ $opt_modules == 1 ] ; then
+  modules=`ls -1 src/ | grep -v CMakeList | grep -v common | grep -v manual`
+ echo $modules
+ exit 0
+fi
# Clean up old test results
rm -f tct*core-tests.xml
# Clean up old coverage data
if [ -d ../build/tizen ] ; then
- rm -f ../build/tizen/dali-core/.libs/*.gcda
+ rm -f ../build/tizen/dali-toolkit/.libs/*.gcda
fi
find build \( -name "*.gcda" \) -exec rm '{}' \;
ASCII_BOLD="\e[1m"
ASCII_RESET="\e[0m"
-if [ $opt_serial = 1 ] ; then
+modules=`ls -1 src/ | grep -v CMakeList | grep -v common | grep -v manual`
+if [ -f summary.xml ] ; then unlink summary.xml ; fi
+
+if [ $opt_tct == 1 ] ; then
+ # Use Test-kit lite
# Run all test case executables serially, create XML output
if [ -n "$1" ] ; then
- execute $1 $*
+ execute_tct $1 $*
else
- for mod in `ls -1 src/ | grep -v CMakeList `
+ for mod in $modules
do
if [ $mod != 'common' ] && [ $mod != 'manual' ]; then
echo -ne "$ASCII_BOLD"
echo -e "Executing $mod$ASCII_RESET"
- execute $mod $*
+ execute_tct $mod $*
fi
done
fi
scripts/summarize.pl
+
else
-    # if $1 is an executable filename, execute it
+ # Execute test cases using own test harness
if [ -z "$1" ] ; then
# No arguments:
- # Execute each test executable in turn, using parallel execution
- for mod in `ls -1 src/ | grep -v CMakeList | grep -v common | grep -v manual`
+ # Execute each test executable in turn (by default, runs tests in parallel)
+ summary_start
+ for mod in $modules
do
echo -e "$ASCII_BOLD"
echo -e "Executing $mod$ASCII_RESET"
- build/src/$mod/tct-$mod-core $opt_rerun
+ build/src/$mod/tct-$mod-core -r $opt_serial
done
+ summary_end
elif [ -f "build/src/$1/tct-$1-core" ] ; then
# First argument is an executable filename - execute only that with any
# remaining arguments
+ summary_start
module=$1
shift;
- build/src/$module/tct-$module-core $opt_rerun $*
+ build/src/$module/tct-$module-core -r $opt_serial $*
+ summary_end
else
# First argument is not an executable. Is it a test case name?
# Try executing each executable with the test case name until success/known failure
- for mod in `ls -1 src/ | grep -v CMakeList | grep -v common | grep -v manual`
+ for mod in $modules
do
output=`build/src/$mod/tct-$mod-core $1`
ret=$?
echo $1 not found
fi
fi
+
+if [ -f summary.xml ] ; then
+  scripts/output_summary.pl
+  exit $?
+fi
+
+exit 0
--- /dev/null
+#!/usr/bin/perl
+
+# Reads summary.xml and produces human readable output
+
+use strict;
+use XML::Parser;
+use Encode;
+use Getopt::Long;
+use Cwd;
+
+my $pwd = getcwd;
+my $text = "";
+my $module="";
+my %modules=();
+
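+# Record which test suite we are in, and reset the captured text at the
+# start of each *_case element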
+sub handle_start
+{
+ my ($p, $elt, %attrs) = @_;
+
+ if($elt =~ /suite/)
+ {
+ $module=$attrs{"name"};
+ }
+ if($elt =~ /_case/)
+ {
+ $text = "";
+ }
+}
+
+sub handle_end
+{
+ my ($p, $elt) = @_;
+ if($elt =~ /pass_case/)
+ {
+ $modules{$module}->{"pass"}=$text;
+ $text="";
+ }
+ elsif($elt =~ /fail_case/)
+ {
+ $modules{$module}->{"fail"}=$text;
+ $text="";
+ }
+}
+
+sub handle_char
+{
+ my ($p, $str) = @_;
+ $text .= $str;
+}
+
+my($parser) = new XML::Parser(Handlers => {Start => \&handle_start,
+ End => \&handle_end,
+ Char => \&handle_char});
+$parser->parsefile("summary.xml");
+
+my $RED_COLOR="\e[1;31m";
+my $GREEN_COLOR="\e[1;32m";
+my $ASCII_RESET="\e[0m";
+my $ASCII_BOLD="\e[1m";
+
+print "\n";
+my $totalFailures=0;
+foreach $module (keys(%modules))
+{
+ my $result_colour = $GREEN_COLOR;
+ if( $modules{$module}->{"fail"} )
+ {
+ $result_colour = $RED_COLOR;
+ }
+  my $numPasses = $modules{$module}->{"pass"} || 0;
+  my $numFailures = $modules{$module}->{"fail"} || 0;
+  my $numCases = $numPasses + $numFailures;
+  $totalFailures += $numFailures;
+  print( "$ASCII_BOLD$module results:$ASCII_RESET\n" );
+  printf("Number of test passes:   %s%4d (%5.2f%%)%s\n", $ASCII_BOLD, $numPasses, $numCases > 0 ? 100.0 * $numPasses / $numCases : 0, $ASCII_RESET);
+ printf("%sNumber of test failures:%s %s%4d%s\n\n", $result_colour, $ASCII_RESET, $ASCII_BOLD, $numFailures, $ASCII_RESET);
+}
+
+# Exit non-zero if any test case failed, so callers (execute.sh) can rely on the exit status
+exit( $totalFailures > 0 );
#!/usr/bin/perl
+# Generates an XML summary of test cases from Test-kit lite output XML.
+
use strict;
use XML::Parser;
use Encode;
{
int result = TestHarness::EXIT_STATUS_BAD_ARGUMENT;
- const char* optString = "r";
- bool optRerunFailed(false);
+ const char* optString = "rs";
+ bool optRerunFailed(true);
+ bool optRunSerially(false);
int nextOpt = 0;
do
case 'r':
optRerunFailed = true;
break;
+ case 's':
+ optRunSerially = true;
+ break;
case '?':
TestHarness::Usage(argv[0]);
exit(TestHarness::EXIT_STATUS_BAD_ARGUMENT);
if( optind == argc ) // no testcase name in argument list
{
- result = TestHarness::RunAllInParallel(argv[0], tc_array, optRerunFailed);
+ if( optRunSerially )
+ {
+ result = TestHarness::RunAll( argv[0], tc_array );
+ }
+ else
+ {
+ result = TestHarness::RunAllInParallel( argv[0], tc_array, optRerunFailed );
+ }
}
else
{
void TestApplication::Initialize()
{
+ // We always need the first update!
+ mStatus.keepUpdating = Integration::KeepUpdating::STAGE_KEEP_RENDERING;
+
mCore = Dali::Integration::Core::New(
mRenderController,
mPlatformAbstraction,
void TestApplication::DoUpdate( unsigned int intervalMilliseconds )
{
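+  // Warn if this update was neither required by the previous update/render
+  // cycle nor explicitly requested via the render controller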
+ if( GetUpdateStatus() == 0 &&
+ mRenderStatus.NeedsUpdate() == false &&
+ ! GetRenderController().WasCalled(TestRenderController::RequestUpdateFunc) )
+ {
+ fprintf(stderr, "WARNING - Update not required\n");
+ }
+
unsigned int nextVSyncTime = mLastVSyncTime + intervalMilliseconds;
float elapsedSeconds = intervalMilliseconds / 1e3f;
mCore->Update( elapsedSeconds, mLastVSyncTime, nextVSyncTime, mStatus );
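+  // Reset the render controller's call log so the check above only sees
+  // requests made after this update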
+ GetRenderController().Initialize();
+
mLastVSyncTime = nextVSyncTime;
}
return mStatus.KeepUpdating();
}
+bool TestApplication::GetRenderNeedsUpdate()
+{
+ return mRenderStatus.NeedsUpdate();
+}
+bool TestApplication::GetRenderHasRendered()
+{
+ return mRenderStatus.HasRendered();
+}
+
bool TestApplication::RenderOnly( )
{
// Update Time values
bool UpdateOnly( unsigned int intervalMilliseconds = DEFAULT_RENDER_INTERVAL );
bool RenderOnly( );
void ResetContext();
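+  /**
+   * @return true if the last render pass requested a further update
+   */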
+ bool GetRenderNeedsUpdate();
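+  /**
+   * @return true if the last render pass actually rendered something
+   */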
+ bool GetRenderHasRendered();
private:
void DoUpdate( unsigned int intervalMilliseconds );
mIsRenderbufferResult = 0;
mIsShaderResult = 0;
mIsTextureResult = 0;
- mVertexAttribArrayChanged = false;
-
+ mActiveTextureUnit = 0;
mCheckFramebufferStatusResult = 0;
mFramebufferStatus = 0;
mFramebufferColorAttached = 0;
mFramebufferDepthAttached = 0;
mFramebufferStencilAttached = 0;
-
mNumBinaryFormats = 0;
mBinaryFormats = 0;
mProgramBinaryLength = 0;
- mGetProgramBinaryCalled = false;
- mLastAutoTextureIdUsed = 0;
+ mVertexAttribArrayChanged = false;
+ mGetProgramBinaryCalled = false;
- mLastShaderIdUsed = 0;
- mLastProgramIdUsed = 0;
- mLastUniformIdUsed = 0;
mLastShaderCompiled = 0;
mLastClearBitMask = 0;
mClearCount = 0;
mLastBlendFuncDstRgb = 0;
mLastBlendFuncSrcAlpha = 0;
mLastBlendFuncDstAlpha = 0;
+ mLastAutoTextureIdUsed = 0;
+ mLastShaderIdUsed = 0;
+ mLastProgramIdUsed = 0;
+ mLastUniformIdUsed = 0;
mUniforms.clear();
mProgramUniforms1i.clear();
mProgramUniforms2f.clear();
mProgramUniforms3f.clear();
mProgramUniforms4f.clear();
+
+ for( unsigned int i=0; i<MAX_ATTRIBUTE_CACHE_SIZE; ++i )
+ {
+ mVertexAttribArrayState[i] = false;
+ }
}
void TestGlAbstraction::PreRender()
// EXTERNAL INCLUDES
#include <sstream>
#include <string>
#include <map>
#include <cstdio>
#include <cstring> // for strcmp
inline void DeleteTextures(GLsizei n, const GLuint* textures)
{
std::stringstream out;
- out << n << ", " << textures << " = [" ;
+ out << n << ", " << textures << " = [";
for(GLsizei i=0; i<n; i++)
{
- out << textures[i] << ", " ;
+ out << textures[i] << ", ";
mDeletedTextureIds.push_back(textures[i]);
}
out << "]";
*/
TraceCallStack& TestGlSyncAbstraction::GetTrace() { return mTrace; }
+int TestGlSyncAbstraction::GetNumberOfSyncObjects()
+{
+ return mSyncObjects.size();
+}
+
} // Dali
*/
TraceCallStack& GetTrace();
+ /**
+ * Get the number of sync objects
+ *
+ * @return the number of sync objects
+ */
+ int GetNumberOfSyncObjects();
+
private:
typedef std::vector<TestSyncObject*> SyncContainer;
typedef SyncContainer::iterator SyncIter;
typedef std::map<int, TestCase> RunningTestCases;
-namespace
+const char* basename(const char* path)
{
-const char* RED_COLOR="\e[1;31m";
-const char* GREEN_COLOR="\e[1;32m";
-const char* ASCII_RESET="\e[0m";
-const char* ASCII_BOLD="\e[1m";
+ const char* ptr=path;
+ const char* slash=NULL;
+ for( ; *ptr != '\0' ; ++ptr )
+ {
+ if(*ptr == '/') slash=ptr;
+ }
+  if(slash != NULL)
+  {
+    return slash+1; // Skip past the final '/'
+  }
+  return path; // No '/' found - the whole path is the filename
}
-
int RunTestCase( struct ::testcase_s& testCase )
{
int result = EXIT_STATUS_TESTCASE_FAILED;
close(STDOUT_FILENO);
close(STDERR_FILENO);
}
- exit( RunTestCase( testCase ) );
+ else
+ {
+ printf("\n");
+ for(int i=0; i<80; ++i) printf("#");
+ printf("\nTC: %s\n", testCase.name);
+ fflush(stdout);
+ }
+
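+    // Run the test case in this child process, flush its output, and exit
+    // with the test result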
+ int status = RunTestCase( testCase );
+
+ if( ! suppressOutput )
+ {
+ fflush(stdout);
+ fflush(stderr);
+ fclose(stdout);
+ fclose(stderr);
+ }
+ exit( status );
}
else if(pid == -1)
{
else // Parent process
{
int status = 0;
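+      // Wait for the specific child we just forked, rather than any child process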
- int childPid = waitpid(-1, &status, 0);
+ int childPid = waitpid(pid, &status, 0);
if( childPid == -1 )
{
perror("waitpid");
}
else if(WIFSIGNALED(status) )
{
+ int signal = WTERMSIG(status);
testResult = EXIT_STATUS_TESTCASE_ABORTED;
-
-#ifdef WCOREDUMP
- if(WCOREDUMP(status))
+ if( signal == SIGABRT )
+ {
+ printf("Test case %s failed: test case asserted\n", testCase.name );
+ }
+ else
{
- printf("Test case %s failed: due to a crash\n", testCase.name);
+ printf("Test case %s failed: exit with signal %s\n", testCase.name, strsignal(WTERMSIG(status)));
}
-#endif
- printf("Test case %s failed: exit with signal %s\n", testCase.name, strsignal(WTERMSIG(status)));
}
else if(WIFSTOPPED(status))
{
printf("Test case %s failed: stopped with signal %s\n", testCase.name, strsignal(WSTOPSIG(status)));
}
}
+ fflush(stdout);
+ fflush(stderr);
return testResult;
}
-void OutputStatistics( int numPasses, int numFailures )
+void OutputStatistics( const char* processName, int numPasses, int numFailures )
{
- const char* failureColor = GREEN_COLOR;
- if( numFailures > 0 )
+ FILE* fp=fopen("summary.xml", "a");
+ if( fp != NULL )
{
- failureColor = RED_COLOR;
+ fprintf( fp,
+ " <suite name=\"%s\">\n"
+ " <total_case>%d</total_case>\n"
+ " <pass_case>%d</pass_case>\n"
+ " <pass_rate>%5.2f</pass_rate>\n"
+ " <fail_case>%d</fail_case>\n"
+ " <fail_rate>%5.2f</fail_rate>\n"
+ " <block_case>0</block_case>\n"
+ " <block_rate>0.00</block_rate>\n"
+ " <na_case>0</na_case>\n"
+ " <na_rate>0.00</na_rate>\n"
+ " </suite>\n",
+ basename(processName),
+ numPasses+numFailures,
+ numPasses,
+             100.0f * (float)numPasses/(numPasses+numFailures),
+             numFailures,
+             100.0f * (float)numFailures/(numPasses+numFailures) );
+ fclose(fp);
}
- printf("\rNumber of test passes: %s%4d (%5.2f%%)%s\n", ASCII_BOLD, numPasses, 100.0f * (float)numPasses / (numPasses+numFailures), ASCII_RESET);
- printf("%sNumber of test failures:%s %s%4d%s\n", failureColor, ASCII_RESET, ASCII_BOLD, numFailures, ASCII_RESET);
-
}
-
-int RunAll(const char* processName, ::testcase tc_array[], bool reRunFailed)
+int RunAll( const char* processName, ::testcase tc_array[] )
{
int numFailures = 0;
int numPasses = 0;
// Run test cases in child process( to kill output/handle signals ), but run serially.
for( unsigned int i=0; tc_array[i].name; i++)
{
- int result = RunTestCaseInChildProcess( tc_array[i], true );
+ int result = RunTestCaseInChildProcess( tc_array[i], false );
if( result == 0 )
{
numPasses++;
}
}
- OutputStatistics(numPasses, numFailures);
+ OutputStatistics( processName, numPasses, numFailures);
return numFailures;
}
-
-
// Constantly runs up to MAX_NUM_CHILDREN processes
int RunAllInParallel( const char* processName, ::testcase tc_array[], bool reRunFailed)
{
}
}
- OutputStatistics( numPasses, numFailures );
+ OutputStatistics( processName, numPasses, numFailures );
if( reRunFailed )
{
* Run all test cases in serial
* @param[in] processName The name of this process
* @param[in] tc_array The array of auto-generated testkit-lite test cases
- * @param[in] reRunFailed True if failed test cases should be re-run
* @return 0 on success
*/
-int RunAll(const char* processName, testcase tc_array[], bool reRunFailed);
+int RunAll( const char* processName, testcase tc_array[] );
/**
* Find the named test case in the given array, and run it
return numCalls;
}
-
/**
* Search for a method in the stack with the given parameter list
* @param[in] method The name of the method
*/
bool TraceCallStack::FindMethodAndParams(std::string method, std::string params) const
{
- bool found = false;
+ return FindIndexFromMethodAndParams( method, params ) > -1;
+}
+
+/**
+ * Search for a method in the stack with the given parameter list
+ * @param[in] method The name of the method
+ * @param[in] params A comma separated list of parameter values
+ * @return index in the stack where the method was found or -1 otherwise
+ */
+int TraceCallStack::FindIndexFromMethodAndParams(std::string method, std::string params) const
+{
+ int index = -1;
for( size_t i=0; i < mCallStack.size(); i++ )
{
if( 0 == mCallStack[i][0].compare(method) && 0 == mCallStack[i][1].compare(params) )
{
- found = true;
+ index = i;
break;
}
}
- return found;
+ return index;
}
/**
*/
bool FindMethodAndParams(std::string method, std::string params) const;
+ /**
+ * Search for a method in the stack with the given parameter list
+ * @param[in] method The name of the method
+ * @param[in] params A comma separated list of parameter values
+ * @return index in the stack where the method was found or -1 otherwise
+ */
+ int FindIndexFromMethodAndParams(std::string method, std::string params) const;
+
/**
* Test if the given method and parameters are at a given index in the stack
* @param[in] index Index in the call stack
{
int result = TestHarness::EXIT_STATUS_BAD_ARGUMENT;
- const char* optString = "r";
- bool optRerunFailed(false);
+ const char* optString = "rs";
+ bool optRerunFailed(true);
+ bool optRunSerially(false);
int nextOpt = 0;
do
case 'r':
optRerunFailed = true;
break;
+ case 's':
+ optRunSerially = true;
+ break;
case '?':
TestHarness::Usage(argv[0]);
exit(TestHarness::EXIT_STATUS_BAD_ARGUMENT);
if( optind == argc ) // no testcase name in argument list
{
- result = TestHarness::RunAllInParallel(argv[0], tc_array, optRerunFailed);
+ if( optRunSerially )
+ {
+ result = TestHarness::RunAll( argv[0], tc_array );
+ }
+ else
+ {
+ result = TestHarness::RunAllInParallel( argv[0], tc_array, optRerunFailed );
+ }
}
else
{