# Script to compare testsuite failures against a list of known-to-fail
# tests.
+#
+# NOTE: This script is used in installations that are running Python 2.4.
+# Please stick to syntax features available in 2.4 and earlier
+# versions.
# Contributed by Diego Novillo <dnovillo@google.com>
#
-# Copyright (C) 2011 Free Software Foundation, Inc.
+# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
1- Determine the target built: TARGET
2- Determine the source directory: SRCDIR
3- Look for a failure manifest file in
- <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
+ <SRCDIR>/<MANIFEST_SUBDIR>/<MANIFEST_NAME>.xfail
4- Collect all the <tool>.sum files from the build tree.
5- Produce a report stating:
a- Failures expected in the manifest but not present in the build.
b- Failures in the build not expected in the manifest.
6- If all the build failures are expected in the manifest, it exits
with exit code 0. Otherwise, it exits with error code 1.
+
+Manifest files contain expected DejaGNU results that are otherwise
+treated as failures.
+They may also contain additional text:
+
+# This is a comment. - self explanatory
+@include file - the file is a path relative to the includer
+@remove result text - result text is removed from the expected set
"""
+import datetime
import optparse
import os
import re
# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
+_VALID_TEST_RESULTS_REX = re.compile("%s" % "|".join(_VALID_TEST_RESULTS))
+
+# Subdirectory of srcdir in which to find the manifest file.
+_MANIFEST_SUBDIR = 'contrib/testsuite-management'
-# Pattern for naming manifest files. The first argument should be
-# the toplevel GCC source directory. The second argument is the
-# target triple used during the build.
-_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
+# Pattern for naming manifest files.
+# The first argument should be the toplevel GCC(/GNU tool) source directory.
+# The second argument is the manifest subdir.
+# The third argument is the manifest target, which defaults to the target
+# triplet used during the build.
+_MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'
+
+# The options passed to the program.
+_OPTIONS = None
def Error(msg):
  """Write an error message to stderr and abort with exit code 1."""
  sys.stderr.write('error: %s\n' % msg)
  sys.exit(1)
state: One of UNRESOLVED, XPASS or FAIL.
name: File name for the test.
description: String describing the test (flags used, dejagnu message, etc)
+ ordinal: Monotonically increasing integer.
+ It is used to keep results for one .exp file sorted
+ by the order the tests were run.
"""
- def __init__(self, summary_line):
+ def __init__(self, summary_line, ordinal=-1):
try:
self.attrs = ''
if '|' in summary_line:
(self.attrs, summary_line) = summary_line.split('|', 1)
- (self.state,
- self.name,
- self.description) = re.match(r' *([A-Z]+): ([^ ]+) (.*)',
- summary_line).groups()
+ try:
+ (self.state,
+ self.name,
+ self.description) = re.match(r' *([A-Z]+):\s*(\S+)\s+(.*)',
+ summary_line).groups()
+ except:
+ print 'Failed to parse summary line: "%s"' % summary_line
+ raise
self.attrs = self.attrs.strip()
self.state = self.state.strip()
self.description = self.description.strip()
+ self.ordinal = ordinal
except ValueError:
Error('Cannot parse summary line "%s"' % summary_line)
self.state, summary_line, self))
def __lt__(self, other):
- return self.name < other.name
+ return (self.name < other.name or
+ (self.name == other.name and self.ordinal < other.ordinal))
  def __hash__(self):
    # Fold the three identifying fields into one hash so equal results land
    # in the same set bucket.  NOTE(review): the matching __eq__ is not
    # visible in this hunk -- confirm it compares exactly (state, name,
    # description); 'attrs' and 'ordinal' are excluded here, presumably on
    # purpose so flaky/expire annotations and run order do not affect
    # set membership -- verify.
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)
attrs = '%s | ' % self.attrs
return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)
+ def ExpirationDate(self):
+ # Return a datetime.date object with the expiration date for this
+ # test result. Return None, if no expiration has been set.
+ if re.search(r'expire=', self.attrs):
+ expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
+ if not expiration:
+ Error('Invalid expire= format in "%s". Must be of the form '
+ '"expire=YYYYMMDD"' % self)
+ return datetime.date(int(expiration.group(1)),
+ int(expiration.group(2)),
+ int(expiration.group(3)))
+ return None
+
+ def HasExpired(self):
+ # Return True if the expiration date of this result has passed.
+ expiration_date = self.ExpirationDate()
+ if expiration_date:
+ now = datetime.date.today()
+ return now > expiration_date
+
def GetMakefileValue(makefile_name, value_name):
  """Return the stripped value assigned to value_name in makefile_name.

  value_name must include everything up to and including the '=' prefix
  as it appears in the Makefile (e.g. 'srcdir =').  Returns None when the
  file does not exist or no line starts with value_name.
  """
  if os.path.exists(makefile_name):
    makefile = open(makefile_name)
    # try/finally (valid in Python 2.4; 'with' is not) guarantees the file
    # is closed even if iteration raises, fixing a handle leak.
    try:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          return value.strip()
    finally:
      makefile.close()
  return None
return True
def IsComment(line):
  """Return True if line is a manifest comment (leading '#')."""
  return line[:1] == '#'
+
+
def IsInterestingResult(line):
  """Return True if line is one of the summary lines we care about.

  Lines may carry a 'attrs |' prefix; only the text after the last such
  '|' separator is matched against the known result states.
  """
  if '|' in line:
    line = line.split('|', 1)[1].strip()
  return _VALID_TEST_RESULTS_REX.match(line) is not None
+
+
def IsInclude(line):
  """Return True if line is an '@include' directive for another manifest."""
  marker = "@include "
  return line[:len(marker)] == marker
+
+
def GetIncludeFile(line, includer):
  """Return the path of the manifest named by an '@include' line.

  The included path is interpreted relative to the directory that contains
  the including manifest (includer).
  """
  name = line[len("@include "):].strip()
  return os.path.join(os.path.dirname(includer), name)
+
+
def IsNegativeResult(line):
  """Return True if line is an '@remove' directive (result to drop)."""
  marker = "@remove "
  return line[:len(marker)] == marker
+
+
def GetNegativeResult(line):
  """Return the result text following an '@remove ' directive, stripped."""
  return line[len("@remove "):].strip()
+
+
def ParseManifestWorker(result_set, manifest_path):
  """Read manifest_path, adding the contents to result_set.

  Recurses into '@include'd manifests; '@remove' lines delete a previously
  collected result (set.remove raises KeyError if it is absent).  Unknown
  non-empty, non-comment lines abort via Error().
  """
  if _OPTIONS.verbosity >= 1:
    print('Parsing manifest file %s.' % manifest_path)
  manifest_file = open(manifest_path)
  # try/finally guarantees the file is closed even when Error() raises
  # SystemExit or TestResult parsing fails (the original leaked the handle).
  try:
    for line in manifest_file:
      line = line.strip()
      if not line or IsComment(line):
        continue
      if IsNegativeResult(line):
        result_set.remove(TestResult(GetNegativeResult(line)))
      elif IsInclude(line):
        ParseManifestWorker(result_set, GetIncludeFile(line, manifest_path))
      elif IsInterestingResult(line):
        result_set.add(TestResult(line))
      else:
        Error('Unrecognized line in manifest file: %s' % line)
  finally:
    manifest_file.close()
+
+
def ParseManifest(manifest_path):
  """Create a set of TestResult instances from the given manifest file."""
  results = set()
  ParseManifestWorker(results, manifest_path)
  return results
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file.

  Results whose expire= date has passed are dropped with a warning; if such
  a test still fails it will show up later as an unexpected failure.
  """
  result_set = set()
  # ordinal is used when sorting the results so that tests within each
  # .exp file are kept sorted.
  ordinal = 0
  sum_file = open(sum_fname)
  # try/finally guarantees the file is closed even if parsing raises
  # (the original leaked the handle on that path).
  try:
    for line in sum_file:
      if IsInterestingResult(line):
        result = TestResult(line, ordinal)
        ordinal += 1
        if result.HasExpired():
          # Tests that have expired are not added to the set of expected
          # results. If they are still present in the set of actual results,
          # they will cause an error to be reported.
          print('WARNING: Expected failure "%s" has expired.' % line.strip())
          continue
        result_set.add(result)
  finally:
    sum_file.close()
  return result_set
def GetManifest(manifest_path):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood by the
  TestResult constructor.  Returns an empty set when no manifest file
  exists at manifest_path.
  """
  if not os.path.exists(manifest_path):
    return set()
  return ParseManifest(manifest_path)
def CollectSumFiles(builddir):
  """Return the paths of all .sum files found under builddir.

  Version-control metadata directories are skipped.
  """
  found = []
  for root, dirs, files in os.walk(builddir):
    # Prune VCS directories in place so os.walk does not descend into them.
    for vcs_dir in ('.svn', '.git'):
      if vcs_dir in dirs:
        dirs.remove(vcs_dir)
    found.extend(os.path.join(root, name)
                 for name in files if name.endswith('.sum'))
  return found
-def GetResults(builddir):
- """Collect all the test results from .sum files under the given build
- directory."""
- sum_files = GetSumFiles(builddir)
+def GetResults(sum_files):
+ """Collect all the test results from the given .sum files."""
build_results = set()
for sum_fname in sum_files:
print '\t%s' % sum_fname
def CompareResults(manifest, actual):
"""Compare sets of results and return two lists:
- - List of results present in MANIFEST but missing from ACTUAL.
- List of results present in ACTUAL but missing from MANIFEST.
+ - List of results present in MANIFEST but missing from ACTUAL.
"""
- # Report all the actual results not present in the manifest.
+ # Collect all the actual results not present in the manifest.
+ # Results in this set will be reported as errors.
actual_vs_manifest = set()
for actual_result in actual:
if actual_result not in manifest:
actual_vs_manifest.add(actual_result)
- # Simlarly for all the tests in the manifest.
+ # Collect all the tests in the manifest that were not found
+ # in the actual results.
+ # Results in this set will be reported as warnings (since
+ # they are expected failures that are not failing anymore).
manifest_vs_actual = set()
for expected_result in manifest:
# Ignore tests marked flaky.
return actual_vs_manifest, manifest_vs_actual
-def GetBuildData(options):
- target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target=')
- srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
- if not ValidBuildDirectory(options.build_dir, target):
- Error('%s is not a valid GCC top level build directory.' %
- options.build_dir)
def GetManifestPath(srcdir, target, user_provided_must_exist):
  """Return the full path to the manifest file.

  Prefers the user-supplied --manifest path (optionally required to exist);
  otherwise derives the default location from srcdir and target.
  """
  manifest_path = _OPTIONS.manifest
  if not manifest_path:
    assert srcdir and target
    return _MANIFEST_PATH_PATTERN % (srcdir, _MANIFEST_SUBDIR, target)
  if user_provided_must_exist and not os.path.exists(manifest_path):
    Error('Manifest does not exist: %s' % manifest_path)
  return manifest_path
+
+
def GetBuildData():
  """Return (srcdir, target) read from the build directory's Makefile.

  Returns (None, None) when we are not in a valid GCC build directory but
  the user supplied both --manifest and --results; aborts otherwise.
  """
  srcdir = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'srcdir =')
  target = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'target_alias=')
  if not ValidBuildDirectory(_OPTIONS.build_dir, target):
    # Outside a GCC build tree, validation is still possible when the user
    # provides both a manifest file and a set of results to check.
    if _OPTIONS.results and _OPTIONS.manifest:
      return None, None
    Error('%s is not a valid GCC top level build directory. '
          'You must use --manifest and --results to do the validation.' %
          _OPTIONS.build_dir)
  print('Source directory: %s' % srcdir)
  print('Build target: %s' % target)
  return srcdir, target
def PrintSummary(msg, summary):
print result
-def CheckExpectedResults(options):
- (srcdir, target, valid_build) = GetBuildData(options)
- if not valid_build:
- return False
-
- manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
- print 'Manifest: %s' % manifest_name
- manifest = GetManifest(manifest_name)
-
- print 'Getting actual results from build'
- actual = GetResults(options.build_dir)
def GetSumFiles(results, build_dir):
  """Return the list of .sum files to analyze.

  If results (a space-separated list of files) is given, it wins;
  otherwise the build directory is scanned for .sum files.
  """
  if results:
    print('Getting actual results from user-provided results')
    return results.split()
  print('Getting actual results from build directory %s' % build_dir)
  return CollectSumFiles(build_dir)
- if options.verbosity >= 1:
- PrintSummary('Tests expected to fail', manifest)
- PrintSummary('\nActual test results', actual)
- actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)
def PerformComparison(expected, actual, ignore_missing_failures):
  """Report differences between expected and actual result sets.

  Returns True when every actual failure was expected.  Fixed tests
  (expected but absent) are only reported, never counted as errors, and
  are suppressed entirely when ignore_missing_failures is set.
  """
  unexpected, fixed = CompareResults(expected, actual)

  tests_ok = not unexpected
  if unexpected:
    PrintSummary('Unexpected results in this build (new failures)',
                 unexpected)
  if fixed and not ignore_missing_failures:
    PrintSummary('Expected results not present in this build (fixed tests)'
                 '\n\nNOTE: This is not a failure. It just means that these '
                 'tests were expected\nto fail, but they worked in this '
                 'configuration.\n', fixed)
  if tests_ok:
    print('\nSUCCESS: No unexpected failures.')

  return tests_ok
-def ProduceManifest(options):
- (srcdir, target, valid_build) = GetBuildData(options)
- if not valid_build:
- return False
def CheckExpectedResults():
  """Validate the build's actual results against the failure manifest.

  Returns True when all failures in the build are listed in the manifest.
  """
  srcdir, target = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, True)
  print('Manifest: %s' % manifest_path)
  manifest = GetManifest(manifest_path)

  actual = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir))

  if _OPTIONS.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  return PerformComparison(manifest, actual, _OPTIONS.ignore_missing_failures)
+
+
def ProduceManifest():
  """Write a manifest file recording the current build's failures.

  Refuses to overwrite an existing manifest unless --force was given.
  Returns True on success.
  """
  (srcdir, target) = GetBuildData()

  manifest_path = GetManifestPath(srcdir, target, False)
  print('Manifest: %s' % manifest_path)
  if os.path.exists(manifest_path) and not _OPTIONS.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_path)

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)
  manifest_file = open(manifest_path, 'w')
  # try/finally guarantees the manifest is flushed and closed even if
  # writing raises (the original leaked the handle on that path).
  try:
    # Sort so the manifest content is stable across runs.
    for result in sorted(actual):
      print(result)
      manifest_file.write('%s\n' % result)
  finally:
    manifest_file.close()
  return True
def CompareBuilds():
  """Compare this build's test results against those of a clean build.

  Returns True when the current build introduces no failures beyond those
  already present in the clean reference build.
  """
  # Called for its side effects (build-directory validation and progress
  # output); srcdir/target are not needed for a build-vs-build comparison.
  GetBuildData()

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)

  # The clean build's results must always be collected from the clean build
  # directory.  Passing _OPTIONS.results here (as the original did) would
  # make both sides read the same files whenever --results is given,
  # rendering the comparison vacuous.
  clean_sum_files = GetSumFiles(None, _OPTIONS.clean_build)
  clean = GetResults(clean_sum_files)

  return PerformComparison(clean, actual, _OPTIONS.ignore_missing_failures)
+
+
def Main(argv):
  """Parse command-line options and dispatch to the requested action.

  Returns 0 on success, 1 on failure (new unexpected failures or a
  requested operation that could not be completed).
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build. Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  # Typo fix in the help text: 'testsuite-managment' -> 'testsuite-management'
  # (matches the real default path used by GetManifestPath).
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from '
                    'contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  global _OPTIONS
  (_OPTIONS, _) = parser.parse_args(argv[1:])

  if _OPTIONS.produce_manifest:
    retval = ProduceManifest()
  elif _OPTIONS.clean_build:
    retval = CompareBuilds()
  else:
    retval = CheckExpectedResults()

  if retval:
    return 0
  else:
    return 1
+
if __name__ == '__main__':
  # Propagate Main's status code directly to the shell.
  sys.exit(Main(sys.argv))