3 # Script to compare testsuite failures against a list of known-to-fail
6 # Contributed by Diego Novillo <dnovillo@google.com>
8 # Copyright (C) 2011 Free Software Foundation, Inc.
10 # This file is part of GCC.
12 # GCC is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU General Public License as published by
14 # the Free Software Foundation; either version 3, or (at your option)
17 # GCC is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
22 # You should have received a copy of the GNU General Public License
23 # along with GCC; see the file COPYING. If not, write to
24 # the Free Software Foundation, 51 Franklin Street, Fifth Floor,
25 # Boston, MA 02110-1301, USA.
27 """This script provides a coarser XFAILing mechanism that requires no
28 detailed DejaGNU markings. This is useful in a variety of scenarios:
30 - Development branches with many known failures waiting to be fixed.
31 - Release branches with known failures that are not considered
32 important for the particular release criteria used in that branch.
The script must be executed from the toplevel build directory.  When
executed, it performs the following steps:
37 1- Determine the target built: TARGET
38 2- Determine the source directory: SRCDIR
39 3- Look for a failure manifest file in
40 <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
41 4- Collect all the <tool>.sum files from the build tree.
42 5- Produce a report stating:
43 a- Failures expected in the manifest but not present in the build.
44 b- Failures in the build not expected in the manifest.
45 6- If all the build failures are expected in the manifest, it exits
46 with exit code 0. Otherwise, it exits with error code 1.
# Handled test results.  Only these DejaGNU outcome keywords are treated
# as "failures" worth tracking; everything else (PASS, XFAIL, ...) is ignored.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Pattern for naming manifest files.  The first argument should be
# the toplevel GCC source directory.  The second argument is the
# target triple used during the build.
_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
def Error(msg):
  """Print an error message to stderr and abort the script.

  This must not return: callers (e.g. TestResult.__init__) fall through
  to code that relies on state that is only valid when parsing succeeded.
  """
  sys.stderr.write('\nerror: %s\n' % msg)
  sys.exit(1)
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description

  Attributes:
    attrs: A comma separated list of attributes.  Known values:
      flaky            Indicates that this test may not always fail.  These
                       tests are reported, but their presence does not affect
                       the results.
      expire=YYYYMMDD  After this date, this test will produce an error
                       whether it is in the manifest or not.
    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
  """

  def __init__(self, summary_line):
    try:
      self.attrs = ''
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      # AttributeError is raised when re.match() finds no match and
      # returns None; treat that the same as a malformed line.
      (self.state,
       self.name,
       self.description) = re.match(r' *([A-Z]+): ([^ ]+) (.*)',
                                    summary_line).groups()
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
    except (ValueError, AttributeError):
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    # Sort results by test name only; used by sorted() when reporting.
    return self.name < other.name

  def __hash__(self):
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    # NOTE: attrs is deliberately excluded, so a manifest entry marked
    # e.g. "flaky" still matches the plain result from the build.
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)
def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to VALUE_NAME in MAKEFILE_NAME.

  VALUE_NAME should include everything up to (and including) the '='
  so that similarly named variables do not match.  Returns None when
  the makefile does not exist or the variable is not found.
  """
  if os.path.exists(makefile_name):
    with open(makefile_name) as makefile:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          return value.strip()
  return None
def ValidBuildDirectory(builddir, target):
  """Return True if BUILDDIR looks like a GCC toplevel build directory.

  A valid build directory exists, contains a Makefile, and contains a
  build-<target> subdirectory.
  """
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      not os.path.exists('%s/build-%s' % (builddir, target))):
    return False
  return True
def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about."""
  # Manifest files may contain comment lines; never treat those as results.
  if line.startswith('#'):
    return False
  # Strip an optional "attrlist |" prefix before matching the state keyword.
  if '|' in line:
    (_, line) = line.split('|', 1)
  # Whitespace would defeat the startswith() checks below.
  line = line.strip()
  for result in _VALID_TEST_RESULTS:
    if line.startswith(result):
      return True
  return False
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  with open(sum_fname) as sum_file:
    for line in sum_file:
      if IsInterestingResult(line):
        result_set.add(TestResult(line))
  return result_set
def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty
  set.
  """
  if os.path.exists(manifest_name):
    return ParseSummary(manifest_name)
  else:
    return set()
def GetSumFiles(builddir):
  """Return a list of all .sum files found anywhere under BUILDDIR."""
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    for fname in files:
      if fname.endswith('.sum'):
        sum_files.append(os.path.join(root, fname))
  return sum_files
def GetResults(builddir):
  """Collect all the test results from .sum files under the given build
  directory."""
  sum_files = GetSumFiles(builddir)
  build_results = set()
  for sum_fname in sum_files:
    # Echo each file as it is processed so the user can follow progress.
    print('\t%s' % sum_fname)
    build_results |= ParseSummary(sum_fname)
  return build_results
def CompareResults(manifest, actual):
  """Compare sets of results and return two sets:
  - Results present in ACTUAL but missing from MANIFEST (unexpected failures).
  - Results present in MANIFEST but missing from ACTUAL (tests that were
    expected to fail but did not).
  """
  # Report all the actual results not present in the manifest.
  actual_vs_manifest = set()
  for actual_result in actual:
    if actual_result not in manifest:
      actual_vs_manifest.add(actual_result)

  # Similarly for all the tests in the manifest.
  manifest_vs_actual = set()
  for expected_result in manifest:
    # Ignore tests marked flaky: they may legitimately be absent.
    if 'flaky' in expected_result.attrs:
      continue
    if expected_result not in actual:
      manifest_vs_actual.add(expected_result)

  return actual_vs_manifest, manifest_vs_actual
def GetBuildData(options):
  """Return (srcdir, target, True) for the build in options.build_dir.

  Aborts via Error() if the build directory is not a valid GCC
  toplevel build directory.
  """
  target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target=')
  srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
  if not ValidBuildDirectory(options.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          options.build_dir)
  print('Source directory: %s' % srcdir)
  print('Build target: %s' % target)
  return srcdir, target, True
def PrintSummary(msg, summary):
  """Print MSG followed by every result in SUMMARY, sorted by test name."""
  print(msg)
  for result in sorted(summary):
    print(result)
def CheckExpectedResults(options):
  """Compare the build's failures against the expected-failure manifest.

  Returns 0 when every failure found in the build is listed in the
  manifest, 1 otherwise (see the module docstring for the contract).
  """
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return 1

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  print('Manifest: %s' % manifest_name)
  manifest = GetManifest(manifest_name)

  print('Getting actual results from build')
  actual = GetResults(options.build_dir)

  if options.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)

  tests_ok = True
  if len(actual_vs_manifest) > 0:
    PrintSummary('Build results not in the manifest', actual_vs_manifest)
    tests_ok = False

  # Tests expected to fail that did not fail are reported but are NOT an
  # error: the manifest was simply pessimistic for this configuration.
  if len(manifest_vs_actual) > 0:
    PrintSummary('Manifest results not present in the build'
                 '\n\nNOTE: This is not a failure. It just means that the '
                 'manifest expected\nthese tests to fail, '
                 'but they worked in this configuration.\n',
                 manifest_vs_actual)

  if tests_ok:
    print('\nSUCCESS: No unexpected failures.')

  return 0 if tests_ok else 1
def ProduceManifest(options):
  """Write a new manifest file from the current build's failures.

  Refuses to overwrite an existing manifest unless --force was given.
  Returns 0 on success, 1 when the build directory is invalid.
  """
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return 1

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  if os.path.exists(manifest_name) and not options.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_name)

  actual = GetResults(options.build_dir)
  with open(manifest_name, 'w') as manifest_file:
    for result in sorted(actual):
      manifest_file.write('%s\n' % result)

  return 0
def Main(argv):
  """Parse ARGV and run the requested action; return the exit status."""
  parser = optparse.OptionParser(usage=__doc__)
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--manifest', action='store_true', dest='manifest',
                    default=False, help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --manifest, it will '
                    'overwrite an existing manifest file (default = False)')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.manifest:
    retval = ProduceManifest(options)
  else:
    retval = CheckExpectedResults(options)

  return retval
if __name__ == '__main__':
  retval = Main(sys.argv)
  # Propagate the result as the process exit code; the module docstring
  # promises exit status 0 on success and 1 on unexpected failures.
  sys.exit(retval)