diff options
author | Stephen Hines <srhines@google.com> | 2017-02-07 13:02:21 -0800 |
---|---|---|
committer | Stephen Hines <srhines@google.com> | 2017-02-07 13:03:24 -0800 |
commit | 870a8df6fcb12de32fa2dd83b6ed0f7b24dbab1e (patch) | |
tree | 87b3a32b13c392939d66fa93105896f5df0736a6 /automation/clients/report/dejagnu | |
parent | baba90fd78c18585d22430dc95c748f96ad0c772 (diff) | |
parent | 058aae85dcfb12049ef90137915ec7e981288569 (diff) | |
download | toolchain-utils-870a8df6fcb12de32fa2dd83b6ed0f7b24dbab1e.tar.gz |
Merge remote-tracking branch 'aosp/mirror-chromium-master' into initial_import
Initial import of Chromium's toolchain-utils project.
Bug: http://b/31321592
Test: None necessary, as this is just a helper repository.
Change-Id: I61d2caaf1195da18cfaa7795706e8fc7fecff1d4
Diffstat (limited to 'automation/clients/report/dejagnu')
-rw-r--r-- | automation/clients/report/dejagnu/__init__.py | 1 | ||||
-rw-r--r-- | automation/clients/report/dejagnu/main.py | 137 | ||||
-rw-r--r-- | automation/clients/report/dejagnu/manifest.py | 103 | ||||
-rw-r--r-- | automation/clients/report/dejagnu/report.html | 94 | ||||
-rw-r--r-- | automation/clients/report/dejagnu/report.py | 115 | ||||
-rw-r--r-- | automation/clients/report/dejagnu/summary.py | 262 |
6 files changed, 712 insertions, 0 deletions
# Copyright 2011 Google Inc. All Rights Reserved.
# Author: kbaclawski@google.com (Krystian Baclawski)
#
"""Command line driver for DejaGNU test result processing.

Two subcommands are provided:
  manifest - reads *.sum files and writes ${tool}-${board}.xfail manifest
             files listing failures to suppress,
  report   - reads *.sum files (and optional manifests) and renders a single
             HTML report.
"""

from contextlib import contextmanager
import glob
from itertools import chain
import logging
import optparse
import os.path
import sys

from manifest import Manifest
import report
from summary import DejaGnuTestRun


def ExpandGlobExprList(paths):
  """Returns an iterator that goes over expanded glob paths."""
  return chain.from_iterable(map(glob.glob, paths))


@contextmanager
def OptionChecker(parser):
  """Provides scoped environment for command line option checking.

  Any SystemExit raised inside the managed block (e.g. via sys.exit with an
  error message) is prefixed with the parser's help text before exiting.
  """
  try:
    yield
  except SystemExit as ex:
    parser.print_help()
    # NOTE: print('') is equivalent to the py2 statement form `print ''`
    # but also parses under Python 3.
    print('')
    sys.exit('ERROR: %s' % str(ex))


def ManifestCommand(argv):
  """Handles the "manifest" subcommand.

  Reads each *.sum file named in argv[2:] (glob expressions allowed) and
  writes a ${tool}-${board}.xfail manifest into the current directory.

  Args:
    argv: full process argument list; argv[2:] are the *.sum paths.
  """
  parser = optparse.OptionParser(
      description=('Read in one or more DejaGNU summary files (.sum), parse '
                   'their content and generate manifest files. Manifest files '
                   'store a list of failed tests that should be ignored. '
                   'Generated files are stored in current directory under '
                   'following name: ${tool}-${board}.xfail (e.g. '
                   '"gcc-unix.xfail").'),
      usage='Usage: %prog manifest [file.sum] (file2.sum ...)')

  _, args = parser.parse_args(argv[2:])

  with OptionChecker(parser):
    if not args:
      sys.exit('At least one *.sum file required.')

  # Use the shared helper instead of repeating chain/map/glob inline
  # (consistency with ReportCommand).
  for filename in ExpandGlobExprList(args):
    test_run = DejaGnuTestRun.FromFile(filename)

    manifest = Manifest.FromDejaGnuTestRun(test_run)
    manifest_filename = '%s-%s.xfail' % (test_run.tool, test_run.board)

    with open(manifest_filename, 'w') as manifest_file:
      manifest_file.write(manifest.Generate())

    logging.info('Wrote manifest to "%s" file.', manifest_filename)


def ReportCommand(argv):
  """Handles the "report" subcommand.

  Reads the *.sum files named in argv[2:], optionally suppresses failures
  listed in manifest files (-m), and writes a single HTML report to the
  file named by -o.

  Args:
    argv: full process argument list; argv[2:] are options and *.sum paths.
  """
  parser = optparse.OptionParser(
      description=('Read in one or more DejaGNU summary files (.sum), parse '
                   'their content and generate a single report file in '
                   'selected format (currently only HTML).'),
      # BUGFIX: the usage string was missing its closing bracket.
      usage=('Usage: %prog report (-m manifest.xfail) [-o report.html] '
             '[file.sum (file2.sum ...)]'))
  parser.add_option(
      '-o',
      dest='output',
      type='string',
      default=None,
      # BUGFIX: the original help text was a copy-paste of the -m option's
      # description; it now describes the output file.
      help='Name of the file the HTML report will be written to.')
  parser.add_option(
      '-m',
      dest='manifests',
      type='string',
      action='append',
      default=None,
      help=('Suppress failures for test listed in provided manifest files. '
            '(use -m for each manifest file you want to read)'))

  opts, args = parser.parse_args(argv[2:])

  with OptionChecker(parser):
    if not args:
      sys.exit('At least one *.sum file required.')

    if not opts.output:
      sys.exit('Please provide name for report file.')

  manifests = []

  for filename in ExpandGlobExprList(opts.manifests or []):
    logging.info('Using "%s" manifest.', filename)
    manifests.append(Manifest.FromFile(filename))

  # Use the shared helper instead of repeating chain/map/glob inline.
  test_runs = [DejaGnuTestRun.FromFile(filename)
               for filename in ExpandGlobExprList(args)]

  html = report.Generate(test_runs, manifests)

  if html:
    with open(opts.output, 'w') as html_file:
      html_file.write(html)
    logging.info('Wrote report to "%s" file.', opts.output)
  else:
    # report.Generate logged the reason (e.g. Django missing).
    sys.exit(1)


def HelpCommand(argv):
  """Prints usage for the whole driver and exits with that message."""
  sys.exit('\n'.join([
      'Usage: %s command [options]' % os.path.basename(argv[0]),
      '',
      'Commands:',
      ' manifest - manage files containing a list of suppressed test failures',
      ' report - generate report file for selected test runs'
  ]))


def Main(argv):
  """Dispatches to the subcommand named in argv[1]; falls back to help."""
  try:
    cmd_name = argv[1]
  except IndexError:
    cmd_name = None

  cmd_map = {'manifest': ManifestCommand, 'report': ReportCommand}
  cmd_map.get(cmd_name, HelpCommand)(argv)


if __name__ == '__main__':
  FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
  logging.basicConfig(format=FORMAT, level=logging.INFO)

  Main(sys.argv)
class Manifest(namedtuple('Manifest', 'tool board results')):
  """Stores a list of unsuccessful tests.

  Any line that starts with '#@' marker carries auxiliary data in form of a
  key-value pair, for example:

  #@ tool: *
  #@ board: unix

  So far tool and board parameters are recognized. Their value can contain
  arbitrary glob expression. Based on aforementioned parameters given manifest
  will be applied for all test results, but only in selected test runs. Note
  that all parameters are optional. Their default value is '*' (i.e. for all
  tools/boards).

  The meaning of lines above is as follows: corresponding test results to follow
  should only be suppressed if test run was performed on "unix" board.

  The summary line used to build the test result should have this format:

  attrlist | UNRESOLVED: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   result      name              variant
  attributes
  """
  # Only these result types may be listed in (and suppressed by) a manifest.
  SUPPRESSIBLE_RESULTS = ['FAIL', 'UNRESOLVED', 'XPASS', 'ERROR']

  @classmethod
  def FromDejaGnuTestRun(cls, test_run):
    """Builds a manifest from the unsuccessful results of a test run.

    Args:
      test_run: DejaGnuTestRun instance providing tool, board and results.

    Returns:
      Manifest holding only the suppressible results of the run.
    """
    results = [result
               for result in test_run.results
               if result.result in cls.SUPPRESSIBLE_RESULTS]

    return cls(test_run.tool, test_run.board, results)

  @classmethod
  def FromFile(cls, filename):
    """Creates manifest instance from a file in format described above."""
    params = {}
    results = []

    with open(filename, 'r') as manifest_file:
      for line in manifest_file:
        if line.startswith('#@'):
          # parse a line with a parameter
          try:
            key, value = line[2:].split(':', 1)
          except ValueError:
            logging.warning('Malformed parameter line: "%s".', line)
          else:
            params[key.strip()] = value.strip()
        else:
          # remove trailing comment, if any
          try:
            line, _ = line.split('#', 1)
          except ValueError:
            pass

          line = line.strip()

          if line:
            # parse a line with a test result
            result = DejaGnuTestResult.FromLine(line)

            if result:
              results.append(result)
            else:
              logging.warning('Malformed test result line: "%s".', line)

    # Missing parameters default to '*' (match any tool/board).
    tool = params.get('tool', '*')
    board = params.get('board', '*')

    return cls(tool, board, results)

  def Generate(self):
    """Dumps manifest to string.

    Emits the tool/board parameter lines, a blank separator, then one line
    per result (sorted by result type for stable output).
    """
    # NOTE: The original implementation buffered output through cStringIO,
    # which is Python 2 only and unnecessary; joining a list of lines is
    # equivalent and portable.
    lines = ['#@ {0}: {1}\n'.format(name, getattr(self, name))
             for name in ['tool', 'board']]

    lines.append('\n')

    for result in sorted(self.results, key=lambda r: r.result):
      lines.append('{0}\n'.format(result))

    return ''.join(lines)

  def __iter__(self):
    """Iterates over stored test results."""
    return iter(self.results)
      {# Turn the top-level container into one jQuery UI tab per test run. #}
      $( "#testruns" ).tabs();

      {% for test_run in test_runs %}
      {# Nested tabs: one tab per result group within a test run. #}
      $( "#testrun{{ test_run.id }}" ).tabs();

      {% for result_type, group in test_run.groups.items %}
      {# Collapsible accordion holding one section per result description. #}
      $( "#testrun{{ test_run.id }}-{{ result_type }}-tables" ).accordion({
        autoHeight: false, collapsible: true, active: false });

      {# Pie chart summarising result counts for this group. #}
      drawChart(
        "testrun{{ test_run.id }}-{{ result_type }}-chart",
        "DejaGNU test {{ result_type }} summary for {{ test_run.name }}",
        [
          ["Result", "Count"],
          {% for result, count in group.summary %}
          ["{{ result }}", {{ count }}],{% endfor %}
        ]);

      {# One sortable table per non-empty test list in the group. #}
      {% for description, test_list in group.tests %}
      {% if test_list %}
      drawTable(
        "testrun{{ test_run.id }}-{{ result_type }}-table-{{ forloop.counter }}",
        [
          ["Test", "Variant"],
          {% for test, variant in test_list %}
          ["{{ test }}", "{{ variant }}"],{% endfor %}
        ]);
      {% endif %}
      {% endfor %}
      {% endfor %}
      {% endfor %}
  });
</script>

{# Static markup mirroring the widgets initialised in the script above;
   element ids must stay in sync with the drawChart/drawTable calls. #}
<div id="testruns">
  <ul>
    {% for test_run in test_runs %}
    <li><a href="#testrun{{ test_run.id }}">{{ test_run.name }}</a></li>
    {% endfor %}
  </ul>

  {% for test_run in test_runs %}
  <div id="testrun{{ test_run.id }}" style="padding: 0px">
    <ul>
      {% for result_type, group in test_run.groups.items %}
      <li>
        <a href="#testrun{{ test_run.id }}-{{ forloop.counter }}">{{ result_type }}</a>
      </li>
      {% endfor %}
    </ul>
    {% for result_type, group in test_run.groups.items %}
    <div id="testrun{{ test_run.id }}-{{ forloop.counter }}">
      <div id="testrun{{ test_run.id }}-{{ result_type }}-chart" style="text-align: center"></div>
      <div id="testrun{{ test_run.id }}-{{ result_type }}-tables">
        {% for description, test_list in group.tests %}
        {% if test_list %}
        <h3><a href="#">{{ description }}</a></h3>
        <div id="testrun{{ test_run.id }}-{{ result_type }}-table-{{ forloop.counter }}"></div>
        {% endif %}
        {% endfor %}
      </div>
    </div>
    {% endfor %}
  </div>
{% endfor %}
</div>
b/automation/clients/report/dejagnu/report.py new file mode 100644 index 00000000..191a5389 --- /dev/null +++ b/automation/clients/report/dejagnu/report.py @@ -0,0 +1,115 @@ +# Copyright 2011 Google Inc. All Rights Reserved. +# Author: kbaclawski@google.com (Krystian Baclawski) +# + +import logging +import os.path + +RESULT_DESCRIPTION = { + 'ERROR': 'DejaGNU errors', + 'FAIL': 'Failed tests', + 'NOTE': 'DejaGNU notices', + 'PASS': 'Passed tests', + 'UNRESOLVED': 'Unresolved tests', + 'UNSUPPORTED': 'Unsupported tests', + 'UNTESTED': 'Not executed tests', + 'WARNING': 'DejaGNU warnings', + 'XFAIL': 'Expected test failures', + 'XPASS': 'Unexpectedly passed tests' +} + +RESULT_GROUPS = { + 'Successes': ['PASS', 'XFAIL'], + 'Failures': ['FAIL', 'XPASS', 'UNRESOLVED'], + 'Suppressed': ['!FAIL', '!XPASS', '!UNRESOLVED', '!ERROR'], + 'Framework': ['UNTESTED', 'UNSUPPORTED', 'ERROR', 'WARNING', 'NOTE'] +} + +ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + + +def _GetResultDescription(name): + if name.startswith('!'): + name = name[1:] + + try: + return RESULT_DESCRIPTION[name] + except KeyError: + raise ValueError('Unknown result: "%s"' % name) + + +def _PrepareSummary(res_types, summary): + + def GetResultCount(res_type): + return summary.get(res_type, 0) + + return [(_GetResultDescription(rt), GetResultCount(rt)) for rt in res_types] + + +def _PrepareTestList(res_types, tests): + + def GetTestsByResult(res_type): + return [(test.name, test.variant or '') + for test in sorted(tests) if test.result == res_type] + + return [(_GetResultDescription(rt), GetTestsByResult(rt)) + for rt in res_types if rt != 'PASS'] + + +def Generate(test_runs, manifests): + """Generate HTML report from provided test runs. + + Args: + test_runs: DejaGnuTestRun objects list. + manifests: Manifest object list that will drive test result suppression. + + Returns: + String to which the HTML report was rendered. 
+ """ + tmpl_args = [] + + for test_run_id, test_run in enumerate(test_runs): + logging.info('Generating report for: %s.', test_run) + + test_run.CleanUpTestResults() + test_run.SuppressTestResults(manifests) + + # Generate summary and test list for each result group + groups = {} + + for res_group, res_types in RESULT_GROUPS.items(): + summary_all = _PrepareSummary(res_types, test_run.summary) + tests_all = _PrepareTestList(res_types, test_run.results) + + has_2nd = lambda tuple2: bool(tuple2[1]) + summary = filter(has_2nd, summary_all) + tests = filter(has_2nd, tests_all) + + if summary or tests: + groups[res_group] = {'summary': summary, 'tests': tests} + + tmpl_args.append({ + 'id': test_run_id, + 'name': '%s @%s' % (test_run.tool, test_run.board), + 'groups': groups + }) + + logging.info('Rendering report in HTML format.') + + try: + from django import template + from django.template import loader + from django.conf import settings + except ImportError: + logging.error('Django framework not installed!') + logging.error('Failed to generate report in HTML format!') + return '' + + settings.configure(DEBUG=True, + TEMPLATE_DEBUG=True, + TEMPLATE_DIRS=(ROOT_PATH,)) + + tmpl = loader.get_template('report.html') + ctx = template.Context({'test_runs': tmpl_args}) + + return tmpl.render(ctx) diff --git a/automation/clients/report/dejagnu/summary.py b/automation/clients/report/dejagnu/summary.py new file mode 100644 index 00000000..d573c691 --- /dev/null +++ b/automation/clients/report/dejagnu/summary.py @@ -0,0 +1,262 @@ +# Copyright 2011 Google Inc. All Rights Reserved. 
# Author: kbaclawski@google.com (Krystian Baclawski)
#
# Parses DejaGNU summary (.sum) output into structured test results.

from collections import defaultdict
from collections import namedtuple
from datetime import datetime
from fnmatch import fnmatch
from itertools import groupby
import logging
import os.path
import re


class DejaGnuTestResult(namedtuple('Result', 'name variant result flaky')):
  """Stores the result of a single test case.

  Fields:
    name: test file path, relative to the testsuite directory ('' for
        framework messages that carry no path).
    variant: remaining free-form description of the test invocation.
    result: DejaGNU result code (e.g. 'PASS', 'FAIL', 'UNRESOLVED').
    flaky: True if the summary line carried the 'flaky' attribute prefix.
  """

  # avoid adding __dict__ to the class
  __slots__ = ()

  # Matches e.g. "FAIL: gcc.dg/unroll_1.c (test for excess errors)":
  # group 1 - result code, group 2 - path, group 3 - rest of the line.
  LINE_RE = re.compile(r'([A-Z]+):\s+([\w/+.-]+)(.*)')

  @classmethod
  def FromLine(cls, line):
    """Alternate constructor which takes a string and parses it.

    Returns None when the line does not look like a test result (or has an
    attribute list other than 'flaky').
    """
    # A leading "attrlist |" marks extra attributes; only 'flaky' is known.
    try:
      attrs, line = line.split('|', 1)

      if attrs.strip() != 'flaky':
        return None

      line = line.strip()
      flaky = True
    except ValueError:
      # No '|' present - a plain result line.
      flaky = False

    fields = cls.LINE_RE.match(line.strip())

    if fields:
      result, path, variant = fields.groups()

      # some of the tests are generated in build dir and are issued from there,
      # because every test run is performed in randomly named tmp directory we
      # need to remove random part
      try:
        # assume that 2nd field is a test path
        path_parts = path.split('/')

        index = path_parts.index('testsuite')
        path = '/'.join(path_parts[index + 1:])
      except ValueError:
        # No 'testsuite' component - keep the path as-is.
        path = '/'.join(path_parts)

      # Remove junk from test description.
      variant = variant.strip(', ')

      substitutions = [
          # remove include paths - they contain name of tmp directory
          ('-I\S+', ''),
          # compress white spaces
          ('\s+', ' ')
      ]

      for pattern, replacement in substitutions:
        variant = re.sub(pattern, replacement, variant)

      # Some tests separate last component of path by space, so actual filename
      # ends up in description instead of path part. Correct that.
      try:
        first, rest = variant.split(' ', 1)
      except ValueError:
        # Single-word (or empty) variant - nothing to relocate.
        pass
      else:
        if first.endswith('.o'):
          path = os.path.join(path, first)
          variant = rest

      # DejaGNU framework errors don't contain path part at all, so description
      # part has to be reconstructed.
      if not any(os.path.basename(path).endswith('.%s' % suffix)
                 for suffix in ['h', 'c', 'C', 'S', 'H', 'cc', 'i', 'o']):
        variant = '%s %s' % (path, variant)
        path = ''

      # Some tests are picked up from current directory (presumably DejaGNU
      # generates some test files). Remove the prefix for these files.
      if path.startswith('./'):
        path = path[2:]

      return cls(path, variant or '', result, flaky=flaky)
    # implicit: return None when the regex did not match

  def __str__(self):
    """Returns string representation of a test result.

    The format round-trips through FromLine:
    "[flaky | ]RESULT: name[ variant]".
    """
    if self.flaky:
      fmt = 'flaky | '
    else:
      fmt = ''
    fmt += '{2}: {0}'
    if self.variant:
      fmt += ' {1}'
    return fmt.format(*self)


class DejaGnuTestRun(object):
  """Container for test results that were a part of single test run.

  The class stores also metadata related to the test run.

  Attributes:
    board: Name of DejaGNU board, which was used to run the tests.
    date: The date when the test run was started.
    target: Target triple.
    host: Host triple.
    tool: The tool that was tested (e.g. gcc, binutils, g++, etc.)
    results: a list of DejaGnuTestResult objects.
  """

  __slots__ = ('board', 'date', 'target', 'host', 'tool', 'results')

  def __init__(self, **kwargs):
    # Only slot names may be passed as keyword arguments.
    assert all(name in self.__slots__ for name in kwargs)

    self.results = set()
    self.date = kwargs.get('date', datetime.now())

    # Metadata defaults to 'unknown' until parsed from the output file.
    for name in ('board', 'target', 'tool', 'host'):
      setattr(self, name, kwargs.get(name, 'unknown'))

  @classmethod
  def FromFile(cls, filename):
    """Alternate constructor - reads a DejaGNU output file."""
    test_run = cls()
    test_run.FromDejaGnuOutput(filename)
    test_run.CleanUpTestResults()
    return test_run

  @property
  def summary(self):
    """Returns a summary as {ResultType -> Count} dictionary."""
    summary = defaultdict(int)

    for r in self.results:
      summary[r.result] += 1

    return summary

  # Callbacks for the header-line regexps in FromDejaGnuOutput; each receives
  # the regexp match object.
  def _ParseBoard(self, fields):
    self.board = fields.group(1).strip()

  def _ParseDate(self, fields):
    # e.g. "Mon Jan 31 12:00:00 2011"
    self.date = datetime.strptime(fields.group(2).strip(), '%a %b %d %X %Y')

  def _ParseTarget(self, fields):
    self.target = fields.group(2).strip()

  def _ParseHost(self, fields):
    self.host = fields.group(2).strip()

  def _ParseTool(self, fields):
    self.tool = fields.group(1).strip()

  def FromDejaGnuOutput(self, filename):
    """Read in and parse DejaGNU output file."""

    logging.info('Reading "%s" DejaGNU output file.', filename)

    with open(filename, 'r') as report:
      lines = [line.strip() for line in report.readlines() if line.strip()]

    # Header lines carrying run metadata, tried in order for every line that
    # is not a test result.
    parsers = ((re.compile(r'Running target (.*)'), self._ParseBoard),
               (re.compile(r'Test Run By (.*) on (.*)'), self._ParseDate),
               (re.compile(r'=== (.*) tests ==='), self._ParseTool),
               (re.compile(r'Target(\s+)is (.*)'), self._ParseTarget),
               (re.compile(r'Host(\s+)is (.*)'), self._ParseHost))

    for line in lines:
      result = DejaGnuTestResult.FromLine(line)

      if result:
        self.results.add(result)
      else:
        for regexp, parser in parsers:
          fields = regexp.match(line)
          if fields:
            parser(fields)
            break

    logging.debug('DejaGNU output file parsed successfully.')
    logging.debug(self)

  def CleanUpTestResults(self):
    """Remove certain test results considered to be spurious.

    1) Large number of test reported as UNSUPPORTED are also marked as
       UNRESOLVED. If that's the case remove latter result.
    2) If a test is performed on compiler output and for some reason compiler
       fails, we don't want to report all failures that depend on the former.
    """
    name_key = lambda v: v.name
    # groupby requires its input sorted by the same key.
    results_by_name = sorted(self.results, key=name_key)

    for name, res_iter in groupby(results_by_name, key=name_key):
      results = set(res_iter)

      # If DejaGnu was unable to compile a test it will create following result:
      failed = DejaGnuTestResult(name, '(test for excess errors)', 'FAIL',
                                 False)

      # If a test compilation failed, remove all results that are dependent.
      if failed in results:
        # Everything except the FAIL results themselves depends on a
        # successful compilation.
        dependants = set(filter(lambda r: r.result != 'FAIL', results))

        self.results -= dependants

        for res in dependants:
          logging.info('Removed {%s} dependance.', res)

      # Remove all UNRESOLVED results that were also marked as UNSUPPORTED.
      unresolved = [res._replace(result='UNRESOLVED')
                    for res in results if res.result == 'UNSUPPORTED']

      for res in unresolved:
        if res in self.results:
          self.results.remove(res)
          logging.info('Removed {%s} duplicate.', res)

  def _IsApplicable(self, manifest):
    """Checks if test results need to be reconsidered based on the manifest."""
    # Manifest tool/board are glob patterns matched against this run.
    check_list = [(self.tool, manifest.tool), (self.board, manifest.board)]

    return all(fnmatch(text, pattern) for text, pattern in check_list)

  def SuppressTestResults(self, manifests):
    """Suppresses all test results listed in manifests.

    Suppressed results are rewritten in place with a '!' prefix on the result
    code (e.g. FAIL -> !FAIL); manifest entries that matched nothing are
    reported as warnings.
    """

    # Get a set of tests results that are going to be suppressed if they fail.
    manifest_results = set()

    for manifest in filter(self._IsApplicable, manifests):
      manifest_results |= set(manifest.results)

    suppressed_results = self.results & manifest_results

    for result in sorted(suppressed_results):
      logging.debug('Result suppressed for {%s}.', result)

      new_result = '!' + result.result

      # Mark result suppression as applied.
      manifest_results.remove(result)

      # Rewrite test result.
      self.results.remove(result)
      self.results.add(result._replace(result=new_result))

    # Whatever remains was listed in a manifest but never seen in this run.
    for result in sorted(manifest_results):
      logging.warning('Result {%s} listed in manifest but not suppressed.',
                      result)

  def __str__(self):
    return '{0}, {1} @{2} on {3}'.format(self.target, self.tool, self.board,
                                         self.date)