path: root/deprecated/automation/clients/report
Diffstat (limited to 'deprecated/automation/clients/report')
-rwxr-xr-x  deprecated/automation/clients/report/dejagnu.sh           |   9
-rw-r--r--  deprecated/automation/clients/report/dejagnu/__init__.py  |   1
-rw-r--r--  deprecated/automation/clients/report/dejagnu/main.py      | 137
-rw-r--r--  deprecated/automation/clients/report/dejagnu/manifest.py  | 103
-rw-r--r--  deprecated/automation/clients/report/dejagnu/report.html  |  94
-rw-r--r--  deprecated/automation/clients/report/dejagnu/report.py    | 115
-rw-r--r--  deprecated/automation/clients/report/dejagnu/summary.py   | 262
-rwxr-xr-x  deprecated/automation/clients/report/validate_failures.py | 239
8 files changed, 0 insertions, 960 deletions
diff --git a/deprecated/automation/clients/report/dejagnu.sh b/deprecated/automation/clients/report/dejagnu.sh
deleted file mode 100755
index fadd8a0c..00000000
--- a/deprecated/automation/clients/report/dejagnu.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2011 Google Inc. All Rights Reserved.
-# Author: kbaclawski@google.com (Krystian Baclawski)
-#
-
-export PYTHONPATH="$(pwd)"
-
-python dejagnu/main.py "$@"
diff --git a/deprecated/automation/clients/report/dejagnu/__init__.py b/deprecated/automation/clients/report/dejagnu/__init__.py
deleted file mode 100644
index 8b137891..00000000
--- a/deprecated/automation/clients/report/dejagnu/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/deprecated/automation/clients/report/dejagnu/main.py b/deprecated/automation/clients/report/dejagnu/main.py
deleted file mode 100644
index 62f095e1..00000000
--- a/deprecated/automation/clients/report/dejagnu/main.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
-# Author: kbaclawski@google.com (Krystian Baclawski)
-#
-
-from contextlib import contextmanager
-import glob
-from itertools import chain
-import logging
-import optparse
-import os.path
-import sys
-
-from manifest import Manifest
-import report
-from summary import DejaGnuTestRun
-
-
-def ExpandGlobExprList(paths):
- """Returns an iterator that goes over expanded glob paths."""
- return chain.from_iterable(map(glob.glob, paths))
-
-
-@contextmanager
-def OptionChecker(parser):
- """Provides scoped environment for command line option checking."""
- try:
- yield
- except SystemExit as ex:
- parser.print_help()
- print ''
- sys.exit('ERROR: %s' % str(ex))
-
-
-def ManifestCommand(argv):
- parser = optparse.OptionParser(
- description=
- ('Read in one or more DejaGNU summary files (.sum), parse their '
- 'content and generate manifest files. Manifest files store a list '
- 'of failed tests that should be ignored. Generated files are '
-       'stored in the current directory under the following name: '
- '${tool}-${board}.xfail (e.g. "gcc-unix.xfail").'),
- usage='Usage: %prog manifest [file.sum] (file2.sum ...)')
-
- _, args = parser.parse_args(argv[2:])
-
- with OptionChecker(parser):
- if not args:
- sys.exit('At least one *.sum file required.')
-
- for filename in chain.from_iterable(map(glob.glob, args)):
- test_run = DejaGnuTestRun.FromFile(filename)
-
- manifest = Manifest.FromDejaGnuTestRun(test_run)
- manifest_filename = '%s-%s.xfail' % (test_run.tool, test_run.board)
-
- with open(manifest_filename, 'w') as manifest_file:
- manifest_file.write(manifest.Generate())
-
- logging.info('Wrote manifest to "%s" file.', manifest_filename)
-
-
-def ReportCommand(argv):
- parser = optparse.OptionParser(
- description=
- ('Read in one or more DejaGNU summary files (.sum), parse their '
-       'content and generate a single report file in the selected format '
- '(currently only HTML).'),
- usage=('Usage: %prog report (-m manifest.xfail) [-o report.html] '
-             '[file.sum (file2.sum ...)]'))
- parser.add_option(
- '-o',
- dest='output',
- type='string',
- default=None,
-      help='Name of the file the generated report will be written to.')
- parser.add_option(
- '-m',
- dest='manifests',
- type='string',
- action='append',
- default=None,
-      help=('Suppress failures for tests listed in the provided manifest files. '
- '(use -m for each manifest file you want to read)'))
-
- opts, args = parser.parse_args(argv[2:])
-
- with OptionChecker(parser):
- if not args:
- sys.exit('At least one *.sum file required.')
-
- if not opts.output:
- sys.exit('Please provide name for report file.')
-
- manifests = []
-
- for filename in ExpandGlobExprList(opts.manifests or []):
- logging.info('Using "%s" manifest.', filename)
- manifests.append(Manifest.FromFile(filename))
-
- test_runs = [DejaGnuTestRun.FromFile(filename)
- for filename in chain.from_iterable(map(glob.glob, args))]
-
- html = report.Generate(test_runs, manifests)
-
- if html:
- with open(opts.output, 'w') as html_file:
- html_file.write(html)
- logging.info('Wrote report to "%s" file.', opts.output)
- else:
- sys.exit(1)
-
-
-def HelpCommand(argv):
-  sys.exit('\n'.join([
-      'Usage: %s command [options]' % os.path.basename(argv[0]),
-      '',
-      'Commands:',
-      ' manifest - manage files containing a list of suppressed test failures',
-      ' report - generate report file for selected test runs'
-  ]))
-
-
-def Main(argv):
- try:
- cmd_name = argv[1]
- except IndexError:
- cmd_name = None
-
- cmd_map = {'manifest': ManifestCommand, 'report': ReportCommand}
- cmd_map.get(cmd_name, HelpCommand)(argv)
-
-
-if __name__ == '__main__':
- FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
- logging.basicConfig(format=FORMAT, level=logging.INFO)
-
- Main(sys.argv)
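
A minimal Python 2 sketch of driving the dispatcher above programmatically,
assuming PYTHONPATH points at the report client directory as dejagnu.sh
arranges; the .sum, .xfail and .html file names are hypothetical placeholders:

    from dejagnu.main import Main

    # Generate ${tool}-${board}.xfail manifests from a summary file.
    Main(['main.py', 'manifest', 'gcc.sum'])

    # Render an HTML report, suppressing failures listed in a manifest.
    Main(['main.py', 'report',
          '-m', 'gcc-unix.xfail',
          '-o', 'report.html',
          'gcc.sum'])
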
diff --git a/deprecated/automation/clients/report/dejagnu/manifest.py b/deprecated/automation/clients/report/dejagnu/manifest.py
deleted file mode 100644
index 5831d1b0..00000000
--- a/deprecated/automation/clients/report/dejagnu/manifest.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
-# Author: kbaclawski@google.com (Krystian Baclawski)
-#
-
-__author__ = 'kbaclawski@google.com (Krystian Baclawski)'
-
-from collections import namedtuple
-from cStringIO import StringIO
-import logging
-
-from summary import DejaGnuTestResult
-
-
-class Manifest(namedtuple('Manifest', 'tool board results')):
- """Stores a list of unsuccessful tests.
-
-  Any line that starts with the '#@' marker carries auxiliary data in the form
-  of a key-value pair, for example:
-
-  #@ tool: *
-  #@ board: unix
-
-  So far the tool and board parameters are recognized. Their values may contain
-  arbitrary glob expressions. Based on these parameters a manifest is applied
-  to all test results, but only in matching test runs. All parameters are
-  optional; their default value is '*' (i.e. matching all tools/boards).
-
-  The lines above mean: the test results that follow should only be suppressed
-  if the test run was performed on the "unix" board.
-
-  The summary line used to build a test result should have this format:
-
-  attrlist | UNRESOLVED: gcc.dg/unroll_1.c (test for excess errors)
-  ^^^^^^^^   ^^^^^^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
-  optional   result      name              variant
-  attributes
- """
- SUPPRESSIBLE_RESULTS = ['FAIL', 'UNRESOLVED', 'XPASS', 'ERROR']
-
- @classmethod
- def FromDejaGnuTestRun(cls, test_run):
- results = [result
- for result in test_run.results
- if result.result in cls.SUPPRESSIBLE_RESULTS]
-
- return cls(test_run.tool, test_run.board, results)
-
- @classmethod
- def FromFile(cls, filename):
- """Creates manifest instance from a file in format described above."""
- params = {}
- results = []
-
- with open(filename, 'r') as manifest_file:
- for line in manifest_file:
- if line.startswith('#@'):
- # parse a line with a parameter
- try:
- key, value = line[2:].split(':', 1)
- except ValueError:
- logging.warning('Malformed parameter line: "%s".', line)
- else:
- params[key.strip()] = value.strip()
- else:
- # remove comment
- try:
- line, _ = line.split('#', 1)
- except ValueError:
- pass
-
- line = line.strip()
-
- if line:
- # parse a line with a test result
- result = DejaGnuTestResult.FromLine(line)
-
- if result:
- results.append(result)
- else:
- logging.warning('Malformed test result line: "%s".', line)
-
- tool = params.get('tool', '*')
- board = params.get('board', '*')
-
- return cls(tool, board, results)
-
- def Generate(self):
- """Dumps manifest to string."""
- text = StringIO()
-
- for name in ['tool', 'board']:
- text.write('#@ {0}: {1}\n'.format(name, getattr(self, name)))
-
- text.write('\n')
-
- for result in sorted(self.results, key=lambda r: r.result):
- text.write('{0}\n'.format(result))
-
- return text.getvalue()
-
- def __iter__(self):
- return iter(self.results)
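
A minimal Python 2 sketch of the manifest format documented above, round-tripped
through Manifest.FromFile(); the file name is illustrative and the test entry
reuses the docstring's example line:

    from dejagnu.manifest import Manifest

    text = ('#@ tool: gcc\n'
            '#@ board: unix\n'
            '\n'
            'UNRESOLVED: gcc.dg/unroll_1.c (test for excess errors)\n')

    with open('gcc-unix.xfail', 'w') as xfail_file:  # hypothetical file name
      xfail_file.write(text)

    manifest = Manifest.FromFile('gcc-unix.xfail')
    print manifest.tool, manifest.board  # gcc unix
    for result in manifest:              # Manifest iterates over its results
      print result                       # UNRESOLVED: gcc.dg/unroll_1.c (...)
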
diff --git a/deprecated/automation/clients/report/dejagnu/report.html b/deprecated/automation/clients/report/dejagnu/report.html
deleted file mode 100644
index 39b39e09..00000000
--- a/deprecated/automation/clients/report/dejagnu/report.html
+++ /dev/null
@@ -1,94 +0,0 @@
-<link type="text/css" rel="Stylesheet"
-href="http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/themes/ui-lightness/jquery-ui.css"/>
-
-<script type="text/javascript" src="https://www.google.com/jsapi"></script>
-<script type="text/javascript">
- google.load("visualization", "1.1", {packages: ["corechart", "table"]});
- google.load("jquery", "1.6.2");
- google.load("jqueryui", "1.8.16");
-
- function drawChart(name, label, table) {
- var data = google.visualization.arrayToDataTable(table);
- var chart = new google.visualization.PieChart(
- document.getElementById(name));
-
- chart.draw(data,
- {title: label, pieSliceText: "value", width: 800, height: 400});
- }
-
- function drawTable(name, table) {
- var data = google.visualization.arrayToDataTable(table);
- var table = new google.visualization.Table(
- document.getElementById(name));
-
- table.draw(data, {
- showRowNumber: false, allowHtml: true, sortColumn: 0});
- }
-
- google.setOnLoadCallback(function () {
- $( "#testruns" ).tabs();
-
- {% for test_run in test_runs %}
- $( "#testrun{{ test_run.id }}" ).tabs();
-
- {% for result_type, group in test_run.groups.items %}
- $( "#testrun{{ test_run.id }}-{{ result_type }}-tables" ).accordion({
- autoHeight: false, collapsible: true, active: false });
-
- drawChart(
- "testrun{{ test_run.id }}-{{ result_type }}-chart",
- "DejaGNU test {{ result_type }} summary for {{ test_run.name }}",
- [
- ["Result", "Count"],
- {% for result, count in group.summary %}
- ["{{ result }}", {{ count }}],{% endfor %}
- ]);
-
- {% for description, test_list in group.tests %}
- {% if test_list %}
- drawTable(
- "testrun{{ test_run.id }}-{{ result_type }}-table-{{ forloop.counter }}",
- [
- ["Test", "Variant"],
- {% for test, variant in test_list %}
- ["{{ test }}", "{{ variant }}"],{% endfor %}
- ]);
- {% endif %}
- {% endfor %}
- {% endfor %}
- {% endfor %}
- });
-</script>
-
-<div id="testruns">
- <ul>
- {% for test_run in test_runs %}
- <li><a href="#testrun{{ test_run.id }}">{{ test_run.name }}</a></li>
- {% endfor %}
- </ul>
-
- {% for test_run in test_runs %}
- <div id="testrun{{ test_run.id }}" style="padding: 0px">
- <ul>
- {% for result_type, group in test_run.groups.items %}
- <li>
- <a href="#testrun{{ test_run.id }}-{{ forloop.counter }}">{{ result_type }}</a>
- </li>
- {% endfor %}
- </ul>
- {% for result_type, group in test_run.groups.items %}
- <div id="testrun{{ test_run.id }}-{{ forloop.counter }}">
- <div id="testrun{{ test_run.id }}-{{ result_type }}-chart" style="text-align: center"></div>
- <div id="testrun{{ test_run.id }}-{{ result_type }}-tables">
- {% for description, test_list in group.tests %}
- {% if test_list %}
- <h3><a href="#">{{ description }}</a></h3>
- <div id="testrun{{ test_run.id }}-{{ result_type }}-table-{{ forloop.counter }}"></div>
- {% endif %}
- {% endfor %}
- </div>
- </div>
- {% endfor %}
- </div>
-{% endfor %}
-</div>
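
For reference, the template above iterates over a 'test_runs' context variable.
A hypothetical sketch of its shape, as built by report.py below; the id, name,
counts and test entries are invented for illustration:

    test_runs = [{
        'id': 0,
        'name': 'gcc @unix',
        'groups': {
            'Failures': {
                # (description, count) pairs rendered as a pie chart.
                'summary': [('Failed tests', 2), ('Unresolved tests', 1)],
                # (description, [(test, variant), ...]) pairs rendered as tables.
                'tests': [('Failed tests',
                           [('gcc.dg/unroll_1.c', '(test for excess errors)')])],
            },
        },
    }]
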
diff --git a/deprecated/automation/clients/report/dejagnu/report.py b/deprecated/automation/clients/report/dejagnu/report.py
deleted file mode 100644
index 191a5389..00000000
--- a/deprecated/automation/clients/report/dejagnu/report.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
-# Author: kbaclawski@google.com (Krystian Baclawski)
-#
-
-import logging
-import os.path
-
-RESULT_DESCRIPTION = {
- 'ERROR': 'DejaGNU errors',
- 'FAIL': 'Failed tests',
- 'NOTE': 'DejaGNU notices',
- 'PASS': 'Passed tests',
- 'UNRESOLVED': 'Unresolved tests',
- 'UNSUPPORTED': 'Unsupported tests',
- 'UNTESTED': 'Not executed tests',
- 'WARNING': 'DejaGNU warnings',
- 'XFAIL': 'Expected test failures',
- 'XPASS': 'Unexpectedly passed tests'
-}
-
-RESULT_GROUPS = {
- 'Successes': ['PASS', 'XFAIL'],
- 'Failures': ['FAIL', 'XPASS', 'UNRESOLVED'],
- 'Suppressed': ['!FAIL', '!XPASS', '!UNRESOLVED', '!ERROR'],
- 'Framework': ['UNTESTED', 'UNSUPPORTED', 'ERROR', 'WARNING', 'NOTE']
-}
-
-ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
-
-
-def _GetResultDescription(name):
- if name.startswith('!'):
- name = name[1:]
-
- try:
- return RESULT_DESCRIPTION[name]
- except KeyError:
- raise ValueError('Unknown result: "%s"' % name)
-
-
-def _PrepareSummary(res_types, summary):
-
- def GetResultCount(res_type):
- return summary.get(res_type, 0)
-
- return [(_GetResultDescription(rt), GetResultCount(rt)) for rt in res_types]
-
-
-def _PrepareTestList(res_types, tests):
-
- def GetTestsByResult(res_type):
- return [(test.name, test.variant or '')
- for test in sorted(tests) if test.result == res_type]
-
- return [(_GetResultDescription(rt), GetTestsByResult(rt))
- for rt in res_types if rt != 'PASS']
-
-
-def Generate(test_runs, manifests):
- """Generate HTML report from provided test runs.
-
- Args:
- test_runs: DejaGnuTestRun objects list.
- manifests: Manifest object list that will drive test result suppression.
-
- Returns:
- String to which the HTML report was rendered.
- """
- tmpl_args = []
-
- for test_run_id, test_run in enumerate(test_runs):
- logging.info('Generating report for: %s.', test_run)
-
- test_run.CleanUpTestResults()
- test_run.SuppressTestResults(manifests)
-
- # Generate summary and test list for each result group
- groups = {}
-
- for res_group, res_types in RESULT_GROUPS.items():
- summary_all = _PrepareSummary(res_types, test_run.summary)
- tests_all = _PrepareTestList(res_types, test_run.results)
-
- has_2nd = lambda tuple2: bool(tuple2[1])
- summary = filter(has_2nd, summary_all)
- tests = filter(has_2nd, tests_all)
-
- if summary or tests:
- groups[res_group] = {'summary': summary, 'tests': tests}
-
- tmpl_args.append({
- 'id': test_run_id,
- 'name': '%s @%s' % (test_run.tool, test_run.board),
- 'groups': groups
- })
-
- logging.info('Rendering report in HTML format.')
-
- try:
- from django import template
- from django.template import loader
- from django.conf import settings
- except ImportError:
- logging.error('Django framework not installed!')
- logging.error('Failed to generate report in HTML format!')
- return ''
-
- settings.configure(DEBUG=True,
- TEMPLATE_DEBUG=True,
- TEMPLATE_DIRS=(ROOT_PATH,))
-
- tmpl = loader.get_template('report.html')
- ctx = template.Context({'test_runs': tmpl_args})
-
- return tmpl.render(ctx)
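
A minimal Python 2 sketch of calling report.Generate() directly, mirroring
ReportCommand in main.py; it assumes Django is installed (otherwise Generate()
returns an empty string), PYTHONPATH is set as dejagnu.sh does, and the input
paths are hypothetical:

    from dejagnu import report
    from dejagnu.manifest import Manifest
    from dejagnu.summary import DejaGnuTestRun

    test_runs = [DejaGnuTestRun.FromFile('gcc.sum')]    # hypothetical path
    manifests = [Manifest.FromFile('gcc-unix.xfail')]   # hypothetical path

    html = report.Generate(test_runs, manifests)
    if html:
      with open('report.html', 'w') as html_file:
        html_file.write(html)
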
diff --git a/deprecated/automation/clients/report/dejagnu/summary.py b/deprecated/automation/clients/report/dejagnu/summary.py
deleted file mode 100644
index d573c691..00000000
--- a/deprecated/automation/clients/report/dejagnu/summary.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
-# Author: kbaclawski@google.com (Krystian Baclawski)
-#
-
-from collections import defaultdict
-from collections import namedtuple
-from datetime import datetime
-from fnmatch import fnmatch
-from itertools import groupby
-import logging
-import os.path
-import re
-
-
-class DejaGnuTestResult(namedtuple('Result', 'name variant result flaky')):
- """Stores the result of a single test case."""
-
- # avoid adding __dict__ to the class
- __slots__ = ()
-
- LINE_RE = re.compile(r'([A-Z]+):\s+([\w/+.-]+)(.*)')
-
- @classmethod
- def FromLine(cls, line):
- """Alternate constructor which takes a string and parses it."""
- try:
- attrs, line = line.split('|', 1)
-
- if attrs.strip() != 'flaky':
- return None
-
- line = line.strip()
- flaky = True
- except ValueError:
- flaky = False
-
- fields = cls.LINE_RE.match(line.strip())
-
- if fields:
- result, path, variant = fields.groups()
-
-      # Some of the tests are generated in the build directory and are issued
-      # from there. Because every test run is performed in a randomly named tmp
-      # directory, we need to remove the random part of the path.
- try:
- # assume that 2nd field is a test path
- path_parts = path.split('/')
-
- index = path_parts.index('testsuite')
- path = '/'.join(path_parts[index + 1:])
- except ValueError:
- path = '/'.join(path_parts)
-
- # Remove junk from test description.
- variant = variant.strip(', ')
-
-        substitutions = [
-            # Remove include paths - they contain the name of the tmp directory.
-            (r'-I\S+', ''),
-            # Compress runs of whitespace.
-            (r'\s+', ' ')
-        ]
-
- for pattern, replacement in substitutions:
- variant = re.sub(pattern, replacement, variant)
-
-      # Some tests separate the last path component with a space, so the actual
-      # file name ends up in the description instead of the path. Correct that.
- try:
- first, rest = variant.split(' ', 1)
- except ValueError:
- pass
- else:
- if first.endswith('.o'):
- path = os.path.join(path, first)
- variant = rest
-
-      # DejaGNU framework errors don't contain a path part at all, so the
-      # description part has to be reconstructed.
- if not any(os.path.basename(path).endswith('.%s' % suffix)
- for suffix in ['h', 'c', 'C', 'S', 'H', 'cc', 'i', 'o']):
- variant = '%s %s' % (path, variant)
- path = ''
-
-      # Some tests are picked up from the current directory (presumably DejaGNU
-      # generates some test files). Remove the "./" prefix for these files.
- if path.startswith('./'):
- path = path[2:]
-
- return cls(path, variant or '', result, flaky=flaky)
-
- def __str__(self):
- """Returns string representation of a test result."""
- if self.flaky:
- fmt = 'flaky | '
- else:
- fmt = ''
- fmt += '{2}: {0}'
- if self.variant:
- fmt += ' {1}'
- return fmt.format(*self)
-
-
-class DejaGnuTestRun(object):
- """Container for test results that were a part of single test run.
-
- The class stores also metadata related to the test run.
-
- Attributes:
- board: Name of DejaGNU board, which was used to run the tests.
- date: The date when the test run was started.
- target: Target triple.
- host: Host triple.
- tool: The tool that was tested (e.g. gcc, binutils, g++, etc.)
-    results: A set of DejaGnuTestResult objects.
- """
-
- __slots__ = ('board', 'date', 'target', 'host', 'tool', 'results')
-
- def __init__(self, **kwargs):
- assert all(name in self.__slots__ for name in kwargs)
-
- self.results = set()
- self.date = kwargs.get('date', datetime.now())
-
- for name in ('board', 'target', 'tool', 'host'):
- setattr(self, name, kwargs.get(name, 'unknown'))
-
- @classmethod
- def FromFile(cls, filename):
- """Alternate constructor - reads a DejaGNU output file."""
- test_run = cls()
- test_run.FromDejaGnuOutput(filename)
- test_run.CleanUpTestResults()
- return test_run
-
- @property
- def summary(self):
- """Returns a summary as {ResultType -> Count} dictionary."""
- summary = defaultdict(int)
-
- for r in self.results:
- summary[r.result] += 1
-
- return summary
-
- def _ParseBoard(self, fields):
- self.board = fields.group(1).strip()
-
- def _ParseDate(self, fields):
- self.date = datetime.strptime(fields.group(2).strip(), '%a %b %d %X %Y')
-
- def _ParseTarget(self, fields):
- self.target = fields.group(2).strip()
-
- def _ParseHost(self, fields):
- self.host = fields.group(2).strip()
-
- def _ParseTool(self, fields):
- self.tool = fields.group(1).strip()
-
- def FromDejaGnuOutput(self, filename):
- """Read in and parse DejaGNU output file."""
-
- logging.info('Reading "%s" DejaGNU output file.', filename)
-
- with open(filename, 'r') as report:
- lines = [line.strip() for line in report.readlines() if line.strip()]
-
- parsers = ((re.compile(r'Running target (.*)'), self._ParseBoard),
- (re.compile(r'Test Run By (.*) on (.*)'), self._ParseDate),
- (re.compile(r'=== (.*) tests ==='), self._ParseTool),
- (re.compile(r'Target(\s+)is (.*)'), self._ParseTarget),
- (re.compile(r'Host(\s+)is (.*)'), self._ParseHost))
-
- for line in lines:
- result = DejaGnuTestResult.FromLine(line)
-
- if result:
- self.results.add(result)
- else:
- for regexp, parser in parsers:
- fields = regexp.match(line)
- if fields:
- parser(fields)
- break
-
- logging.debug('DejaGNU output file parsed successfully.')
- logging.debug(self)
-
- def CleanUpTestResults(self):
- """Remove certain test results considered to be spurious.
-
-    1) A large number of tests reported as UNSUPPORTED are also marked as
-       UNRESOLVED. If that's the case, remove the latter result.
-    2) If a test is performed on compiler output and for some reason the
-       compiler fails, we don't want to report all the failures that depend
-       on it.
- """
- name_key = lambda v: v.name
- results_by_name = sorted(self.results, key=name_key)
-
- for name, res_iter in groupby(results_by_name, key=name_key):
- results = set(res_iter)
-
-      # If DejaGNU was unable to compile a test, it will create the following
-      # result:
- failed = DejaGnuTestResult(name, '(test for excess errors)', 'FAIL',
- False)
-
- # If a test compilation failed, remove all results that are dependent.
- if failed in results:
- dependants = set(filter(lambda r: r.result != 'FAIL', results))
-
- self.results -= dependants
-
- for res in dependants:
-          logging.info('Removed dependent result {%s}.', res)
-
- # Remove all UNRESOLVED results that were also marked as UNSUPPORTED.
- unresolved = [res._replace(result='UNRESOLVED')
- for res in results if res.result == 'UNSUPPORTED']
-
- for res in unresolved:
- if res in self.results:
- self.results.remove(res)
- logging.info('Removed {%s} duplicate.', res)
-
- def _IsApplicable(self, manifest):
- """Checks if test results need to be reconsidered based on the manifest."""
- check_list = [(self.tool, manifest.tool), (self.board, manifest.board)]
-
- return all(fnmatch(text, pattern) for text, pattern in check_list)
-
- def SuppressTestResults(self, manifests):
- """Suppresses all test results listed in manifests."""
-
- # Get a set of tests results that are going to be suppressed if they fail.
- manifest_results = set()
-
- for manifest in filter(self._IsApplicable, manifests):
- manifest_results |= set(manifest.results)
-
- suppressed_results = self.results & manifest_results
-
- for result in sorted(suppressed_results):
- logging.debug('Result suppressed for {%s}.', result)
-
- new_result = '!' + result.result
-
- # Mark result suppression as applied.
- manifest_results.remove(result)
-
- # Rewrite test result.
- self.results.remove(result)
- self.results.add(result._replace(result=new_result))
-
- for result in sorted(manifest_results):
- logging.warning('Result {%s} listed in manifest but not suppressed.',
- result)
-
- def __str__(self):
- return '{0}, {1} @{2} on {3}'.format(self.target, self.tool, self.board,
- self.date)
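
A minimal Python 2 sketch of the parsing entry points above; the summary line is
the example used in the manifest.py docstring and 'gcc.sum' is a hypothetical
path:

    from dejagnu.summary import DejaGnuTestResult, DejaGnuTestRun

    # Parse one summary line into a (name, variant, result, flaky) tuple.
    result = DejaGnuTestResult.FromLine(
        'UNRESOLVED: gcc.dg/unroll_1.c (test for excess errors)')
    print result.name, result.result  # gcc.dg/unroll_1.c UNRESOLVED

    # Parse a whole .sum file; FromFile() also calls CleanUpTestResults().
    test_run = DejaGnuTestRun.FromFile('gcc.sum')
    print test_run.summary            # {result -> count} dictionary
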
diff --git a/deprecated/automation/clients/report/validate_failures.py b/deprecated/automation/clients/report/validate_failures.py
deleted file mode 100755
index d8776ba5..00000000
--- a/deprecated/automation/clients/report/validate_failures.py
+++ /dev/null
@@ -1,239 +0,0 @@
-#!/usr/bin/python2
-
-# Script to compare testsuite failures against a list of known-to-fail
-# tests.
-
-# Contributed by Diego Novillo <dnovillo@google.com>
-# Overhaul by Krystian Baclawski <kbaclawski@google.com>
-#
-# Copyright (C) 2011 Free Software Foundation, Inc.
-#
-# This file is part of GCC.
-#
-# GCC is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
-#
-# GCC is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GCC; see the file COPYING. If not, write to
-# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
-# Boston, MA 02110-1301, USA.
-"""This script provides a coarser XFAILing mechanism that requires no
-detailed DejaGNU markings. This is useful in a variety of scenarios:
-
-- Development branches with many known failures waiting to be fixed.
-- Release branches with known failures that are not considered
- important for the particular release criteria used in that branch.
-
-The script must be executed from the toplevel build directory. When
-executed it will:
-
-1) Determine the target built: TARGET
-2) Determine the source directory: SRCDIR
-3) Look for a failure manifest file in
- <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
-4) Collect all the <tool>.sum files from the build tree.
-5) Produce a report stating:
- a) Failures expected in the manifest but not present in the build.
- b) Failures in the build not expected in the manifest.
-6) If all the build failures are expected in the manifest, it exits
- with exit code 0. Otherwise, it exits with error code 1.
-"""
-
-import optparse
-import logging
-import os
-import sys
-
-sys.path.append(os.path.dirname(os.path.abspath(__file__)))
-
-from dejagnu.manifest import Manifest
-from dejagnu.summary import DejaGnuTestResult
-from dejagnu.summary import DejaGnuTestRun
-
-# Pattern for naming manifest files. The first argument should be
-# the toplevel GCC source directory. The second argument is the
-# target triple used during the build.
-_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
-
-
-def GetMakefileVars(makefile_path):
- assert os.path.exists(makefile_path)
-
- with open(makefile_path) as lines:
- kvs = [line.split('=', 1) for line in lines if '=' in line]
-
- return dict((k.strip(), v.strip()) for k, v in kvs)
-
-
-def GetSumFiles(build_dir):
- summaries = []
-
- for root, _, filenames in os.walk(build_dir):
- summaries.extend([os.path.join(root, filename)
- for filename in filenames if filename.endswith('.sum')])
-
- return map(os.path.normpath, summaries)
-
-
-def ValidBuildDirectory(build_dir, target):
- mandatory_paths = [build_dir, os.path.join(build_dir, 'Makefile')]
-
- extra_paths = [os.path.join(build_dir, target),
- os.path.join(build_dir, 'build-%s' % target)]
-
- return (all(map(os.path.exists, mandatory_paths)) and
- any(map(os.path.exists, extra_paths)))
-
-
-def GetManifestPath(build_dir):
- makefile = GetMakefileVars(os.path.join(build_dir, 'Makefile'))
- srcdir = makefile['srcdir']
- target = makefile['target']
-
- if not ValidBuildDirectory(build_dir, target):
- target = makefile['target_alias']
-
- if not ValidBuildDirectory(build_dir, target):
- logging.error('%s is not a valid GCC top level build directory.', build_dir)
- sys.exit(1)
-
- logging.info('Discovered source directory: "%s"', srcdir)
- logging.info('Discovered build target: "%s"', target)
-
- return _MANIFEST_PATH_PATTERN % (srcdir, target)
-
-
-def CompareResults(manifest, actual):
- """Compare sets of results and return two lists:
- - List of results present in MANIFEST but missing from ACTUAL.
- - List of results present in ACTUAL but missing from MANIFEST.
- """
- # Report all the actual results not present in the manifest.
- actual_vs_manifest = actual - manifest
-
- # Filter out tests marked flaky.
- manifest_without_flaky_tests = set(filter(lambda result: not result.flaky,
- manifest))
-
-  # Similarly for all the tests in the manifest.
- manifest_vs_actual = manifest_without_flaky_tests - actual
-
- return actual_vs_manifest, manifest_vs_actual
-
-
-def LogResults(level, results):
- log_fun = getattr(logging, level)
-
- for num, result in enumerate(sorted(results), start=1):
- log_fun(' %d) %s', num, result)
-
-
-def CheckExpectedResults(manifest_path, build_dir):
- logging.info('Reading manifest file: "%s"', manifest_path)
-
- manifest = set(Manifest.FromFile(manifest_path))
-
- logging.info('Getting actual results from build directory: "%s"',
- os.path.realpath(build_dir))
-
- summaries = GetSumFiles(build_dir)
-
- actual = set()
-
- for summary in summaries:
- test_run = DejaGnuTestRun.FromFile(summary)
- failures = set(Manifest.FromDejaGnuTestRun(test_run))
- actual.update(failures)
-
- if manifest:
- logging.debug('Tests expected to fail:')
- LogResults('debug', manifest)
-
- if actual:
- logging.debug('Actual test failures:')
- LogResults('debug', actual)
-
- actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)
-
- if actual_vs_manifest:
- logging.info('Build results not in the manifest:')
- LogResults('info', actual_vs_manifest)
-
- if manifest_vs_actual:
- logging.info('Manifest results not present in the build:')
- LogResults('info', manifest_vs_actual)
-    logging.info('NOTE: This is not a failure! '
-                 'It just means that the manifest expected these tests to '
-                 'fail, but they worked in this configuration.')
-
- if actual_vs_manifest or manifest_vs_actual:
- sys.exit(1)
-
- logging.info('No unexpected failures.')
-
-
-def ProduceManifest(manifest_path, build_dir, overwrite):
- if os.path.exists(manifest_path) and not overwrite:
- logging.error('Manifest file "%s" already exists.', manifest_path)
- logging.error('Use --force to overwrite.')
- sys.exit(1)
-
- testruns = map(DejaGnuTestRun.FromFile, GetSumFiles(build_dir))
- manifests = map(Manifest.FromDejaGnuTestRun, testruns)
-
- with open(manifest_path, 'w') as manifest_file:
- manifest_strings = [manifest.Generate() for manifest in manifests]
- logging.info('Writing manifest to "%s".', manifest_path)
- manifest_file.write('\n'.join(manifest_strings))
-
-
-def Main(argv):
- parser = optparse.OptionParser(usage=__doc__)
- parser.add_option(
- '-b',
- '--build_dir',
- dest='build_dir',
- action='store',
- metavar='PATH',
- default=os.getcwd(),
- help='Build directory to check. (default: current directory)')
- parser.add_option('-m',
- '--manifest',
- dest='manifest',
- action='store_true',
- help='Produce the manifest for the current build.')
- parser.add_option(
- '-f',
- '--force',
- dest='force',
- action='store_true',
-      help=('Overwrite an existing manifest file if the user requested creating '
-            'a new one. (default: False)'))
- parser.add_option('-v',
- '--verbose',
- dest='verbose',
- action='store_true',
- help='Increase verbosity.')
- options, _ = parser.parse_args(argv[1:])
-
- if options.verbose:
- logging.root.setLevel(logging.DEBUG)
-
- manifest_path = GetManifestPath(options.build_dir)
-
- if options.manifest:
- ProduceManifest(manifest_path, options.build_dir, options.force)
- else:
- CheckExpectedResults(manifest_path, options.build_dir)
-
-
-if __name__ == '__main__':
- logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
- Main(sys.argv)
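
A minimal Python 2 sketch of the set comparison performed by CompareResults()
above, including how flaky manifest entries are ignored; all result lines here
are made up:

    from dejagnu.summary import DejaGnuTestResult

    manifest = set([
        DejaGnuTestResult.FromLine(
            'FAIL: gcc.dg/known_bad.c (test for excess errors)'),
        DejaGnuTestResult.FromLine(
            'flaky | FAIL: gcc.dg/sometimes.c (test for excess errors)'),
    ])
    actual = set([
        DejaGnuTestResult.FromLine(
            'FAIL: gcc.dg/new_regression.c (test for excess errors)'),
    ])

    # Failures seen in the build but not expected by the manifest.
    unexpected = actual - manifest
    # Expected failures that did not show up, ignoring flaky entries.
    missing = set(r for r in manifest if not r.flaky) - actual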