path: root/clang_tidy
author     Elaina Guan <ziyig@google.com>  2019-05-29 14:14:48 -0700
committer  Elaina Guan <ziyig@google.com>  2019-05-30 20:21:29 +0000
commit     a2668a4ebe5409cd5c7add5ffbcdbab680bfb329 (patch)
tree       c045157cfb76bb2b2526b9f70b5504118eb0a73c /clang_tidy
parent     66b7798c751874fc8e8c354dab2d4f1476f5e5ae (diff)
download   toolchain-utils-a2668a4ebe5409cd5c7add5ffbcdbab680bfb329.tar.gz
Clang-Tidy: add tests, refactor long functions
Authors: Emma Vukelj, Elaina Guan

This CL is a collaboration between Emma and Elaina.
- We rename the files to replace '-' with '_' so that imports work, because
  Python does not easily support importing files with '-' in their names.
- We refactor the function 'emit_stats_by_project' because it does several
  things at once, and modularize it to make it easier to test.
- We add test cases for: initializing the global arrays; the functionality
  of each part of 'emit_stats_by_project'; classifying a specific warning;
  and counting the total number of matching lines for each severity.

BUG=None
TEST=Verify the correct functionality of specific functions and assert that
the existing warnings parser meets expectations.

Change-Id: Idd1c53e0fcbe6ccb632a0ae95a63bb6ac4c0bbfa
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1636377
Reviewed-by: Caroline Tice <cmtice@chromium.org>
Tested-by: Elaina Guan <ziyig@google.com>
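The rename rationale can be illustrated with a short sketch (illustrative
only, not part of the CL): a hyphen parses as a minus sign in Python, so a
module whose filename contains '-' cannot be imported with a plain import
statement, while the underscore spelling can.

    # import clang-tidy-warn           # SyntaxError: invalid syntax
    import clang_tidy_warn as ct_warn  # works after the rename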
Diffstat (limited to 'clang_tidy')
-rwxr-xr-x  clang_tidy/clang_tidy_parse_build_log.py (renamed from clang_tidy/clang-tidy-parse-build-log.py)    2
-rwxr-xr-x  clang_tidy/clang_tidy_warn.py (renamed from clang_tidy/clang-tidy-warn.py)                          77
-rwxr-xr-x  clang_tidy/clang_tidy_warn_test.py                                                                 228
3 files changed, 291 insertions, 16 deletions
diff --git a/clang_tidy/clang-tidy-parse-build-log.py b/clang_tidy/clang_tidy_parse_build_log.py
index d207483d..b288965e 100755
--- a/clang_tidy/clang-tidy-parse-build-log.py
+++ b/clang_tidy/clang_tidy_parse_build_log.py
@@ -36,7 +36,7 @@ def Main(argv):
output_dir = options.out_dir if options.out_dir else '/tmp/clang-tidy-output'
cwd = os.path.dirname(os.path.realpath(__file__))
- warn_script = os.path.join(cwd, 'clang-tidy-warn.py')
+ warn_script = os.path.join(cwd, 'clang_tidy_warn.py')
logfile = options.log_file
if not os.path.exists(logfile):
diff --git a/clang_tidy/clang-tidy-warn.py b/clang_tidy/clang_tidy_warn.py
index 66aea31d..ab489e9f 100755
--- a/clang_tidy/clang-tidy-warn.py
+++ b/clang_tidy/clang_tidy_warn.py
@@ -93,6 +93,7 @@ import re
import signal
import sys
+# TODO: move the parser code into a function
parser = argparse.ArgumentParser(description='Convert a build log into HTML')
parser.add_argument(
'--csvpath',
@@ -123,7 +124,11 @@ parser.add_argument(
help='Number of parallel processes to process warnings')
parser.add_argument(
dest='buildlog', metavar='build.log', help='Path to build.log file')
-args = parser.parse_args()
+
+if len(sys.argv) > 1:
+ args = parser.parse_args()
+else:
+ args = None
class Severity(object):
@@ -5680,34 +5685,53 @@ def sort_warnings():
i['members'] = sorted(set(i['members']))
-def emit_stats_by_project():
- """Dump a google chart table of warnings per project and severity."""
- # warnings[p][s] is number of warnings in project p of severity s.
+def create_warnings():
+  """Create the warnings dict of per-project, per-severity counts.
+
+  Returns a 2D dict where warnings[p][s] is the number of warnings
+  in project name p of severity level s.
+  """
+
warnings = {p: {s: 0 for s in Severity.range} for p in project_names}
for i in warn_patterns:
s = i['severity']
for p in i['projects']:
warnings[p][s] += i['projects'][p]
+ return warnings
- # total_by_project[p] is number of warnings in project p.
- total_by_project = {
- p: sum(warnings[p][s] for s in Severity.range) for p in project_names
- }
- # total_by_severity[s] is number of warnings of severity s.
- total_by_severity = {
- s: sum(warnings[p][s] for p in project_names) for s in Severity.range
- }
+def get_total_by_project(warnings):
+  """Return a dict mapping each project to its total number of warnings."""
+
+ return {p: sum(warnings[p][s] for s in Severity.range) for p in project_names}
+
+
+def get_total_by_severity(warnings):
+  """Return a dict mapping each severity to its total number of warnings."""
+
+ return {s: sum(warnings[p][s] for p in project_names) for s in Severity.range}
+
+
+def emit_table_header(total_by_severity):
+  """Return a list of HTML-formatted column headers for the severity stats."""
- # emit table header
stats_header = ['Project']
for s in Severity.range:
if total_by_severity[s]:
stats_header.append("<span style='background-color:{}'>{}</span>".format(
Severity.colors[s], Severity.column_headers[s]))
stats_header.append('TOTAL')
+ return stats_header
+
+
+def emit_row_counts_per_project(warnings, total_by_project, total_by_severity):
+  """Compute the total warnings and a row of stats for each project.
+
+  Returns total_all_projects, the total number of warnings over all
+  projects, and stats_rows, a 2D list where each row is [project name,
+  <severity counts>, total warnings for that project].
+  """
- # emit a row of warning counts per project, skip no-warning projects
total_all_projects = 0
stats_rows = []
for p in project_names:
@@ -5719,8 +5743,17 @@ def emit_stats_by_project():
one_row.append(total_by_project[p])
stats_rows.append(one_row)
total_all_projects += total_by_project[p]
+ return total_all_projects, stats_rows
+
+
+def emit_row_counts_per_severity(total_by_severity, stats_header, stats_rows,
+ total_all_projects):
+  """Emit the stats_header and stats_rows built above.
+
+  stats_header and stats_rows are as produced by emit_table_header and
+  emit_row_counts_per_project above.
+  """
- # emit a row of warning counts per severity
total_all_severities = 0
one_row = ['<b>TOTAL</b>']
for s in Severity.range:
@@ -5736,8 +5769,22 @@ def emit_stats_by_project():
print('</script>')
+def emit_stats_by_project():
+ """Dump a google chart table of warnings per project and severity."""
+
+ warnings = create_warnings()
+ total_by_project = get_total_by_project(warnings)
+ total_by_severity = get_total_by_severity(warnings)
+ stats_header = emit_table_header(total_by_severity)
+ total_all_projects, stats_rows = \
+ emit_row_counts_per_project(warnings, total_by_project, total_by_severity)
+ emit_row_counts_per_severity(total_by_severity, stats_header, stats_rows,
+ total_all_projects)
+
+
def dump_stats():
"""Dump some stats about total number of warnings and such."""
+
known = 0
skipped = 0
unknown = 0
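To make the decomposition above concrete, here is a minimal, self-contained
sketch of the same aggregation logic (toy data only; the real functions read
project_names, warn_patterns, and Severity from module globals):

    severities = range(3)                # stand-in for Severity.range
    projects = ['ProjectA', 'ProjectB']  # stand-in for project_names
    patterns = [{'severity': 1, 'projects': {'ProjectA': 2, 'ProjectB': 3}}]

    # mirrors create_warnings(): warnings[p][s] counts warnings per project
    warnings = {p: {s: 0 for s in severities} for p in projects}
    for pat in patterns:
      for p, n in pat['projects'].items():
        warnings[p][pat['severity']] += n

    # mirrors get_total_by_project() and get_total_by_severity()
    total_by_project = {
        p: sum(warnings[p][s] for s in severities) for p in projects
    }
    total_by_severity = {
        s: sum(warnings[p][s] for p in projects) for s in severities
    }
    print(total_by_project)   # {'ProjectA': 2, 'ProjectB': 3}
    print(total_by_severity)  # {0: 0, 1: 5, 2: 0}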
diff --git a/clang_tidy/clang_tidy_warn_test.py b/clang_tidy/clang_tidy_warn_test.py
new file mode 100755
index 00000000..86937bc8
--- /dev/null
+++ b/clang_tidy/clang_tidy_warn_test.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+# Copyright 2019 Google Inc. All Rights Reserved.
+
+"""Clang_Tidy_Warn tests
+
+This is the test file for clang_tidy_warn.py.
+It starts with unit testing of individual functions, and then tests the
+functionality of the whole file by using artificial log files, and then tests
+on the real world examples.
+"""
+
+from __future__ import print_function
+
+import StringIO
+import unittest
+from contextlib import contextmanager
+from csv import writer
+
+import clang_tidy_warn as ct_warn
+
+
+def get_test_vars():
+  """Create artificial warn_patterns and project_names for testing."""
+
+ project_names = ['ProjectA', 'ProjectB']
+ warn_patterns = [{
+ 'severity': 0,
+ 'projects': {
+ 'ProjectA': 2,
+ 'ProjectB': 0
+ }
+ }, {
+ 'severity': 1,
+ 'projects': {
+ 'ProjectA': 1,
+ 'ProjectB': 3
+ }
+ }, {
+ 'severity': 2,
+ 'projects': {
+ 'ProjectA': 0,
+ 'ProjectB': 6
+ }
+ }]
+ for s in range(9):
+ if s >= len(warn_patterns):
+ warn_patterns.append({'severity': s, 'projects': {}})
+ warn_patterns[s]['members'] = []
+ warn_patterns[s]['description'] = ""
+
+ expected_warnings = {
+ 'ProjectA': {
+ 0: 2,
+ 1: 1,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0
+ },
+ 'ProjectB': {
+ 0: 0,
+ 1: 3,
+ 2: 6,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0
+ }
+ }
+ expected_total_by_project = {'ProjectA': 3, 'ProjectB': 9}
+ expected_total_by_severity = {
+ 0: 2,
+ 1: 4,
+ 2: 6,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0
+ }
+ expected_total_all_projects = 12
+ expected_stats_rows = [['ProjectA', 2, 1, 0, 3], ['ProjectB', 0, 3, 6, 9]]
+ expected_count_severity_total = {}
+ for s, warn_by_severity in enumerate(warn_patterns):
+ # for each project, the number of desired warnings
+ for project, count in warn_by_severity['projects'].items():
+ warn_by_severity['members'] += [project] * count
+ expected_count_severity_total[s] = len(warn_by_severity['members'])
+
+ res = {
+ 'project_names': project_names,
+ 'warn_patterns': warn_patterns,
+ 'warnings': expected_warnings,
+ 'total_by_project': expected_total_by_project,
+ 'total_by_severity': expected_total_by_severity,
+ 'total_all_projects': expected_total_all_projects,
+ 'stats_rows': expected_stats_rows,
+ 'count_severity_total': expected_count_severity_total
+ }
+
+ return res
+
+
+def put_test_vars():
+  """Install the artificial test variables; return the saved originals."""
+  # save the old values so the caller can restore them after the test
+  actual_warn_patterns = ct_warn.warn_patterns
+  actual_project_names = ct_warn.project_names
+
+  # run the test with the specified inputs
+ expected = get_test_vars()
+
+ ct_warn.warn_patterns = expected['warn_patterns']
+ ct_warn.project_names = expected['project_names']
+ return actual_warn_patterns, actual_project_names
+
+
+def remove_test_vars(actual_warn_patterns, actual_project_names):
+ # reset to actual vals
+ ct_warn.project_names = actual_project_names
+ ct_warn.warn_patterns = actual_warn_patterns
+
+
+def setup_classify():
+  """Run the prerequisites for calling classify_one_warning.
+
+  The module requires an explicit call to compile_patterns to create the
+  compiled patterns; this happens outside of the methods under test, so
+  explicit setup is necessary.
+  """
+
+ ct_warn.compile_patterns()
+
+
+@contextmanager
+def test_vars():
+  """Context manager that installs, then restores, the test variables."""
+ actual_warn_patterns, actual_project_names = put_test_vars()
+ try:
+ yield
+ finally:
+ remove_test_vars(actual_warn_patterns, actual_project_names)
+
+
+class Tests(unittest.TestCase):
+ """Test Class for Clang-Tidy"""
+
+ def test_initialize_arrays(self):
+ names, patterns = ct_warn.initialize_arrays()
+ self.assertGreater(len(names), 0)
+ self.assertGreater(len(patterns), 0)
+
+ # check that warn_patterns was modified in-place properly
+ for w in ct_warn.warn_patterns:
+ self.assertIn('members', w)
+ self.assertIn('option', w)
+ self.assertIn('projects', w)
+ self.assertTrue(isinstance(w['projects'], dict))
+
+ def test_create_warnings(self):
+ with test_vars():
+ expected = get_test_vars()
+ self.assertEqual(expected['warnings'], ct_warn.create_warnings())
+
+ def test_get_total_by_project(self):
+ with test_vars():
+ expected = get_test_vars()
+ total_by_project = ct_warn.get_total_by_project(expected['warnings'])
+ self.assertEqual(total_by_project, expected['total_by_project'])
+
+ def test_get_total_by_severity(self):
+ with test_vars():
+ expected = get_test_vars()
+ total_by_severity = ct_warn.get_total_by_severity(expected['warnings'])
+ self.assertEqual(total_by_severity, expected['total_by_severity'])
+
+ def test_emit_row_counts_per_project(self):
+ with test_vars():
+ expected = get_test_vars()
+ total_all_projects, stats_rows = \
+ ct_warn.emit_row_counts_per_project(expected['warnings'],
+ expected['total_by_project'],
+ expected['total_by_severity'])
+ self.assertEqual(total_all_projects, expected['total_all_projects'])
+ self.assertEqual(stats_rows, expected['stats_rows'])
+
+ def test_classify_one_warning(self):
+ setup_classify()
+ line = ("external/libese/apps/weaver/weaver.c:340:17: "
+ "warning: unused variable 'READ_SUCCESS' [-Wunused-variable]")
+ results = []
+
+ # find expected result
+ expected_index = -1
+
+ for i, w in enumerate(ct_warn.warn_patterns):
+      if (w['description'] ==
+          "Unused function, variable, label, comparison, etc."):
+        expected_index = i
+        break  # we expect to find a single index
+ assert expected_index != -1
+
+ # check that the expected result is in index column of actual results
+ ct_warn.classify_one_warning(line, results)
+ self.assertIn(expected_index, [result[1] for result in results])
+
+ def test_count_severity(self):
+ with test_vars():
+ expected = get_test_vars()
+
+      # NOTE: a csv writer is needed to call the function, but we are not
+      # testing that; we only test that count_severity returns the right total
+ csvfile = StringIO.StringIO()
+ csvwriter = writer(csvfile)
+      for severity, count in expected['count_severity_total'].items():
+        count_severity_total = ct_warn.count_severity(csvwriter, severity,
+                                                      "testing")
+        self.assertEqual(count_severity_total, count)
+
+
+if __name__ == '__main__':
+ unittest.main()
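As a usage note (a sketch, assuming the test file's directory is on
sys.path): the guard added above in clang_tidy_warn.py, which sets args to
None when no command-line arguments are given, is what makes the module
importable, so a single test can also be run programmatically instead of
via unittest.main():

    import unittest

    import clang_tidy_warn_test

    suite = unittest.TestLoader().loadTestsFromName(
        'Tests.test_get_total_by_project', clang_tidy_warn_test)
    unittest.TextTestRunner(verbosity=2).run(suite)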