path: root/run_tests_for.py
blob: 807a218d35ec169bf7de058c3b37d62dcb70b34e
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs tests for the given input files.

Tries its best to autodetect all tests based on path name without being *too*
aggressive.

In short, there's a small set of directories in which, if you make any change,
all of the tests in those directories get run. Additionally, if you change a
Python file named foo.py, it'll run foo_test.py, foo_unittest.py, test_foo.py,
or unittest_foo.py, if any of those exist.

All tests are run in parallel.
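
Example invocation (the paths here are purely illustrative):
  ./run_tests_for.py cros_utils/foo.py crosperf/bar.py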
"""

# NOTE: An alternative mentioned on the initial CL for this
# https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1516414
# is pytest. It looks like that brings some complexity (and makes use outside
# of the chroot a bit more obnoxious?), but might be worth exploring if this
# starts to grow quite complex on its own.

from __future__ import print_function

import argparse
import collections
import contextlib
import multiprocessing.pool
import os
import pipes
import subprocess
import sys

TestSpec = collections.namedtuple('TestSpec', ['directory', 'command'])

# Set of Python scripts that are not tests, listed by path relative to
# toolchain-utils.
non_test_py_files = {
    'debug_info_test/debug_info_test.py',
}


def _make_relative_to_toolchain_utils(toolchain_utils, path):
  """Cleans & makes a path relative to toolchain_utils.

  Raises if that path isn't under toolchain_utils.
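  (os.path.relpath yields a result with a leading '../' in that case, which
  is what the check below looks for.)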
  """
  # abspath has the nice property that it removes any markers like './'.
  as_abs = os.path.abspath(path)
  result = os.path.relpath(as_abs, start=toolchain_utils)

  if result.startswith('../'):
    raise ValueError('Non toolchain-utils directory found: %s' % result)
  return result


def _filter_python_tests(test_files, toolchain_utils):
  """Returns all files that are real python tests."""
  python_tests = []
  for test_file in test_files:
    rel_path = _make_relative_to_toolchain_utils(toolchain_utils, test_file)
    if rel_path not in non_test_py_files:
      python_tests.append(_python_test_to_spec(test_file))
    else:
      print('## %s ... NON_TEST_PY_FILE' % rel_path)
  return python_tests


def _gather_python_tests_in(rel_subdir, toolchain_utils):
  """Returns all files that appear to be Python tests in a given directory."""
  subdir = os.path.join(toolchain_utils, rel_subdir)
  test_files = (
      os.path.join(subdir, file_name) for file_name in os.listdir(subdir)
      if file_name.endswith('_test.py') or file_name.endswith('_unittest.py'))
  return _filter_python_tests(test_files, toolchain_utils)


def _run_test(test_spec):
  """Runs a test."""
  p = subprocess.Popen(test_spec.command,
                       cwd=test_spec.directory,
                       stdin=subprocess.DEVNULL,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT,
                       encoding='utf-8')
  stdout, _ = p.communicate()
  exit_code = p.wait()
  return exit_code, stdout


def _python_test_to_spec(test_file):
  """Given a .py file, convert it to a TestSpec."""
  # Run tests in the directory they exist in, since some of them are sensitive
  # to that.
  test_directory = os.path.dirname(os.path.abspath(test_file))
  file_name = os.path.basename(test_file)

  if os.access(test_file, os.X_OK):
    command = ['./' + file_name]
  else:
    # Assume the user wanted py3.
    command = ['python3', file_name]

  return TestSpec(directory=test_directory, command=command)


def _autodetect_python_tests_for(test_file, toolchain_utils):
  """Given a test file, detect if there may be related tests."""
  if not test_file.endswith('.py'):
    return []

  test_prefixes = ('test_', 'unittest_')
  test_suffixes = ('_test.py', '_unittest.py')

  test_file_name = os.path.basename(test_file)
  test_file_is_a_test = (
      any(test_file_name.startswith(x) for x in test_prefixes)
      or any(test_file_name.endswith(x) for x in test_suffixes))

  if test_file_is_a_test:
    test_files = [test_file]
  else:
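    # For a non-test file foo.py, look for sibling tests named foo_test.py,
    # foo_unittest.py, test_foo.py, or unittest_foo.py.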
    test_file_no_suffix = test_file[:-3]
    candidates = [test_file_no_suffix + x for x in test_suffixes]

    dir_name = os.path.dirname(test_file)
    candidates += (os.path.join(dir_name, x + test_file_name)
                   for x in test_prefixes)
    test_files = (x for x in candidates if os.path.exists(x))
  return _filter_python_tests(test_files, toolchain_utils)


def _run_test_scripts(all_tests, show_successful_output=False):
  """Runs a list of TestSpecs. Returns whether all of them succeeded."""
  with contextlib.closing(multiprocessing.pool.ThreadPool()) as pool:
    results = [pool.apply_async(_run_test, (test, )) for test in all_tests]

  failures = []
  for i, (test, future) in enumerate(zip(all_tests, results)):
    # Add a bit more spacing between outputs.
    if show_successful_output and i:
      print('\n')

    pretty_test = ' '.join(pipes.quote(test_arg) for test_arg in test.command)
    pretty_directory = os.path.relpath(test.directory)
    if pretty_directory == '.':
      test_message = pretty_test
    else:
      test_message = '%s in %s/' % (pretty_test, pretty_directory)

    print('## %s ... ' % test_message, end='')
    # Make sure the user sees which test is running.
    sys.stdout.flush()

    exit_code, stdout = future.get()
    if not exit_code:
      print('PASS')
    else:
      print('FAIL')
      failures.append(pretty_test)

    if show_successful_output or exit_code:
      sys.stdout.write(stdout)

  if failures:
    word = 'tests' if len(failures) > 1 else 'test'
    print('%d %s failed: %s' % (len(failures), word, failures))

  return not failures


def _compress_list(l):
  """Removes consecutive duplicate elements from |l|.

  >>> _compress_list([])
  []
  >>> _compress_list([1, 1])
  [1]
  >>> _compress_list([1, 2, 1])
  [1, 2, 1]
  """
  result = []
  for e in l:
    if result and result[-1] == e:
      continue
    result.append(e)
  return result


def _fix_python_path(toolchain_utils):
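  """Prepends |toolchain_utils| to PYTHONPATH, so tests can import from it."""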
  pypath = os.environ.get('PYTHONPATH', '')
  if pypath:
    pypath = ':' + pypath
  os.environ['PYTHONPATH'] = toolchain_utils + pypath


def _find_forced_subdir_python_tests(test_paths, toolchain_utils):
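  """Returns specs for all tests in directories that force full test runs."""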
  assert all(os.path.isabs(path) for path in test_paths)

  # Directories under toolchain_utils for which any change will cause all tests
  # in that directory to be rerun. Includes changes in subdirectories.
  all_dirs = {
      'crosperf',
      'cros_utils',
  }

  relative_paths = [
      _make_relative_to_toolchain_utils(toolchain_utils, path)
      for path in test_paths
  ]

  gather_test_dirs = set()

  for path in relative_paths:
    top_level_dir = path.split('/')[0]
    if top_level_dir in all_dirs:
      gather_test_dirs.add(top_level_dir)

  results = []
  for d in sorted(gather_test_dirs):
    results += _gather_python_tests_in(d, toolchain_utils)
  return results


def _find_go_tests(test_paths):
  """Returns TestSpecs for the go folders of the given files"""
  assert all(os.path.isabs(path) for path in test_paths)

  dirs_with_gofiles = set(
      os.path.dirname(p) for p in test_paths if p.endswith('.go'))
  command = ['go', 'test', '-vet=all']
  # Note: We sort the directories to be deterministic.
  return [
      TestSpec(directory=d, command=command) for d in sorted(dirs_with_gofiles)
  ]


def main(argv):
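  """Runs tests for the files in |argv|; returns 0 on success, 1 otherwise."""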
  default_toolchain_utils = os.path.abspath(os.path.dirname(__file__))

  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--show_all_output',
                      action='store_true',
                      help='show stdout of successful tests')
  parser.add_argument('--toolchain_utils',
                      default=default_toolchain_utils,
                      help='directory of toolchain-utils. Often auto-detected')
  parser.add_argument('file',
                      nargs='*',
                      help='a file that we should run tests for')
  args = parser.parse_args(argv)

  modified_files = [os.path.abspath(f) for f in args.file]
  show_all_output = args.show_all_output
  toolchain_utils = args.toolchain_utils

  if not modified_files:
    print('No files given. Exiting.')
    return 0

  _fix_python_path(toolchain_utils)

  tests_to_run = _find_forced_subdir_python_tests(modified_files,
                                                  toolchain_utils)
  for f in modified_files:
    tests_to_run += _autodetect_python_tests_for(f, toolchain_utils)
  tests_to_run += _find_go_tests(modified_files)

  # TestSpecs have lists, so we can't use a set. We'd likely want to keep them
  # sorted for determinism anyway.
  tests_to_run.sort()
  tests_to_run = _compress_list(tests_to_run)

  success = _run_test_scripts(tests_to_run, show_all_output)
  return 0 if success else 1


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))