# -*- coding: utf-8 -*-
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""The experiment runner module."""
from __future__ import print_function

import getpass
import os
import shutil
import time

import lock_machine
import test_flag

from cros_utils import command_executer
from cros_utils import logger
from cros_utils.email_sender import EmailSender
from cros_utils.file_utils import FileUtils

import config
from experiment_status import ExperimentStatus
from results_cache import CacheConditions
from results_cache import ResultsCache
from results_report import HTMLResultsReport
from results_report import TextResultsReport
from results_report import JSONResultsReport
from schedv2 import Schedv2
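
# Illustrative usage sketch (not part of the original module): in practice
# crosperf builds an Experiment, wraps it in an ExperimentRunner, and calls
# Run(), roughly:
#
#   runner = ExperimentRunner(experiment, json_report=True,
#                             using_schedv2=True)
#   sys.exit(runner.Run())
#
# The surrounding setup (experiment factory, flag parsing) lives in
# crosperf.py and is assumed here.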


def _WriteJSONReportToFile(experiment, results_dir, json_report):
  """Writes a JSON report to a file in results_dir."""
  has_llvm = any('llvm' in l.compiler for l in experiment.labels)
  compiler_string = 'llvm' if has_llvm else 'gcc'
  board = experiment.labels[0].board
  filename = 'report_%s_%s_%s.%s.json' % (board, json_report.date,
                                          json_report.time.replace(
                                              ':', '.'), compiler_string)
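  # Example filename with illustrative values:
  #   report_samus_2019-06-01_10.30.15.llvm.json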
  fullname = os.path.join(results_dir, filename)
  report_text = json_report.GetReport()
  with open(fullname, 'w') as out_file:
    out_file.write(report_text)


class ExperimentRunner(object):
  """ExperimentRunner Class."""

  STATUS_TIME_DELAY = 30
  THREAD_MONITOR_DELAY = 2

  SUCCEEDED = 0
  HAS_FAILURE = 1
  ALL_FAILED = 2

  def __init__(self,
               experiment,
               json_report,
               using_schedv2=False,
               log=None,
               cmd_exec=None):
    self._experiment = experiment
    self.l = log or logger.GetLogger(experiment.log_dir)
    self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l)
    self._terminated = False
    self.json_report = json_report
    self.locked_machines = []
    if experiment.log_level != 'verbose':
      self.STATUS_TIME_DELAY = 10

    # Setting this to True will use crosperf sched v2 (feature in progress).
    self._using_schedv2 = using_schedv2

  def _GetMachineList(self):
    """Return a list of all requested machines.

    Create a list of all the requested machines, both global requests and
    label-specific requests, and return the list.
    """
    machines = self._experiment.remote
    # Each label's remote list must be a subset of experiment.remote.
    for l in self._experiment.labels:
      for r in l.remote:
        assert r in machines
    return machines

  def _UpdateMachineList(self, locked_machines):
    """Update machines lists to contain only locked machines.

    Go through all the lists of requested machines, both global and
    label-specific requests, and remove any machine that we were not
    able to lock.

    Args:
      locked_machines: A list of the machines we successfully locked.
    """
    # Iterate over copies: removing items from a list while iterating over it
    # can skip elements.
    for m in list(self._experiment.remote):
      if m not in locked_machines:
        self._experiment.remote.remove(m)

    for l in self._experiment.labels:
      for m in list(l.remote):
        if m not in locked_machines:
          l.remote.remove(m)

  def _GetMachineType(self, lock_mgr, machine):
    """Return the location of the given machine.

    Returns:
      The location of the machine: local or skylab
    """
    # We assume that lab machine names always start with chromeos*, while
    # local machines are specified by IP address.
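    # Illustrative examples (hostnames are made up):
    #   'chromeos2-row9-rack7-host4'  -> lab machine (checked against Skylab)
    #   '192.168.0.10'                -> local machine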
    if 'chromeos' in machine:
      if lock_mgr.CheckMachineInSkylab(machine):
        return 'skylab'
      else:
        raise RuntimeError('Lab machine not in Skylab.')
    return 'local'

  def _LockAllMachines(self, experiment):
    """Attempt to globally lock all of the machines requested for this run.

    This method tries to lock all machines requested for this crosperf run,
    to prevent any other crosperf runs from updating or using the machines
    while this experiment is running. Two locking modes are chosen
    automatically, depending on the machine:
      - Skylab machines: leased via the skylab lease-dut mechanism.
      - Local machines: locked via a file lock.
    """
    if test_flag.GetTestMode():
      self.locked_machines = self._GetMachineList()
      experiment.locked_machines = self.locked_machines
    else:
      experiment.lock_mgr = lock_machine.LockManager(
          self._GetMachineList(),
          '',
          experiment.labels[0].chromeos_root,
          experiment.locks_dir,
          log=self.l,
      )
      for m in experiment.lock_mgr.machines:
        machine_type = self._GetMachineType(experiment.lock_mgr, m)
        if machine_type == 'local':
          experiment.lock_mgr.AddMachineToLocal(m)
        elif machine_type == 'skylab':
          experiment.lock_mgr.AddMachineToSkylab(m)
      machine_states = experiment.lock_mgr.GetMachineStates('lock')
      experiment.lock_mgr.CheckMachineLocks(machine_states, 'lock')
      self.locked_machines = experiment.lock_mgr.UpdateMachines(True)
      experiment.locked_machines = self.locked_machines
      self._UpdateMachineList(self.locked_machines)
      experiment.machine_manager.RemoveNonLockedMachines(self.locked_machines)
      if not self.locked_machines:
        raise RuntimeError('Unable to lock any machines.')

  def _ClearCacheEntries(self, experiment):
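    # Remove the on-disk cache entry for every benchmark run so that results
    # are regenerated instead of being read back from the cache.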
    for br in experiment.benchmark_runs:
      cache = ResultsCache()
      cache.Init(br.label.chromeos_image, br.label.chromeos_root,
                 br.benchmark.test_name, br.iteration, br.test_args,
                 br.profiler_args, br.machine_manager, br.machine,
                 br.label.board, br.cache_conditions, br.logger(), br.log_level,
                 br.label, br.share_cache, br.benchmark.suite,
                 br.benchmark.show_all_results, br.benchmark.run_local,
                 br.benchmark.cwp_dso)
      cache_dir = cache.GetCacheDirForWrite()
      if os.path.exists(cache_dir):
        self.l.LogOutput('Removing cache dir: %s' % cache_dir)
        shutil.rmtree(cache_dir)

  def _Run(self, experiment):
    try:
      # We should not lease machines if tests are launched via `skylab
      # create-test`, because leasing a DUT in skylab creates a no-op task on
      # the DUT and any newly created test would just hang there.
      # TODO(zhizhouy): Need to check whether machine is ready or not before
      # assigning a test to it.
      if not experiment.skylab:
        self._LockAllMachines(experiment)
      # Calculate checksums of all available/locked machines, to ensure that
      # the same label uses the same machines for testing.
      experiment.SetCheckSums(forceSameImage=True)
      if self._using_schedv2:
        schedv2 = Schedv2(experiment)
        experiment.set_schedv2(schedv2)
      if CacheConditions.FALSE in experiment.cache_conditions:
        self._ClearCacheEntries(experiment)
      status = ExperimentStatus(experiment)
      experiment.Run()
      last_status_time = 0
      last_status_string = ''
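      # Poll for completion every THREAD_MONITOR_DELAY seconds; a full status
      # block is printed at most every STATUS_TIME_DELAY seconds, with dots
      # appended in between when not running at verbose log level.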
      try:
        if experiment.log_level != 'verbose':
          self.l.LogStartDots()
        while not experiment.IsComplete():
          if last_status_time + self.STATUS_TIME_DELAY < time.time():
            last_status_time = time.time()
            border = '=============================='
            if experiment.log_level == 'verbose':
              self.l.LogOutput(border)
              self.l.LogOutput(status.GetProgressString())
              self.l.LogOutput(status.GetStatusString())
              self.l.LogOutput(border)
            else:
              current_status_string = status.GetStatusString()
              if current_status_string != last_status_string:
                self.l.LogEndDots()
                self.l.LogOutput(border)
                self.l.LogOutput(current_status_string)
                self.l.LogOutput(border)
                last_status_string = current_status_string
              else:
                self.l.LogAppendDot()
          time.sleep(self.THREAD_MONITOR_DELAY)
      except KeyboardInterrupt:
        self._terminated = True
        self.l.LogError('Ctrl-c pressed. Cleaning up...')
        experiment.Terminate()
        raise
      except SystemExit:
        self._terminated = True
        self.l.LogError('Unexpected exit. Cleaning up...')
        experiment.Terminate()
        raise
    finally:
      experiment.Cleanup()

  def _PrintTable(self, experiment):
    self.l.LogOutput(TextResultsReport.FromExperiment(experiment).GetReport())

  def _Email(self, experiment):
    # Only email by default if a new run was completed.
    send_mail = False
    for benchmark_run in experiment.benchmark_runs:
      if not benchmark_run.cache_hit:
        send_mail = True
        break
    if ((not send_mail and not experiment.email_to) or
        config.GetConfig('no_email')):
      return

    label_names = []
    for label in experiment.labels:
      label_names.append(label.name)
    subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))

    text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
    text_report += ('\nResults are stored in %s.\n' %
                    experiment.results_directory)
    text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
    html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
    attachment = EmailSender.Attachment('report.html', html_report)
    # Copy the list so that appending the current user does not modify the
    # experiment's own email_to setting.
    email_to = list(experiment.email_to or [])
    email_to.append(getpass.getuser())
    EmailSender().SendEmail(
        email_to,
        subject,
        text_report,
        attachments=[attachment],
        msg_type='html')

  def _StoreResults(self, experiment):
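    """Write all result artifacts under experiment.results_directory.

    The artifacts are the experiment file (experiment.exp), top statistics
    (topstats.log), one directory per benchmark run, the HTML report
    (results.html), an optional JSON report, and the email message body
    (msg_body.html). Returns SUCCEEDED, HAS_FAILURE, or ALL_FAILED.
    """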
    if self._terminated:
      return self.ALL_FAILED

    results_directory = experiment.results_directory
    FileUtils().RmDir(results_directory)
    FileUtils().MkDirP(results_directory)
    self.l.LogOutput('Storing experiment file in %s.' % results_directory)
    experiment_file_path = os.path.join(results_directory, 'experiment.exp')
    FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)

    has_failure = False
    all_failed = True

    topstats_file = os.path.join(results_directory, 'topstats.log')
    self.l.LogOutput('Storing top statistics of each benchmark run into %s.' %
                     topstats_file)
    with open(topstats_file, 'w') as top_fd:
      for benchmark_run in experiment.benchmark_runs:
        if benchmark_run.result:
          # FIXME: Pylint has a bug suggesting the following change, which
          # should be fixed in pylint 2.0. Resolve this after pylint >= 2.0.
          # Bug: https://github.com/PyCQA/pylint/issues/1984
          # pylint: disable=simplifiable-if-statement
          if benchmark_run.result.retval:
            has_failure = True
          else:
            all_failed = False
          # Header with benchmark run name.
          top_fd.write('%s\n' % str(benchmark_run))
          # Formatted string with top statistics.
          top_fd.write(benchmark_run.result.FormatStringTopCommands())
          top_fd.write('\n\n')

    if all_failed:
      return self.ALL_FAILED

    self.l.LogOutput('Storing results of each benchmark run.')
    for benchmark_run in experiment.benchmark_runs:
      if benchmark_run.result:
        benchmark_run_name = ''.join(
            ch for ch in benchmark_run.name if ch.isalnum())
        benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
        if experiment.compress_results:
          benchmark_run.result.CompressResultsTo(benchmark_run_path)
        else:
          benchmark_run.result.CopyResultsTo(benchmark_run_path)
        benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)

    self.l.LogOutput('Storing results report in %s.' % results_directory)
    results_table_path = os.path.join(results_directory, 'results.html')
    report = HTMLResultsReport.FromExperiment(experiment).GetReport()
    if self.json_report:
      json_report = JSONResultsReport.FromExperiment(
          experiment, json_args={'indent': 2})
      _WriteJSONReportToFile(experiment, results_directory, json_report)

    FileUtils().WriteFile(results_table_path, report)

    self.l.LogOutput('Storing email message body in %s.' % results_directory)
    msg_file_path = os.path.join(results_directory, 'msg_body.html')
    text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
    text_report += ('\nResults are stored in %s.\n' %
                    experiment.results_directory)
    msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
    FileUtils().WriteFile(msg_file_path, msg_body)

    return self.SUCCEEDED if not has_failure else self.HAS_FAILURE

  def Run(self):
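    # Returns one of SUCCEEDED / HAS_FAILURE / ALL_FAILED (class constants
    # above), as produced by _StoreResults.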
    try:
      self._Run(self._experiment)
    finally:
      # Always print the report at the end of the run.
      self._PrintTable(self._experiment)
      ret = self._StoreResults(self._experiment)
      if ret != self.ALL_FAILED:
        self._Email(self._experiment)
    return ret


class MockExperimentRunner(ExperimentRunner):
  """Mocked ExperimentRunner for testing."""

  def __init__(self, experiment, json_report):
    super(MockExperimentRunner, self).__init__(experiment, json_report)

  def _Run(self, experiment):
    self.l.LogOutput("Would run the following experiment: '%s'." %
                     experiment.name)

  def _PrintTable(self, experiment):
    self.l.LogOutput('Would print the experiment table.')

  def _Email(self, experiment):
    self.l.LogOutput('Would send result email.')

  def _StoreResults(self, experiment):
    self.l.LogOutput('Would store the results.')
    # Match the base-class contract of returning a status code.
    return self.SUCCEEDED