crosperf/experiment_status.py
# -*- coding: utf-8 -*-
# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""The class to show the banner."""


import collections
import datetime
import time


class ExperimentStatus(object):
    """The status class."""

    def __init__(self, experiment):
        self.experiment = experiment
        self.num_total = len(self.experiment.benchmark_runs)
        self.completed = 0
        self.new_job_start_time = time.time()
        self.log_level = experiment.log_level

    def _GetProgressBar(self, num_complete, num_total):
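        """Return a textual progress bar for num_complete out of num_total.

        For example (hypothetical values), _GetProgressBar(25, 50) produces
        "Done: 50%" followed by a 50-character bar, half '>' and half spaces.
        """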
        ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
        bar_length = 50
        done_char = ">"
        undone_char = " "
        num_complete_chars = bar_length * num_complete // num_total
        num_undone_chars = bar_length - num_complete_chars
        ret += " [%s%s]" % (
            num_complete_chars * done_char,
            num_undone_chars * undone_char,
        )
        return ret

    def GetProgressString(self):
        """Get the elapsed_time, ETA."""
        current_time = time.time()
        if self.experiment.start_time:
            elapsed_time = current_time - self.experiment.start_time
        else:
            elapsed_time = 0
        try:
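            # A change in the completed count means a job just finished;
            # restart the timer that tracks time spent on the current job.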
            if self.completed != self.experiment.num_complete:
                self.completed = self.experiment.num_complete
                self.new_job_start_time = current_time
            time_completed_jobs = elapsed_time - (
                current_time - self.new_job_start_time
            )
            # eta is calculated as:
            #   ETA = (num_jobs_not_yet_started * estimated_time_per_job)
            #          + time_left_for_current_job
            #
            #   where
            #        num_jobs_not_yet_started = (num_total - num_complete - 1)
            #
            #        estimated_time_per_job = time_completed_jobs / num_run_complete
            #
            #        time_left_for_current_job = estimated_time_per_job -
            #                                    time_spent_so_far_on_current_job
            #
            #  The biggest problem with this calculation is its assumption that
            #  all jobs have roughly the same running time (blatantly false!).
            #
            #  ETA can come out negative if the time spent on the current job is
            #  greater than the estimated time per job (e.g. you're running the
            #  first long job, after a series of short jobs).  For now, if that
            #  happens, we set the ETA to "Unknown."
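            #
            #  Worked example (hypothetical numbers): with num_total = 10,
            #  num_complete = num_run_complete = 4, time_completed_jobs =
            #  400s (100s per completed job), and 30s spent so far on the
            #  current job, ETA = (10 - 4 - 1) * 100 + (100 - 30) = 570s.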
            #
            eta_seconds = float(
                self.num_total - self.experiment.num_complete - 1
            ) * time_completed_jobs / self.experiment.num_run_complete + (
                time_completed_jobs / self.experiment.num_run_complete
                - (current_time - self.new_job_start_time)
            )

            eta_seconds = int(eta_seconds)
            if eta_seconds > 0:
                eta = datetime.timedelta(seconds=eta_seconds)
            else:
                eta = "Unknown"
        except ZeroDivisionError:
            eta = "Unknown"
        strings = []
        strings.append(
            "Current time: %s Elapsed: %s ETA: %s"
            % (
                datetime.datetime.now(),
                datetime.timedelta(seconds=int(elapsed_time)),
                eta,
            )
        )
        strings.append(
            self._GetProgressBar(self.experiment.num_complete, self.num_total)
        )
        return "\n".join(strings)

    def GetStatusString(self):
        """Get the status string of all the benchmark_runs."""
        status_bins = collections.defaultdict(list)
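        # Bin each run under its most recent timeline event (e.g. "RUNNING").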
        for benchmark_run in self.experiment.benchmark_runs:
            status_bins[benchmark_run.timeline.GetLastEvent()].append(
                benchmark_run
            )

        status_strings = []
        for key, val in status_bins.items():
            if key == "RUNNING":
                get_description = self._GetNamesAndIterations
            else:
                get_description = self._GetCompactNamesAndIterations
            status_strings.append("%s: %s" % (key, get_description(val)))

        thread_status = ""
        thread_status_format = "Thread Status: \n{}\n"
        if (
            self.experiment.schedv2() is None
            and self.experiment.log_level == "verbose"
        ):
            # Add the machine manager status.
            thread_status = thread_status_format.format(
                self.experiment.machine_manager.AsString()
            )
        elif self.experiment.schedv2():
            # In schedv2 mode, we always print out thread status.
            thread_status = thread_status_format.format(
                self.experiment.schedv2().threads_status_as_string()
            )

        result = "{}{}".format(thread_status, "\n".join(status_strings))

        return result

    def _GetNamesAndIterations(self, benchmark_runs):
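        """List each run's name and the time since its last timeline event.

        Produces e.g. " 2 ('octane' 0:01:23, 'kraken' 0:00:10)"; the
        benchmark names here are illustrative.
        """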
        strings = []
        t = time.time()
        for benchmark_run in benchmark_runs:
            t_last = benchmark_run.timeline.GetLastEventTime()
            elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
            strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
        return " %s (%s)" % (len(strings), ", ".join(strings))

    def _GetCompactNamesAndIterations(self, benchmark_runs):
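        """Group runs by label, then by benchmark, listing iteration numbers.

        Produces e.g. " 3 \n  image1: octane [1,2], kraken [1]\n"; the
        label and benchmark names here are illustrative.
        """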
        grouped_benchmarks = collections.defaultdict(list)
        for benchmark_run in benchmark_runs:
            grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)

        output_segs = []
        for label_name, label_runs in grouped_benchmarks.items():
            strings = []
            benchmark_iterations = collections.defaultdict(list)
            for benchmark_run in label_runs:
                assert benchmark_run.label.name == label_name
                benchmark_name = benchmark_run.benchmark.name
                benchmark_iterations[benchmark_name].append(
                    benchmark_run.iteration
                )
            for key, val in benchmark_iterations.items():
                val.sort()
                iterations = ",".join(str(v) for v in val)
                strings.append("{} [{}]".format(key, iterations))
            output_segs.append(
                "  " + label_name + ": " + ", ".join(strings) + "\n"
            )

        return " %s \n%s" % (len(benchmark_runs), "".join(output_segs))