| author | Sadaf Ebrahimi <sadafebrahimi@google.com> | 2022-11-11 23:35:38 +0000 |
|---|---|---|
| committer | Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com> | 2022-11-11 23:35:38 +0000 |
| commit | 40214b48188358a80b7478bfff21d4814dd9177c (patch) | |
| tree | 77dc031614745bb406dbd90cea9a082a1b5cdd54 /crosperf/experiment_status.py | |
| parent | a51582ad9cb50ec284f4718765bd5d31fa0069d4 (diff) | |
| parent | 584b8e46d146a2bcfeffd64448a2d8e92904168d (diff) | |
| download | toolchain-utils-40214b48188358a80b7478bfff21d4814dd9177c.tar.gz | |
Upgrade toolchain-utils to 2c474af4f370b143032144aff1ff1985f789e20f am: 8b320f7173 am: d444309511 am: 584b8e46d1

Refs containing this commit: android-14.0.0_r45 android-14.0.0_r44 android-14.0.0_r43 android-14.0.0_r42 android-14.0.0_r41 android-14.0.0_r40 android-14.0.0_r39 android-14.0.0_r38 android-14.0.0_r27 android-14.0.0_r26 android-14.0.0_r25 android-14.0.0_r24 android-14.0.0_r23 android-14.0.0_r22 android-14.0.0_r21 android-14.0.0_r20 android-14.0.0_r19 android-14.0.0_r18 android-14.0.0_r17 android-14.0.0_r16 aml_rkp_341510000 aml_rkp_341311000 aml_rkp_341114000 aml_rkp_341015010 aml_rkp_341012000 aml_hef_341717050 aml_hef_341613000 aml_hef_341512030 aml_hef_341415040 aml_hef_341311010 aml_hef_341114030 aml_cfg_341510000 android14-qpr1-s2-release android14-qpr1-release android14-mainline-healthfitness-release android14-dev android14-d2-s5-release android14-d2-s4-release android14-d2-s3-release android14-d2-s2-release android14-d2-s1-release android14-d2-release
Original change: https://android-review.googlesource.com/c/platform/external/toolchain-utils/+/2292563
Change-Id: Ida212764c332af8ba8ab8aaa62a7d2a007e3314e
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
Diffstat (limited to 'crosperf/experiment_status.py')
-rw-r--r-- | crosperf/experiment_status.py | 290 |
1 file changed, 154 insertions(+), 136 deletions(-)
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 2ac47c74..fa6b1eec 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,12 +1,10 @@
 # -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """The class to show the banner."""
 
-from __future__ import division
-from __future__ import print_function
 
 import collections
 import datetime
@@ -14,136 +12,156 @@ import time
 
 
 class ExperimentStatus(object):
-  """The status class."""
-
-  def __init__(self, experiment):
-    self.experiment = experiment
-    self.num_total = len(self.experiment.benchmark_runs)
-    self.completed = 0
-    self.new_job_start_time = time.time()
-    self.log_level = experiment.log_level
-
-  def _GetProgressBar(self, num_complete, num_total):
-    ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
-    bar_length = 50
-    done_char = '>'
-    undone_char = ' '
-    num_complete_chars = bar_length * num_complete // num_total
-    num_undone_chars = bar_length - num_complete_chars
-    ret += ' [%s%s]' % (num_complete_chars * done_char,
-                        num_undone_chars * undone_char)
-    return ret
-
-  def GetProgressString(self):
-    """Get the elapsed_time, ETA."""
-    current_time = time.time()
-    if self.experiment.start_time:
-      elapsed_time = current_time - self.experiment.start_time
-    else:
-      elapsed_time = 0
-    try:
-      if self.completed != self.experiment.num_complete:
-        self.completed = self.experiment.num_complete
-        self.new_job_start_time = current_time
-      time_completed_jobs = (
-          elapsed_time - (current_time - self.new_job_start_time))
-      # eta is calculated as:
-      #   ETA = (num_jobs_not_yet_started * estimated_time_per_job)
-      #          + time_left_for_current_job
-      #
-      #   where
-      #      num_jobs_not_yet_started = (num_total - num_complete - 1)
-      #
-      #      estimated_time_per_job = time_completed_jobs / num_run_complete
-      #
-      #      time_left_for_current_job = estimated_time_per_job -
-      #                                  time_spent_so_far_on_current_job
-      #
-      #  The biggest problem with this calculation is its assumption that
-      #  all jobs have roughly the same running time (blatantly false!).
-      #
-      #  ETA can come out negative if the time spent on the current job is
-      #  greater than the estimated time per job (e.g. you're running the
-      #  first long job, after a series of short jobs).  For now, if that
-      #  happens, we set the ETA to "Unknown."
-      #
-      eta_seconds = (
-          float(self.num_total - self.experiment.num_complete - 1) *
-          time_completed_jobs / self.experiment.num_run_complete +
-          (time_completed_jobs / self.experiment.num_run_complete -
-           (current_time - self.new_job_start_time)))
-
-      eta_seconds = int(eta_seconds)
-      if eta_seconds > 0:
-        eta = datetime.timedelta(seconds=eta_seconds)
-      else:
-        eta = 'Unknown'
-    except ZeroDivisionError:
-      eta = 'Unknown'
-    strings = []
-    strings.append('Current time: %s Elapsed: %s ETA: %s' %
-                   (datetime.datetime.now(),
-                    datetime.timedelta(seconds=int(elapsed_time)), eta))
-    strings.append(
-        self._GetProgressBar(self.experiment.num_complete, self.num_total))
-    return '\n'.join(strings)
-
-  def GetStatusString(self):
-    """Get the status string of all the benchmark_runs."""
-    status_bins = collections.defaultdict(list)
-    for benchmark_run in self.experiment.benchmark_runs:
-      status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run)
-
-    status_strings = []
-    for key, val in status_bins.items():
-      if key == 'RUNNING':
-        get_description = self._GetNamesAndIterations
-      else:
-        get_description = self._GetCompactNamesAndIterations
-      status_strings.append('%s: %s' % (key, get_description(val)))
-
-    thread_status = ''
-    thread_status_format = 'Thread Status: \n{}\n'
-    if (self.experiment.schedv2() is None and
-        self.experiment.log_level == 'verbose'):
-      # Add the machine manager status.
-      thread_status = thread_status_format.format(
-          self.experiment.machine_manager.AsString())
-    elif self.experiment.schedv2():
-      # In schedv2 mode, we always print out thread status.
-      thread_status = thread_status_format.format(
-          self.experiment.schedv2().threads_status_as_string())
-
-    result = '{}{}'.format(thread_status, '\n'.join(status_strings))
-
-    return result
-
-  def _GetNamesAndIterations(self, benchmark_runs):
-    strings = []
-    t = time.time()
-    for benchmark_run in benchmark_runs:
-      t_last = benchmark_run.timeline.GetLastEventTime()
-      elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
-      strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
-    return ' %s (%s)' % (len(strings), ', '.join(strings))
-
-  def _GetCompactNamesAndIterations(self, benchmark_runs):
-    grouped_benchmarks = collections.defaultdict(list)
-    for benchmark_run in benchmark_runs:
-      grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
-
-    output_segs = []
-    for label_name, label_runs in grouped_benchmarks.items():
-      strings = []
-      benchmark_iterations = collections.defaultdict(list)
-      for benchmark_run in label_runs:
-        assert benchmark_run.label.name == label_name
-        benchmark_name = benchmark_run.benchmark.name
-        benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
-      for key, val in benchmark_iterations.items():
-        val.sort()
-        iterations = ','.join(str(v) for v in val)
-        strings.append('{} [{}]'.format(key, iterations))
-      output_segs.append('  ' + label_name + ': ' + ', '.join(strings) + '\n')
-
-    return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
+    """The status class."""
+
+    def __init__(self, experiment):
+        self.experiment = experiment
+        self.num_total = len(self.experiment.benchmark_runs)
+        self.completed = 0
+        self.new_job_start_time = time.time()
+        self.log_level = experiment.log_level
+
+    def _GetProgressBar(self, num_complete, num_total):
+        ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
+        bar_length = 50
+        done_char = ">"
+        undone_char = " "
+        num_complete_chars = bar_length * num_complete // num_total
+        num_undone_chars = bar_length - num_complete_chars
+        ret += " [%s%s]" % (
+            num_complete_chars * done_char,
+            num_undone_chars * undone_char,
+        )
+        return ret
+
+    def GetProgressString(self):
+        """Get the elapsed_time, ETA."""
+        current_time = time.time()
+        if self.experiment.start_time:
+            elapsed_time = current_time - self.experiment.start_time
+        else:
+            elapsed_time = 0
+        try:
+            if self.completed != self.experiment.num_complete:
+                self.completed = self.experiment.num_complete
+                self.new_job_start_time = current_time
+            time_completed_jobs = elapsed_time - (
+                current_time - self.new_job_start_time
+            )
+            # eta is calculated as:
+            #   ETA = (num_jobs_not_yet_started * estimated_time_per_job)
+            #          + time_left_for_current_job
+            #
+            #   where
+            #      num_jobs_not_yet_started = (num_total - num_complete - 1)
+            #
+            #      estimated_time_per_job = time_completed_jobs / num_run_complete
+            #
+            #      time_left_for_current_job = estimated_time_per_job -
+            #                                  time_spent_so_far_on_current_job
+            #
+            #  The biggest problem with this calculation is its assumption that
+            #  all jobs have roughly the same running time (blatantly false!).
+            #
+            #  ETA can come out negative if the time spent on the current job is
+            #  greater than the estimated time per job (e.g. you're running the
+            #  first long job, after a series of short jobs).  For now, if that
+            #  happens, we set the ETA to "Unknown."
+            #
+            eta_seconds = float(
+                self.num_total - self.experiment.num_complete - 1
+            ) * time_completed_jobs / self.experiment.num_run_complete + (
+                time_completed_jobs / self.experiment.num_run_complete
+                - (current_time - self.new_job_start_time)
+            )
+
+            eta_seconds = int(eta_seconds)
+            if eta_seconds > 0:
+                eta = datetime.timedelta(seconds=eta_seconds)
+            else:
+                eta = "Unknown"
+        except ZeroDivisionError:
+            eta = "Unknown"
+        strings = []
+        strings.append(
+            "Current time: %s Elapsed: %s ETA: %s"
+            % (
+                datetime.datetime.now(),
+                datetime.timedelta(seconds=int(elapsed_time)),
+                eta,
+            )
+        )
+        strings.append(
+            self._GetProgressBar(self.experiment.num_complete, self.num_total)
+        )
+        return "\n".join(strings)
+
+    def GetStatusString(self):
+        """Get the status string of all the benchmark_runs."""
+        status_bins = collections.defaultdict(list)
+        for benchmark_run in self.experiment.benchmark_runs:
+            status_bins[benchmark_run.timeline.GetLastEvent()].append(
+                benchmark_run
+            )
+
+        status_strings = []
+        for key, val in status_bins.items():
+            if key == "RUNNING":
+                get_description = self._GetNamesAndIterations
+            else:
+                get_description = self._GetCompactNamesAndIterations
+            status_strings.append("%s: %s" % (key, get_description(val)))
+
+        thread_status = ""
+        thread_status_format = "Thread Status: \n{}\n"
+        if (
+            self.experiment.schedv2() is None
+            and self.experiment.log_level == "verbose"
+        ):
+            # Add the machine manager status.
+            thread_status = thread_status_format.format(
+                self.experiment.machine_manager.AsString()
+            )
+        elif self.experiment.schedv2():
+            # In schedv2 mode, we always print out thread status.
+            thread_status = thread_status_format.format(
+                self.experiment.schedv2().threads_status_as_string()
+            )
+
+        result = "{}{}".format(thread_status, "\n".join(status_strings))
+
+        return result
+
+    def _GetNamesAndIterations(self, benchmark_runs):
+        strings = []
+        t = time.time()
+        for benchmark_run in benchmark_runs:
+            t_last = benchmark_run.timeline.GetLastEventTime()
+            elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
+            strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
+        return " %s (%s)" % (len(strings), ", ".join(strings))
+
+    def _GetCompactNamesAndIterations(self, benchmark_runs):
+        grouped_benchmarks = collections.defaultdict(list)
+        for benchmark_run in benchmark_runs:
+            grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
+
+        output_segs = []
+        for label_name, label_runs in grouped_benchmarks.items():
+            strings = []
+            benchmark_iterations = collections.defaultdict(list)
+            for benchmark_run in label_runs:
+                assert benchmark_run.label.name == label_name
+                benchmark_name = benchmark_run.benchmark.name
+                benchmark_iterations[benchmark_name].append(
+                    benchmark_run.iteration
+                )
+            for key, val in benchmark_iterations.items():
+                val.sort()
+                iterations = ",".join(str(v) for v in val)
+                strings.append("{} [{}]".format(key, iterations))
+            output_segs.append(
+                "  " + label_name + ": " + ", ".join(strings) + "\n"
+            )
+
+        return " %s \n%s" % (len(benchmark_runs), "".join(output_segs))
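For readers skimming the diff, the ETA arithmetic documented in the comment block inside GetProgressString can be restated outside the class. The sketch below is not part of the commit; the helper name estimate_eta_seconds and the example numbers are hypothetical, and only the arithmetic mirrors the formula described in the code comments above.

# Minimal standalone restatement of the ETA estimate used in GetProgressString.
# All input values are hypothetical illustration; the formula is:
#   ETA = (num_total - num_complete - 1) * estimated_time_per_job
#         + (estimated_time_per_job - time_spent_on_current_job)
def estimate_eta_seconds(num_total, num_complete, num_run_complete,
                         time_completed_jobs, time_on_current_job):
    # Average time per finished job.
    est_per_job = time_completed_jobs / num_run_complete
    # Jobs that have not started yet, excluding the one currently running.
    not_started = num_total - num_complete - 1
    # Remaining time for the job in flight (can go negative for long jobs,
    # which is why the real code falls back to "Unknown").
    time_left_current = est_per_job - time_on_current_job
    return not_started * est_per_job + time_left_current


# Example: 10 jobs total, 4 finished in 400 s, current job running for 30 s.
# est_per_job = 100 s, so ETA = 5 * 100 + (100 - 30) = 570 s.
print(estimate_eta_seconds(10, 4, 4, 400.0, 30.0))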