author     borenet <borenet@google.com>            2014-07-02 09:37:03 -0700
committer  Commit bot <commit-bot@chromium.org>    2014-07-02 09:37:03 -0700
commit     b26130af2faf6a72b077c400557e638e89c339d4 (patch)
tree       50d1727c32ec7d60ae96472f6766107dab341661 /tools
parent     256b351fb1d33f3e3156d090a6a533b54ad66117 (diff)
download   skia-b26130af2faf6a72b077c400557e638e89c339d4.tar.gz
Fix RecreateSkps
- Use shell_utils to auto-print all subprocesses.
- Only download bench_pictures data.
- Skip or error out on bots which failed BenchPictures, PostBench, or
  UploadBenchResults.

BUG=skia:2714
R=rmistry@google.com
TBR=rmistry
NOTRY=true

Author: borenet@google.com

Review URL: https://codereview.chromium.org/363833004
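For readers unfamiliar with it, shell_utils is Skia's wrapper around subprocess that echoes each command and streams its output as it runs, instead of silently piping it the way the old subprocess.check_call invocation did. A minimal sketch of the behavior this CL relies on; the real implementation lives in Skia's common repo, and apart from run() and CommandFailedException (both visible in the diff below) every detail here is illustrative:

import subprocess

class CommandFailedException(Exception):
  """Raised in place of subprocess.CalledProcessError."""
  pass

def run(cmd):
  # Echo the command, then stream its combined output while capturing it,
  # so the buildbot log shows what every subprocess did.
  print ' '.join(cmd)
  proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
  output = []
  for line in iter(proc.stdout.readline, ''):
    print line.rstrip()
    output.append(line)
  proc.wait()
  if proc.returncode != 0:
    raise CommandFailedException('Command failed: %s' % ' '.join(cmd))
  return ''.join(output)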
Diffstat (limited to 'tools')
-rw-r--r--  tools/gen_bench_expectations_from_codereview.py  |  87
1 file changed, 71 insertions(+), 16 deletions(-)
diff --git a/tools/gen_bench_expectations_from_codereview.py b/tools/gen_bench_expectations_from_codereview.py
index 3d40aa67e..4c41c9a72 100644
--- a/tools/gen_bench_expectations_from_codereview.py
+++ b/tools/gen_bench_expectations_from_codereview.py
@@ -10,21 +10,27 @@
 
 import collections
 import compare_codereview
+import json
 import os
 import re
 import shutil
-import subprocess
 import sys
+import urllib2
 
+import fix_pythonpath  # pylint: disable=W0611
+from common.py.utils import shell_utils
 
-BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
+
+BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/bench_*_data_*'
+BUILD_STATUS_SUCCESS = 0
+BUILD_STATUS_WARNINGS = 1
 CHECKOUT_PATH = os.path.realpath(os.path.join(
     os.path.dirname(os.path.abspath(__file__)), os.pardir))
 TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')
 
 
 TryBuild = collections.namedtuple(
-    'TryBuild', ['builder_name', 'build_number', 'is_finished'])
+    'TryBuild', ['builder_name', 'build_number', 'is_finished', 'json_url'])
 
 
 def find_all_builds(codereview_url):
@@ -40,12 +46,19 @@ def find_all_builds(codereview_url):
   try_builds = []
   for builder, data in results.iteritems():
     if builder.startswith('Perf'):
-      build_num = data.url.split('/')[-1] if data.url else None
+      build_num = None
+      json_url = None
+      if data.url:
+        split_url = data.url.split('/')
+        build_num = split_url[-1]
+        split_url.insert(split_url.index('builders'), 'json')
+        json_url = '/'.join(split_url)
       is_finished = (data.status not in ('pending', 'try-pending') and
                      build_num is not None)
       try_builds.append(TryBuild(builder_name=builder,
                                  build_number=build_num,
-                                 is_finished=is_finished))
+                                 is_finished=is_finished,
+                                 json_url=json_url))
   return try_builds
 
 
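The json_url derivation in this hunk simply inserts a 'json' path component ahead of 'builders' in the build's status URL. Worked through with a hypothetical buildbot URL (only the split/insert/join logic comes from the diff; the host and builder name are made up):

url = 'http://build.chromium.org/p/tryserver.skia/builders/Perf-Foo-Trybot/builds/1234'
split_url = url.split('/')
build_num = split_url[-1]   # '1234'
split_url.insert(split_url.index('builders'), 'json')
json_url = '/'.join(split_url)
# json_url is now:
# 'http://build.chromium.org/p/tryserver.skia/json/builders/Perf-Foo-Trybot/builds/1234'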
@@ -85,9 +98,7 @@ def get_bench_data(builder, build_num, dest_dir):
       dest_dir: string; destination directory for the bench data.
   """
   url = BENCH_DATA_URL % (builder, build_num)
-  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE)
+  shell_utils.run(['gsutil', 'cp', '-R', url, dest_dir])
 
 
 def find_revision_from_downloaded_data(dest_dir):
@@ -111,8 +122,32 @@ class TrybotNotFinishedError(Exception):
   pass
 
 
+def _step_succeeded(try_build, step_name):
+  """Return True if the given step succeeded and False otherwise.
+
+  This function talks to the build master's JSON interface, which is slow.
+
+  TODO(borenet): There are now a few places which talk to the master's JSON
+  interface. Maybe it'd be worthwhile to create a module which does this.
+
+  Args:
+      try_build: TryBuild instance; the build we're concerned about.
+      step_name: string; name of the step we're concerned about.
+  """
+  step_url = '/'.join((try_build.json_url, 'steps', step_name))
+  step_data = json.load(urllib2.urlopen(step_url))
+  # step_data['results'] may not be present if the step succeeded. If present,
+  # it is a list whose first element is a result code, per the documentation:
+  # http://docs.buildbot.net/latest/developer/results.html
+  result = step_data.get('results', [BUILD_STATUS_SUCCESS])[0]
+  if result in (BUILD_STATUS_SUCCESS, BUILD_STATUS_WARNINGS):
+    return True
+  return False
+
+
 def gen_bench_expectations_from_codereview(codereview_url,
-                                           error_on_unfinished=True):
+                                           error_on_unfinished=True,
+                                           error_on_try_failure=True):
   """Generate bench expectations from a code review.
 
   Scans the given code review for Perf trybot runs. Downloads the results of
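For context on the _step_succeeded() helper added above: the per-step payload it fetches from the master looks roughly like the sketch below. The field names follow Buildbot 0.8's JSON interface, the sample values are made up, and 2 is Buildbot's FAILURE code alongside the SUCCESS=0 and WARNINGS=1 constants the CL defines:

step_data = {
    'name': 'BenchPictures',
    'isFinished': True,
    'results': [2, ['failed']],   # [result_code, text]
}
# A missing 'results' key is treated as success:
result = step_data.get('results', [0])[0]   # here: 2, i.e. a failure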
@@ -122,6 +157,8 @@ def gen_bench_expectations_from_codereview(codereview_url,
   Args:
       url: string; URL of the code review.
       error_on_unfinished: bool; throw an error if any trybot has not finished.
+      error_on_try_failure: bool; throw an error if any trybot failed an
+          important step.
   """
 
   try_builds = find_all_builds(codereview_url)
@@ -129,14 +166,32 @@ def gen_bench_expectations_from_codereview(codereview_url,
   if error_on_unfinished and not _all_trybots_finished(try_builds):
     raise TrybotNotFinishedError('Not all trybots have finished.')
 
+  failed_run = []
   failed_data_pull = []
   failed_gen_expectations = []
 
+  # Don't even try to do anything if BenchPictures, PostBench, or
+  # UploadBenchResults failed.
+  for try_build in try_builds:
+    for step in ('BenchPictures', 'PostBench', 'UploadBenchResults'):
+      if not _step_succeeded(try_build, step):
+        msg = '%s failed on %s!' % (step, try_build.builder_name)
+        if error_on_try_failure:
+          raise Exception(msg)
+        print 'WARNING: %s Skipping.' % msg
+        failed_run.append(try_build.builder_name)
+
   if os.path.isdir(TMP_BENCH_DATA_DIR):
     shutil.rmtree(TMP_BENCH_DATA_DIR)
 
   for try_build in try_builds:
     try_builder = try_build.builder_name
+
+    # Even if we're not erroring out on try failures, we can't generate new
+    # expectations for failed bots.
+    if try_builder in failed_run:
+      continue
+
     builder = try_builder.replace('-Trybot', '')
 
     # Download the data.
@@ -144,7 +199,7 @@ def gen_bench_expectations_from_codereview(codereview_url,
     os.makedirs(dest_dir)
     try:
       get_bench_data(try_builder, try_build.build_number, dest_dir)
-    except subprocess.CalledProcessError:
+    except shell_utils.CommandFailedException:
       failed_data_pull.append(try_builder)
       continue
 
@@ -160,12 +215,12 @@ def gen_bench_expectations_from_codereview(codereview_url,
     output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
                                'bench_expectations_%s.txt' % builder)
     try:
-      subprocess.check_call(['python',
-                             os.path.join(CHECKOUT_PATH, 'bench',
-                                          'gen_bench_expectations.py'),
-                             '-b', builder, '-o', output_file,
-                             '-d', dest_dir, '-r', revision])
-    except subprocess.CalledProcessError:
+      shell_utils.run(['python',
+                       os.path.join(CHECKOUT_PATH, 'bench',
+                                    'gen_bench_expectations.py'),
+                       '-b', builder, '-o', output_file,
+                       '-d', dest_dir, '-r', revision])
+    except shell_utils.CommandFailedException:
       failed_gen_expectations.append(builder)
 
   failure = ''
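Taken together, the new keyword argument lets callers choose between failing fast on a broken trybot and warning-and-skipping it. A hypothetical invocation; the function name and both flags come from this CL, while the wrapper module alias and the review URL (this CL's own) are used purely for illustration:

import gen_bench_expectations_from_codereview as gen_bench

gen_bench.gen_bench_expectations_from_codereview(
    'https://codereview.chromium.org/363833004',
    error_on_unfinished=True,     # raise TrybotNotFinishedError if bots are still running
    error_on_try_failure=False)   # warn and skip bots that failed BenchPictures,
                                  # PostBench, or UploadBenchResults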