author	commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>	2014-05-28 17:30:10 +0000
committer	commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>	2014-05-28 17:30:10 +0000
commit	ea6cb91170d473dde85668c79c4abdf756687916 (patch)
tree	e8ff5773a70b8a5b4c1755bb0c71b9f62bdf4431 /tools
parent	f449156bd00815eae1f1daea436f08068ef0f01c (diff)
Add script to rebaseline benches from codereview trybot results
This script is designed to be used by the RecreateSKPs bot. Eventually, the
bot will:
1. Generate new SKPs
2. Upload the new SKPs to a subdirectory with an ID or generation number.
3. Change Skia to use the new SKPs:
   a. Create and upload a Skia CL which changes the "current SKP generation"
      file to point to the new SKPs
   b. Launch Perf trybots on that CL.
   c. Call this script every 5 minutes until it successfully creates new
      baselines for each of the launched Perf bots.
   d. Add the new baselines to the CL
   e. Upload a second patch set of the CL
   f. Check the CQ bit on the CL

BUG=skia:2225
R=epoger@google.com, halcanary@google.com, bensong@google.com

Author: borenet@google.com

Review URL: https://codereview.chromium.org/297893004

git-svn-id: http://skia.googlecode.com/svn/trunk@14921 2bbb7eff-a529-9590-31e7-b0007b416f81
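Based on the script's __main__ handler below, a manual invocation takes the
code review URL as its only argument; a minimal sketch (URL illustrative):

    python tools/gen_bench_expectations_from_codereview.py \
        https://codereview.chromium.org/297893004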
Diffstat (limited to 'tools')
-rw-r--r--	tools/gen_bench_expectations_from_codereview.py	154
1 file changed, 154 insertions, 0 deletions
diff --git a/tools/gen_bench_expectations_from_codereview.py b/tools/gen_bench_expectations_from_codereview.py
new file mode 100644
index 0000000000..cc28996fbe
--- /dev/null
+++ b/tools/gen_bench_expectations_from_codereview.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Generate new bench expectations from results of trybots on a code review."""
+
+
+import collections
+import compare_codereview
+import os
+import re
+import shutil
+import subprocess
+import sys
+
+
+BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
+CHECKOUT_PATH = os.path.realpath(os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), os.pardir))
+TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')
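+# Illustration (paths hypothetical): if this script lives at
+# <checkout>/tools/gen_bench_expectations_from_codereview.py, CHECKOUT_PATH
+# resolves to <checkout> and TMP_BENCH_DATA_DIR to <checkout>/.bench_data.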
+
+
+def find_all_builds(codereview_url):
+ """Finds and returns information about trybot runs for a code review.
+
+ Args:
+ codereview_url: URL of the codereview in question.
+
+ Returns:
+ List of NamedTuples: (builder_name, build_number, is_finished)
+ """
+ results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
+ TryBuild = collections.namedtuple(
+ 'TryBuild', ['builder_name', 'build_number', 'is_finished'])
+ try_builds = []
+
+ for builder, data in results.iteritems():
+ if builder.startswith('Perf'):
+ try_builds.append(TryBuild(builder, data.url.split('/')[-1],
+ data.status != 'pending'))
+ return try_builds
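+
+# Hypothetical example of the return value (builder name and build number
+# invented for illustration):
+#   find_all_builds('https://codereview.chromium.org/297893004')
+#   -> [TryBuild(builder_name='Perf-Ubuntu12-ShuttleA-GTX660-x86-Release-Trybot',
+#                build_number='1234', is_finished=True)]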
+
+
+def get_bench_data(builder, build_num, dest_dir):
+ """Download the bench data for the given builder at the given build_num.
+
+ Args:
+ builder: string; name of the builder.
+ build_num: string; build number.
+ dest_dir: string; destination directory for the bench data.
+ """
+ url = BENCH_DATA_URL % (builder, build_num)
+ subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
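+
+# For example (builder name and build number hypothetical), the call
+# get_bench_data('Perf-Builder', '1234', '/tmp/bench') runs roughly:
+#   gsutil cp -R gs://chromium-skia-gm/perfdata/Perf-Builder/1234/* /tmp/bench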
+
+
+def find_revision_from_downloaded_data(dest_dir):
+ """Finds the revision at which the downloaded data was generated.
+
+ Args:
+ dest_dir: string; directory holding the downloaded data.
+
+ Returns:
+ The revision (git commit hash) at which the downloaded data was
+ generated, or None if no revision can be found.
+ """
+ for data_file in os.listdir(dest_dir):
+ match = re.match('bench_(?P<revision>[0-9a-fA-F]{2,40})_data.*', data_file)
+ if match:
+ return match.group('revision')
+ return None
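+
+# For example, a downloaded file named 'bench_0123abcd_data.txt' (filename
+# hypothetical) would match the pattern above and yield revision '0123abcd'.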
+
+
+class TrybotNotFinishedError(Exception):
+  pass
+
+
+def gen_bench_expectations_from_codereview(codereview_url,
+                                           error_on_unfinished=True):
+  """Generates bench expectations from a code review.
+
+  Scans the given code review for Perf trybot runs. Downloads the results of
+  finished trybots and uses them to generate new expectations for their
+  waterfall counterparts.
+
+  Args:
+    codereview_url: string; URL of the code review.
+    error_on_unfinished: bool; raise an error if any trybot has not finished.
+  """
+  try_builds = find_all_builds(codereview_url)
+
+  # Verify that all trybots have finished running.
+  if error_on_unfinished:
+    for try_build in try_builds:
+      if not try_build.is_finished:
+        raise TrybotNotFinishedError('%s: #%s is not finished.' % (
+            try_build.builder_name,
+            try_build.build_number))
+
+  failed_data_pull = []
+  failed_gen_expectations = []
+
+  if os.path.isdir(TMP_BENCH_DATA_DIR):
+    shutil.rmtree(TMP_BENCH_DATA_DIR)
+
+  for try_build in try_builds:
+    try_builder = try_build.builder_name
+    builder = try_builder.replace('-Trybot', '')
+
+    # Download the data.
+    dest_dir = os.path.join(TMP_BENCH_DATA_DIR, builder)
+    os.makedirs(dest_dir)
+    try:
+      get_bench_data(try_builder, try_build.build_number, dest_dir)
+    except subprocess.CalledProcessError:
+      failed_data_pull.append(try_builder)
+      continue
+
+    # Find the revision at which the data was generated.
+    revision = find_revision_from_downloaded_data(dest_dir)
+    if not revision:
+      # If we can't find a revision, then something is wrong with the data we
+      # downloaded. Skip this builder.
+      failed_data_pull.append(try_builder)
+      continue
+
+    # Generate new expectations.
+    output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
+                               'bench_expectations_%s.txt' % builder)
+    try:
+      subprocess.check_call(['python',
+                             os.path.join(CHECKOUT_PATH, 'bench',
+                                          'gen_bench_expectations.py'),
+                             '-b', builder, '-o', output_file,
+                             '-d', dest_dir, '-r', revision])
+    except subprocess.CalledProcessError:
+      failed_gen_expectations.append(builder)
+
+  failure = ''
+  if failed_data_pull:
+    failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
+  if failed_gen_expectations:
+    failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
+        failed_gen_expectations)
+  if failure:
+    raise Exception(failure)
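+
+# As an illustration (builder name and revision hypothetical), the
+# expectations call above expands to roughly:
+#   python bench/gen_bench_expectations.py -b Perf-Builder \
+#       -o expectations/bench/bench_expectations_Perf-Builder.txt \
+#       -d .bench_data/Perf-Builder -r 0123abcd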
+
+
+if __name__ == '__main__':
+  gen_bench_expectations_from_codereview(sys.argv[1])
+