author     commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2014-03-17 21:16:29 +0000
committer  commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2014-03-17 21:16:29 +0000
commit     b1bcb21631275fd5d0f5313b1ea37d0b975c066a (patch)
tree       003726316953d421be1e2940d6474b36de4dceca /bench
parent     bab3fc4c90c4369c5d77d8da86d9f117ee54e0bc (diff)
Add gen_bench_expectations.py to generate bench expectations files.
BUG=skia:2225
NOTRY=true
R=borenet@google.com
Author: bensong@google.com
Review URL: https://codereview.chromium.org/201423002
git-svn-id: http://skia.googlecode.com/svn/trunk@13838 2bbb7eff-a529-9590-31e7-b0007b416f81
Diffstat (limited to 'bench')
-rw-r--r--  bench/bench_util.py               34
-rw-r--r--  bench/check_bench_regressions.py  35
-rw-r--r--  bench/gen_bench_expectations.py  115
3 files changed, 151 insertions, 33 deletions
diff --git a/bench/bench_util.py b/bench/bench_util.py
index 29ef1c473b..b6fecb7ca8 100644
--- a/bench/bench_util.py
+++ b/bench/bench_util.py
@@ -4,6 +4,7 @@ Created on May 19, 2011
 @author: bungeman
 '''
+import os
 import re
 import math
 
@@ -147,6 +148,39 @@ def _ParseAndStoreTimes(config_re_compiled, is_per_tile, line, bench,
         layout_dic.setdefault(bench, {}).setdefault(
             current_config, {}).setdefault(current_time_type, tile_layout)
 
+def parse_skp_bench_data(directory, revision, rep, default_settings=None):
+    """Parses all the skp bench data in the given directory.
+
+    Args:
+      directory: string of path to input data directory.
+      revision: git hash revision that matches the data to process.
+      rep: bench representation algorithm, see bench_util.py.
+      default_settings: dictionary of other run settings. See writer.option() in
+          bench/benchmain.cpp.
+
+    Returns:
+      A list of BenchDataPoint objects.
+    """
+    revision_data_points = []
+    file_list = os.listdir(directory)
+    file_list.sort()
+    for bench_file in file_list:
+        scalar_type = None
+        # Scalar type, if any, is in the bench filename after 'scalar_'.
+        if (bench_file.startswith('bench_' + revision + '_data_')):
+            if bench_file.find('scalar_') > 0:
+                components = bench_file.split('_')
+                scalar_type = components[components.index('scalar') + 1]
+        else: # Skips non skp bench files.
+            continue
+
+        with open('/'.join([directory, bench_file]), 'r') as file_handle:
+            settings = dict(default_settings or {})
+            settings['scalar'] = scalar_type
+            revision_data_points.extend(parse(settings, file_handle, rep))
+
+    return revision_data_points
+
 # TODO(bensong): switch to reading JSON output when available. This way we don't
 # need the RE complexities.
 def parse(settings, lines, representation=None):
diff --git a/bench/check_bench_regressions.py b/bench/check_bench_regressions.py
index 48ce180a39..8f648dc2b0 100644
--- a/bench/check_bench_regressions.py
+++ b/bench/check_bench_regressions.py
@@ -80,35 +80,6 @@ class Label:
                 hash(self.time_type) ^
                 hash(frozenset(self.settings.iteritems())))
 
-def parse_dir(directory, default_settings, revision, rep):
-    """Parses bench data from bench logs files.
-    revision can be either svn revision or git commit hash.
-    """
-    revision_data_points = [] # list of BenchDataPoint
-    file_list = os.listdir(directory)
-    file_list.sort()
-    for bench_file in file_list:
-        scalar_type = None
-        # Scalar type, if any, is in the bench filename after revision
-        if (len(revision) > MAX_SVN_REV_LENGTH and
-            bench_file.startswith('bench_' + revision + '_')):
-            # The revision is GIT commit hash.
-            scalar_type = bench_file[len(revision) + len('bench_') + 1:]
-        elif (bench_file.startswith('bench_r' + revision + '_') and
-              revision.isdigit()):
-            # The revision is SVN number
-            scalar_type = bench_file[len(revision) + len('bench_r') + 1:]
-        else:
-            continue
-
-        file_handle = open(directory + '/' + bench_file, 'r')
-
-        default_settings['scalar'] = scalar_type
-        revision_data_points.extend(
-            bench_util.parse(default_settings, file_handle, rep))
-        file_handle.close()
-    return revision_data_points
-
 def create_bench_dict(revision_data_points):
     """Convert current revision data into a dictionary of line data.
 
@@ -206,6 +177,7 @@ def check_expectations(lines, expectations, key_suffix):
     if outputs:
         raise Exception('\n'.join(outputs))
 
+
 def main():
     """Parses command line and checks bench expectations."""
     try:
@@ -248,10 +220,7 @@ def main():
 
     platform_and_alg = bot + '-' + rep
 
-    data_points = parse_dir(directory,
-                            {}, # Sets default settings to empty.
-                            rev,
-                            rep)
+    data_points = bench_util.parse_skp_bench_data(directory, rev, rep)
 
     bench_dict = create_bench_dict(data_points)
 
diff --git a/bench/gen_bench_expectations.py b/bench/gen_bench_expectations.py
new file mode 100644
index 0000000000..57f61c9fa7
--- /dev/null
+++ b/bench/gen_bench_expectations.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Generate bench_expectations file from a given set of bench data files. """
+
+import argparse
+import bench_util
+import os
+import re
+import sys
+
+# Parameters for calculating bench ranges.
+RANGE_RATIO = 1.0 # Ratio of range for upper and lower bounds.
+ABS_ERR = 1.0 # Additional allowed error in milliseconds.
+
+# List of bench configs to monitor. Ignore all other configs.
+CONFIGS_TO_INCLUDE = ['simple_viewport_1000x1000',
+                      'simple_viewport_1000x1000_gpu',
+                      'simple_viewport_1000x1000_scalar_1.100000',
+                      'simple_viewport_1000x1000_scalar_1.100000_gpu',
+                     ]
+
+
+def compute_ranges(benches):
+    """Given a list of bench numbers, calculate the alert range.
+
+    Args:
+      benches: a list of float bench values.
+
+    Returns:
+      a list of float [lower_bound, upper_bound].
+    """
+    minimum = min(benches)
+    maximum = max(benches)
+    diff = maximum - minimum
+
+    return [minimum - diff * RANGE_RATIO - ABS_ERR,
+            maximum + diff * RANGE_RATIO + ABS_ERR]
+
+
+def create_expectations_dict(revision_data_points):
+    """Convert list of bench data points into a dictionary of expectations data.
+
+    Args:
+      revision_data_points: a list of BenchDataPoint objects.
+
+    Returns:
+      a dictionary of this form:
+          keys = tuple of (config, bench) strings.
+          values = list of float [expected, lower_bound, upper_bound] for the key.
+    """
+    bench_dict = {}
+    for point in revision_data_points:
+        if (point.time_type or # Not walltime which has time_type ''
+            not point.config in CONFIGS_TO_INCLUDE):
+            continue
+        key = (point.config, point.bench)
+        if key in bench_dict:
+            raise Exception('Duplicate bench entry: ' + str(key))
+        bench_dict[key] = [point.time] + compute_ranges(point.per_iter_time)
+
+    return bench_dict
+
+
+def main():
+    """Reads bench data points, then calculate and export expectations.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-a', '--representation_alg', default='25th',
+        help='bench representation algorithm to use, see bench_util.py.')
+    parser.add_argument(
+        '-b', '--builder', required=True,
+        help='name of the builder whose bench ranges we are computing.')
+    parser.add_argument(
+        '-d', '--input_dir', required=True,
+        help='a directory containing bench data files.')
+    parser.add_argument(
+        '-o', '--output_file', required=True,
+        help='file path and name for storing the output bench expectations.')
+    parser.add_argument(
+        '-r', '--git_revision', required=True,
+        help='the git hash to indicate the revision of input data to use.')
+    args = parser.parse_args()
+
+    builder = args.builder
+
+    data_points = bench_util.parse_skp_bench_data(
+        args.input_dir, args.git_revision, args.representation_alg)
+
+    expectations_dict = create_expectations_dict(data_points)
+
+    out_lines = []
+    keys = expectations_dict.keys()
+    keys.sort()
+    for (config, bench) in keys:
+        (expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
+        out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
+                         '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
+                             'bench': bench,
+                             'config': config,
+                             'builder': builder,
+                             'representation': args.representation_alg,
+                             'expected': expected,
+                             'lower_bound': lower_bound,
+                             'upper_bound': upper_bound})
+
+    with open(args.output_file, 'w') as file_handle:
+        file_handle.write('\n'.join(out_lines))
+
+
+if __name__ == "__main__":
+    main()