author     mtklein <mtklein@chromium.org>          2014-08-06 08:39:36 -0700
committer  Commit bot <commit-bot@chromium.org>    2014-08-06 08:39:38 -0700
commit  be21e3432e4cf925fe494efe23edcb457bd05fcb (patch)
tree    38b5bf644bdc5444e24f922758601a29af3662ee /bench
parent  7909f47b423dacaff8623e8be247586108c3be66 (diff)
Bye bye bench.
NOTREECHECKS=true
BUG=skia:
R=djsollen@google.com, mtklein@google.com
Author: mtklein@chromium.org
Review URL: https://codereview.chromium.org/442343004
Diffstat (limited to 'bench')
-rw-r--r--  bench/bench_util.py   356
-rw-r--r--  bench/benchmain.cpp   716
2 files changed, 0 insertions, 1072 deletions
diff --git a/bench/bench_util.py b/bench/bench_util.py
deleted file mode 100644
index b6fecb7ca8..0000000000
--- a/bench/bench_util.py
+++ /dev/null
@@ -1,356 +0,0 @@
-'''
-Created on May 19, 2011
-
-@author: bungeman
-'''
-
-import os
-import re
-import math
-
-# bench representation algorithm constant names
-ALGORITHM_AVERAGE = 'avg'
-ALGORITHM_MEDIAN = 'med'
-ALGORITHM_MINIMUM = 'min'
-ALGORITHM_25TH_PERCENTILE = '25th'
-
-# Regular expressions used throughout.
-PER_SETTING_RE = '([^\s=]+)(?:=(\S+))?'
-SETTINGS_RE = 'skia bench:((?:\s+' + PER_SETTING_RE + ')*)'
-BENCH_RE = 'running bench (?:\[\d+ \d+\] )?\s*(\S+)'
-TIME_RE = '(?:(\w*)msecs = )?\s*((?:\d+\.\d+)(?:,\s*\d+\.\d+)*)'
-# non-per-tile benches have configs that don't end with ']' or '>'
-CONFIG_RE = '(\S+[^\]>]):\s+((?:' + TIME_RE + '\s+)+)'
-# per-tile bench lines are in the following format. Note that there are
-# non-averaged bench numbers on separate lines, which we currently ignore
-# due to their inaccuracy.
-TILE_RE = (' tile_(\S+): tile \[\d+,\d+\] out of \[\d+,\d+\] <averaged>:'
- ' ((?:' + TIME_RE + '\s+)+)')
-# for extracting tile layout
-TILE_LAYOUT_RE = ' out of \[(\d+),(\d+)\] <averaged>: '
-
-PER_SETTING_RE_COMPILED = re.compile(PER_SETTING_RE)
-SETTINGS_RE_COMPILED = re.compile(SETTINGS_RE)
-BENCH_RE_COMPILED = re.compile(BENCH_RE)
-TIME_RE_COMPILED = re.compile(TIME_RE)
-CONFIG_RE_COMPILED = re.compile(CONFIG_RE)
-TILE_RE_COMPILED = re.compile(TILE_RE)
-TILE_LAYOUT_RE_COMPILED = re.compile(TILE_LAYOUT_RE)
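
For illustration, here is a minimal Python 2 sketch that runs two fabricated
output lines (made up for this example) through the compiled patterns above:

    bench_line  = 'running bench [640 480]  bitmap_scale\n'
    config_line = '  8888:  cmsecs = 11.2,11.0  gmsecs = 3.4,3.3\n'

    print BENCH_RE_COMPILED.search(bench_line).group(1)   # bitmap_scale
    config = CONFIG_RE_COMPILED.search(config_line)
    print config.group(1)                                 # 8888
    for t in TIME_RE_COMPILED.finditer(config.group(2)):
        # time type then per-iteration values: 'c 11.2,11.0', then 'g 3.4,3.3'
        print t.group(1), t.group(2)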
-
-class BenchDataPoint:
- """A single data point produced by bench.
- """
- def __init__(self, bench, config, time_type, time, settings,
- tile_layout='', per_tile_values=[], per_iter_time=[]):
- # string name of the benchmark to measure
- self.bench = bench
- # string name of the configurations to run
- self.config = config
- # type of the timer in string: '' (walltime), 'c' (cpu) or 'g' (gpu)
- self.time_type = time_type
- # float number of the bench time value
- self.time = time
- # dictionary of the run settings
- self.settings = settings
- # how tiles cover the whole picture: '5x3' means 5 columns and 3 rows
- self.tile_layout = tile_layout
- # list of float for per_tile bench values, if applicable
- self.per_tile_values = per_tile_values
- # list of float for per-iteration bench time, if applicable
- self.per_iter_time = per_iter_time
-
- def __repr__(self):
- return "BenchDataPoint(%s, %s, %s, %s, %s)" % (
- str(self.bench),
- str(self.config),
- str(self.time_type),
- str(self.time),
- str(self.settings),
- )
-
-class _ExtremeType(object):
- """Instances of this class compare greater or less than other objects."""
- def __init__(self, cmpr, rep):
- object.__init__(self)
- self._cmpr = cmpr
- self._rep = rep
-
- def __cmp__(self, other):
- if isinstance(other, self.__class__) and other._cmpr == self._cmpr:
- return 0
- return self._cmpr
-
- def __repr__(self):
- return self._rep
-
-Max = _ExtremeType(1, "Max")
-Min = _ExtremeType(-1, "Min")
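
These sentinels rely on Python 2's __cmp__ protocol: Max compares greater than
any other object and Min compares less, which makes them safe initial seeds for
running max/min computations such as the x-range tracking in LinearRegression
below. A quick illustration:

    print max(Min, 5)   # 5: any real value replaces the Min seed
    print min(Max, 5)   # 5: any real value replaces the Max seed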
-
-class _ListAlgorithm(object):
- """Algorithm for selecting the representation value from a given list.
- representation is one of the ALGORITHM_XXX representation types."""
- def __init__(self, data, representation=None):
- if not representation:
- representation = ALGORITHM_AVERAGE # default algorithm
- self._data = data
- self._len = len(data)
- if representation == ALGORITHM_AVERAGE:
- self._rep = sum(self._data) / self._len
- else:
- self._data.sort()
- if representation == ALGORITHM_MINIMUM:
- self._rep = self._data[0]
- else:
- # for percentiles, we use the value below which x% of values are
- # found, which allows for better detection of quantum behaviors.
- if representation == ALGORITHM_MEDIAN:
- x = int(round(0.5 * self._len + 0.5))
- elif representation == ALGORITHM_25TH_PERCENTILE:
- x = int(round(0.25 * self._len + 0.5))
- else:
- raise Exception("invalid representation algorithm %s!" %
- representation)
- self._rep = self._data[x - 1]
-
- def compute(self):
- return self._rep
-
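A quick sketch of the four representations on one fabricated list of
per-iteration times; a copy is passed because _ListAlgorithm sorts its input
in place:

    times = [10.0, 10.2, 10.1, 10.4, 12.9]
    for rep in (ALGORITHM_AVERAGE, ALGORITHM_MEDIAN,
                ALGORITHM_MINIMUM, ALGORITHM_25TH_PERCENTILE):
        print rep, _ListAlgorithm(list(times), rep).compute()
    # avg 10.72, med 10.2, min 10.0, 25th 10.1
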
-def _ParseAndStoreTimes(config_re_compiled, is_per_tile, line, bench,
- value_dic, layout_dic):
- """Parses given bench time line with regex and adds data to value_dic.
-
- config_re_compiled: precompiled regular expression for parsing the config
- line.
- is_per_tile: boolean indicating whether this is a per-tile bench.
- If so, we add tile layout into layout_dic as well.
- line: input string line to parse.
- bench: name of bench for the time values.
- value_dic: dictionary to store bench values. See bench_dic in parse() below.
- layout_dic: dictionary to store tile layouts. See parse() for descriptions.
- """
-
- for config in config_re_compiled.finditer(line):
- current_config = config.group(1)
- tile_layout = ''
- if is_per_tile: # per-tile bench, add name prefix
- current_config = 'tile_' + current_config
- layouts = TILE_LAYOUT_RE_COMPILED.search(line)
- if layouts and len(layouts.groups()) == 2:
- tile_layout = '%sx%s' % layouts.groups()
- times = config.group(2)
- for new_time in TIME_RE_COMPILED.finditer(times):
- current_time_type = new_time.group(1)
- iters = [float(i) for i in
- new_time.group(2).strip().split(',')]
- value_dic.setdefault(bench, {}).setdefault(
- current_config, {}).setdefault(current_time_type, []).append(
- iters)
- layout_dic.setdefault(bench, {}).setdefault(
- current_config, {}).setdefault(current_time_type, tile_layout)
-
-def parse_skp_bench_data(directory, revision, rep, default_settings=None):
- """Parses all the skp bench data in the given directory.
-
- Args:
- directory: string of path to input data directory.
- revision: git hash revision that matches the data to process.
- rep: bench representation algorithm, see bench_util.py.
- default_settings: dictionary of other run settings. See writer.option() in
- bench/benchmain.cpp.
-
- Returns:
- A list of BenchDataPoint objects.
- """
- revision_data_points = []
- file_list = os.listdir(directory)
- file_list.sort()
- for bench_file in file_list:
- scalar_type = None
- # Scalar type, if any, is in the bench filename after 'scalar_'.
- if (bench_file.startswith('bench_' + revision + '_data_')):
- if bench_file.find('scalar_') > 0:
- components = bench_file.split('_')
- scalar_type = components[components.index('scalar') + 1]
- else: # Skips non skp bench files.
- continue
-
- with open('/'.join([directory, bench_file]), 'r') as file_handle:
- settings = dict(default_settings or {})
- settings['scalar'] = scalar_type
- revision_data_points.extend(parse(settings, file_handle, rep))
-
- return revision_data_points
-
-# TODO(bensong): switch to reading JSON output when available. This way we don't
-# need the RE complexities.
-def parse(settings, lines, representation=None):
- """Parses bench output into a useful data structure.
-
- ({str:str}, __iter__ -> str) -> [BenchDataPoint]
- representation is one of the ALGORITHM_XXX types."""
-
- benches = []
- current_bench = None
- # [bench][config][time_type] -> [[per-iter values]] where per-tile config
- # has per-iter value list for each tile [[<tile1_iter1>,<tile1_iter2>,...],
- # [<tile2_iter1>,<tile2_iter2>,...],...], while non-per-tile config only
- # contains one list of iterations [[iter1, iter2, ...]].
- bench_dic = {}
- # [bench][config][time_type] -> tile_layout
- layout_dic = {}
-
- for line in lines:
-
- # see if this line is a settings line
- settingsMatch = SETTINGS_RE_COMPILED.search(line)
- if (settingsMatch):
- settings = dict(settings)
- for settingMatch in PER_SETTING_RE_COMPILED.finditer(settingsMatch.group(1)):
- if (settingMatch.group(2)):
- settings[settingMatch.group(1)] = settingMatch.group(2)
- else:
- settings[settingMatch.group(1)] = True
-
- # see if this line starts a new bench
- new_bench = BENCH_RE_COMPILED.search(line)
- if new_bench:
- current_bench = new_bench.group(1)
-
- # add configs on this line to the bench_dic
- if current_bench:
- if line.startswith(' tile_') :
- _ParseAndStoreTimes(TILE_RE_COMPILED, True, line, current_bench,
- bench_dic, layout_dic)
- else:
- _ParseAndStoreTimes(CONFIG_RE_COMPILED, False, line,
- current_bench, bench_dic, layout_dic)
-
- # append benches to list
- for bench in bench_dic:
- for config in bench_dic[bench]:
- for time_type in bench_dic[bench][config]:
- tile_layout = ''
- per_tile_values = [] # empty for non-per-tile configs
- per_iter_time = [] # empty for per-tile configs
- bench_summary = None # a single final bench value
- if len(bench_dic[bench][config][time_type]) > 1:
- # per-tile config; compute representation for each tile
- per_tile_values = [
- _ListAlgorithm(iters, representation).compute()
- for iters in bench_dic[bench][config][time_type]]
- # use sum of each tile representation for total bench value
- bench_summary = sum(per_tile_values)
- # extract tile layout
- tile_layout = layout_dic[bench][config][time_type]
- else:
- # get the list of per-iteration values
- per_iter_time = bench_dic[bench][config][time_type][0]
- bench_summary = _ListAlgorithm(
- per_iter_time, representation).compute()
- benches.append(BenchDataPoint(
- bench,
- config,
- time_type,
- bench_summary,
- settings,
- tile_layout,
- per_tile_values,
- per_iter_time))
-
- return benches
-
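End to end, parse() turns raw bench output into BenchDataPoint objects. A
hedged sketch using fabricated lines in the format the regexes expect:

    sample = [
        'skia bench: alpha=0x80 scalar\n',
        'running bench [640 480]  bitmap_scale\n',
        '  8888:  cmsecs = 11.2,11.0  gmsecs = 3.4,3.3\n',
    ]
    for point in parse({}, sample, ALGORITHM_MINIMUM):
        print point
    # one BenchDataPoint per time type: ('c', 11.0) and ('g', 3.3),
    # each carrying settings {'alpha': '0x80', 'scalar': True}
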
-class LinearRegression:
- """Linear regression data based on a set of data points.
-
- ([(Number,Number)])
- There must be at least two points for this to make sense."""
- def __init__(self, points):
- n = len(points)
- max_x = Min
- min_x = Max
-
- Sx = 0.0
- Sy = 0.0
- Sxx = 0.0
- Sxy = 0.0
- Syy = 0.0
- for point in points:
- x = point[0]
- y = point[1]
- max_x = max(max_x, x)
- min_x = min(min_x, x)
-
- Sx += x
- Sy += y
- Sxx += x*x
- Sxy += x*y
- Syy += y*y
-
- denom = n*Sxx - Sx*Sx
- if (denom != 0.0):
- B = (n*Sxy - Sx*Sy) / denom
- else:
- B = 0.0
- a = (1.0/n)*(Sy - B*Sx)
-
- se2 = 0
- sB2 = 0
- sa2 = 0
- if (n >= 3 and denom != 0.0):
- se2 = (1.0/(n*(n-2)) * (n*Syy - Sy*Sy - B*B*denom))
- sB2 = (n*se2) / denom
- sa2 = sB2 * (1.0/n) * Sxx
-
-
- self.slope = B
- self.intercept = a
- self.serror = math.sqrt(max(0, se2))
- self.serror_slope = math.sqrt(max(0, sB2))
- self.serror_intercept = math.sqrt(max(0, sa2))
- self.max_x = max_x
- self.min_x = min_x
-
- def __repr__(self):
- return "LinearRegression(%s, %s, %s, %s, %s)" % (
- str(self.slope),
- str(self.intercept),
- str(self.serror),
- str(self.serror_slope),
- str(self.serror_intercept),
- )
-
- def find_min_slope(self):
- """Finds the minimal slope given one standard deviation."""
- slope = self.slope
- intercept = self.intercept
- error = self.serror
- regr_start = self.min_x
- regr_end = self.max_x
- regr_width = regr_end - regr_start
-
- if slope < 0:
- lower_left_y = slope*regr_start + intercept - error
- upper_right_y = slope*regr_end + intercept + error
- return min(0, (upper_right_y - lower_left_y) / regr_width)
-
- elif slope > 0:
- upper_left_y = slope*regr_start + intercept + error
- lower_right_y = slope*regr_end + intercept - error
- return max(0, (lower_right_y - upper_left_y) / regr_width)
-
- return 0
-
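A small worked example on made-up points that lie near y = 2x + 8:

    lr = LinearRegression([(1.0, 10.0), (2.0, 12.1), (3.0, 13.9), (4.0, 16.0)])
    print lr.slope, lr.intercept   # ~1.98 and ~8.05
    print lr.find_min_slope()      # slope discounted by one standard error
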
-def CreateRevisionLink(revision_number):
- """Returns HTML displaying the given revision number and linking to
- that revision's change page at code.google.com, e.g.
- http://code.google.com/p/skia/source/detail?r=2056
- """
- return '<a href="http://code.google.com/p/skia/source/detail?r=%s">%s</a>'%(
- revision_number, revision_number)
-
-def main():
- foo = [[0.0, 0.0], [0.0, 1.0], [0.0, 2.0], [0.0, 3.0]]
- LinearRegression(foo)
-
-if __name__ == "__main__":
- main()
diff --git a/bench/benchmain.cpp b/bench/benchmain.cpp
deleted file mode 100644
index 9c5f0e9bd7..0000000000
--- a/bench/benchmain.cpp
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "BenchLogger.h"
-#include "Benchmark.h"
-#include "CrashHandler.h"
-#include "GMBench.h"
-#include "ResultsWriter.h"
-#include "SkCanvas.h"
-#include "SkColorPriv.h"
-#include "SkCommandLineFlags.h"
-#include "SkData.h"
-#include "SkDeferredCanvas.h"
-#include "SkGraphics.h"
-#include "SkImageEncoder.h"
-#include "SkOSFile.h"
-#include "SkPicture.h"
-#include "SkPictureRecorder.h"
-#include "SkString.h"
-#include "SkSurface.h"
-#include "Timer.h"
-
-#if SK_SUPPORT_GPU
-#include "GrContext.h"
-#include "GrContextFactory.h"
-#include "GrRenderTarget.h"
-#include "gl/GrGLDefines.h"
-#else
-class GrContext;
-#endif // SK_SUPPORT_GPU
-
-#include <limits>
-
-enum BenchMode {
- kNormal_BenchMode,
- kDeferred_BenchMode,
- kDeferredSilent_BenchMode,
- kRecord_BenchMode,
- kPictureRecord_BenchMode
-};
-const char* BenchMode_Name[] = {
- "normal", "deferred", "deferredSilent", "record", "picturerecord"
-};
-
-static const char kDefaultsConfigStr[] = "defaults";
-
-#if SK_SUPPORT_GPU
-static const char kGpuAPINameGL[] = "gl";
-static const char kGpuAPINameGLES[] = "gles";
-#endif
-
-///////////////////////////////////////////////////////////////////////////////
-
-class Iter {
-public:
- Iter() : fBenches(BenchRegistry::Head()), fGMs(skiagm::GMRegistry::Head()) {}
-
- Benchmark* next() {
- if (fBenches) {
- BenchRegistry::Factory f = fBenches->factory();
- fBenches = fBenches->next();
- return (*f)(NULL);
- }
-
- while (fGMs) {
- SkAutoTDelete<skiagm::GM> gm(fGMs->factory()(NULL));
- fGMs = fGMs->next();
- if (gm->getFlags() & skiagm::GM::kAsBench_Flag) {
- return SkNEW_ARGS(GMBench, (gm.detach()));
- }
- }
-
- return NULL;
- }
-
-private:
- const BenchRegistry* fBenches;
- const skiagm::GMRegistry* fGMs;
-};
-
-static void make_filename(const char name[], SkString* path) {
- path->set(name);
- for (int i = 0; name[i]; i++) {
- switch (name[i]) {
- case '/':
- case '\\':
- case ' ':
- case ':':
- path->writable_str()[i] = '-';
- break;
- default:
- break;
- }
- }
-}
-
-static void saveFile(const char name[], const char config[], const char dir[],
- const SkImage* image) {
- SkAutoTUnref<SkData> data(image->encode(SkImageEncoder::kPNG_Type, 100));
- if (NULL == data.get()) {
- return;
- }
-
- SkString filename;
- make_filename(name, &filename);
- filename.appendf("_%s.png", config);
- SkString path = SkOSPath::Join(dir, filename.c_str());
- ::remove(path.c_str());
-
- SkFILEWStream stream(path.c_str());
- stream.write(data->data(), data->size());
-}
-
-static void perform_clip(SkCanvas* canvas, int w, int h) {
- SkRect r;
-
- r.set(SkIntToScalar(10), SkIntToScalar(10),
- SkIntToScalar(w*2/3), SkIntToScalar(h*2/3));
- canvas->clipRect(r, SkRegion::kIntersect_Op);
-
- r.set(SkIntToScalar(w/3), SkIntToScalar(h/3),
- SkIntToScalar(w-10), SkIntToScalar(h-10));
- canvas->clipRect(r, SkRegion::kXOR_Op);
-}
-
-static void perform_rotate(SkCanvas* canvas, int w, int h) {
- const SkScalar x = SkIntToScalar(w) / 2;
- const SkScalar y = SkIntToScalar(h) / 2;
-
- canvas->translate(x, y);
- canvas->rotate(SkIntToScalar(35));
- canvas->translate(-x, -y);
-}
-
-static void perform_scale(SkCanvas* canvas, int w, int h) {
- const SkScalar x = SkIntToScalar(w) / 2;
- const SkScalar y = SkIntToScalar(h) / 2;
-
- canvas->translate(x, y);
- // just enough so we can't take the sprite case
- canvas->scale(SK_Scalar1 * 99/100, SK_Scalar1 * 99/100);
- canvas->translate(-x, -y);
-}
-
-static SkSurface* make_surface(SkColorType colorType, const SkIPoint& size,
- Benchmark::Backend backend, int sampleCount,
- GrContext* context) {
- SkSurface* surface = NULL;
- SkImageInfo info = SkImageInfo::Make(size.fX, size.fY, colorType,
- kPremul_SkAlphaType);
-
- switch (backend) {
- case Benchmark::kRaster_Backend:
- surface = SkSurface::NewRaster(info);
- surface->getCanvas()->clear(SK_ColorWHITE);
- break;
-#if SK_SUPPORT_GPU
- case Benchmark::kGPU_Backend: {
- surface = SkSurface::NewRenderTarget(context, info, sampleCount);
- break;
- }
-#endif
- case Benchmark::kPDF_Backend:
- default:
- SkDEBUGFAIL("unsupported");
- }
- return surface;
-}
-
-#if SK_SUPPORT_GPU
-GrContextFactory gContextFactory;
-typedef GrContextFactory::GLContextType GLContextType;
-static const GLContextType kNative = GrContextFactory::kNative_GLContextType;
-static const GLContextType kNVPR = GrContextFactory::kNVPR_GLContextType;
-#if SK_ANGLE
-static const GLContextType kANGLE = GrContextFactory::kANGLE_GLContextType;
-#endif
-static const GLContextType kDebug = GrContextFactory::kDebug_GLContextType;
-static const GLContextType kNull = GrContextFactory::kNull_GLContextType;
-#else
-typedef int GLContextType;
-static const GLContextType kNative = 0, kANGLE = 0, kDebug = 0, kNull = 0;
-#endif
-
-#ifdef SK_DEBUG
-static const bool kIsDebug = true;
-#else
-static const bool kIsDebug = false;
-#endif
-
-static const struct Config {
- SkColorType fColorType;
- const char* name;
- int sampleCount;
- Benchmark::Backend backend;
- GLContextType contextType;
- bool runByDefault;
-} gConfigs[] = {
- { kN32_SkColorType, "NONRENDERING", 0, Benchmark::kNonRendering_Backend, kNative, true},
- { kN32_SkColorType, "8888", 0, Benchmark::kRaster_Backend, kNative, true},
- { kRGB_565_SkColorType, "565", 0, Benchmark::kRaster_Backend, kNative, true},
-#if SK_SUPPORT_GPU
- { kN32_SkColorType, "GPU", 0, Benchmark::kGPU_Backend, kNative, true},
- { kN32_SkColorType, "MSAA4", 4, Benchmark::kGPU_Backend, kNative, false},
- { kN32_SkColorType, "MSAA16", 16, Benchmark::kGPU_Backend, kNative, false},
- { kN32_SkColorType, "NVPRMSAA4", 4, Benchmark::kGPU_Backend, kNVPR, true},
- { kN32_SkColorType, "NVPRMSAA16", 16, Benchmark::kGPU_Backend, kNVPR, false},
-#if SK_ANGLE
- { kN32_SkColorType, "ANGLE", 0, Benchmark::kGPU_Backend, kANGLE, true},
-#endif // SK_ANGLE
- { kN32_SkColorType, "Debug", 0, Benchmark::kGPU_Backend, kDebug, kIsDebug},
- { kN32_SkColorType, "NULLGPU", 0, Benchmark::kGPU_Backend, kNull, true},
-#endif // SK_SUPPORT_GPU
-};
-
-DEFINE_string(outDir, "", "If given, image of each bench will be put in outDir.");
-DEFINE_string(timers, "cg", "Timers to display. "
- "Options: w(all) W(all, truncated) c(pu) C(pu, truncated) g(pu)");
-
-DEFINE_bool(rotate, false, "Rotate canvas before bench run?");
-DEFINE_bool(scale, false, "Scale canvas before bench run?");
-DEFINE_bool(clip, false, "Clip canvas before bench run?");
-
-DEFINE_string(forceDither, "default", "Force dithering: true, false, or default?");
-DEFINE_bool(forceBlend, false, "Force alpha blending?");
-
-DEFINE_string(gpuAPI, "", "Force use of specific gpu API. Using \"gl\" "
- "forces OpenGL API. Using \"gles\" forces OpenGL ES API. "
- "Defaults to empty string, which selects the API native to the "
- "system.");
-DEFINE_int32(gpuCacheBytes, -1, "GPU cache size limit in bytes. 0 to disable cache.");
-DEFINE_int32(gpuCacheCount, -1, "GPU cache size limit in object count. 0 to disable cache.");
-
-DEFINE_bool(gpu, true, "Allows GPU configs to be run. Applied after --config.");
-DEFINE_bool(cpu, true, "Allows non-GPU configs to be run. Applied after --config.");
-
-DEFINE_bool2(leaks, l, false, "show leaked ref cnt'd objects.");
-DEFINE_string(match, "", "[~][^]substring[$] [...] of test name to run.\n"
- "Multiple matches may be separated by spaces.\n"
- "~ causes a matching test to always be skipped\n"
- "^ requires the start of the test to match\n"
- "$ requires the end of the test to match\n"
-              "^ and $ together require an exact match\n"
- "If a test does not match any list entry,\n"
- "it is skipped unless some list entry starts with ~\n");
-DEFINE_string(mode, "normal",
- "normal: draw to a normal canvas;\n"
- "deferred: draw to a deferred canvas;\n"
- "deferredSilent: deferred with silent playback;\n"
- "record: draw to an SkPicture;\n"
- "picturerecord: draw from an SkPicture to an SkPicture.\n");
-DEFINE_string(config, kDefaultsConfigStr,
- "Run configs given. By default, runs the configs marked \"runByDefault\" in gConfigs.");
-DEFINE_string(logFile, "", "Also write stdout here.");
-DEFINE_int32(minMs, 20, "Shortest time we'll allow a benchmark to run.");
-DEFINE_int32(maxMs, 1000, "Longest time we'll allow a benchmark to run.");
-DEFINE_bool(runOnce, kIsDebug, "Run each bench exactly once and don't report timings.");
-DEFINE_double(error, 0.01,
-              "Ratio of subsequent bench measurements must fall within 1±error to converge.");
-DEFINE_string(timeFormat, "%9.2f", "Format to print results, in milliseconds per 1000 loops.");
-DEFINE_bool2(verbose, v, false, "Print more.");
-DEFINE_string(outResultsFile, "", "If given, the results will be written to the file in JSON format.");
-DEFINE_bool(dryRun, false, "Don't actually run the tests, just print what would have been done.");
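
Per --timeFormat, results are normalized to milliseconds per 1000 loops so
that runs with different loop counts stay comparable. With hypothetical
numbers, 8192 loops measured at 37.2 ms of wall time would report (a Python
sketch of the arithmetic):

    loops, wall_ms = 8192, 37.2
    print '%9.2f' % (1000.0 / loops * wall_ms)   # '     4.54' ms per 1000 loops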
-
-// Has this bench converged? The first two arguments are milliseconds per loop
-// iteration; the last is overall runtime in milliseconds.
-static bool HasConverged(double prevPerLoop, double currPerLoop, double currRaw) {
- if (currRaw < FLAGS_minMs) {
- return false;
- }
- const double low = 1 - FLAGS_error, high = 1 + FLAGS_error;
- const double ratio = currPerLoop / prevPerLoop;
- return low < ratio && ratio < high;
-}
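
tool_main() below pairs this check with an exponential ramp-up of the loop
count. A hedged Python sketch of that protocol, where time_bench, MIN_MS, and
ERROR are hypothetical stand-ins for running the bench and for --minMs/--error
(the --maxMs timeout is omitted for brevity):

    loops, prev = 0, float('inf')
    while True:
        loops = 1 if loops == 0 else loops * 2   # 1 -> 2 -> 4 -> ... -> ~1B cap
        if loops >= (1 << 30):
            break                                # give up: bench never settled
        elapsed = time_bench(loops)              # hypothetical: elapsed wall ms
        curr = elapsed / loops                   # ms per loop this iteration
        if elapsed >= MIN_MS and abs(curr / prev - 1) < ERROR:
            break                                # converged within 1 +/- error
        prev = curr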
-
-int tool_main(int argc, char** argv);
-int tool_main(int argc, char** argv) {
- SetupCrashHandler();
- SkCommandLineFlags::Parse(argc, argv);
-#if SK_ENABLE_INST_COUNT
- if (FLAGS_leaks) {
- gPrintInstCount = true;
- }
-#endif
- SkAutoGraphics ag;
-
- // First, parse some flags.
- BenchLogger logger;
- if (FLAGS_logFile.count()) {
- logger.SetLogFile(FLAGS_logFile[0]);
- }
-
- LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
- MultiResultsWriter writer;
- writer.add(&logWriter);
-
- SkAutoTDelete<JSONResultsWriter> jsonWriter;
- if (FLAGS_outResultsFile.count()) {
- jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
- writer.add(jsonWriter.get());
- }
-
- // Instantiate after all the writers have been added to writer so that we
- // call close() before their destructors are called on the way out.
- CallEnd<MultiResultsWriter> ender(writer);
-
- const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
- SkTriState::State dither = SkTriState::kDefault;
- for (size_t i = 0; i < 3; i++) {
- if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
- dither = static_cast<SkTriState::State>(i);
- }
- }
-
- BenchMode benchMode = kNormal_BenchMode;
- for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
- if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
- benchMode = static_cast<BenchMode>(i);
- }
- }
-
- SkTDArray<int> configs;
- bool runDefaultConfigs = false;
- // Try user-given configs first.
- for (int i = 0; i < FLAGS_config.count(); i++) {
- for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
- if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
- *configs.append() = j;
- } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
- runDefaultConfigs = true;
- }
- }
- }
- // If there weren't any, fill in with defaults.
- if (runDefaultConfigs) {
- for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
- if (gConfigs[i].runByDefault) {
- *configs.append() = i;
- }
- }
- }
- // Filter out things we can't run.
- if (kNormal_BenchMode != benchMode) {
- // Non-rendering configs only run in normal mode
- for (int i = 0; i < configs.count(); ++i) {
- const Config& config = gConfigs[configs[i]];
- if (Benchmark::kNonRendering_Backend == config.backend) {
- configs.remove(i, 1);
- --i;
- }
- }
- }
- // Apply the gpu/cpu only flags
- for (int i = 0; i < configs.count(); ++i) {
- const Config& config = gConfigs[configs[i]];
- if (config.backend == Benchmark::kGPU_Backend) {
- if (!FLAGS_gpu) {
- configs.remove(i, 1);
- --i;
- }
- } else if (!FLAGS_cpu) {
- configs.remove(i, 1);
- --i;
- }
- }
-
-#if SK_SUPPORT_GPU
- GrGLStandard gpuAPI = kNone_GrGLStandard;
- if (1 == FLAGS_gpuAPI.count()) {
- if (FLAGS_gpuAPI.contains(kGpuAPINameGL)) {
- gpuAPI = kGL_GrGLStandard;
- } else if (FLAGS_gpuAPI.contains(kGpuAPINameGLES)) {
- gpuAPI = kGLES_GrGLStandard;
- } else {
- SkDebugf("Selected gpu API could not be used. Using the default.\n");
- }
- } else if (FLAGS_gpuAPI.count() > 1) {
- SkDebugf("Selected gpu API could not be used. Using the default.\n");
- }
-
- for (int i = 0; i < configs.count(); ++i) {
- const Config& config = gConfigs[configs[i]];
-
- if (Benchmark::kGPU_Backend == config.backend) {
- GrContext* context = gContextFactory.get(config.contextType, gpuAPI);
- if (NULL == context) {
- SkDebugf("GrContext could not be created for config %s. Config will be skipped.\n",
- config.name);
- configs.remove(i);
- --i;
- continue;
- }
- if (config.sampleCount > context->getMaxSampleCount()){
- SkDebugf(
- "Sample count (%d) for config %s is not supported. Config will be skipped.\n",
- config.sampleCount, config.name);
- configs.remove(i);
- --i;
- continue;
- }
- }
- }
-#endif
-
- // All flags should be parsed now. Report our settings.
- if (FLAGS_runOnce) {
- logger.logError("bench was run with --runOnce, so we're going to hide the times."
- " It's for your own good!\n");
- }
- writer.option("mode", FLAGS_mode[0]);
- writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
- writer.option("dither", SkTriState::Name[dither]);
-
- writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
- writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
- writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());
-
-#if defined(SK_BUILD_FOR_WIN32)
- writer.option("system", "WIN32");
-#elif defined(SK_BUILD_FOR_MAC)
- writer.option("system", "MAC");
-#elif defined(SK_BUILD_FOR_ANDROID)
- writer.option("system", "ANDROID");
-#elif defined(SK_BUILD_FOR_UNIX)
- writer.option("system", "UNIX");
-#else
- writer.option("system", "other");
-#endif
-
-#if defined(SK_DEBUG)
- writer.option("build", "DEBUG");
-#else
- writer.option("build", "RELEASE");
-#endif
-
- // Set texture cache limits if non-default.
- for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
-#if SK_SUPPORT_GPU
- const Config& config = gConfigs[i];
- if (Benchmark::kGPU_Backend != config.backend) {
- continue;
- }
- GrContext* context = gContextFactory.get(config.contextType, gpuAPI);
- if (NULL == context) {
- continue;
- }
-
- size_t bytes;
- int count;
- context->getResourceCacheLimits(&count, &bytes);
- if (-1 != FLAGS_gpuCacheBytes) {
- bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
- }
- if (-1 != FLAGS_gpuCacheCount) {
- count = FLAGS_gpuCacheCount;
- }
- context->setResourceCacheLimits(count, bytes);
-#endif
- }
-
- // Run each bench in each configuration it supports and we asked for.
- Iter iter;
- Benchmark* bench;
- while ((bench = iter.next()) != NULL) {
- SkAutoTUnref<Benchmark> benchUnref(bench);
- if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
- continue;
- }
-
- bench->setForceAlpha(alpha);
- bench->setDither(dither);
- bench->preDraw();
-
- bool loggedBenchName = false;
- for (int i = 0; i < configs.count(); ++i) {
- const int configIndex = configs[i];
- const Config& config = gConfigs[configIndex];
-
- if (!bench->isSuitableFor(config.backend)) {
- continue;
- }
-
- GrContext* context = NULL;
-#if SK_SUPPORT_GPU
- SkGLContextHelper* glContext = NULL;
- if (Benchmark::kGPU_Backend == config.backend) {
- context = gContextFactory.get(config.contextType, gpuAPI);
- if (NULL == context) {
- continue;
- }
- glContext = gContextFactory.getGLContext(config.contextType);
- }
-#endif
-
- SkAutoTUnref<SkCanvas> canvas;
- SkAutoTUnref<SkPicture> recordFrom;
- SkPictureRecorder recorderTo;
- const SkIPoint dim = bench->getSize();
-
- SkAutoTUnref<SkSurface> surface;
- if (Benchmark::kNonRendering_Backend != config.backend) {
- surface.reset(make_surface(config.fColorType,
- dim,
- config.backend,
- config.sampleCount,
- context));
- if (!surface.get()) {
- logger.logError(SkStringPrintf(
- "Device creation failure for config %s. Will skip.\n", config.name));
- continue;
- }
-
- switch(benchMode) {
- case kDeferredSilent_BenchMode:
- case kDeferred_BenchMode:
- canvas.reset(SkDeferredCanvas::Create(surface.get()));
- break;
- case kRecord_BenchMode:
- canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
- break;
- case kPictureRecord_BenchMode: {
- SkPictureRecorder recorderFrom;
- bench->draw(1, recorderFrom.beginRecording(dim.fX, dim.fY));
- recordFrom.reset(recorderFrom.endRecording());
- canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
- break;
- }
- case kNormal_BenchMode:
- canvas.reset(SkRef(surface->getCanvas()));
- break;
- default:
- SkASSERT(false);
- }
- }
-
- if (NULL != canvas) {
- canvas->clear(SK_ColorWHITE);
- if (FLAGS_clip) {
- perform_clip(canvas, dim.fX, dim.fY);
- }
- if (FLAGS_scale) {
- perform_scale(canvas, dim.fX, dim.fY);
- }
- if (FLAGS_rotate) {
- perform_rotate(canvas, dim.fX, dim.fY);
- }
- }
-
- if (!loggedBenchName) {
- loggedBenchName = true;
- writer.bench(bench->getName(), dim.fX, dim.fY);
- }
-
-#if SK_SUPPORT_GPU
- SkGLContextHelper* contextHelper = NULL;
- if (Benchmark::kGPU_Backend == config.backend) {
- contextHelper = gContextFactory.getGLContext(config.contextType);
- }
- Timer timer(contextHelper);
-#else
- Timer timer;
-#endif
-
- double previous = std::numeric_limits<double>::infinity();
- bool converged = false;
-
- // variables used to compute loopsPerFrame
- double frameIntervalTime = 0.0f;
- int frameIntervalTotalLoops = 0;
-
- bool frameIntervalComputed = false;
- int loopsPerFrame = 0;
- int loopsPerIter = 0;
- if (FLAGS_verbose) { SkDebugf("%s %s: ", bench->getName(), config.name); }
- if (!FLAGS_dryRun) {
- do {
- // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
- loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
- if (loopsPerIter >= (1<<30) || timer.fWall > FLAGS_maxMs) {
- // If you find it takes more than a billion loops to get up to 20ms of runtime,
- // you've got a computer clocked at several THz or have a broken benchmark. ;)
- // "1B ought to be enough for anybody."
- logger.logError(SkStringPrintf(
- "\nCan't get %s %s to converge in %dms (%d loops)",
- bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
- break;
- }
-
- if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
- // Clear the recorded commands so that they do not accumulate.
- canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
- }
-
- timer.start();
- // Inner loop that allows us to break the run into smaller
- // chunks (e.g. frames). This is especially useful for the GPU
- // as we can flush and/or swap buffers to keep the GPU from
- // queuing up too much work.
- for (int loopCount = loopsPerIter; loopCount > 0; ) {
- // Save and restore around each call to draw() to guarantee a pristine canvas.
- SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);
-
- int loops;
- if (frameIntervalComputed && loopCount > loopsPerFrame) {
- loops = loopsPerFrame;
- loopCount -= loopsPerFrame;
- } else {
- loops = loopCount;
- loopCount = 0;
- }
-
- if (benchMode == kPictureRecord_BenchMode) {
- recordFrom->draw(canvas);
- } else {
- bench->draw(loops, canvas);
- }
-
- if (kDeferredSilent_BenchMode == benchMode) {
- static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
- } else if (NULL != canvas) {
- canvas->flush();
- }
-
- #if SK_SUPPORT_GPU
- // swap drawing buffers on each frame to prevent the GPU
- // from queuing up too much work
- if (NULL != glContext) {
- glContext->swapBuffers();
- }
- #endif
- }
-
-
-
- // Stop truncated timers before GL calls complete, and stop the full timers after.
- timer.truncatedEnd();
- #if SK_SUPPORT_GPU
- if (NULL != glContext) {
- context->flush();
- SK_GL(*glContext, Finish());
- }
- #endif
- timer.end();
-
- // setup the frame interval for subsequent iterations
- if (!frameIntervalComputed) {
- frameIntervalTime += timer.fWall;
- frameIntervalTotalLoops += loopsPerIter;
- if (frameIntervalTime >= FLAGS_minMs) {
- frameIntervalComputed = true;
- loopsPerFrame =
- (int)(((double)frameIntervalTotalLoops / frameIntervalTime) * FLAGS_minMs);
- if (loopsPerFrame < 1) {
- loopsPerFrame = 1;
- }
- // SkDebugf(" %s has %d loops in %f ms (normalized to %d)\n",
- // bench->getName(), frameIntervalTotalLoops,
- // timer.fWall, loopsPerFrame);
- }
- }
-
- const double current = timer.fWall / loopsPerIter;
- if (FLAGS_verbose && current > previous) { SkDebugf("↑"); }
- if (FLAGS_verbose) { SkDebugf("%.3g ", current); }
- converged = HasConverged(previous, current, timer.fWall);
- previous = current;
- } while (!FLAGS_runOnce && !converged);
- }
- if (FLAGS_verbose) { SkDebugf("\n"); }
-
- if (!FLAGS_dryRun && FLAGS_outDir.count() && Benchmark::kNonRendering_Backend != config.backend) {
- SkAutoTUnref<SkImage> image(surface->newImageSnapshot());
- if (image.get()) {
- saveFile(bench->getName(), config.name, FLAGS_outDir[0],
- image);
- }
- }
-
- if (FLAGS_runOnce) {
- // Let's not mislead ourselves by looking at Debug build or single iteration bench times!
- continue;
- }
-
- // Normalize to ms per 1000 iterations.
- const double normalize = 1000.0 / loopsPerIter;
- const struct { char shortName; const char* longName; double ms; } times[] = {
- {'w', "msecs", normalize * timer.fWall},
- {'W', "Wmsecs", normalize * timer.fTruncatedWall},
- {'c', "cmsecs", normalize * timer.fCpu},
- {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
- {'g', "gmsecs", normalize * timer.fGpu},
- };
-
- writer.config(config.name);
- for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
- if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
- writer.timer(times[i].longName, times[i].ms);
- }
- }
- }
- }
-#if SK_SUPPORT_GPU
- gContextFactory.destroyContexts();
-#endif
- return 0;
-}
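
One detail worth calling out from the loop above: once roughly --minMs of work
has been timed, later iterations are split into "frames" of about that
duration so the GPU never queues too much work between flushes and buffer
swaps. The per-frame loop count reduces to this arithmetic (a sketch reusing
the code's variable names):

    # loops per ms measured so far, scaled back up to one --minMs-sized frame
    loopsPerFrame = max(1, int(float(frameIntervalTotalLoops)
                               / frameIntervalTime * FLAGS_minMs))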
-
-#if !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_NACL)
-int main(int argc, char * const argv[]) {
- return tool_main(argc, (char**) argv);
-}
-#endif