author    csmartdalton <csmartdalton@google.com>  2016-09-19 11:03:58 -0700
committer Commit bot <commit-bot@chromium.org>    2016-09-19 11:03:58 -0700
commit    4b5179b74c49498e5b2b7d94319476672170b61d (patch)
tree      5439f68e7c4b25e8166d55218722fccaede3ce6a /tools/skpbench
parent    e202bd8b71f6aa184c2c8ce6f653755de1331c88 (diff)
skpbench
skpbench is a benchmarking suite for skps that aims to generate 100%
repeatable results. The initial commit consists of three parts:

skpbench
  A minimalist program whose sole purpose is to open an skp file,
  benchmark it on a single config, and exit. No tiling, looping, or
  other fanciness is used; it just draws the skp whole into a
  size-matched render target and syncs the GPU after each draw.
  Limiting the entire process to a single config/skp pair helps to
  keep the results repeatable.

skpbench.py
  A wrapper to execute the skpbench binary with various configs and
  skps. It also monitors the output in order to filter out and re-run
  results with an unacceptable stddev. In the future this script will
  lock down and monitor clocks and temperatures.

parseskpbench.py
  A utility for parsing skpbench output into a spreadsheet.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2341823002
Review-Url: https://codereview.chromium.org/2341823002
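For orientation, a typical workflow with these tools might look like the
following sketch (the flag names come from the scripts in this change; the
output directory and skp paths are hypothetical):

    # Benchmark every skp in a directory on two configs, capturing the output.
    tools/skpbench/skpbench.py --path out/Release --config gpu,msaa16 ~/skps > results.txt

    # Summarize the results as csv, or open them in a spreadsheet.
    tools/skpbench/parseskpbench.py results.txt
    tools/skpbench/parseskpbench.py --open results.txt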
Diffstat (limited to 'tools/skpbench')
-rw-r--r--  tools/skpbench/__init__.py        4
-rw-r--r--  tools/skpbench/_benchresult.py   69
-rwxr-xr-x  tools/skpbench/parseskpbench.py 155
-rw-r--r--  tools/skpbench/skpbench.cpp     326
-rwxr-xr-x  tools/skpbench/skpbench.py      176
5 files changed, 730 insertions, 0 deletions
diff --git a/tools/skpbench/__init__.py b/tools/skpbench/__init__.py
new file mode 100644
index 0000000000..a040ad644b
--- /dev/null
+++ b/tools/skpbench/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2016 Google Inc.
+#
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/tools/skpbench/_benchresult.py b/tools/skpbench/_benchresult.py
new file mode 100644
index 0000000000..3969b552b7
--- /dev/null
+++ b/tools/skpbench/_benchresult.py
@@ -0,0 +1,69 @@
+# Copyright 2016 Google Inc.
+#
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''Parses an skpbench result from a line of output text.'''
+
+from __future__ import print_function
+import re
+import sys
+
+class BenchResult:
+  FLOAT_REGEX = r'[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?'
+ PATTERN = re.compile('^(?P<median_pad> *)'
+ '(?P<median>' + FLOAT_REGEX + ')'
+ '(?P<accum_pad> +)'
+ '(?P<accum>' + FLOAT_REGEX + ')'
+ '(?P<max_pad> +)'
+ '(?P<max>' + FLOAT_REGEX + ')'
+ '(?P<min_pad> +)'
+ '(?P<min>' + FLOAT_REGEX + ')'
+ '(?P<stddev_pad> +)'
+ '(?P<stddev>' + FLOAT_REGEX + '%)'
+ '(?P<metric_pad> +)'
+ '(?P<metric>ms|fps)'
+ '(?P<samples_pad> +)'
+                      r'(?P<samples>\d+)'
+ '(?P<sample_ms_pad> +)'
+                      r'(?P<sample_ms>\d+)'
+ '(?P<config_pad> +)'
+                      r'(?P<config>[^\s]+)'
+ '(?P<bench_pad> +)'
+                      r'(?P<bench>[^\s]+)$')
+
+ @classmethod
+ def match(cls, text):
+ match = cls.PATTERN.search(text)
+ return cls(match) if match else None
+
+ def __init__(self, match):
+ self.median = float(match.group('median'))
+ self.accum = float(match.group('accum'))
+ self.max = float(match.group('max'))
+ self.min = float(match.group('min'))
+ self.stddev = float(match.group('stddev')[:-1]) # Drop '%' sign.
+ self.metric = match.group('metric')
+ self.samples = int(match.group('samples'))
+ self.sample_ms = int(match.group('sample_ms'))
+ self.config = match.group('config')
+ self.bench = match.group('bench')
+ self._match = match
+
+ def get_string(self, name):
+ return self._match.group(name)
+
+ def print_values(self, config_suffix=None, outfile=sys.stdout):
+    if not config_suffix:
+ print(self._match.group(0), file=outfile)
+ else:
+ values = list()
+ for name in ['median', 'accum', 'max', 'min', 'stddev',
+ 'metric', 'samples', 'sample_ms', 'config']:
+ values.append(self.get_string(name + '_pad'))
+ values.append(self.get_string(name))
+ values.append(config_suffix)
+ bench_pad = self.get_string('bench_pad')
+ values.append(bench_pad[min(len(config_suffix), len(bench_pad) - 1):])
+ values.append(self.get_string('bench'))
+ print(''.join(values), file=outfile)
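For illustration, a minimal sketch of how this class consumes one line of
skpbench output (the numbers are invented; the column layout matches the
header that skpbench.cpp prints, shown later in this change):

    from _benchresult import BenchResult

    line = ('  4.123   4.236   4.789   3.912  1.230%    ms     '
            '101        50 gpu       desk_nytimes.skp')
    result = BenchResult.match(line)
    if result:
        print(result.median, result.metric, result.config, result.bench)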
diff --git a/tools/skpbench/parseskpbench.py b/tools/skpbench/parseskpbench.py
new file mode 100755
index 0000000000..21f46632df
--- /dev/null
+++ b/tools/skpbench/parseskpbench.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc.
+#
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+from _benchresult import BenchResult
+from argparse import ArgumentParser
+from datetime import datetime
+import collections
+import operator
+import os
+import sys
+import tempfile
+import urllib
+import urlparse
+import webbrowser
+
+__argparse = ArgumentParser(description="""
+
+Parses output files from skpbench.py into csv.
+
+This script can also be used to generate a Google sheet:
+
+(1) Install the "Office Editing for Docs, Sheets & Slides" Chrome extension:
+ https://chrome.google.com/webstore/detail/office-editing-for-docs-s/gbkeegbaiigmenfmjfclcdgdpimamgkj
+
+(2) Designate Chrome as the OS-wide default application for opening .csv files.
+
+(3) Run parseskpbench.py with the --open flag.
+
+""")
+
+__argparse.add_argument('-r', '--result',
+ choices=['median', 'accum', 'max', 'min'], default='median',
+ help='result to use for cell values')
+__argparse.add_argument('-f', '--force',
+ action='store_true', help='silently ignore warnings')
+__argparse.add_argument('-o', '--open',
+ action='store_true',
+ help='generate a temp file and open it (theoretically in a web browser)')
+__argparse.add_argument('-n', '--name',
+ default='skpbench_%s' % datetime.now().strftime('%Y-%m-%d_%H.%M.%S.csv'),
+ help='if using --open, a name for the temp file')
+__argparse.add_argument('sources',
+ nargs='+', help='source files with skpbench results ("-" for stdin)')
+
+FLAGS = __argparse.parse_args()
+
+
+class Parser:
+ def __init__(self):
+ self.configs = list() # use list to preserve the order configs appear in.
+ self.rows = collections.defaultdict(dict)
+ self.cols = collections.defaultdict(dict)
+ self.metric = None
+ self.samples = None
+ self.sample_ms = None
+
+ def parse_file(self, infile):
+ for line in infile:
+ match = BenchResult.match(line)
+ if not match:
+ continue
+ if self.metric is None:
+ self.metric = match.metric
+ elif match.metric != self.metric:
+ raise ValueError('results have mismatched metrics (%s and %s)' %
+ (self.metric, match.metric))
+ if self.samples is None:
+ self.samples = match.samples
+ elif not FLAGS.force and match.samples != self.samples:
+ raise ValueError('results have mismatched number of samples. '
+ '(use --force to ignore)')
+ if self.sample_ms is None:
+ self.sample_ms = match.sample_ms
+ elif not FLAGS.force and match.sample_ms != self.sample_ms:
+ raise ValueError('results have mismatched sampling times. '
+ '(use --force to ignore)')
+    if match.config not in self.configs:
+ self.configs.append(match.config)
+ self.rows[match.bench][match.config] = match.get_string(FLAGS.result)
+ self.cols[match.config][match.bench] = getattr(match, FLAGS.result)
+
+ def print_csv(self, outfile=sys.stdout):
+ print('%s_%s' % (FLAGS.result, self.metric), file=outfile)
+
+ # Write the header.
+ outfile.write('bench,')
+ for config in self.configs:
+ outfile.write('%s,' % config)
+ outfile.write('\n')
+
+ # Write the rows.
+ for bench, row in self.rows.items():
+ outfile.write('%s,' % bench)
+ for config in self.configs:
+ if config in row:
+ outfile.write('%s,' % row[config])
+ elif FLAGS.force:
+ outfile.write(',')
+ else:
+ raise ValueError('%s: missing value for %s. (use --force to ignore)' %
+ (bench, config))
+ outfile.write('\n')
+
+ # Add simple, literal averages.
+ if len(self.rows) > 1:
+ outfile.write('\n')
+ self.__print_computed_row('MEAN',
+ lambda col: reduce(operator.add, col.values()) / len(col),
+ outfile=outfile)
+ self.__print_computed_row('GEOMEAN',
+ lambda col: reduce(operator.mul, col.values()) ** (1.0 / len(col)),
+ outfile=outfile)
+
+ def __print_computed_row(self, name, func, outfile=sys.stdout):
+ outfile.write('%s,' % name)
+ for config in self.configs:
+ assert(len(self.cols[config]) == len(self.rows))
+ outfile.write('%.4g,' % func(self.cols[config]))
+ outfile.write('\n')
+
+
+def main():
+ parser = Parser()
+
+ # Parse the input files.
+ for src in FLAGS.sources:
+ if src == '-':
+ parser.parse_file(sys.stdin)
+ else:
+ with open(src, mode='r') as infile:
+ parser.parse_file(infile)
+
+ # Print the csv.
+ if not FLAGS.open:
+ parser.print_csv()
+ else:
+ dirname = tempfile.mkdtemp()
+ basename = FLAGS.name
+ if os.path.splitext(basename)[1] != '.csv':
+      basename += '.csv'
+ pathname = os.path.join(dirname, basename)
+ with open(pathname, mode='w') as tmpfile:
+ parser.print_csv(outfile=tmpfile)
+ fileuri = urlparse.urljoin('file:', urllib.pathname2url(pathname))
+ print('opening %s' % fileuri)
+ webbrowser.open(fileuri)
+
+
+if __name__ == '__main__':
+ main()
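For reference, the csv this script emits has the shape sketched below (the
values are invented; note that each row keeps a trailing comma, which
spreadsheet applications tolerate):

    median_ms
    bench,gpu,msaa16,
    desk_nytimes.skp,4.123,5.456,
    desk_amazon.skp,2.001,2.998,

    MEAN,3.062,4.227,
    GEOMEAN,2.872,4.044,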
diff --git a/tools/skpbench/skpbench.cpp b/tools/skpbench/skpbench.cpp
new file mode 100644
index 0000000000..afe44b5c7b
--- /dev/null
+++ b/tools/skpbench/skpbench.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrContextFactory.h"
+#include "SkCanvas.h"
+#include "SkOSFile.h"
+#include "SkPicture.h"
+#include "SkStream.h"
+#include "SkSurface.h"
+#include "SkSurfaceProps.h"
+#include "picture_utils.h"
+#include "flags/SkCommandLineFlags.h"
+#include "flags/SkCommonFlagsConfig.h"
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <cmath>
+#include <vector>
+
+/**
+ * This is a minimalist program whose sole purpose is to open an skp file, benchmark it on a single
+ * config, and exit. It is intended to be used through skpbench.py rather than invoked directly.
+ * Limiting the entire process to a single config/skp pair helps to keep the results repeatable.
+ *
+ * No tiling, looping, or other fanciness is used; it just draws the skp whole into a size-matched
+ * render target and syncs the GPU after each draw.
+ *
+ * The results consist of a fixed number of samples (--samples). A sample is defined as the
+ * number of frames rendered within a set amount of time (--sampleMs).
+ *
+ * Currently, only GPU configs are supported.
+ */
+
+DEFINE_int32(samples, 101, "number of samples to collect");
+DEFINE_int32(sampleMs, 50, "duration of each sample in milliseconds");
+DEFINE_bool(fps, false, "use fps instead of ms");
+DEFINE_string(skp, "", "path to a single .skp file to benchmark");
+DEFINE_string(png, "", "if set, save a .png proof to disk at this file location");
+DEFINE_int32(verbosity, 4, "level of verbosity (0=none to 5=debug)");
+DEFINE_bool(suppressHeader, false, "don't print a header row before the results");
+
+static const char* header =
+ " median accum max min stddev metric samples sample_ms config bench";
+
+static const char* resultFormat =
+ "%8.4g %8.4g %8.4g %8.4g %6.3g%% %-6s %7li %9i %-9s %s";
+
+struct Sample {
+ using clock = std::chrono::high_resolution_clock;
+
+ Sample() : fFrames(0), fDuration(0) {}
+ double seconds() const { return std::chrono::duration<double>(fDuration).count(); }
+ double ms() const { return std::chrono::duration<double, std::milli>(fDuration).count(); }
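+    // A sample's value: average frames per second, or average milliseconds per frame.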
+ double value() const { return FLAGS_fps ? fFrames / this->seconds() : this->ms() / fFrames; }
+ static const char* metric() { return FLAGS_fps ? "fps" : "ms"; }
+
+ int fFrames;
+ clock::duration fDuration;
+};
+
+enum class ExitErr {
+ kOk = 0,
+ kUsage = 64,
+ kData = 65,
+ kUnavailable = 69,
+ kIO = 74,
+ kSoftware = 70
+};
+
+static void draw_skp_and_flush(SkCanvas*, const SkPicture*);
+static SkPlatformGpuFence insert_verified_fence(const SkGpuFenceSync*);
+static void wait_fence_and_delete(const SkGpuFenceSync*, SkPlatformGpuFence);
+static bool mkdir_p(const SkString& name);
+static SkString join(const SkCommandLineFlags::StringArray&);
+static void exitf(ExitErr, const char* format, ...);
+
+static void run_benchmark(const SkGpuFenceSync* sync, SkCanvas* canvas, const SkPicture* skp,
+ std::vector<Sample>* samples) {
+ using clock = Sample::clock;
+ std::chrono::milliseconds sampleMs(FLAGS_sampleMs);
+
+ samples->clear();
+ samples->resize(FLAGS_samples);
+
+ // Prime the graphics pipe.
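+    // The fences keep the GPU exactly two frames behind the CPU: each benchmark iteration waits
+    // on the fence from two frames back rather than the most recent one, so the timings capture
+    // steady-state pipelined throughput instead of a full CPU/GPU sync per frame.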
+ SkPlatformGpuFence frameN_minus_2, frameN_minus_1;
+ {
+ draw_skp_and_flush(canvas, skp);
+ SkPlatformGpuFence frame0 = insert_verified_fence(sync);
+
+ draw_skp_and_flush(canvas, skp);
+ frameN_minus_2 = insert_verified_fence(sync);
+
+ draw_skp_and_flush(canvas, skp);
+ frameN_minus_1 = insert_verified_fence(sync);
+
+ wait_fence_and_delete(sync, frame0);
+ }
+
+ clock::time_point start = clock::now();
+
+ for (Sample& sample : *samples) {
+ clock::time_point end;
+ do {
+ draw_skp_and_flush(canvas, skp);
+
+ // Sync the GPU.
+ wait_fence_and_delete(sync, frameN_minus_2);
+ frameN_minus_2 = frameN_minus_1;
+ frameN_minus_1 = insert_verified_fence(sync);
+
+ end = clock::now();
+ sample.fDuration = end - start;
+ ++sample.fFrames;
+ } while (sample.fDuration < sampleMs);
+
+ if (FLAGS_verbosity >= 5) {
+ fprintf(stderr, "%.4g%s [ms=%.4g frames=%i]\n",
+ sample.value(), Sample::metric(), sample.ms(), sample.fFrames);
+ }
+
+ start = end;
+ }
+
+ sync->deleteFence(frameN_minus_2);
+ sync->deleteFence(frameN_minus_1);
+}
+
+void print_result(const std::vector<Sample>& samples, const char* config, const char* bench) {
+ if (0 == (samples.size() % 2)) {
+        exitf(ExitErr::kSoftware, "attempted to gather stats on an even number of samples");
+ }
+
+ Sample accum = Sample();
+ std::vector<double> values;
+ values.reserve(samples.size());
+ for (const Sample& sample : samples) {
+ accum.fFrames += sample.fFrames;
+ accum.fDuration += sample.fDuration;
+ values.push_back(sample.value());
+ }
+ std::sort(values.begin(), values.end());
+ const double median = values[values.size() / 2];
+
+ const double meanValue = accum.value();
+ double variance = 0;
+ for (const Sample& sample : samples) {
+ const double delta = sample.value() - meanValue;
+ variance += delta * delta;
+ }
+ variance /= samples.size();
+    // Technically, this is the relative standard deviation (stddev as a percentage of the mean).
+ const double stddev = 100/*%*/ * sqrt(variance) / meanValue;
+
+ printf(resultFormat, median, accum.value(), values.back(), values.front(), stddev,
+           Sample::metric(), (long)values.size(), FLAGS_sampleMs, config, bench);
+ printf("\n");
+ fflush(stdout);
+}
+
+int main(int argc, char** argv) {
+ SkCommandLineFlags::SetUsage("Use skpbench.py instead. "
+ "You usually don't want to use this program directly.");
+ SkCommandLineFlags::Parse(argc, argv);
+
+ if (!FLAGS_suppressHeader) {
+ printf("%s\n", header);
+ }
+ if (FLAGS_samples <= 0) {
+ exit(0); // This can be used to print the header and quit.
+ }
+ if (0 == FLAGS_samples % 2) {
+ fprintf(stderr, "WARNING: even number of samples requested (%i); "
+ "using %i so there can be a true median.\n",
+ FLAGS_samples, FLAGS_samples + 1);
+ ++FLAGS_samples;
+ }
+
+ // Parse the config.
+    const SkCommandLineConfigGpu* config = nullptr; // Initialized to silence a spurious warning.
+ SkCommandLineConfigArray configs;
+ ParseConfigs(FLAGS_config, &configs);
+ if (configs.count() != 1 || !(config = configs[0]->asConfigGpu())) {
+ exitf(ExitErr::kUsage, "invalid config %s; must specify one (and only one) GPU config",
+ join(FLAGS_config).c_str());
+ }
+
+ // Parse the skp.
+ if (FLAGS_skp.count() != 1) {
+ exitf(ExitErr::kUsage, "invalid skp \"%s\"; one (and only one) skp must be specified.",
+ join(FLAGS_skp).c_str());
+ }
+ const char* skpfile = FLAGS_skp[0];
+ std::unique_ptr<SkStream> skpstream(SkStream::MakeFromFile(skpfile));
+ if (!skpstream) {
+ exitf(ExitErr::kIO, "failed to open skp file %s", skpfile);
+ }
+ sk_sp<SkPicture> skp = SkPicture::MakeFromStream(skpstream.get());
+ if (!skp) {
+ exitf(ExitErr::kData, "failed to parse skp file %s", skpfile);
+ }
+ int width = SkTMin(SkScalarCeilToInt(skp->cullRect().width()), 2048),
+ height = SkTMin(SkScalarCeilToInt(skp->cullRect().height()), 2048);
+ if (FLAGS_verbosity >= 2 &&
+ (width != skp->cullRect().width() || height != skp->cullRect().height())) {
+ fprintf(stderr, "NOTE: %s is too large (%ix%i); cropping to %ix%i.\n",
+ skpfile, SkScalarCeilToInt(skp->cullRect().width()),
+ SkScalarCeilToInt(skp->cullRect().height()), width, height);
+ }
+
+ // Create a context.
+ sk_gpu_test::GrContextFactory factory;
+ sk_gpu_test::ContextInfo ctxInfo =
+ factory.getContextInfo(config->getContextType(), config->getContextOptions());
+ GrContext* ctx = ctxInfo.grContext();
+ if (!ctx) {
+ exitf(ExitErr::kUnavailable, "failed to create context for config %s",
+ config->getTag().c_str());
+ }
+ if (ctx->caps()->maxRenderTargetSize() < SkTMax(width, height)) {
+ exitf(ExitErr::kUnavailable, "render target size %ix%i not supported by platform (max: %i)",
+ width, height, ctx->caps()->maxRenderTargetSize());
+ }
+ if (ctx->caps()->maxSampleCount() < config->getSamples()) {
+ exitf(ExitErr::kUnavailable, "sample count %i not supported by platform (max: %i)",
+ config->getSamples(), ctx->caps()->maxSampleCount());
+ }
+ sk_gpu_test::TestContext* testCtx = ctxInfo.testContext();
+ if (!testCtx) {
+ exitf(ExitErr::kSoftware, "testContext is null");
+ }
+ if (!testCtx->fenceSyncSupport()) {
+ exitf(ExitErr::kUnavailable, "GPU does not support fence sync");
+ }
+
+ // Create a render target.
+ SkImageInfo info = SkImageInfo::Make(width, height, config->getColorType(),
+ kPremul_SkAlphaType, sk_ref_sp(config->getColorSpace()));
+ uint32_t flags = config->getUseDIText() ? SkSurfaceProps::kUseDeviceIndependentFonts_Flag : 0;
+ SkSurfaceProps props(flags, SkSurfaceProps::kLegacyFontHost_InitType);
+ sk_sp<SkSurface> surface =
+ SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo, info, config->getSamples(), &props);
+ if (!surface) {
+ exitf(ExitErr::kUnavailable, "failed to create %ix%i render target for config %s",
+ width, height, config->getTag().c_str());
+ }
+
+ // Run the benchmark.
+ std::vector<Sample> samples;
+ SkCanvas* canvas = surface->getCanvas();
+ canvas->translate(-skp->cullRect().x(), -skp->cullRect().y());
+ run_benchmark(testCtx->fenceSync(), canvas, skp.get(), &samples);
+ print_result(samples, config->getTag().c_str(), SkOSPath::Basename(skpfile).c_str());
+
+ // Save a proof (if one was requested).
+ if (!FLAGS_png.isEmpty()) {
+ SkBitmap bmp;
+ bmp.setInfo(info);
+ if (!surface->getCanvas()->readPixels(&bmp, 0, 0)) {
+ exitf(ExitErr::kUnavailable, "failed to read canvas pixels for png");
+ }
+ const SkString &dirname = SkOSPath::Dirname(FLAGS_png[0]),
+ &basename = SkOSPath::Basename(FLAGS_png[0]);
+ if (!mkdir_p(dirname)) {
+ exitf(ExitErr::kIO, "failed to create directory \"%s\" for png", dirname.c_str());
+ }
+ if (!sk_tools::write_bitmap_to_disk(bmp, dirname, nullptr, basename)) {
+ exitf(ExitErr::kIO, "failed to save png to \"%s\"", FLAGS_png[0]);
+ }
+ }
+
+ exit(0);
+}
+
+static void draw_skp_and_flush(SkCanvas* canvas, const SkPicture* skp) {
+ canvas->drawPicture(skp);
+ canvas->flush();
+}
+
+static SkPlatformGpuFence insert_verified_fence(const SkGpuFenceSync* sync) {
+ SkPlatformGpuFence fence = sync->insertFence();
+ if (kInvalidPlatformGpuFence == fence) {
+ exitf(ExitErr::kUnavailable, "failed to insert fence");
+ }
+ return fence;
+}
+
+static void wait_fence_and_delete(const SkGpuFenceSync* sync, SkPlatformGpuFence fence) {
+ if (kInvalidPlatformGpuFence == fence) {
+ exitf(ExitErr::kSoftware, "attempted to wait on invalid fence");
+ }
+ if (!sync->waitFence(fence)) {
+ exitf(ExitErr::kUnavailable, "failed to wait for fence");
+ }
+ sync->deleteFence(fence);
+}
+
+static bool mkdir_p(const SkString& dirname) {
+ if (dirname.isEmpty()) {
+ return true;
+ }
+ return mkdir_p(SkOSPath::Dirname(dirname.c_str())) && sk_mkdir(dirname.c_str());
+}
+
+static SkString join(const SkCommandLineFlags::StringArray& stringArray) {
+ SkString joined;
+    for (int i = 0; i < stringArray.count(); ++i) {
+        joined.appendf(i ? " %s" : "%s", stringArray[i]);
+ }
+ return joined;
+}
+
+static void exitf(ExitErr err, const char* format, ...) {
+ fprintf(stderr, ExitErr::kSoftware == err ? "INTERNAL ERROR: " : "ERROR: ");
+ va_list args;
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+ fprintf(stderr, ExitErr::kSoftware == err ? "; this should never happen.\n": ".\n");
+ exit((int)err);
+}
diff --git a/tools/skpbench/skpbench.py b/tools/skpbench/skpbench.py
new file mode 100755
index 0000000000..f547003573
--- /dev/null
+++ b/tools/skpbench/skpbench.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc.
+#
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+from _benchresult import BenchResult
+from argparse import ArgumentParser
+from os import path
+try:
+  from queue import Queue  # Python 3
+except ImportError:
+  from Queue import Queue  # Python 2
+from threading import Thread
+import collections
+import glob
+import math
+import re
+import subprocess
+import sys
+
+__argparse = ArgumentParser(description="""
+
+Executes the skpbench binary with various configs and skps.
+
+Also monitors the output in order to filter out and re-run results that have an
+unacceptable stddev.
+
+""")
+
+__argparse.add_argument('-p', '--path',
+ help='directory to execute ./skpbench from')
+__argparse.add_argument('-m', '--max-stddev',
+ type=float, default=4,
+ help='initial max allowable relative standard deviation')
+__argparse.add_argument('-x', '--suffix',
+ help='suffix to append on config (e.g. "_before", "_after")')
+__argparse.add_argument('-w','--write-path',
+ help='directory to save .png proofs to disk.')
+__argparse.add_argument('-v','--verbosity',
+ type=int, default=0, help='level of verbosity (0=none to 5=debug)')
+__argparse.add_argument('-n', '--samples',
+ type=int, help='number of samples to collect for each bench')
+__argparse.add_argument('-d', '--sample-ms',
+  type=int, help='duration of each sample in milliseconds')
+__argparse.add_argument('--fps',
+ action='store_true', help='use fps instead of ms')
+__argparse.add_argument('-c', '--config',
+ default='gpu', help='comma- or space-separated list of GPU configs')
+__argparse.add_argument('skps',
+ nargs='+',
+ help='.skp files or directories to expand for .skp files')
+
+FLAGS = __argparse.parse_args()
+
+
+class StddevException(Exception):
+ pass
+
+class Message:
+  READLINE = 0
+ EXIT = 1
+ def __init__(self, message, value=None):
+ self.message = message
+ self.value = value
+
+class SKPBench(Thread):
+ ARGV = ['skpbench', '--verbosity', str(FLAGS.verbosity)]
+ if FLAGS.path:
+ ARGV[0] = path.join(FLAGS.path, ARGV[0])
+ if FLAGS.samples:
+ ARGV.extend(['--samples', str(FLAGS.samples)])
+ if FLAGS.sample_ms:
+ ARGV.extend(['--sampleMs', str(FLAGS.sample_ms)])
+ if FLAGS.fps:
+ ARGV.extend(['--fps', 'true'])
+
+ @classmethod
+ def print_header(cls):
+ subprocess.call(cls.ARGV + ['--samples', '0'])
+
+ def __init__(self, skp, config, max_stddev, best_result=None):
+ self.skp = skp
+ self.config = config
+ self.max_stddev = max_stddev
+ self.best_result = best_result
+ self._queue = Queue()
+ Thread.__init__(self)
+
+ def execute(self):
+ self.start()
+ while True:
+ message = self._queue.get()
+ if message.message == Message.READLINE:
+ result = BenchResult.match(message.value)
+ if result:
+ self.__process_result(result)
+ else:
+ print(message.value)
+ sys.stdout.flush()
+ continue
+ if message.message == Message.EXIT:
+ self.join()
+ break
+
+ def __process_result(self, result):
+ if not self.best_result or result.stddev <= self.best_result.stddev:
+ self.best_result = result
+ elif FLAGS.verbosity >= 1:
+ print('NOTE: reusing previous result for %s/%s with lower stddev '
+ '(%s%% instead of %s%%).' %
+ (result.config, result.bench, self.best_result.stddev,
+ result.stddev), file=sys.stderr)
+ if self.max_stddev and self.best_result.stddev > self.max_stddev:
+ raise StddevException()
+ self.best_result.print_values(config_suffix=FLAGS.suffix)
+
+ def run(self):
+ """Called on the background thread.
+
+ Launches and reads output from an skpbench process.
+
+ """
+ commandline = self.ARGV + ['--config', self.config,
+ '--skp', self.skp,
+ '--suppressHeader', 'true']
+    if FLAGS.write_path:
+ pngfile = path.join(FLAGS.write_path, self.config,
+ path.basename(self.skp) + '.png')
+ commandline.extend(['--png', pngfile])
+    if FLAGS.verbosity >= 3:
+ print(' '.join(commandline), file=sys.stderr)
+ proc = subprocess.Popen(commandline, stdout=subprocess.PIPE)
+ for line in iter(proc.stdout.readline, b''):
+ self._queue.put(Message(Message.READLINE, line.decode('utf-8').rstrip()))
+ proc.wait()
+ self._queue.put(Message(Message.EXIT, proc.returncode))
+
+
+def main():
+ SKPBench.print_header()
+
+  # Split on ',' or ' ', but not when the delimiter is nested inside parens (e.g. gpu(a=b,c=d)).
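+  # e.g. 'gpu(a=b,c=d) msaa16,gl' -> ['gpu(a=b,c=d)', 'msaa16', 'gl'].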
+ DELIMITER = r'[, ](?!(?:[^(]*\([^)]*\))*[^()]*\))'
+ configs = re.split(DELIMITER, FLAGS.config)
+
+ skps = list()
+ for skp in FLAGS.skps:
+    if path.isdir(skp):
+ skps.extend(glob.iglob(path.join(skp, '*.skp')))
+ else:
+ skps.append(skp)
+
+ benches = collections.deque([(skp, config, FLAGS.max_stddev)
+ for skp in skps
+ for config in configs])
+ while benches:
+ benchargs = benches.popleft()
+ skpbench = SKPBench(*benchargs)
+ try:
+ skpbench.execute()
+
+ except StddevException:
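+      # Relax the threshold so the allowed variance doubles on each retry (stddev grows by sqrt(2)).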
+ retry_max_stddev = skpbench.max_stddev * math.sqrt(2)
+ if FLAGS.verbosity >= 1:
+ print('NOTE: stddev too high for %s/%s (%s%%; max=%.2f%%). '
+ 'Re-queuing with max=%.2f%%.' %
+ (skpbench.best_result.config, skpbench.best_result.bench,
+ skpbench.best_result.stddev, skpbench.max_stddev,
+ retry_max_stddev),
+ file=sys.stderr)
+ benches.append((skpbench.skp, skpbench.config, retry_max_stddev,
+ skpbench.best_result))
+
+
+if __name__ == '__main__':
+ main()