Diffstat (limited to 'tools/profiling')
-rw-r--r--  tools/profiling/microbenchmarks/bm_diff/bm_constants.py  |   5
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_diff.py       |  29
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_speedup.py    |  23
-rw-r--r--  tools/profiling/microbenchmarks/bm_json.py               |   2
-rwxr-xr-x  tools/profiling/qps/qps_diff.py                          | 169
-rw-r--r--  tools/profiling/qps/qps_scenarios.py                     |  19
6 files changed, 218 insertions, 29 deletions
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 4cd65867c3..ad79a0a197 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -26,5 +26,6 @@ _AVAILABLE_BENCHMARK_TESTS = [
_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
'allocs_per_iteration', 'writes_per_iteration',
'atm_cas_per_iteration', 'atm_add_per_iteration',
- 'nows_per_iteration', 'cli_transport_stalls', 'cli_stream_stalls',
- 'svr_transport_stalls', 'svr_stream_stalls',)
+ 'nows_per_iteration', 'cli_transport_stalls_per_iteration',
+ 'cli_stream_stalls_per_iteration', 'svr_transport_stalls_per_iteration',
+ 'svr_stream_stalls_per_iteration',)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index 73abf90ff5..809817a1a8 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -108,9 +108,10 @@ class Benchmark:
mdn_diff = abs(_median(new) - _median(old))
_maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
(f, new_name, new, old_name, old, mdn_diff))
- s = bm_speedup.speedup(new, old)
- if abs(s) > 3 and mdn_diff > 0.5:
- self.final[f] = '%+d%%' % s
+ s = bm_speedup.speedup(new, old, 1e-5)
+ if abs(s) > 3:
+ if mdn_diff > 0.5 or 'trickle' in f:
+ self.final[f] = '%+d%%' % s
return self.final.keys()
def skip(self):
@@ -172,18 +173,16 @@ def diff(bms, loops, track, old, new, counters):
js_new_ctr = None
js_old_ctr = None
- if js_new_ctr:
- for row in bm_json.expand_json(js_new_ctr, js_new_opt):
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'):
- continue
- benchmarks[name].add_sample(track, row, True)
- if js_old_ctr:
- for row in bm_json.expand_json(js_old_ctr, js_old_opt):
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'):
- continue
- benchmarks[name].add_sample(track, row, False)
+ for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'):
+ continue
+ benchmarks[name].add_sample(track, row, True)
+ for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'):
+ continue
+ benchmarks[name].add_sample(track, row, False)
really_interesting = set()
for name, bm in benchmarks.items():
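
For context, here is a minimal illustration of the relaxed reporting rule introduced above; the field name and numbers are hypothetical, not taken from a real run:

def _is_reported(field, speedup_pct, mdn_diff):
    # Mirrors the new condition: a field is reported when the speedup is
    # statistically significant and either the median shift is large enough
    # or the field belongs to a trickle benchmark.
    return abs(speedup_pct) > 3 and (mdn_diff > 0.5 or 'trickle' in field)

print(_is_reported('cpu_time', 5, 0.1))      # False: median shift too small
print(_is_reported('cpu_time', 5, 0.9))      # True
print(_is_reported('trickle_size', 5, 0.0))  # True: trickle skips the median check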
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
index 3d126efa62..63e691af02 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -17,8 +17,7 @@
from scipy import stats
import math
-_THRESHOLD = 1e-10
-
+_DEFAULT_THRESHOLD = 1e-10
def scale(a, mul):
return [x * mul for x in a]
@@ -28,32 +27,32 @@ def cmp(a, b):
return stats.ttest_ind(a, b)
-def speedup(new, old):
+def speedup(new, old, threshold = _DEFAULT_THRESHOLD):
if (len(set(new))) == 1 and new == old: return 0
s0, p0 = cmp(new, old)
if math.isnan(p0): return 0
if s0 == 0: return 0
- if p0 > _THRESHOLD: return 0
+ if p0 > threshold: return 0
if s0 < 0:
pct = 1
- while pct < 101:
+ while pct < 100:
sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
if sp > 0: break
- if pp > _THRESHOLD: break
+ if pp > threshold: break
pct += 1
return -(pct - 1)
else:
pct = 1
- while pct < 100000:
+ while pct < 10000:
sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
if sp < 0: break
- if pp > _THRESHOLD: break
+ if pp > threshold: break
pct += 1
return pct - 1
if __name__ == "__main__":
- new = [1.0, 1.0, 1.0, 1.0]
- old = [2.0, 2.0, 2.0, 2.0]
- print speedup(new, old)
- print speedup(old, new)
+ new = [0.0, 0.0, 0.0, 0.0]
+ old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
+ print speedup(new, old, 1e-5)
+ print speedup(old, new, 1e-5)
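
A rough usage sketch of the new threshold parameter (the sample timings below are illustrative, and bm_speedup must be importable, i.e. scipy installed and the bm_diff directory on the path):

from bm_speedup import speedup

new = [1.0, 1.01, 0.99, 1.0]  # hypothetical per-loop timings, new build
old = [2.0, 2.02, 1.98, 2.0]  # hypothetical per-loop timings, old build

# The strict default threshold (1e-10) demands near-certainty from the
# t-test; callers such as bm_diff.py now pass a looser 1e-5 so smaller but
# still significant shifts get reported.
print(speedup(new, old, 1e-5))  # negative result: new is roughly half of old
print(speedup(old, new, 1e-5))  # positive result: old is roughly double new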
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index 062611f1c7..f6082fe7b4 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -167,6 +167,8 @@ def parse_name(name):
return out
def expand_json(js, js2 = None):
+ if not js and not js2: raise StopIteration()
+ if not js: js = js2
for bm in js['benchmarks']:
if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
context = js['context']
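
A small sketch of how the new guards behave; it assumes bm_json is importable, and the counters-less case matches js_new_ctr = None in bm_diff.py above:

import bm_json

# With both inputs missing, the generator yields nothing: the StopIteration
# raised by the guard simply ends iteration (Python 2 generator semantics).
assert list(bm_json.expand_json(None, None)) == []

# With only the counters JSON missing, js2 stands in for js, so a
# hypothetical parsed benchmark JSON js_opt is still expanded:
#   rows = list(bm_json.expand_json(None, js_opt))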
diff --git a/tools/profiling/qps/qps_diff.py b/tools/profiling/qps/qps_diff.py
new file mode 100755
index 0000000000..0654f45666
--- /dev/null
+++ b/tools/profiling/qps/qps_diff.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python2.7
+#
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Computes the diff between two qps runs and outputs significant results """
+
+import argparse
+import json
+import multiprocessing
+import os
+import qps_scenarios
+import shutil
+import subprocess
+import sys
+import tabulate
+
+sys.path.append(
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', 'microbenchmarks', 'bm_diff'))
+import bm_speedup
+
+sys.path.append(
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+import comment_on_pr
+
+
+def _args():
+ argp = argparse.ArgumentParser(
+ description='Perform diff on QPS Driver')
+ argp.add_argument(
+ '-d',
+ '--diff_base',
+ type=str,
+ help='Commit or branch to compare the current one to')
+ argp.add_argument(
+ '-l',
+ '--loops',
+ type=int,
+ default=4,
+ help='Number of loops for each benchmark. More loops cut down on noise'
+ )
+ argp.add_argument(
+ '-j',
+ '--jobs',
+ type=int,
+ default=multiprocessing.cpu_count(),
+ help='Number of CPUs to use')
+ args = argp.parse_args()
+ assert args.diff_base, "diff_base must be set"
+ return args
+
+
+def _make_cmd(jobs):
+ return ['make', '-j', '%d' % jobs, 'qps_json_driver', 'qps_worker']
+
+
+def build(name, jobs):
+ shutil.rmtree('qps_diff_%s' % name, ignore_errors=True)
+ subprocess.check_call(['git', 'submodule', 'update'])
+ try:
+ subprocess.check_call(_make_cmd(jobs))
+ except subprocess.CalledProcessError, e:
+ subprocess.check_call(['make', 'clean'])
+ subprocess.check_call(_make_cmd(jobs))
+ os.rename('bins', 'qps_diff_%s' % name)
+
+
+def _run_cmd(name, scenario, fname):
+ return ['qps_diff_%s/opt/qps_json_driver' % name, '--scenarios_json', scenario, '--json_file_out', fname]
+
+
+def run(name, scenarios, loops):
+ for sn in scenarios:
+ for i in range(0, loops):
+ fname = "%s.%s.%d.json" % (sn, name, i)
+ subprocess.check_call(_run_cmd(name, scenarios[sn], fname))
+
+
+def _load_qps(fname):
+ try:
+ with open(fname) as f:
+ return json.loads(f.read())['qps']
+ except IOError, e:
+ print("IOError occurred reading file: %s" % fname)
+ return None
+ except ValueError, e:
+ print("ValueError occurred reading file: %s" % fname)
+ return None
+
+
+def _median(ary):
+ assert (len(ary))
+ ary = sorted(ary)
+ n = len(ary)
+ if n % 2 == 0:
+ return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
+ else:
+ return ary[n / 2]
+
+
+def diff(scenarios, loops, old, new):
+ old_data = {}
+ new_data = {}
+
+ # collect data
+ for sn in scenarios:
+ old_data[sn] = []
+ new_data[sn] = []
+ for i in range(loops):
+ old_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, old, i)))
+ new_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, new, i)))
+
+ # crunch data
+ headers = ['Benchmark', 'qps']
+ rows = []
+ for sn in scenarios:
+ mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
+ print('%s: %s=%r %s=%r mdn_diff=%r' % (sn, new, new_data[sn], old, old_data[sn], mdn_diff))
+ s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
+ if abs(s) > 3 and mdn_diff > 0.5:
+ rows.append([sn, '%+d%%' % s])
+
+ if rows:
+ return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+ else:
+ return None
+
+
+def main(args):
+ build('new', args.jobs)
+
+ if args.diff_base:
+ where_am_i = subprocess.check_output(
+ ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+ subprocess.check_call(['git', 'checkout', args.diff_base])
+ try:
+ build('old', args.jobs)
+ finally:
+ subprocess.check_call(['git', 'checkout', where_am_i])
+ subprocess.check_call(['git', 'submodule', 'update'])
+
+ run('new', qps_scenarios._SCENARIOS, args.loops)
+ run('old', qps_scenarios._SCENARIOS, args.loops)
+
+ diff_output = diff(qps_scenarios._SCENARIOS, args.loops, 'old', 'new')
+
+ if diff_output:
+ text = '[qps] Performance differences noted:\n%s' % diff_output
+ else:
+ text = '[qps] No significant performance differences'
+ print('%s' % text)
+ comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+
+
+if __name__ == '__main__':
+ args = _args()
+ main(args)
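
As a rough invocation sketch (the branch name and job count are example values, not from the source), the script could be driven like this from the gRPC repository root:

import subprocess

# Flags mirror _args() above; --diff_base is required.
subprocess.check_call([
    'tools/profiling/qps/qps_diff.py',
    '--diff_base', 'origin/master',  # example base to compare against
    '--loops', '4',                  # runs per scenario (the default)
    '--jobs', '8',                   # example build parallelism
])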
diff --git a/tools/profiling/qps/qps_scenarios.py b/tools/profiling/qps/qps_scenarios.py
new file mode 100644
index 0000000000..4fbbdefc4d
--- /dev/null
+++ b/tools/profiling/qps/qps_scenarios.py
@@ -0,0 +1,19 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" QPS Scenarios to run """
+
+_SCENARIOS = {
+ 'large-message-throughput': '{"scenarios":[{"name":"large-message-throughput", "spawn_local_worker_count": -2, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 1, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 1, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 1048576, "req_size": 1048576}}, "client_channels": 1, "async_client_threads": 1, "outstanding_rpcs_per_channel": 1, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}',
+ 'multi-channel-64-KiB': '{"scenarios":[{"name":"multi-channel-64-KiB", "spawn_local_worker_count": -3, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 31, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 2, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 65536, "req_size": 65536}}, "client_channels": 32, "async_client_threads": 31, "outstanding_rpcs_per_channel": 100, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}'
+}
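
For reference, each scenario value is a JSON document passed verbatim to qps_json_driver via --scenarios_json (see _run_cmd in qps_diff.py above), so it can also be inspected directly; a minimal sketch, assuming qps_scenarios is importable:

import json
from qps_scenarios import _SCENARIOS

scenario = json.loads(_SCENARIOS['large-message-throughput'])['scenarios'][0]
params = scenario['client_config']['payload_config']['simple_params']
print(scenario['benchmark_seconds'])  # 270
print(params['req_size'])             # 1048576 (1 MiB requests)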