about summary refs log tree commit diff homepage
path: root/tools/profiling/microbenchmarks
diff options
context:
space:
mode:
authorGravatar Craig Tiller <ctiller@google.com>2017-04-11 14:34:20 -0700
committerGravatar Craig Tiller <ctiller@google.com>2017-04-11 14:34:20 -0700
commit763667ce29f3fdcef1134fd38b490f3399ee54f9 (patch)
tree0d7c10d3f2f955c0c461fc0ccc542ef2dc9720c4 /tools/profiling/microbenchmarks
parent9314361096b13be05603fd60f6ef055f7de6468a (diff)
parent70dbfd587cecc8f72e2258161f5f145fd5fd4d35 (diff)
Merge branch 'foo' of github.com:ctiller/grpc into foo
Diffstat (limited to 'tools/profiling/microbenchmarks')
-rwxr-xr-xtools/profiling/microbenchmarks/bm_diff.py40
-rw-r--r--tools/profiling/microbenchmarks/bm_json.py1
-rw-r--r--tools/profiling/microbenchmarks/speedup.py2
3 files changed, 18 insertions, 25 deletions
diff --git a/tools/profiling/microbenchmarks/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff.py
index 5c8b93d471..e4ef8df91c 100755
--- a/tools/profiling/microbenchmarks/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff.py
@@ -45,6 +45,16 @@ import jobset
import itertools
import speedup
+_INTERESTING = (
+ 'cpu_time',
+ 'real_time',
+ 'locks_per_iteration',
+ 'allocs_per_iteration',
+ 'writes_per_iteration',
+ 'atm_cas_per_iteration',
+ 'atm_add_per_iteration',
+)
+
def changed_ratio(n, o):
if float(o) <= .0001: o = 0
if float(n) <= .0001: n = 0
@@ -63,26 +73,6 @@ def median(ary):
def min_change(pct):
return lambda n, o: abs(changed_ratio(n,o)) > pct/100.0
-nanos = {
- 'abs_diff': 5,
- 'pct_diff': 10,
-}
-counter = {
- 'abs_diff': 0.5,
- 'pct_diff': 10,
-}
-
-_INTERESTING = {
- 'cpu_time': nanos,
- 'real_time': nanos,
- 'locks_per_iteration': counter,
- 'allocs_per_iteration': counter,
- 'writes_per_iteration': counter,
- 'atm_cas_per_iteration': counter,
- 'atm_add_per_iteration': counter,
-}
-
-
_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
'bm_fullstack_streaming_ping_pong',
'bm_fullstack_streaming_pump',
@@ -98,9 +88,9 @@ _AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
argp.add_argument('-t', '--track',
- choices=sorted(_INTERESTING.keys()),
+ choices=sorted(_INTERESTING),
nargs='+',
- default=sorted(_INTERESTING.keys()),
+ default=sorted(_INTERESTING),
help='Which metrics to track')
argp.add_argument('-b', '--benchmarks', nargs='+', choices=_AVAILABLE_BENCHMARK_TESTS, default=['bm_cq'])
argp.add_argument('-d', '--diff_base', type=str)
@@ -181,9 +171,11 @@ class Benchmark:
new = self.samples[True][f]
old = self.samples[False][f]
if not new or not old: continue
+ mdn_diff = abs(median(new) - median(old))
+ print '%s: new=%r old=%r mdn_diff=%r' % (f, new, old, mdn_diff)
s = speedup.speedup(new, old)
- if s:
- self.final[f] = '%d%%' % s
+ if s and mdn_diff > 0.5:
+ self.final[f] = '%+d%%' % s
return self.final.keys()
def skip(self):
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index e885444f41..917269823d 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -203,4 +203,5 @@ def expand_json(js, js2 = None):
row['real_time'] = bm2['real_time']
row['iterations'] = bm2['iterations']
bm2['already_used'] = True
+ break
yield row
diff --git a/tools/profiling/microbenchmarks/speedup.py b/tools/profiling/microbenchmarks/speedup.py
index ff11453960..35d392a57d 100644
--- a/tools/profiling/microbenchmarks/speedup.py
+++ b/tools/profiling/microbenchmarks/speedup.py
@@ -30,7 +30,7 @@
from scipy import stats
import math
-_THRESHOLD = 0.01
+_THRESHOLD = 0.001
def scale(a, mul):
return [x*mul for x in a]