Diffstat:
 -rwxr-xr-x  tools/jenkins/run_trickle_diff.sh                        |  2
 -rw-r--r--  tools/profiling/microbenchmarks/bm_diff/bm_constants.py  |  5
 -rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_diff.py       | 29
 -rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_speedup.py    | 23
 -rw-r--r--  tools/profiling/microbenchmarks/bm_json.py               |  2
 5 files changed, 31 insertions(+), 30 deletions(-)
diff --git a/tools/jenkins/run_trickle_diff.sh b/tools/jenkins/run_trickle_diff.sh
index da905d0249..47dd8b44d6 100755
--- a/tools/jenkins/run_trickle_diff.sh
+++ b/tools/jenkins/run_trickle_diff.sh
@@ -20,4 +20,4 @@ set -ex
cd $(dirname $0)/../..
tools/run_tests/start_port_server.py
-tools/profiling/microbenchmarks/bm_diff/bm_main.py -d origin/$ghprbTargetBranch -b bm_fullstack_trickle -l 4 -t cli_transport_stalls cli_stream_stalls svr_transport_stalls svr_stream_stalls --no-counters --pr_comment_name trickle
+tools/profiling/microbenchmarks/bm_diff/bm_main.py -d origin/$ghprbTargetBranch -b bm_fullstack_trickle -l 4 -t cli_transport_stalls_per_iteration cli_stream_stalls_per_iteration svr_transport_stalls_per_iteration svr_stream_stalls_per_iteration --no-counters --pr_comment_name trickle
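The renamed stall metrics above are consumed by bm_main.py's -t flag as a space-separated list. A minimal sketch of how such a multi-valued flag is commonly declared with argparse; the parser details here are an assumption for illustration, not the actual bm_main.py source:

# Hypothetical sketch: a multi-valued -t/--track flag like the one used
# above. Not the actual bm_main.py implementation.
import argparse

parser = argparse.ArgumentParser(description='benchmark diff driver (sketch)')
parser.add_argument(
    '-t', '--track',
    nargs='+',
    default=[],
    help='metrics to track, e.g. cli_transport_stalls_per_iteration')
args = parser.parse_args([
    '-t', 'cli_transport_stalls_per_iteration',
    'svr_stream_stalls_per_iteration',
])
print(args.track)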
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 4cd65867c3..ad79a0a197 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -26,5 +26,6 @@ _AVAILABLE_BENCHMARK_TESTS = [
_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
'allocs_per_iteration', 'writes_per_iteration',
'atm_cas_per_iteration', 'atm_add_per_iteration',
- 'nows_per_iteration', 'cli_transport_stalls', 'cli_stream_stalls',
- 'svr_transport_stalls', 'svr_stream_stalls',)
+ 'nows_per_iteration', 'cli_transport_stalls_per_iteration',
+ 'cli_stream_stalls_per_iteration', 'svr_transport_stalls_per_iteration',
+ 'svr_stream_stalls_per_iteration',)
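After this change every tracked stall metric carries the _per_iteration suffix, matching the other per-iteration counters in the tuple. A minimal sketch of filtering a benchmark result row down to these fields; the row contents are made up for illustration:

# Sketch: select only the _INTERESTING fields from a benchmark row.
# The row below is illustrative, not real benchmark output.
_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
                'allocs_per_iteration', 'writes_per_iteration',
                'atm_cas_per_iteration', 'atm_add_per_iteration',
                'nows_per_iteration', 'cli_transport_stalls_per_iteration',
                'cli_stream_stalls_per_iteration',
                'svr_transport_stalls_per_iteration',
                'svr_stream_stalls_per_iteration')

row = {'cpp_name': 'BM_PumpStreamClientToServer/8', 'real_time': 12.3,
       'cli_transport_stalls_per_iteration': 2.9e-06, 'iterations': 1000}
print({k: v for k, v in row.items() if k in _INTERESTING})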
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index 73abf90ff5..809817a1a8 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -108,9 +108,10 @@ class Benchmark:
mdn_diff = abs(_median(new) - _median(old))
_maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
(f, new_name, new, old_name, old, mdn_diff))
- s = bm_speedup.speedup(new, old)
- if abs(s) > 3 and mdn_diff > 0.5:
- self.final[f] = '%+d%%' % s
+ s = bm_speedup.speedup(new, old, 1e-5)
+ if abs(s) > 3:
+ if mdn_diff > 0.5 or 'trickle' in f:
+ self.final[f] = '%+d%%' % s
return self.final.keys()
def skip(self):
@@ -172,18 +173,16 @@ def diff(bms, loops, track, old, new, counters):
js_new_ctr = None
js_old_ctr = None
- if js_new_ctr:
- for row in bm_json.expand_json(js_new_ctr, js_new_opt):
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'):
- continue
- benchmarks[name].add_sample(track, row, True)
- if js_old_ctr:
- for row in bm_json.expand_json(js_old_ctr, js_old_opt):
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'):
- continue
- benchmarks[name].add_sample(track, row, False)
+ for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'):
+ continue
+ benchmarks[name].add_sample(track, row, True)
+ for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'):
+ continue
+ benchmarks[name].add_sample(track, row, False)
really_interesting = set()
for name, bm in benchmarks.items():
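The first hunk above relaxes the reporting gate: a change still has to exceed a 3% speedup, but the 0.5 median-difference floor no longer applies when 'trickle' appears in the name being processed, since trickle stall values sit around 1e-6 and would never clear an absolute floor. The looser p-value threshold passed to bm_speedup.speedup (1e-5 instead of the 1e-10 default) has the same motivation. A sketch of the decision in isolation; the function and argument names are illustrative:

# Sketch of the relaxed significance gate. speedup_pct stands in for
# bm_speedup.speedup(new, old, 1e-5); names are illustrative only.
def report_worthy(field, speedup_pct, mdn_diff):
    if abs(speedup_pct) <= 3:
        return False  # too small a shift to report at all
    # Trickle stall counters are ~1e-6 per iteration, so an absolute
    # median-difference floor would silence them entirely.
    return mdn_diff > 0.5 or 'trickle' in field

print(report_worthy('cpu_time', 12, 0.8))           # True: large median shift
print(report_worthy('cpu_time', 12, 0.1))           # False: noise-level shift
print(report_worthy('trickle_stalls', 40, 1e-06))   # True: trickle exemption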
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
index 3d126efa62..63e691af02 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -17,8 +17,7 @@
from scipy import stats
import math
-_THRESHOLD = 1e-10
-
+_DEFAULT_THRESHOLD = 1e-10
def scale(a, mul):
return [x * mul for x in a]
@@ -28,32 +27,32 @@ def cmp(a, b):
return stats.ttest_ind(a, b)
-def speedup(new, old):
+def speedup(new, old, threshold = _DEFAULT_THRESHOLD):
if (len(set(new))) == 1 and new == old: return 0
s0, p0 = cmp(new, old)
if math.isnan(p0): return 0
if s0 == 0: return 0
- if p0 > _THRESHOLD: return 0
+ if p0 > threshold: return 0
if s0 < 0:
pct = 1
- while pct < 101:
+ while pct < 100:
sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
if sp > 0: break
- if pp > _THRESHOLD: break
+ if pp > threshold: break
pct += 1
return -(pct - 1)
else:
pct = 1
- while pct < 100000:
+ while pct < 10000:
sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
if sp < 0: break
- if pp > _THRESHOLD: break
+ if pp > threshold: break
pct += 1
return pct - 1
if __name__ == "__main__":
- new = [1.0, 1.0, 1.0, 1.0]
- old = [2.0, 2.0, 2.0, 2.0]
- print speedup(new, old)
- print speedup(old, new)
+ new = [0.0, 0.0, 0.0, 0.0]
+ old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
+ print speedup(new, old, 1e-5)
+ print speedup(old, new, 1e-5)
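speedup() reports the largest percentage by which the old samples can be rescaled while a two-sample t-test (scipy.stats.ttest_ind) still distinguishes them from the new samples at the given threshold. A self-contained sketch of the upward branch of that search; simplified, since the real function also handles the opposite direction and degenerate inputs:

# Sketch of the upward search in speedup(): scale `old` up 1% at a time
# until the t-test no longer finds `new` significantly larger.
from scipy import stats

def scale(a, mul):
    return [x * mul for x in a]

def upward_speedup(new, old, threshold=1e-5, limit=10000):
    pct = 1
    while pct < limit:
        s, p = stats.ttest_ind(new, scale(old, 1 + pct / 100.0))
        if s < 0 or p > threshold:
            break
        pct += 1
    return pct - 1

new = [3.0e-06, 3.3e-06, 3.5e-06, 3.4e-06]
old = [1.0e-06, 1.1e-06, 1.05e-06, 0.95e-06]
print(upward_speedup(new, old))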
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index 062611f1c7..930287e0d6 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -167,6 +167,8 @@ def parse_name(name):
return out
def expand_json(js, js2 = None):
+ assert(js or js2)
+ if not js: js = js2
for bm in js['benchmarks']:
if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
context = js['context']
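The new guard lets expand_json accept a missing counters blob and fall back to the opt-only JSON, which is what the simplified bm_diff.py loops above rely on after dropping their None checks. A minimal sketch of the fallback; the JSON shape is trimmed to just what the snippet touches:

# Sketch of the expand_json fallback: if the first blob is None,
# iterate the second instead. Real rows merge context and counters;
# this trimmed version only yields the benchmark entries.
def expand_json(js, js2=None):
    assert js or js2
    if not js:
        js = js2
    for bm in js['benchmarks']:
        if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'):
            continue
        yield bm

js_opt = {'context': {}, 'benchmarks': [{'name': 'BM_Example/1'}]}
print(list(expand_json(None, js_opt)))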