author    | Craig Tiller <ctiller@google.com> | 2017-04-20 12:39:26 -0700
committer | Craig Tiller <ctiller@google.com> | 2017-04-20 12:39:26 -0700
commit    | fe1a67c3d306d650131988471cda8feb0cfa119e (patch)
tree      | f9c47fe773ab15296bac4684dfebcccb980b1a31 /tools/profiling
parent    | 02cc83b51a9eb9418c97befdd52b5f2e52977562 (diff)
parent    | 3fe0f6d8514f5e957baadde15c35cd7896cc2142 (diff)
Merge github.com:grpc/grpc into trickle_stall
Diffstat (limited to 'tools/profiling')
-rwxr-xr-x | tools/profiling/microbenchmarks/bm_diff.py | 92
-rw-r--r-- | tools/profiling/microbenchmarks/speedup.py |  4
2 files changed, 51 insertions, 45 deletions
diff --git a/tools/profiling/microbenchmarks/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff.py
index 2f1262f60e..759135e533 100755
--- a/tools/profiling/microbenchmarks/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff.py
@@ -102,7 +102,7 @@ argp.add_argument('-t', '--track',
 argp.add_argument('-b', '--benchmarks', nargs='+', choices=_AVAILABLE_BENCHMARK_TESTS, default=['bm_cq'])
 argp.add_argument('-d', '--diff_base', type=str)
 argp.add_argument('-r', '--repetitions', type=int, default=1)
-argp.add_argument('-l', '--loops', type=int, default=12)
+argp.add_argument('-l', '--loops', type=int, default=20)
 argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
 args = argp.parse_args()
 
@@ -196,52 +196,58 @@ class Benchmark:
     return [self.final[f] if f in self.final else '' for f in flds]
 
-def read_file(filename):
+def eintr_be_gone(fn):
+  """Run fn until it doesn't stop because of EINTR"""
   while True:
     try:
-      with open(filename) as f:
-        return f.read()
+      return fn()
     except IOError, e:
       if e.errno != errno.EINTR:
         raise
 
+
 def read_json(filename):
-  return json.loads(read_file(filename))
-
-benchmarks = collections.defaultdict(Benchmark)
-
-for bm in args.benchmarks:
-  for loop in range(0, args.loops):
-    js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
-    js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
-    js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
-    js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
-
-    for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-      print row
-      name = row['cpp_name']
-      if name.endswith('_mean') or name.endswith('_stddev'): continue
-      benchmarks[name].add_sample(row, True)
-    for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-      print row
-      name = row['cpp_name']
-      if name.endswith('_mean') or name.endswith('_stddev'): continue
-      benchmarks[name].add_sample(row, False)
-
-really_interesting = set()
-for name, bm in benchmarks.items():
-  print name
-  really_interesting.update(bm.process())
-fields = [f for f in args.track if f in really_interesting]
-
-headers = ['Benchmark'] + fields
-rows = []
-for name in sorted(benchmarks.keys()):
-  if benchmarks[name].skip(): continue
-  rows.append([name] + benchmarks[name].row(fields))
-if rows:
-  text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
-else:
-  text = 'No significant performance differences'
-comment_on_pr.comment_on_pr('```\n%s\n```' % text)
-print text
+  with open(filename) as f: return json.loads(f.read())
+
+
+def finalize():
+  benchmarks = collections.defaultdict(Benchmark)
+
+  for bm in args.benchmarks:
+    for loop in range(0, args.loops):
+      js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
+      js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
+      js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
+      js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
+
+      for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+        print row
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'): continue
+        benchmarks[name].add_sample(row, True)
+      for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+        print row
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'): continue
+        benchmarks[name].add_sample(row, False)
+
+  really_interesting = set()
+  for name, bm in benchmarks.items():
+    print name
+    really_interesting.update(bm.process())
+  fields = [f for f in args.track if f in really_interesting]
+
+  headers = ['Benchmark'] + fields
+  rows = []
+  for name in sorted(benchmarks.keys()):
+    if benchmarks[name].skip(): continue
+    rows.append([name] + benchmarks[name].row(fields))
+  if rows:
+    text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+  else:
+    text = 'No significant performance differences'
+  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+  print text
+
+
+eintr_be_gone(finalize)
diff --git a/tools/profiling/microbenchmarks/speedup.py b/tools/profiling/microbenchmarks/speedup.py
index 8f9023d2c8..8af0066c9d 100644
--- a/tools/profiling/microbenchmarks/speedup.py
+++ b/tools/profiling/microbenchmarks/speedup.py
@@ -30,7 +30,7 @@ from scipy import stats
 import math
 
-_THRESHOLD = 0.0001
+_THRESHOLD = 1e-10
 
 def scale(a, mul):
   return [x*mul for x in a]
 
@@ -53,7 +53,7 @@ def speedup(new, old):
     return -(pct - 1)
   else:
     pct = 1
-    while pct < 101:
+    while pct < 100000:
       sp, pp = cmp(new, scale(old, 1 + pct/100.0))
       if sp < 0: break
       if pp > _THRESHOLD: break
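
Note on the bm_diff.py hunk above: the EINTR retry loop that previously lived inside read_file() is generalized into eintr_be_gone(fn), and the whole post-processing pass is moved into finalize() so that the retry wraps the entire pass. Below is a minimal sketch of the same pattern, rewritten in Python 3 syntax purely for illustration (the file in the diff is Python 2, where an interrupted system call surfaces as IOError with errno EINTR):

import errno

def eintr_be_gone(fn):
    """Keep calling fn() until it finishes without being interrupted by EINTR."""
    while True:
        try:
            return fn()
        except IOError as e:
            # Retry only when a signal interrupted the call; re-raise real errors.
            if e.errno != errno.EINTR:
                raise

# Usage mirrors the patch: wrap the whole pass rather than a single file read,
# e.g. eintr_be_gone(finalize), where finalize() reads and aggregates the JSON results.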
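
Note on the speedup.py hunk: the significance threshold tightens from 0.0001 to 1e-10 and the search cap rises from 101 to 100000, so differences larger than roughly 100% are quantified instead of saturating. A rough sketch of that search follows, under the assumption (not shown in this diff) that cmp(new, old) returns a (statistic, p-value) pair from a two-sample test such as scipy.stats.ttest_ind; the helper name largest_significant_speedup_pct and the placeholder cmp() are hypothetical:

from scipy import stats

_THRESHOLD = 1e-10  # the stricter threshold introduced by the patch

def scale(a, mul):
    # Multiply every sample by mul, as speedup.py does.
    return [x * mul for x in a]

def cmp(new, old):
    # Placeholder two-sample test; the real cmp() in speedup.py may differ.
    return stats.ttest_ind(new, old)

def largest_significant_speedup_pct(new, old):
    """Largest pct such that 'new' still significantly exceeds 'old' scaled up by pct percent."""
    pct = 1
    while pct < 100000:  # the patch raises this cap from 101
        sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
        if sp < 0:
            break  # sign flipped: scaled 'old' has caught up with 'new'
        if pp > _THRESHOLD:
            break  # difference is no longer significant at the stricter threshold
        pct += 1
    return pct - 1

The bump of --loops from 12 to 20 in bm_diff.py gives each benchmark more samples per comparison, which is presumably what makes the much stricter 1e-10 threshold practical.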