path: root/tools/profiling
author     Craig Tiller <ctiller@google.com>   2017-04-18 13:57:38 -0700
committer  Craig Tiller <ctiller@google.com>   2017-04-18 13:57:38 -0700
commit     25e3c6da76a95bcd35fc675ad8a176060ebd59dc (patch)
tree       5cb66d10bd5e24c11b2801268f1de4cf381664ee /tools/profiling
parent     c2d9f1e297b7a504412be85ec9840e59bfdb794e (diff)
Fix EINTR forever
Diffstat (limited to 'tools/profiling')
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff.py  91
1 file changed, 49 insertions(+), 42 deletions(-)
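
Background for the diff below: the commit replaces the file-scoped read_file() helper, which retried only open()/read() on EINTR, with a generic eintr_be_gone() wrapper and moves the whole analysis into finalize(), so an IOError with errno EINTR raised while reading the per-benchmark JSON files causes the run to be retried instead of crashing. (On Python 3.5+ interrupted system calls are retried automatically per PEP 475; this tool ran under Python 2.) A minimal, self-contained sketch of the same retry-on-EINTR pattern follows; the file name is hypothetical, and "except IOError as e" is used so the sketch also runs on Python 3, whereas the committed code uses Python 2's "except IOError, e" syntax.

import errno
import json


def eintr_be_gone(fn):
  """Run fn until it doesn't stop because of EINTR."""
  while True:
    try:
      return fn()
    except IOError as e:
      # Retry only when a signal interrupted the underlying system call;
      # re-raise every other I/O error.
      if e.errno != errno.EINTR:
        raise


def read_json(filename):
  with open(filename) as f:
    return json.loads(f.read())


if __name__ == '__main__':
  # Hypothetical file name following the '%s.counters.new.%d.json' pattern
  # that bm_diff.py reads for each benchmark and loop.
  data = eintr_be_gone(lambda: read_json('bm_fullstack.counters.new.0.json'))
  print(len(data))
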
diff --git a/tools/profiling/microbenchmarks/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff.py
index ba0c225f6c..f1b6ef1ab9 100755
--- a/tools/profiling/microbenchmarks/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff.py
@@ -192,52 +192,59 @@ class Benchmark:
return [self.final[f] if f in self.final else '' for f in flds]
-def read_file(filename):
+def eintr_be_gone(fn):
+ """Run fn until it doesn't stop because of EINTR"""
while True:
try:
- with open(filename) as f:
- return f.read()
+ return fn()
except IOError, e:
if e.errno != errno.EINTR:
raise
+
def read_json(filename):
- return json.loads(read_file(filename))
-
-benchmarks = collections.defaultdict(Benchmark)
-
-for bm in args.benchmarks:
- for loop in range(0, args.loops):
- js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
- js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
- js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
- js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
-
- for row in bm_json.expand_json(js_new_ctr, js_new_opt):
- print row
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'): continue
- benchmarks[name].add_sample(row, True)
- for row in bm_json.expand_json(js_old_ctr, js_old_opt):
- print row
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'): continue
- benchmarks[name].add_sample(row, False)
-
-really_interesting = set()
-for name, bm in benchmarks.items():
- print name
- really_interesting.update(bm.process())
-fields = [f for f in args.track if f in really_interesting]
-
-headers = ['Benchmark'] + fields
-rows = []
-for name in sorted(benchmarks.keys()):
- if benchmarks[name].skip(): continue
- rows.append([name] + benchmarks[name].row(fields))
-if rows:
- text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
-else:
- text = 'No significant performance differences'
-comment_on_pr.comment_on_pr('```\n%s\n```' % text)
-print text
+ with open(filename) as f: return json.loads(f.read())
+
+
+def finalize():
+ benchmarks = collections.defaultdict(Benchmark)
+
+ for bm in args.benchmarks:
+ for loop in range(0, args.loops):
+ js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
+ js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
+ js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
+ js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
+
+ for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+ print row
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'): continue
+ benchmarks[name].add_sample(row, True)
+ for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+ print row
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'): continue
+ benchmarks[name].add_sample(row, False)
+
+ really_interesting = set()
+ for name, bm in benchmarks.items():
+ print name
+ really_interesting.update(bm.process())
+ fields = [f for f in args.track if f in really_interesting]
+
+ headers = ['Benchmark'] + fields
+ rows = []
+ for name in sorted(benchmarks.keys()):
+ if benchmarks[name].skip(): continue
+ rows.append([name] + benchmarks[name].row(fields))
+ if rows:
+ text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+ else:
+ text = 'No significant performance differences'
+ comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+ print text
+
+
+eintr_be_gone(finalize)
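
Design note: eintr_be_gone(finalize) wraps the entire analysis rather than each individual open(), so a retry triggered by an interrupted read re-runs finalize() from the top instead of retrying just the one read. The JSON reads are the main source of EINTR here and happen before the PR comment is posted, so a full re-run is a simple trade-off that keeps the read_json() call sites free of retry boilerplate.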
+