path: root/tools
author    ncteisen <ncteisen@gmail.com>  2017-06-07 15:01:56 -0700
committer ncteisen <ncteisen@gmail.com>  2017-06-07 15:14:19 -0700
commit    7cd7b7fc433186f6b1f831ca11d015d28b3f7fac (patch)
tree      c9cd359e11bb6a3f7b0a4972169bf7ff7248177a /tools
parent    f1e19fdd3118d36316223b6720f08e3aadcc2b4a (diff)
Differentiate between timeouts and crashes
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_diff.py  32
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_main.py   2
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_run.py    3
3 files changed, 23 insertions, 14 deletions
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index b049f41ca0..72a8d11eea 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -51,7 +51,7 @@ def _median(ary):
   ary = sorted(ary)
   n = len(ary)
   if n % 2 == 0:
-    return (ary[n / 2] + ary[n / 2 + 1]) / 2.0
+    return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
   else:
     return ary[n / 2]
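This hunk fixes an off-by-one in the even-length branch of _median: for even n the two middle 0-based indices are n/2 - 1 and n/2, but the old code averaged ary[n/2] and ary[n/2 + 1], which is shifted one element too high (and reads past the end of the list for n == 2). A minimal standalone sketch of the corrected logic, written in Python 3 syntax (the file itself is Python 2, where n / 2 on ints already floor-divides):

def median(ary):
  ary = sorted(ary)
  n = len(ary)
  if n % 2 == 0:
    # (n - 1) // 2 equals n // 2 - 1 for even n, so this averages the
    # correct middle pair.
    return (ary[(n - 1) // 2] + ary[(n - 1) // 2 + 1]) / 2.0
  else:
    return ary[n // 2]

assert median([1, 2, 3, 4]) == 2.5  # the old code returned 3.5 here
assert median([1, 2, 3]) == 2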
@@ -130,23 +130,30 @@ class Benchmark:
     return [self.final[f] if f in self.final else '' for f in flds]


-def _read_json(filename, badfiles):
+def _read_json(filename, badjson_files, nonexistant_files):
   stripped = ".".join(filename.split(".")[:-2])
   try:
     with open(filename) as f:
       return json.loads(f.read())
+  except IOError, e:
+    if stripped in nonexistant_files:
+      nonexistant_files[stripped] += 1
+    else:
+      nonexistant_files[stripped] = 1
+    return None
   except ValueError, e:
-    if stripped in badfiles:
-      badfiles[stripped] += 1
+    if stripped in badjson_files:
+      badjson_files[stripped] += 1
     else:
-      badfiles[stripped] = 1
+      badjson_files[stripped] = 1
     return None
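This is the heart of the change: a file that was never written (IOError) is now tallied separately from a file containing corrupt JSON (ValueError), so a benchmark that timed out or exists on only one branch is no longer lumped in with one that crashed mid-write. A hedged sketch of the same bookkeeping using collections.Counter, which drops the increment-or-initialize branches; the helper name read_json is illustrative, not the project's API:

import collections
import json

badjson_files = collections.Counter()      # file exists but JSON is truncated/corrupt
nonexistant_files = collections.Counter()  # file was never written at all

def read_json(filename):
  stripped = ".".join(filename.split(".")[:-2])
  try:
    with open(filename) as f:
      return json.loads(f.read())
  except IOError:     # missing file: timeout before any output, or a new benchmark
    nonexistant_files[stripped] += 1
    return None
  except ValueError:  # unparsable file: the benchmark died while writing
    badjson_files[stripped] += 1
    return None

The hunk continues below, seeding the second counter in diff():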
 def diff(bms, loops, track, old, new):
   benchmarks = collections.defaultdict(Benchmark)
-  badfiles = {}
+  badjson_files = {}
+  nonexistant_files = {}
   for bm in bms:
     for loop in range(0, loops):
       for line in subprocess.check_output(
@@ -156,16 +163,16 @@ def diff(bms, loops, track, old, new):
           "<", "_").replace(">", "_").replace(", ", "_")
       js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
                               (bm, stripped_line, new, loop),
-                              badfiles)
+                              badjson_files, nonexistant_files)
       js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
                               (bm, stripped_line, new, loop),
-                              badfiles)
+                              badjson_files, nonexistant_files)
       js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
                               (bm, stripped_line, old, loop),
-                              badfiles)
+                              badjson_files, nonexistant_files)
       js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
                               (bm, stripped_line, old, loop),
-                              badfiles)
+                              badjson_files, nonexistant_files)

       if js_new_ctr:
         for row in bm_json.expand_json(js_new_ctr, js_new_opt):
@@ -191,7 +198,8 @@ def diff(bms, loops, track, old, new):
   for name in sorted(benchmarks.keys()):
     if benchmarks[name].skip(): continue
     rows.append([name] + benchmarks[name].row(fields))
-  note = 'flakiness data = %s' % str(badfiles)
+  note = 'Corrupt JSON data (indicates timeout or crash) = %s' % str(badjson_files)
+  note += '\n\nMissing files (new benchmark) = %s' % str(nonexistant_files)
   if rows:
     return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
   else:
@@ -204,4 +212,4 @@ if __name__ == '__main__':
                      args.new)
   print note
   print ""
-  print diff
+  print diff if diff else "No performance differences"
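Taken together, diff() now returns the table (or a falsy value when no rows survive the skip filter) plus a two-part note, and the __main__ tail no longer prints a bare None. An illustrative rendering of the note, using hypothetical benchmark names and counts:

badjson_files = {'bm_example.BM_Hypothetical': 2}
nonexistant_files = {'bm_example.BM_BrandNew': 1}
note = 'Corrupt JSON data (indicates timeout or crash) = %s' % str(badjson_files)
note += '\n\nMissing files (new benchmark) = %s' % str(nonexistant_files)
print(note)
# Corrupt JSON data (indicates timeout or crash) = {'bm_example.BM_Hypothetical': 2}
#
# Missing files (new benchmark) = {'bm_example.BM_BrandNew': 1}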
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 4c6eb8b48c..47381f4ec8 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -139,6 +139,8 @@ def main(args):
     text = 'Performance differences noted:\n' + diff
   else:
     text = 'No significant performance differences'
+  print note
+  print ""
   print text
   comment_on_pr.comment_on_pr('```\n%s\n\n%s\n```' % (note, text))
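The only behavioral change here is that the note now also reaches stdout (the CI log) ahead of the results text; it was already destined for the fenced PR comment. Assuming empty counters and no regressions, the comment body built by the unchanged comment_on_pr call would read roughly:

Corrupt JSON data (indicates timeout or crash) = {}

Missing files (new benchmark) = {}

No significant performance differences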
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index e281e9e61c..6ad9f1a3b7 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -107,8 +107,7 @@ def _collect_bm_data(bm, cfg, name, reps, idx, loops):
           shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
                                            loops),
           verbose_success=True,
-          timeout_seconds=60 * 10,
-          timeout_retries=3))
+          timeout_seconds=60 * 2))
   return jobs_list
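Dropping timeout_retries ties the three files together: a benchmark that hits the tighter 2-minute timeout is killed once, without retry, so its JSON output is either absent or truncated, which _read_json in bm_diff.py then books under nonexistant_files or badjson_files respectively. A stand-in sketch of that run-without-retry behavior (run_with_timeout and its use of subprocess are illustrative; the project actually dispatches through its jobset module):

import subprocess

def run_with_timeout(cmd, timeout_seconds=60 * 2):
  """Run cmd once with a hard timeout; never retry on expiry."""
  try:
    subprocess.run(cmd, check=True, timeout=timeout_seconds)
    return True
  except subprocess.TimeoutExpired:
    # Timeout: no retry, so the missing/truncated JSON output becomes the
    # signal that bm_diff.py reports.
    return False
  except subprocess.CalledProcessError:
    # Crash: likewise surfaces downstream as corrupt or missing JSON.
    return False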