path: root/tools/profiling/microbenchmarks
author    David Garcia Quintas <dgq@google.com>    2017-12-13 14:44:29 -0800
committer David Garcia Quintas <dgq@google.com>    2017-12-13 14:44:29 -0800
commit    54d699ddda18e8aca7a556ad3c38d1684efc88ec (patch)
tree      b0ecaaf07074bdcde6c3315f647cc04417da7b90 /tools/profiling/microbenchmarks
parent    62d86e9987121c8bd79d4594fb0db019c4faafad (diff)
parent    91a851c6e1f6bc7c1dbf84ea12558d535c911252 (diff)
Merge branch 'master' of github.com:grpc/grpc into backoff_cpp
Diffstat (limited to 'tools/profiling/microbenchmarks')
-rwxr-xr-x  tools/profiling/microbenchmarks/bm2bq.py                    43
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_build.py         87
-rw-r--r--  tools/profiling/microbenchmarks/bm_diff/bm_constants.py     22
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_diff.py         335
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_main.py         218
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_run.py          158
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_speedup.py       61
-rw-r--r--  tools/profiling/microbenchmarks/bm_json.py                  365
8 files changed, 654 insertions(+), 635 deletions(-)
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index 9f9b672f75..746b643b43 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -28,37 +28,38 @@ import subprocess
columns = []
for row in json.loads(
- subprocess.check_output([
- 'bq','--format=json','show','microbenchmarks.microbenchmarks']))['schema']['fields']:
- columns.append((row['name'], row['type'].lower()))
+ subprocess.check_output([
+ 'bq', '--format=json', 'show', 'microbenchmarks.microbenchmarks'
+ ]))['schema']['fields']:
+ columns.append((row['name'], row['type'].lower()))
SANITIZE = {
- 'integer': int,
- 'float': float,
- 'boolean': bool,
- 'string': str,
- 'timestamp': str,
+ 'integer': int,
+ 'float': float,
+ 'boolean': bool,
+ 'string': str,
+ 'timestamp': str,
}
if sys.argv[1] == '--schema':
- print ',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns)
- sys.exit(0)
+ print ',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns)
+ sys.exit(0)
with open(sys.argv[1]) as f:
- js = json.loads(f.read())
+ js = json.loads(f.read())
if len(sys.argv) > 2:
- with open(sys.argv[2]) as f:
- js2 = json.loads(f.read())
+ with open(sys.argv[2]) as f:
+ js2 = json.loads(f.read())
else:
- js2 = None
+ js2 = None
-writer = csv.DictWriter(sys.stdout, [c for c,t in columns])
+writer = csv.DictWriter(sys.stdout, [c for c, t in columns])
for row in bm_json.expand_json(js, js2):
- sane_row = {}
- for name, sql_type in columns:
- if name in row:
- if row[name] == '': continue
- sane_row[name] = SANITIZE[sql_type](row[name])
- writer.writerow(sane_row)
+ sane_row = {}
+ for name, sql_type in columns:
+ if name in row:
+ if row[name] == '': continue
+ sane_row[name] = SANITIZE[sql_type](row[name])
+ writer.writerow(sane_row)
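
For context, bm2bq.py converts Google Benchmark JSON output (plus an optional counters file) into CSV rows matching the microbenchmarks.microbenchmarks BigQuery table. Below is a minimal, self-contained sketch of the sanitize-and-write step shown above; the schema columns and the input row are illustrative, since the real script fetches the schema with `bq show`:

    import csv
    import sys

    # Illustrative stand-in for the schema normally fetched via `bq --format=json show ...`.
    columns = [('name', 'string'), ('cpu_time', 'float'), ('iterations', 'integer')]

    SANITIZE = {'integer': int, 'float': float, 'boolean': bool, 'string': str}

    # One fake benchmark row; unknown keys and empty values are dropped, known ones coerced.
    row = {'name': 'BM_Example', 'cpu_time': '12.5', 'iterations': '1000', 'extra': 'ignored'}

    writer = csv.DictWriter(sys.stdout, [c for c, t in columns])
    sane_row = {name: SANITIZE[sql_type](row[name])
                for name, sql_type in columns
                if name in row and row[name] != ''}
    writer.writerow(sane_row)
    # Prints: BM_Example,12.5,1000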
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_build.py b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
index ce62c09d72..a4cd61707d 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_build.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
""" Python utility to build opt and counters benchmarks """
import bm_constants
@@ -26,55 +25,55 @@ import shutil
def _args():
- argp = argparse.ArgumentParser(description='Builds microbenchmarks')
- argp.add_argument(
- '-b',
- '--benchmarks',
- nargs='+',
- choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- help='Which benchmarks to build')
- argp.add_argument(
- '-j',
- '--jobs',
- type=int,
- default=multiprocessing.cpu_count(),
- help='How many CPUs to dedicate to this task')
- argp.add_argument(
- '-n',
- '--name',
- type=str,
- help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
- )
- argp.add_argument('--counters', dest='counters', action='store_true')
- argp.add_argument('--no-counters', dest='counters', action='store_false')
- argp.set_defaults(counters=True)
- args = argp.parse_args()
- assert args.name
- return args
+ argp = argparse.ArgumentParser(description='Builds microbenchmarks')
+ argp.add_argument(
+ '-b',
+ '--benchmarks',
+ nargs='+',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ help='Which benchmarks to build')
+ argp.add_argument(
+ '-j',
+ '--jobs',
+ type=int,
+ default=multiprocessing.cpu_count(),
+ help='How many CPUs to dedicate to this task')
+ argp.add_argument(
+ '-n',
+ '--name',
+ type=str,
+ help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
+ )
+ argp.add_argument('--counters', dest='counters', action='store_true')
+ argp.add_argument('--no-counters', dest='counters', action='store_false')
+ argp.set_defaults(counters=True)
+ args = argp.parse_args()
+ assert args.name
+ return args
def _make_cmd(cfg, benchmarks, jobs):
- return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
+ return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
def build(name, benchmarks, jobs, counters):
- shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
- subprocess.check_call(['git', 'submodule', 'update'])
- try:
- subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
- if counters:
- subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
- except subprocess.CalledProcessError, e:
- subprocess.check_call(['make', 'clean'])
- subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
- if counters:
- subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
- os.rename(
- 'bins',
- 'bm_diff_%s' % name,)
+ shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
+ subprocess.check_call(['git', 'submodule', 'update'])
+ try:
+ subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
+ if counters:
+ subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
+ except subprocess.CalledProcessError, e:
+ subprocess.check_call(['make', 'clean'])
+ subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
+ if counters:
+ subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
+ os.rename(
+ 'bins',
+ 'bm_diff_%s' % name,)
if __name__ == '__main__':
- args = _args()
- build(args.name, args.benchmarks, args.jobs, args.counters)
+ args = _args()
+ build(args.name, args.benchmarks, args.jobs, args.counters)
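
To make the build step concrete: build() invokes make once for the 'opt' config and, if --counters is set, again for 'counters', then renames bins/ to bm_diff_<name>/. A quick sketch of the command _make_cmd assembles (benchmark names and job count here are illustrative):

    benchmarks = ['bm_closure', 'bm_error']
    cfg, jobs = 'opt', 8
    cmd = ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
    print(cmd)
    # ['make', 'bm_closure', 'bm_error', 'CONFIG=opt', '-j', '8']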
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 0ec17fa17e..cff29dbe08 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -13,19 +13,19 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
""" Configurable constants for the bm_*.py family """
_AVAILABLE_BENCHMARK_TESTS = [
- 'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
- 'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
- 'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
- 'bm_metadata', 'bm_fullstack_trickle'
+ 'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
+ 'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
+ 'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
+ 'bm_metadata', 'bm_fullstack_trickle'
]
-_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
- 'allocs_per_iteration', 'writes_per_iteration',
- 'atm_cas_per_iteration', 'atm_add_per_iteration',
- 'nows_per_iteration', 'cli_transport_stalls_per_iteration',
- 'cli_stream_stalls_per_iteration', 'svr_transport_stalls_per_iteration',
- 'svr_stream_stalls_per_iteration', 'http2_pings_sent_per_iteration')
+_INTERESTING = (
+ 'cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
+ 'allocs_per_iteration', 'writes_per_iteration', 'atm_cas_per_iteration',
+ 'atm_add_per_iteration', 'nows_per_iteration',
+ 'cli_transport_stalls_per_iteration', 'cli_stream_stalls_per_iteration',
+ 'svr_transport_stalls_per_iteration', 'svr_stream_stalls_per_iteration',
+ 'http2_pings_sent_per_iteration')
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index a41d0f0552..b8a3b22861 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -34,190 +34,195 @@ verbose = False
def _median(ary):
- assert (len(ary))
- ary = sorted(ary)
- n = len(ary)
- if n % 2 == 0:
- return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
- else:
- return ary[n / 2]
+ assert (len(ary))
+ ary = sorted(ary)
+ n = len(ary)
+ if n % 2 == 0:
+ return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
+ else:
+ return ary[n / 2]
def _args():
- argp = argparse.ArgumentParser(
- description='Perform diff on microbenchmarks')
- argp.add_argument(
- '-t',
- '--track',
- choices=sorted(bm_constants._INTERESTING),
- nargs='+',
- default=sorted(bm_constants._INTERESTING),
- help='Which metrics to track')
- argp.add_argument(
- '-b',
- '--benchmarks',
- nargs='+',
- choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- help='Which benchmarks to run')
- argp.add_argument(
- '-l',
- '--loops',
- type=int,
- default=20,
- help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
- )
- argp.add_argument(
- '-r',
- '--regex',
- type=str,
- default="",
- help='Regex to filter benchmarks run')
- argp.add_argument('--counters', dest='counters', action='store_true')
- argp.add_argument('--no-counters', dest='counters', action='store_false')
- argp.set_defaults(counters=True)
- argp.add_argument('-n', '--new', type=str, help='New benchmark name')
- argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
- argp.add_argument(
- '-v', '--verbose', type=bool, help='Print details of before/after')
- args = argp.parse_args()
- global verbose
- if args.verbose: verbose = True
- assert args.new
- assert args.old
- return args
+ argp = argparse.ArgumentParser(
+ description='Perform diff on microbenchmarks')
+ argp.add_argument(
+ '-t',
+ '--track',
+ choices=sorted(bm_constants._INTERESTING),
+ nargs='+',
+ default=sorted(bm_constants._INTERESTING),
+ help='Which metrics to track')
+ argp.add_argument(
+ '-b',
+ '--benchmarks',
+ nargs='+',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ help='Which benchmarks to run')
+ argp.add_argument(
+ '-l',
+ '--loops',
+ type=int,
+ default=20,
+ help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
+ )
+ argp.add_argument(
+ '-r',
+ '--regex',
+ type=str,
+ default="",
+ help='Regex to filter benchmarks run')
+ argp.add_argument('--counters', dest='counters', action='store_true')
+ argp.add_argument('--no-counters', dest='counters', action='store_false')
+ argp.set_defaults(counters=True)
+ argp.add_argument('-n', '--new', type=str, help='New benchmark name')
+ argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
+ argp.add_argument(
+ '-v', '--verbose', type=bool, help='Print details of before/after')
+ args = argp.parse_args()
+ global verbose
+ if args.verbose: verbose = True
+ assert args.new
+ assert args.old
+ return args
def _maybe_print(str):
- if verbose: print str
+ if verbose: print str
class Benchmark:
- def __init__(self):
- self.samples = {
- True: collections.defaultdict(list),
- False: collections.defaultdict(list)
- }
- self.final = {}
-
- def add_sample(self, track, data, new):
- for f in track:
- if f in data:
- self.samples[new][f].append(float(data[f]))
-
- def process(self, track, new_name, old_name):
- for f in sorted(track):
- new = self.samples[True][f]
- old = self.samples[False][f]
- if not new or not old: continue
- mdn_diff = abs(_median(new) - _median(old))
- _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
- (f, new_name, new, old_name, old, mdn_diff))
- s = bm_speedup.speedup(new, old, 1e-5)
- if abs(s) > 3:
- if mdn_diff > 0.5 or 'trickle' in f:
- self.final[f] = '%+d%%' % s
- return self.final.keys()
-
- def skip(self):
- return not self.final
-
- def row(self, flds):
- return [self.final[f] if f in self.final else '' for f in flds]
+ def __init__(self):
+ self.samples = {
+ True: collections.defaultdict(list),
+ False: collections.defaultdict(list)
+ }
+ self.final = {}
+
+ def add_sample(self, track, data, new):
+ for f in track:
+ if f in data:
+ self.samples[new][f].append(float(data[f]))
+
+ def process(self, track, new_name, old_name):
+ for f in sorted(track):
+ new = self.samples[True][f]
+ old = self.samples[False][f]
+ if not new or not old: continue
+ mdn_diff = abs(_median(new) - _median(old))
+ _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
+ (f, new_name, new, old_name, old, mdn_diff))
+ s = bm_speedup.speedup(new, old, 1e-5)
+ if abs(s) > 3:
+ if mdn_diff > 0.5 or 'trickle' in f:
+ self.final[f] = '%+d%%' % s
+ return self.final.keys()
+
+ def skip(self):
+ return not self.final
+
+ def row(self, flds):
+ return [self.final[f] if f in self.final else '' for f in flds]
def _read_json(filename, badjson_files, nonexistant_files):
- stripped = ".".join(filename.split(".")[:-2])
- try:
- with open(filename) as f:
- r = f.read();
- return json.loads(r)
- except IOError, e:
- if stripped in nonexistant_files:
- nonexistant_files[stripped] += 1
- else:
- nonexistant_files[stripped] = 1
- return None
- except ValueError, e:
- print r
- if stripped in badjson_files:
- badjson_files[stripped] += 1
- else:
- badjson_files[stripped] = 1
- return None
+ stripped = ".".join(filename.split(".")[:-2])
+ try:
+ with open(filename) as f:
+ r = f.read()
+ return json.loads(r)
+ except IOError, e:
+ if stripped in nonexistant_files:
+ nonexistant_files[stripped] += 1
+ else:
+ nonexistant_files[stripped] = 1
+ return None
+ except ValueError, e:
+ print r
+ if stripped in badjson_files:
+ badjson_files[stripped] += 1
+ else:
+ badjson_files[stripped] = 1
+ return None
+
def fmt_dict(d):
- return ''.join([" " + k + ": " + str(d[k]) + "\n" for k in d])
+ return ''.join([" " + k + ": " + str(d[k]) + "\n" for k in d])
+
def diff(bms, loops, regex, track, old, new, counters):
- benchmarks = collections.defaultdict(Benchmark)
-
- badjson_files = {}
- nonexistant_files = {}
- for bm in bms:
- for loop in range(0, loops):
- for line in subprocess.check_output(
- ['bm_diff_%s/opt/%s' % (old, bm),
- '--benchmark_list_tests',
- '--benchmark_filter=%s' % regex]).splitlines():
- stripped_line = line.strip().replace("/", "_").replace(
- "<", "_").replace(">", "_").replace(", ", "_")
- js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
- (bm, stripped_line, new, loop),
- badjson_files, nonexistant_files)
- js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
- (bm, stripped_line, old, loop),
- badjson_files, nonexistant_files)
- if counters:
- js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
- (bm, stripped_line, new, loop),
- badjson_files, nonexistant_files)
- js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
- (bm, stripped_line, old, loop),
- badjson_files, nonexistant_files)
+ benchmarks = collections.defaultdict(Benchmark)
+
+ badjson_files = {}
+ nonexistant_files = {}
+ for bm in bms:
+ for loop in range(0, loops):
+ for line in subprocess.check_output([
+ 'bm_diff_%s/opt/%s' % (old, bm), '--benchmark_list_tests',
+ '--benchmark_filter=%s' % regex
+ ]).splitlines():
+ stripped_line = line.strip().replace("/", "_").replace(
+ "<", "_").replace(">", "_").replace(", ", "_")
+ js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
+ (bm, stripped_line, new, loop),
+ badjson_files, nonexistant_files)
+ js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
+ (bm, stripped_line, old, loop),
+ badjson_files, nonexistant_files)
+ if counters:
+ js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
+ (bm, stripped_line, new, loop),
+ badjson_files, nonexistant_files)
+ js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
+ (bm, stripped_line, old, loop),
+ badjson_files, nonexistant_files)
+ else:
+ js_new_ctr = None
+ js_old_ctr = None
+
+ for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'):
+ continue
+ benchmarks[name].add_sample(track, row, True)
+ for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+ name = row['cpp_name']
+ if name.endswith('_mean') or name.endswith('_stddev'):
+ continue
+ benchmarks[name].add_sample(track, row, False)
+
+ really_interesting = set()
+ for name, bm in benchmarks.items():
+ _maybe_print(name)
+ really_interesting.update(bm.process(track, new, old))
+ fields = [f for f in track if f in really_interesting]
+
+ headers = ['Benchmark'] + fields
+ rows = []
+ for name in sorted(benchmarks.keys()):
+ if benchmarks[name].skip(): continue
+ rows.append([name] + benchmarks[name].row(fields))
+ note = None
+ if len(badjson_files):
+ note = 'Corrupt JSON data (indicates timeout or crash): \n%s' % fmt_dict(
+ badjson_files)
+ if len(nonexistant_files):
+ if note:
+ note += '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(
+ nonexistant_files)
else:
- js_new_ctr = None
- js_old_ctr = None
-
- for row in bm_json.expand_json(js_new_ctr, js_new_opt):
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'):
- continue
- benchmarks[name].add_sample(track, row, True)
- for row in bm_json.expand_json(js_old_ctr, js_old_opt):
- name = row['cpp_name']
- if name.endswith('_mean') or name.endswith('_stddev'):
- continue
- benchmarks[name].add_sample(track, row, False)
-
- really_interesting = set()
- for name, bm in benchmarks.items():
- _maybe_print(name)
- really_interesting.update(bm.process(track, new, old))
- fields = [f for f in track if f in really_interesting]
-
- headers = ['Benchmark'] + fields
- rows = []
- for name in sorted(benchmarks.keys()):
- if benchmarks[name].skip(): continue
- rows.append([name] + benchmarks[name].row(fields))
- note = None
- if len(badjson_files):
- note = 'Corrupt JSON data (indicates timeout or crash): \n%s' % fmt_dict(badjson_files)
- if len(nonexistant_files):
- if note:
- note += '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
+ note = '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(
+ nonexistant_files)
+ if rows:
+ return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
else:
- note = '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
- if rows:
- return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
- else:
- return None, note
+ return None, note
if __name__ == '__main__':
- args = _args()
- diff, note = diff(args.benchmarks, args.loops, args.regex, args.track, args.old,
- args.new, args.counters)
- print('%s\n%s' % (note, diff if diff else "No performance differences"))
+ args = _args()
+ diff, note = diff(args.benchmarks, args.loops, args.regex, args.track,
+ args.old, args.new, args.counters)
+ print('%s\n%s' % (note, diff if diff else "No performance differences"))
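
The scripts above target Python 2 (print statements, `except ..., e`), and _median relies on Python 2's integer division; a metric only reaches the report when bm_speedup reports a shift above 3% and the medians differ by more than 0.5 (trickle benchmarks skip the median-difference check). For reference, a Python 3-friendly restatement of the median helper with the same even/odd behavior:

    def median(ary):
        # Average the two middle samples for even-length input, middle sample otherwise.
        assert len(ary)
        ary = sorted(ary)
        n = len(ary)
        if n % 2 == 0:
            return (ary[n // 2 - 1] + ary[n // 2]) / 2.0
        return ary[n // 2]

    print(median([3.0, 1.0, 2.0]))        # 2.0
    print(median([4.0, 1.0, 3.0, 2.0]))   # 2.5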
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 74b7174f5d..137c22bf8e 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
""" Runs the entire bm_*.py pipeline, and possible comments on the PR """
import bm_constants
@@ -29,129 +28,132 @@ import multiprocessing
import subprocess
sys.path.append(
- os.path.join(
- os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import comment_on_pr
sys.path.append(
- os.path.join(
- os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
- 'python_utils'))
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
+ 'python_utils'))
import jobset
def _args():
- argp = argparse.ArgumentParser(
- description='Perform diff on microbenchmarks')
- argp.add_argument(
- '-t',
- '--track',
- choices=sorted(bm_constants._INTERESTING),
- nargs='+',
- default=sorted(bm_constants._INTERESTING),
- help='Which metrics to track')
- argp.add_argument(
- '-b',
- '--benchmarks',
- nargs='+',
- choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- help='Which benchmarks to run')
- argp.add_argument(
- '-d',
- '--diff_base',
- type=str,
- help='Commit or branch to compare the current one to')
- argp.add_argument(
- '-o',
- '--old',
- default='old',
- type=str,
- help='Name of baseline run to compare to. Ususally just called "old"')
- argp.add_argument(
- '-r',
- '--regex',
- type=str,
- default="",
- help='Regex to filter benchmarks run')
- argp.add_argument(
- '-l',
- '--loops',
- type=int,
- default=10,
- help='Number of times to loops the benchmarks. More loops cuts down on noise'
- )
- argp.add_argument(
- '-j',
- '--jobs',
- type=int,
- default=multiprocessing.cpu_count(),
- help='Number of CPUs to use')
- argp.add_argument(
- '--pr_comment_name',
- type=str,
- default="microbenchmarks",
- help='Name that Jenkins will use to commen on the PR')
- argp.add_argument('--counters', dest='counters', action='store_true')
- argp.add_argument('--no-counters', dest='counters', action='store_false')
- argp.set_defaults(counters=True)
- args = argp.parse_args()
- assert args.diff_base or args.old, "One of diff_base or old must be set!"
- if args.loops < 3:
- print "WARNING: This run will likely be noisy. Increase loops."
- return args
+ argp = argparse.ArgumentParser(
+ description='Perform diff on microbenchmarks')
+ argp.add_argument(
+ '-t',
+ '--track',
+ choices=sorted(bm_constants._INTERESTING),
+ nargs='+',
+ default=sorted(bm_constants._INTERESTING),
+ help='Which metrics to track')
+ argp.add_argument(
+ '-b',
+ '--benchmarks',
+ nargs='+',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ help='Which benchmarks to run')
+ argp.add_argument(
+ '-d',
+ '--diff_base',
+ type=str,
+ help='Commit or branch to compare the current one to')
+ argp.add_argument(
+ '-o',
+ '--old',
+ default='old',
+ type=str,
+ help='Name of baseline run to compare to. Ususally just called "old"')
+ argp.add_argument(
+ '-r',
+ '--regex',
+ type=str,
+ default="",
+ help='Regex to filter benchmarks run')
+ argp.add_argument(
+ '-l',
+ '--loops',
+ type=int,
+ default=10,
+ help='Number of times to loops the benchmarks. More loops cuts down on noise'
+ )
+ argp.add_argument(
+ '-j',
+ '--jobs',
+ type=int,
+ default=multiprocessing.cpu_count(),
+ help='Number of CPUs to use')
+ argp.add_argument(
+ '--pr_comment_name',
+ type=str,
+ default="microbenchmarks",
+ help='Name that Jenkins will use to commen on the PR')
+ argp.add_argument('--counters', dest='counters', action='store_true')
+ argp.add_argument('--no-counters', dest='counters', action='store_false')
+ argp.set_defaults(counters=True)
+ args = argp.parse_args()
+ assert args.diff_base or args.old, "One of diff_base or old must be set!"
+ if args.loops < 3:
+ print "WARNING: This run will likely be noisy. Increase loops."
+ return args
def eintr_be_gone(fn):
- """Run fn until it doesn't stop because of EINTR"""
+ """Run fn until it doesn't stop because of EINTR"""
- def inner(*args):
- while True:
- try:
- return fn(*args)
- except IOError, e:
- if e.errno != errno.EINTR:
- raise
+ def inner(*args):
+ while True:
+ try:
+ return fn(*args)
+ except IOError, e:
+ if e.errno != errno.EINTR:
+ raise
- return inner
+ return inner
def main(args):
- bm_build.build('new', args.benchmarks, args.jobs, args.counters)
-
- old = args.old
- if args.diff_base:
- old = 'old'
- where_am_i = subprocess.check_output(
- ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
- subprocess.check_call(['git', 'checkout', args.diff_base])
- try:
- bm_build.build(old, args.benchmarks, args.jobs, args.counters)
- finally:
- subprocess.check_call(['git', 'checkout', where_am_i])
- subprocess.check_call(['git', 'submodule', 'update'])
-
- jobs_list = []
- jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops, args.regex, args.counters)
- jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops, args.regex, args.counters)
-
- # shuffle all jobs to eliminate noise from GCE CPU drift
- random.shuffle(jobs_list, random.SystemRandom().random)
- jobset.run(jobs_list, maxjobs=args.jobs)
-
- diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old,
- 'new', args.counters)
- if diff:
- text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff)
- else:
- text = '[%s] No significant performance differences' % args.pr_comment_name
- if note:
- text = note + '\n\n' + text
- print('%s' % text)
- comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+ bm_build.build('new', args.benchmarks, args.jobs, args.counters)
+
+ old = args.old
+ if args.diff_base:
+ old = 'old'
+ where_am_i = subprocess.check_output(
+ ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+ subprocess.check_call(['git', 'checkout', args.diff_base])
+ try:
+ bm_build.build(old, args.benchmarks, args.jobs, args.counters)
+ finally:
+ subprocess.check_call(['git', 'checkout', where_am_i])
+ subprocess.check_call(['git', 'submodule', 'update'])
+
+ jobs_list = []
+ jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops,
+ args.regex, args.counters)
+ jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops,
+ args.regex, args.counters)
+
+ # shuffle all jobs to eliminate noise from GCE CPU drift
+ random.shuffle(jobs_list, random.SystemRandom().random)
+ jobset.run(jobs_list, maxjobs=args.jobs)
+
+ diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex,
+ args.track, old, 'new', args.counters)
+ if diff:
+ text = '[%s] Performance differences noted:\n%s' % (
+ args.pr_comment_name, diff)
+ else:
+ text = '[%s] No significant performance differences' % args.pr_comment_name
+ if note:
+ text = note + '\n\n' + text
+ print('%s' % text)
+ comment_on_pr.comment_on_pr('```\n%s\n```' % text)
if __name__ == '__main__':
- args = _args()
- main(args)
+ args = _args()
+ main(args)
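
eintr_be_gone above is defined as a retry wrapper, but its call sites are not part of this diff. A hypothetical usage sketch in Python 3 spelling (the wrapped function read_file is made up for illustration):

    import errno

    def eintr_be_gone(fn):
        """Retry fn until it no longer fails with EINTR (mirrors the helper above)."""
        def inner(*args):
            while True:
                try:
                    return fn(*args)
                except IOError as e:
                    if e.errno != errno.EINTR:
                        raise
        return inner

    @eintr_be_gone
    def read_file(path):          # hypothetical wrapped call
        with open(path) as f:
            return f.read()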
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 81db5a226a..08894bbe4d 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
""" Python utility to run opt and counters benchmarks and save json output """
import bm_constants
@@ -27,93 +26,96 @@ import sys
import os
sys.path.append(
- os.path.join(
- os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
- 'python_utils'))
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
+ 'python_utils'))
import jobset
def _args():
- argp = argparse.ArgumentParser(description='Runs microbenchmarks')
- argp.add_argument(
- '-b',
- '--benchmarks',
- nargs='+',
- choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- help='Benchmarks to run')
- argp.add_argument(
- '-j',
- '--jobs',
- type=int,
- default=multiprocessing.cpu_count(),
- help='Number of CPUs to use')
- argp.add_argument(
- '-n',
- '--name',
- type=str,
- help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
- )
- argp.add_argument(
- '-r',
- '--regex',
- type=str,
- default="",
- help='Regex to filter benchmarks run')
- argp.add_argument(
- '-l',
- '--loops',
- type=int,
- default=20,
- help='Number of times to loops the benchmarks. More loops cuts down on noise'
- )
- argp.add_argument('--counters', dest='counters', action='store_true')
- argp.add_argument('--no-counters', dest='counters', action='store_false')
- argp.set_defaults(counters=True)
- args = argp.parse_args()
- assert args.name
- if args.loops < 3:
- print "WARNING: This run will likely be noisy. Increase loops to at least 3."
- return args
+ argp = argparse.ArgumentParser(description='Runs microbenchmarks')
+ argp.add_argument(
+ '-b',
+ '--benchmarks',
+ nargs='+',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ help='Benchmarks to run')
+ argp.add_argument(
+ '-j',
+ '--jobs',
+ type=int,
+ default=multiprocessing.cpu_count(),
+ help='Number of CPUs to use')
+ argp.add_argument(
+ '-n',
+ '--name',
+ type=str,
+ help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
+ )
+ argp.add_argument(
+ '-r',
+ '--regex',
+ type=str,
+ default="",
+ help='Regex to filter benchmarks run')
+ argp.add_argument(
+ '-l',
+ '--loops',
+ type=int,
+ default=20,
+ help='Number of times to loops the benchmarks. More loops cuts down on noise'
+ )
+ argp.add_argument('--counters', dest='counters', action='store_true')
+ argp.add_argument('--no-counters', dest='counters', action='store_false')
+ argp.set_defaults(counters=True)
+ args = argp.parse_args()
+ assert args.name
+ if args.loops < 3:
+ print "WARNING: This run will likely be noisy. Increase loops to at least 3."
+ return args
def _collect_bm_data(bm, cfg, name, regex, idx, loops):
- jobs_list = []
- for line in subprocess.check_output(
- ['bm_diff_%s/%s/%s' % (name, cfg, bm),
- '--benchmark_list_tests', '--benchmark_filter=%s' % regex]).splitlines():
- stripped_line = line.strip().replace("/", "_").replace(
- "<", "_").replace(">", "_").replace(", ", "_")
- cmd = [
- 'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
- line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
- (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
- ]
- jobs_list.append(
- jobset.JobSpec(
- cmd,
- shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
- loops),
- verbose_success=True,
- cpu_cost=2,
- timeout_seconds=60 * 60)) # one hour
- return jobs_list
+ jobs_list = []
+ for line in subprocess.check_output([
+ 'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_list_tests',
+ '--benchmark_filter=%s' % regex
+ ]).splitlines():
+ stripped_line = line.strip().replace("/", "_").replace(
+ "<", "_").replace(">", "_").replace(", ", "_")
+ cmd = [
+ 'bm_diff_%s/%s/%s' % (name, cfg, bm),
+ '--benchmark_filter=^%s$' % line,
+ '--benchmark_out=%s.%s.%s.%s.%d.json' %
+ (bm, stripped_line, cfg, name, idx),
+ '--benchmark_out_format=json',
+ ]
+ jobs_list.append(
+ jobset.JobSpec(
+ cmd,
+ shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
+ loops),
+ verbose_success=True,
+ cpu_cost=2,
+ timeout_seconds=60 * 60)) # one hour
+ return jobs_list
def create_jobs(name, benchmarks, loops, regex, counters):
- jobs_list = []
- for loop in range(0, loops):
- for bm in benchmarks:
- jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
- if counters:
- jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
- loops)
- random.shuffle(jobs_list, random.SystemRandom().random)
- return jobs_list
+ jobs_list = []
+ for loop in range(0, loops):
+ for bm in benchmarks:
+ jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
+ if counters:
+ jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
+ loops)
+ random.shuffle(jobs_list, random.SystemRandom().random)
+ return jobs_list
if __name__ == '__main__':
- args = _args()
- jobs_list = create_jobs(args.name, args.benchmarks, args.loops,
- args.regex, args.counters)
- jobset.run(jobs_list, maxjobs=args.jobs)
+ args = _args()
+ jobs_list = create_jobs(args.name, args.benchmarks, args.loops, args.regex,
+ args.counters)
+ jobset.run(jobs_list, maxjobs=args.jobs)
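
To make the file naming above concrete: each benchmark case name is flattened with the same replace chain before being used in the --benchmark_out path. A small sketch (the benchmark case name is illustrative):

    line = 'BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0'
    stripped_line = line.strip().replace("/", "_").replace("<", "_").replace(
        ">", "_").replace(", ", "_")
    out_file = '%s.%s.%s.%s.%d.json' % ('bm_fullstack_unary_ping_pong', stripped_line,
                                        'opt', 'new', 0)
    print(out_file)
    # bm_fullstack_unary_ping_pong.BM_UnaryPingPong_TCP_NoOpMutator_NoOpMutator__0_0.opt.new.0.json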
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
index 63e691af02..2a77040360 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -19,40 +19,41 @@ import math
_DEFAULT_THRESHOLD = 1e-10
+
def scale(a, mul):
- return [x * mul for x in a]
+ return [x * mul for x in a]
def cmp(a, b):
- return stats.ttest_ind(a, b)
-
-
-def speedup(new, old, threshold = _DEFAULT_THRESHOLD):
- if (len(set(new))) == 1 and new == old: return 0
- s0, p0 = cmp(new, old)
- if math.isnan(p0): return 0
- if s0 == 0: return 0
- if p0 > threshold: return 0
- if s0 < 0:
- pct = 1
- while pct < 100:
- sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
- if sp > 0: break
- if pp > threshold: break
- pct += 1
- return -(pct - 1)
- else:
- pct = 1
- while pct < 10000:
- sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
- if sp < 0: break
- if pp > threshold: break
- pct += 1
- return pct - 1
+ return stats.ttest_ind(a, b)
+
+
+def speedup(new, old, threshold=_DEFAULT_THRESHOLD):
+ if (len(set(new))) == 1 and new == old: return 0
+ s0, p0 = cmp(new, old)
+ if math.isnan(p0): return 0
+ if s0 == 0: return 0
+ if p0 > threshold: return 0
+ if s0 < 0:
+ pct = 1
+ while pct < 100:
+ sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
+ if sp > 0: break
+ if pp > threshold: break
+ pct += 1
+ return -(pct - 1)
+ else:
+ pct = 1
+ while pct < 10000:
+ sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
+ if sp < 0: break
+ if pp > threshold: break
+ pct += 1
+ return pct - 1
if __name__ == "__main__":
- new = [0.0, 0.0, 0.0, 0.0]
- old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
- print speedup(new, old, 1e-5)
- print speedup(old, new, 1e-5)
+ new = [0.0, 0.0, 0.0, 0.0]
+ old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
+ print speedup(new, old, 1e-5)
+ print speedup(old, new, 1e-5)
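
bm_speedup builds on scipy's independent-samples t-test: it compares the new and old sample sets, then rescales the old samples one percent at a time until the test can no longer distinguish them, and reports that percentage. A self-contained sketch of the underlying call (sample values are illustrative):

    from scipy import stats

    new = [10.2, 10.1, 10.3, 10.2]   # e.g. cpu_time samples, roughly 8% lower than old
    old = [11.0, 11.1, 10.9, 11.2]

    t_stat, p_value = stats.ttest_ind(new, old)
    print(t_stat, p_value)
    # The sign of the statistic picks the search direction in speedup(), and the
    # p-value is compared against `threshold` to decide whether the shift is significant.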
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index eb450ee6ad..1dd9f65dbf 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -15,187 +15,196 @@
import os
_BM_SPECS = {
- 'BM_UnaryPingPong': {
- 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
- 'dyn': ['request_size', 'response_size'],
- },
- 'BM_PumpStreamClientToServer': {
- 'tpl': ['fixture'],
- 'dyn': ['request_size'],
- },
- 'BM_PumpStreamServerToClient': {
- 'tpl': ['fixture'],
- 'dyn': ['request_size'],
- },
- 'BM_StreamingPingPong': {
- 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
- 'dyn': ['request_size', 'request_count'],
- },
- 'BM_StreamingPingPongMsgs': {
- 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
- 'dyn': ['request_size'],
- },
- 'BM_PumpStreamServerToClient_Trickle': {
- 'tpl': [],
- 'dyn': ['request_size', 'bandwidth_kilobits'],
- },
- 'BM_PumpUnbalancedUnary_Trickle': {
- 'tpl': [],
- 'dyn': ['cli_req_size', 'svr_req_size', 'bandwidth_kilobits'],
- },
- 'BM_ErrorStringOnNewError': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_ErrorStringRepeatedly': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_ErrorGetStatus': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_ErrorGetStatusCode': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_ErrorHttpError': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_HasClearGrpcStatus': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_IsolatedFilter': {
- 'tpl': ['fixture', 'client_mutator'],
- 'dyn': [],
- },
- 'BM_HpackEncoderEncodeHeader': {
- 'tpl': ['fixture'],
- 'dyn': ['end_of_stream', 'request_size'],
- },
- 'BM_HpackParserParseHeader': {
- 'tpl': ['fixture', 'on_header'],
- 'dyn': [],
- },
- 'BM_CallCreateDestroy': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_Zalloc': {
- 'tpl': [],
- 'dyn': ['request_size'],
- },
- 'BM_PollEmptyPollset_SpeedOfLight': {
- 'tpl': [],
- 'dyn': ['request_size', 'request_count'],
- },
- 'BM_StreamCreateSendInitialMetadataDestroy': {
- 'tpl': ['fixture'],
- 'dyn': [],
- },
- 'BM_TransportStreamSend': {
- 'tpl': [],
- 'dyn': ['request_size'],
- },
- 'BM_TransportStreamRecv': {
- 'tpl': [],
- 'dyn': ['request_size'],
- },
- 'BM_StreamingPingPongWithCoalescingApi': {
- 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
- 'dyn': ['request_size', 'request_count', 'end_of_stream'],
- },
- 'BM_Base16SomeStuff': {
- 'tpl': [],
- 'dyn': ['request_size'],
- }
+ 'BM_UnaryPingPong': {
+ 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+ 'dyn': ['request_size', 'response_size'],
+ },
+ 'BM_PumpStreamClientToServer': {
+ 'tpl': ['fixture'],
+ 'dyn': ['request_size'],
+ },
+ 'BM_PumpStreamServerToClient': {
+ 'tpl': ['fixture'],
+ 'dyn': ['request_size'],
+ },
+ 'BM_StreamingPingPong': {
+ 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+ 'dyn': ['request_size', 'request_count'],
+ },
+ 'BM_StreamingPingPongMsgs': {
+ 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+ 'dyn': ['request_size'],
+ },
+ 'BM_PumpStreamServerToClient_Trickle': {
+ 'tpl': [],
+ 'dyn': ['request_size', 'bandwidth_kilobits'],
+ },
+ 'BM_PumpUnbalancedUnary_Trickle': {
+ 'tpl': [],
+ 'dyn': ['cli_req_size', 'svr_req_size', 'bandwidth_kilobits'],
+ },
+ 'BM_ErrorStringOnNewError': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_ErrorStringRepeatedly': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_ErrorGetStatus': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_ErrorGetStatusCode': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_ErrorHttpError': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_HasClearGrpcStatus': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_IsolatedFilter': {
+ 'tpl': ['fixture', 'client_mutator'],
+ 'dyn': [],
+ },
+ 'BM_HpackEncoderEncodeHeader': {
+ 'tpl': ['fixture'],
+ 'dyn': ['end_of_stream', 'request_size'],
+ },
+ 'BM_HpackParserParseHeader': {
+ 'tpl': ['fixture', 'on_header'],
+ 'dyn': [],
+ },
+ 'BM_CallCreateDestroy': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_Zalloc': {
+ 'tpl': [],
+ 'dyn': ['request_size'],
+ },
+ 'BM_PollEmptyPollset_SpeedOfLight': {
+ 'tpl': [],
+ 'dyn': ['request_size', 'request_count'],
+ },
+ 'BM_StreamCreateSendInitialMetadataDestroy': {
+ 'tpl': ['fixture'],
+ 'dyn': [],
+ },
+ 'BM_TransportStreamSend': {
+ 'tpl': [],
+ 'dyn': ['request_size'],
+ },
+ 'BM_TransportStreamRecv': {
+ 'tpl': [],
+ 'dyn': ['request_size'],
+ },
+ 'BM_StreamingPingPongWithCoalescingApi': {
+ 'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+ 'dyn': ['request_size', 'request_count', 'end_of_stream'],
+ },
+ 'BM_Base16SomeStuff': {
+ 'tpl': [],
+ 'dyn': ['request_size'],
+ }
}
+
def numericalize(s):
- if not s: return ''
- if s[-1] == 'k':
- return float(s[:-1]) * 1024
- if s[-1] == 'M':
- return float(s[:-1]) * 1024 * 1024
- if 0 <= (ord(s[-1]) - ord('0')) <= 9:
- return float(s)
- assert 'not a number: %s' % s
+ if not s: return ''
+ if s[-1] == 'k':
+ return float(s[:-1]) * 1024
+ if s[-1] == 'M':
+ return float(s[:-1]) * 1024 * 1024
+ if 0 <= (ord(s[-1]) - ord('0')) <= 9:
+ return float(s)
+ assert 'not a number: %s' % s
+
def parse_name(name):
- cpp_name = name
- if '<' not in name and '/' not in name and name not in _BM_SPECS:
- return {'name': name, 'cpp_name': name}
- rest = name
- out = {}
- tpl_args = []
- dyn_args = []
- if '<' in rest:
- tpl_bit = rest[rest.find('<') + 1 : rest.rfind('>')]
- arg = ''
- nesting = 0
- for c in tpl_bit:
- if c == '<':
- nesting += 1
- arg += c
- elif c == '>':
- nesting -= 1
- arg += c
- elif c == ',':
- if nesting == 0:
- tpl_args.append(arg.strip())
- arg = ''
- else:
- arg += c
- else:
- arg += c
- tpl_args.append(arg.strip())
- rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
- if '/' in rest:
- s = rest.split('/')
- rest = s[0]
- dyn_args = s[1:]
- name = rest
- print (name)
- print (dyn_args, _BM_SPECS[name]['dyn'])
- print (tpl_args, _BM_SPECS[name]['tpl'])
- assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
- assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
- assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
- out['name'] = name
- out['cpp_name'] = cpp_name
- out.update(dict((k, numericalize(v)) for k, v in zip(_BM_SPECS[name]['dyn'], dyn_args)))
- out.update(dict(zip(_BM_SPECS[name]['tpl'], tpl_args)))
- return out
+ cpp_name = name
+ if '<' not in name and '/' not in name and name not in _BM_SPECS:
+ return {'name': name, 'cpp_name': name}
+ rest = name
+ out = {}
+ tpl_args = []
+ dyn_args = []
+ if '<' in rest:
+ tpl_bit = rest[rest.find('<') + 1:rest.rfind('>')]
+ arg = ''
+ nesting = 0
+ for c in tpl_bit:
+ if c == '<':
+ nesting += 1
+ arg += c
+ elif c == '>':
+ nesting -= 1
+ arg += c
+ elif c == ',':
+ if nesting == 0:
+ tpl_args.append(arg.strip())
+ arg = ''
+ else:
+ arg += c
+ else:
+ arg += c
+ tpl_args.append(arg.strip())
+ rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
+ if '/' in rest:
+ s = rest.split('/')
+ rest = s[0]
+ dyn_args = s[1:]
+ name = rest
+ print(name)
+ print(dyn_args, _BM_SPECS[name]['dyn'])
+ print(tpl_args, _BM_SPECS[name]['tpl'])
+ assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
+ assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
+ assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
+ out['name'] = name
+ out['cpp_name'] = cpp_name
+ out.update(
+ dict((k, numericalize(v))
+ for k, v in zip(_BM_SPECS[name]['dyn'], dyn_args)))
+ out.update(dict(zip(_BM_SPECS[name]['tpl'], tpl_args)))
+ return out
-def expand_json(js, js2 = None):
- if not js and not js2: raise StopIteration()
- if not js: js = js2
- for bm in js['benchmarks']:
- if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
- context = js['context']
- if 'label' in bm:
- labels_list = [s.split(':') for s in bm['label'].strip().split(' ') if len(s) and s[0] != '#']
- for el in labels_list:
- el[0] = el[0].replace('/iter', '_per_iteration')
- labels = dict(labels_list)
- else:
- labels = {}
- row = {
- 'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
- 'jenkins_job': os.environ.get('JOB_NAME', ''),
- }
- row.update(context)
- row.update(bm)
- row.update(parse_name(row['name']))
- row.update(labels)
- if js2:
- for bm2 in js2['benchmarks']:
- if bm['name'] == bm2['name'] and 'already_used' not in bm2:
- row['cpu_time'] = bm2['cpu_time']
- row['real_time'] = bm2['real_time']
- row['iterations'] = bm2['iterations']
- bm2['already_used'] = True
- break
- yield row
+
+def expand_json(js, js2=None):
+ if not js and not js2: raise StopIteration()
+ if not js: js = js2
+ for bm in js['benchmarks']:
+ if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'):
+ continue
+ context = js['context']
+ if 'label' in bm:
+ labels_list = [
+ s.split(':') for s in bm['label'].strip().split(' ')
+ if len(s) and s[0] != '#'
+ ]
+ for el in labels_list:
+ el[0] = el[0].replace('/iter', '_per_iteration')
+ labels = dict(labels_list)
+ else:
+ labels = {}
+ row = {
+ 'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
+ 'jenkins_job': os.environ.get('JOB_NAME', ''),
+ }
+ row.update(context)
+ row.update(bm)
+ row.update(parse_name(row['name']))
+ row.update(labels)
+ if js2:
+ for bm2 in js2['benchmarks']:
+ if bm['name'] == bm2['name'] and 'already_used' not in bm2:
+ row['cpu_time'] = bm2['cpu_time']
+ row['real_time'] = bm2['real_time']
+ row['iterations'] = bm2['iterations']
+ bm2['already_used'] = True
+ break
+ yield row
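
As a concrete example of the label handling in expand_json above: Google Benchmark emits counters as a space-separated label string, and each key's '/iter' suffix is rewritten to '_per_iteration' before the pairs become row columns (the label value below is made up):

    label = 'locks/iter:3.5 allocs/iter:12 #some_comment_token'
    labels_list = [
        s.split(':') for s in label.strip().split(' ') if len(s) and s[0] != '#'
    ]
    for el in labels_list:
        el[0] = el[0].replace('/iter', '_per_iteration')
    print(dict(labels_list))
    # {'locks_per_iteration': '3.5', 'allocs_per_iteration': '12'}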