Diffstat (limited to 'tools/profiling/microbenchmarks/bm_diff')
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_build.py      |  6
-rw-r--r--  tools/profiling/microbenchmarks/bm_diff/bm_constants.py  | 16
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_diff.py       |  3
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_main.py       |  3
-rwxr-xr-x  tools/profiling/microbenchmarks/bm_diff/bm_run.py        | 15
5 files changed, 26 insertions, 17 deletions
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_build.py b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
index a4cd61707d..4197ba3632 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_build.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
@@ -43,7 +43,8 @@ def _args():
'-n',
'--name',
type=str,
- help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
+ help=
+ 'Unique name of this build. To be used as a handle to pass to the other bm* scripts'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
@@ -71,7 +72,8 @@ def build(name, benchmarks, jobs, counters):
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
os.rename(
'bins',
- 'bm_diff_%s' % name,)
+ 'bm_diff_%s' % name,
+ )
if __name__ == '__main__':
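The bm_build.py hunks above are formatter-driven: the long help= string moves to its own line and the os.rename() call gains a hanging closing parenthesis, with no behavioural change. As a minimal standalone sketch (not the repository's actual parser; the flag and help text are copied from the hunk), the reflowed argparse call parses exactly as before:

import argparse

argp = argparse.ArgumentParser(description='standalone sketch of the reflowed flag')
argp.add_argument(
    '-n',
    '--name',
    type=str,
    help=
    'Unique name of this build. To be used as a handle to pass to the other bm* scripts'
)
args = argp.parse_args(['-n', 'baseline'])
assert args.name == 'baseline'  # moving help= to its own line changes layout only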
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index cff29dbe08..5719e42620 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -22,10 +22,12 @@ _AVAILABLE_BENCHMARK_TESTS = [
'bm_metadata', 'bm_fullstack_trickle'
]
-_INTERESTING = (
- 'cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
- 'allocs_per_iteration', 'writes_per_iteration', 'atm_cas_per_iteration',
- 'atm_add_per_iteration', 'nows_per_iteration',
- 'cli_transport_stalls_per_iteration', 'cli_stream_stalls_per_iteration',
- 'svr_transport_stalls_per_iteration', 'svr_stream_stalls_per_iteration',
- 'http2_pings_sent_per_iteration')
+_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median',
+ 'locks_per_iteration', 'allocs_per_iteration',
+ 'writes_per_iteration', 'atm_cas_per_iteration',
+ 'atm_add_per_iteration', 'nows_per_iteration',
+ 'cli_transport_stalls_per_iteration',
+ 'cli_stream_stalls_per_iteration',
+ 'svr_transport_stalls_per_iteration',
+ 'svr_stream_stalls_per_iteration',
+ 'http2_pings_sent_per_iteration')
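The _INTERESTING change in bm_constants.py is pure re-indentation; the elements and their order are identical. A quick standalone check (a hedged sketch built from the two literals in the hunk, rather than by importing the module):

OLD = (
    'cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
    'allocs_per_iteration', 'writes_per_iteration', 'atm_cas_per_iteration',
    'atm_add_per_iteration', 'nows_per_iteration',
    'cli_transport_stalls_per_iteration', 'cli_stream_stalls_per_iteration',
    'svr_transport_stalls_per_iteration', 'svr_stream_stalls_per_iteration',
    'http2_pings_sent_per_iteration')
NEW = ('cpu_time', 'real_time', 'call_initial_size-median',
       'locks_per_iteration', 'allocs_per_iteration',
       'writes_per_iteration', 'atm_cas_per_iteration',
       'atm_add_per_iteration', 'nows_per_iteration',
       'cli_transport_stalls_per_iteration',
       'cli_stream_stalls_per_iteration',
       'svr_transport_stalls_per_iteration',
       'svr_stream_stalls_per_iteration',
       'http2_pings_sent_per_iteration')
assert OLD == NEW  # re-indenting the literal does not change the tracked counters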
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index b8a3b22861..f975a8b402 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -65,7 +65,8 @@ def _args():
'--loops',
type=int,
default=20,
- help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
+ help=
+ 'Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
)
argp.add_argument(
'-r',
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 137c22bf8e..96c63ba060 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -78,7 +78,8 @@ def _args():
'--loops',
type=int,
default=10,
- help='Number of times to loops the benchmarks. More loops cuts down on noise'
+ help=
+ 'Number of times to loops the benchmarks. More loops cuts down on noise'
)
argp.add_argument(
'-j',
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 08894bbe4d..dfb9b178fa 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -51,7 +51,8 @@ def _args():
'-n',
'--name',
type=str,
- help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
+ help=
+ 'Unique name of the build to run. Needs to match the handle passed to bm_build.py'
)
argp.add_argument(
'-r',
@@ -64,7 +65,8 @@ def _args():
'--loops',
type=int,
default=20,
- help='Number of times to loops the benchmarks. More loops cuts down on noise'
+ help=
+ 'Number of times to loops the benchmarks. More loops cuts down on noise'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
@@ -82,13 +84,14 @@ def _collect_bm_data(bm, cfg, name, regex, idx, loops):
'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_list_tests',
'--benchmark_filter=%s' % regex
]).splitlines():
- stripped_line = line.strip().replace("/", "_").replace(
- "<", "_").replace(">", "_").replace(", ", "_")
+ stripped_line = line.strip().replace("/",
+ "_").replace("<", "_").replace(
+ ">", "_").replace(", ", "_")
cmd = [
'bm_diff_%s/%s/%s' % (name, cfg, bm),
'--benchmark_filter=^%s$' % line,
- '--benchmark_out=%s.%s.%s.%s.%d.json' %
- (bm, stripped_line, cfg, name, idx),
+ '--benchmark_out=%s.%s.%s.%s.%d.json' % (bm, stripped_line, cfg,
+ name, idx),
'--benchmark_out_format=json',
]
jobs_list.append(
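The final hunk only re-wraps two expressions in _collect_bm_data: the replace chain that turns a benchmark name into a filename-safe token, and the --benchmark_out format string. A small hedged sketch of what that sanitisation does, using an invented benchmark name and placeholder arguments instead of real --benchmark_list_tests output:

def sanitize(line):
    # same chain as in the hunk: strip whitespace, then map '/', '<', '>' and ', '
    # to underscores so the result can be embedded in a JSON output filename
    return line.strip().replace("/", "_").replace("<", "_").replace(
        ">", "_").replace(", ", "_")

example = 'BM_Example<TCP, NoOpMutator>/0/0'  # hypothetical benchmark name
print(sanitize(example))  # BM_Example_TCP_NoOpMutator__0_0

# filename composed as in the hunk, with placeholder bm/cfg/name/idx values
print('%s.%s.%s.%s.%d.json' % ('bm_metadata', sanitize(example), 'counters', 'new', 0))
# -> bm_metadata.BM_Example_TCP_NoOpMutator__0_0.counters.new.0.json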