Diffstat (limited to 'tools')
-rwxr-xr-x  tools/profiling/microbenchmarks/bm2bq.py            | 18
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json  | 18
-rw-r--r--  tools/run_tests/generated/tests.json                | 22
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py               |  2
4 files changed, 53 insertions, 7 deletions
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index fd6207f42b..a7d82269f5 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -115,6 +115,8 @@ def numericalize(s):
   assert 'not a number: %s' % s
 
 def parse_name(name):
+  if '<' not in name and '/' not in name and name not in bm_specs:
+    return {'name': name}
   rest = name
   out = {}
   tpl_args = []
@@ -145,7 +147,7 @@ def parse_name(name):
     rest = s[0]
     dyn_args = s[1:]
   name = rest
-  assert name in bm_specs
+  assert name in bm_specs, 'bm_specs needs to be expanded for %s' % name
   assert len(dyn_args) == len(bm_specs[name]['dyn'])
   assert len(tpl_args) == len(bm_specs[name]['tpl'])
   out['name'] = name
@@ -155,10 +157,13 @@ def parse_name(name):
 
 for bm in js['benchmarks']:
   context = js['context']
-  labels_list = [s.split(':') for s in bm.get('label', '').split(' ')]
-  for el in labels_list:
-    el[0] = el[0].replace('/iter', '_per_iteration')
-  labels = dict(labels_list)
+  if 'label' in bm:
+    labels_list = [s.split(':') for s in bm['label'].split(' ')]
+    for el in labels_list:
+      el[0] = el[0].replace('/iter', '_per_iteration')
+    labels = dict(labels_list)
+  else:
+    labels = {}
   row = {
     'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
     'jenkins_job': os.environ.get('JOB_NAME', ''),
@@ -167,5 +172,6 @@ for bm in js['benchmarks']:
   row.update(bm)
   row.update(parse_name(row['name']))
   row.update(labels)
-  del row['label']
+  if 'label' in row:
+    del row['label']
   writer.writerow(row)
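
Taken together, the bm2bq.py hunks make the converter tolerant of benchmarks it has no spec for and of result rows without a Google Benchmark label (the old bm.get('label', '') path fed dict() a one-element pair from ''.split(' ') and raised ValueError). A minimal standalone sketch of the new control flow; bm_specs and the sample entries here are illustrative stand-ins, not the real tables from the script:

# Sketch of the new parse_name fast path and label handling from the
# hunks above. bm_specs is a stand-in; assume nothing is registered.
bm_specs = {}

def parse_name(name):
  # New early return: a plain name with no template args ('<...>') and no
  # dynamic args ('/...') that has no bm_specs entry passes through as-is,
  # instead of tripping the expanded assertion below.
  if '<' not in name and '/' not in name and name not in bm_specs:
    return {'name': name}
  assert name in bm_specs, 'bm_specs needs to be expanded for %s' % name

print(parse_name('BM_SomePlainBenchmark'))  # -> {'name': 'BM_SomePlainBenchmark'}

# New label handling: a row without a 'label' key now yields an empty dict
# instead of crashing inside dict().
bm = {'name': 'BM_SomePlainBenchmark'}      # hypothetical row with no label
if 'label' in bm:
  labels_list = [s.split(':') for s in bm['label'].split(' ')]
  for el in labels_list:
    el[0] = el[0].replace('/iter', '_per_iteration')
  labels = dict(labels_list)
else:
  labels = {}
print(labels)                               # -> {}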
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index c362806e3a..704f22dfda 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -2369,6 +2369,24 @@
       "gpr",
       "gpr_test_util",
       "grpc",
+      "grpc_test_util"
+    ],
+    "headers": [],
+    "is_filegroup": false,
+    "language": "c++",
+    "name": "bm_closure",
+    "src": [
+      "test/cpp/microbenchmarks/bm_closure.cc"
+    ],
+    "third_party": false,
+    "type": "target"
+  },
+  {
+    "deps": [
+      "benchmark",
+      "gpr",
+      "gpr_test_util",
+      "grpc",
       "grpc++",
       "grpc++_test_util",
       "grpc_test_util"
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index c193caa26f..a161e8a670 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -2484,6 +2484,28 @@
     "flaky": false,
     "gtest": false,
     "language": "c++",
+    "name": "bm_closure",
+    "platforms": [
+      "linux",
+      "mac",
+      "posix"
+    ]
+  },
+  {
+    "args": [
+      "--benchmark_min_time=0"
+    ],
+    "ci_platforms": [
+      "linux",
+      "mac",
+      "posix"
+    ],
+    "cpu_cost": 1.0,
+    "exclude_configs": [],
+    "exclude_iomgrs": [],
+    "flaky": false,
+    "gtest": false,
+    "language": "c++",
     "name": "bm_fullstack",
     "platforms": [
       "linux",
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 2991bfb656..4884911c99 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -195,7 +195,7 @@ argp.add_argument('-c', '--collect',
                   default=sorted(collectors.keys()),
                   help='Which collectors should be run against each benchmark')
 argp.add_argument('-b', '--benchmarks',
-                  default=['bm_fullstack'],
+                  default=['bm_fullstack', 'bm_closure'],
                   nargs='+',
                   type=str,
                   help='Which microbenchmarks should be run')
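
The run_microbenchmark.py change only extends the default benchmark set, so plain invocations now collect both suites while explicit -b selections behave as before. A minimal sketch of the resulting argparse behaviour, assuming the surrounding setup shown in the hunk:

import argparse

argp = argparse.ArgumentParser()
argp.add_argument('-b', '--benchmarks',
                  default=['bm_fullstack', 'bm_closure'],
                  nargs='+',
                  type=str)

# No -b flag: both suites are selected by default.
assert argp.parse_args([]).benchmarks == ['bm_fullstack', 'bm_closure']
# An explicit selection still overrides the default list entirely.
assert argp.parse_args(['-b', 'bm_closure']).benchmarks == ['bm_closure']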