Diffstat (limited to 'tools')
-rwxr-xr-x  tools/profiling/microbenchmarks/bm2bq.py            29
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json   25
-rw-r--r--  tools/run_tests/generated/tests.json                 22
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py                13
4 files changed, 77 insertions, 12 deletions
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index 124dbdfec5..a7d82269f5 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -61,6 +61,11 @@ columns = [
   ('allocs_per_iteration', 'float'),
   ('locks_per_iteration', 'float'),
   ('writes_per_iteration', 'float'),
+  ('bandwidth_kilobits', 'integer'),
+  ('cli_transport_stalls_per_iteration', 'float'),
+  ('cli_stream_stalls_per_iteration', 'float'),
+  ('svr_transport_stalls_per_iteration', 'float'),
+  ('svr_stream_stalls_per_iteration', 'float'),
 ]
 
 if sys.argv[1] == '--schema':
@@ -92,7 +97,11 @@ bm_specs = {
   'BM_StreamingPingPongMsgs': {
     'tpl': ['fixture', 'client_mutator', 'server_mutator'],
     'dyn': ['request_size'],
-  }
+  },
+  'BM_PumpStreamServerToClient_Trickle': {
+    'tpl': [],
+    'dyn': ['request_size', 'bandwidth_kilobits'],
+  },
 }
 
 def numericalize(s):
@@ -106,6 +115,8 @@ def numericalize(s):
   assert 'not a number: %s' % s
 
 def parse_name(name):
+  if '<' not in name and '/' not in name and name not in bm_specs:
+    return {'name': name}
   rest = name
   out = {}
   tpl_args = []
@@ -136,7 +147,7 @@ def parse_name(name):
     rest = s[0]
     dyn_args = s[1:]
   name = rest
-  assert name in bm_specs
+  assert name in bm_specs, 'bm_specs needs to be expanded for %s' % name
   assert len(dyn_args) == len(bm_specs[name]['dyn'])
   assert len(tpl_args) == len(bm_specs[name]['tpl'])
   out['name'] = name
@@ -146,10 +157,13 @@
 for bm in js['benchmarks']:
   context = js['context']
-  labels_list = [s.split(':') for s in bm.get('label', '').split(' ')]
-  for el in labels_list:
-    el[0] = el[0].replace('/iter', '_per_iteration')
-  labels = dict(labels_list)
+  if 'label' in bm:
+    labels_list = [s.split(':') for s in bm['label'].split(' ')]
+    for el in labels_list:
+      el[0] = el[0].replace('/iter', '_per_iteration')
+    labels = dict(labels_list)
+  else:
+    labels = {}
   row = {
     'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
     'jenkins_job': os.environ.get('JOB_NAME', ''),
@@ -158,5 +172,6 @@ for bm in js['benchmarks']:
   row.update(bm)
   row.update(parse_name(row['name']))
   row.update(labels)
-  del row['label']
+  if 'label' in row:
+    del row['label']
   writer.writerow(row)
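With the new 'BM_PumpStreamServerToClient_Trickle' entry, a name such as BM_PumpStreamServerToClient_Trickle/16/1024 maps its two path components onto the declared dyn fields. A minimal standalone sketch of that decomposition follows; parse_trickle_name is a hypothetical helper for illustration only, the real logic is parse_name in bm2bq.py above, which also handles template arguments.

# Hypothetical sketch: decompose a trickle benchmark name using the new
# bm_specs entry from the diff above. Covers only the dyn-args path.
bm_specs = {
  'BM_PumpStreamServerToClient_Trickle': {
    'tpl': [],
    'dyn': ['request_size', 'bandwidth_kilobits'],
  },
}

def parse_trickle_name(name):
  # Split 'Base/arg1/arg2' and pair the numeric args with the dyn names.
  parts = name.split('/')
  base, dyn_args = parts[0], [int(p) for p in parts[1:]]
  spec = bm_specs[base]
  assert len(dyn_args) == len(spec['dyn'])
  out = {'name': base}
  out.update(zip(spec['dyn'], dyn_args))
  return out

print(parse_trickle_name('BM_PumpStreamServerToClient_Trickle/16/1024'))
# -> {'name': 'BM_PumpStreamServerToClient_Trickle',
#     'request_size': 16, 'bandwidth_kilobits': 1024}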
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 9c04d04d3f..d71f5735ec 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -2369,6 +2369,24 @@
       "gpr",
       "gpr_test_util",
       "grpc",
+      "grpc_test_util"
+    ],
+    "headers": [],
+    "is_filegroup": false,
+    "language": "c++",
+    "name": "bm_closure",
+    "src": [
+      "test/cpp/microbenchmarks/bm_closure.cc"
+    ],
+    "third_party": false,
+    "type": "target"
+  },
+  {
+    "deps": [
+      "benchmark",
+      "gpr",
+      "gpr_test_util",
+      "grpc",
       "grpc++",
       "grpc++_test_util",
       "grpc_test_util"
@@ -7799,7 +7817,8 @@
       "test/core/util/passthru_endpoint.h",
       "test/core/util/port.h",
       "test/core/util/port_server_client.h",
-      "test/core/util/slice_splitter.h"
+      "test/core/util/slice_splitter.h",
+      "test/core/util/trickle_endpoint.h"
     ],
     "is_filegroup": true,
     "language": "c",
@@ -7834,7 +7853,9 @@
       "test/core/util/port_uv.c",
       "test/core/util/port_windows.c",
       "test/core/util/slice_splitter.c",
-      "test/core/util/slice_splitter.h"
+      "test/core/util/slice_splitter.h",
+      "test/core/util/trickle_endpoint.c",
+      "test/core/util/trickle_endpoint.h"
     ],
     "third_party": false,
     "type": "filegroup"
   }
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 4aa8917e51..0c3f98c459 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -2484,6 +2484,28 @@
     "flaky": false,
     "gtest": false,
     "language": "c++",
+    "name": "bm_closure",
+    "platforms": [
+      "linux",
+      "mac",
+      "posix"
+    ]
+  },
+  {
+    "args": [
+      "--benchmark_min_time=0"
+    ],
+    "ci_platforms": [
+      "linux",
+      "mac",
+      "posix"
+    ],
+    "cpu_cost": 1.0,
+    "exclude_configs": [],
+    "exclude_iomgrs": [],
+    "flaky": false,
+    "gtest": false,
+    "language": "c++",
     "name": "bm_cq",
     "platforms": [
       "linux",
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 6bba29d74b..3bb4a9547c 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -149,9 +149,12 @@ def collect_summary(bm_name, args):
   subprocess.check_call(
       ['make', bm_name,
        'CONFIG=counters', '-j', '%d' % multiprocessing.cpu_count()])
-  text(subprocess.check_output(['bins/counters/%s' % bm_name,
-                                '--benchmark_out=out.json',
-                                '--benchmark_out_format=json']))
+  cmd = ['bins/counters/%s' % bm_name,
+         '--benchmark_out=out.json',
+         '--benchmark_out_format=json']
+  if args.summary_time is not None:
+    cmd += ['--benchmark_min_time=%d' % args.summary_time]
+  text(subprocess.check_output(cmd))
   if args.bigquery_upload:
     with open('out.csv', 'w') as f:
       f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.json']))
@@ -179,6 +182,10 @@ argp.add_argument('--bigquery_upload',
                   action='store_const',
                   const=True,
                   help='Upload results from summary collection to bigquery')
+argp.add_argument('--summary_time',
+                  default=None,
+                  type=int,
+                  help='Minimum time to run benchmarks for the summary collection')
 args = argp.parse_args()
 
 for bm_name in args.benchmarks:
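The run_microbenchmark.py change threads the new --summary_time argument through collect_summary into google-benchmark's --benchmark_min_time flag. A condensed, self-contained sketch of that flow, assuming bm_closure as an example binary name and abbreviating the script's argparse setup:

# Condensed sketch of how --summary_time reaches the benchmark binary;
# mirrors the collect_summary() change in the diff above.
import argparse

argp = argparse.ArgumentParser()
argp.add_argument('--summary_time', default=None, type=int,
                  help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args(['--summary_time', '5'])

cmd = ['bins/counters/bm_closure',  # example benchmark binary
       '--benchmark_out=out.json',
       '--benchmark_out_format=json']
if args.summary_time is not None:
  cmd += ['--benchmark_min_time=%d' % args.summary_time]

print(' '.join(cmd))
# bins/counters/bm_closure --benchmark_out=out.json \
#   --benchmark_out_format=json --benchmark_min_time=5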