diff options
author | Craig Tiller <ctiller@google.com> | 2017-02-23 06:57:05 -0800 |
---|---|---|
committer | Craig Tiller <ctiller@google.com> | 2017-02-23 06:57:05 -0800 |
commit | 4d34729a4ebc411aed085f9800c91253823fb0a7 (patch) | |
tree | 8318980771d144a32c59bcfb03612b8ace7ef0b8 /tools/run_tests/run_microbenchmark.py | |
parent | 6d48602324f22740b6cdcfdfbbba13b6326d769c (diff) | |
parent | b96a96ebd508bfc53fd70ebd944f0b92f62387f8 (diff) |
Merge github.com:grpc/grpc into always-use-port-server
Diffstat (limited to 'tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x | tools/run_tests/run_microbenchmark.py | 8 |
1 file changed, 5 insertions, 3 deletions
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py index a67c8468d9..1ab62eeea5 100755 --- a/tools/run_tests/run_microbenchmark.py +++ b/tools/run_tests/run_microbenchmark.py @@ -92,7 +92,9 @@ def collect_latency(bm_name, args): '--benchmark_list_tests']).splitlines(): link(line, '%s.txt' % fnize(line)) benchmarks.append( - jobset.JobSpec(['bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' % line], + jobset.JobSpec(['bins/basicprof/%s' % bm_name, + '--benchmark_filter=^%s$' % line, + '--benchmark_min_time=0.05'], environ={'LATENCY_TRACE': '%s.trace' % fnize(line)})) profile_analysis.append( jobset.JobSpec([sys.executable, @@ -104,7 +106,7 @@ def collect_latency(bm_name, args): # consume upwards of five gigabytes of ram in some cases, and so analysing # hundreds of them at once is impractical -- but we want at least some # concurrency or the work takes too long - if len(benchmarks) >= min(4, multiprocessing.cpu_count()): + if len(benchmarks) >= min(16, multiprocessing.cpu_count()): # run up to half the cpu count: each benchmark can use up to two cores # (one for the microbenchmark, one for the data flush) jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2)) @@ -192,7 +194,7 @@ argp.add_argument('-c', '--collect', default=sorted(collectors.keys()), help='Which collectors should be run against each benchmark') argp.add_argument('-b', '--benchmarks', - default=['bm_fullstack', 'bm_closure'], + default=['bm_fullstack', 'bm_closure', 'bm_cq', 'bm_call_create'], nargs='+', type=str, help='Which microbenchmarks should be run') |