diff options
author | 2017-12-13 14:44:29 -0800 | |
---|---|---|
committer | 2017-12-13 14:44:29 -0800 | |
commit | 54d699ddda18e8aca7a556ad3c38d1684efc88ec (patch) | |
tree | b0ecaaf07074bdcde6c3315f647cc04417da7b90 /tools/profiling/microbenchmarks/bm_diff/bm_run.py | |
parent | 62d86e9987121c8bd79d4594fb0db019c4faafad (diff) | |
parent | 91a851c6e1f6bc7c1dbf84ea12558d535c911252 (diff) |
Merge branch 'master' of github.com:grpc/grpc into backoff_cpp
Diffstat (limited to 'tools/profiling/microbenchmarks/bm_diff/bm_run.py')
-rwxr-xr-x | tools/profiling/microbenchmarks/bm_diff/bm_run.py | 158 |
1 file changed, 80 insertions, 78 deletions
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Python utility to run opt and counters benchmarks and save json output """

import bm_constants

import argparse
import subprocess
import multiprocessing
import random
import sys
import os

# Make the shared jobset helper importable from run_tests/python_utils.
sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
        'python_utils'))
import jobset


def _args():
    """Parse and validate the command-line arguments for a benchmark run.

    Returns the argparse namespace with: benchmarks, jobs, name, regex,
    loops and counters.  Aborts with a usage error if --name is missing.
    """
    argp = argparse.ArgumentParser(description='Runs microbenchmarks')
    argp.add_argument(
        '-b',
        '--benchmarks',
        nargs='+',
        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
        help='Benchmarks to run')
    argp.add_argument(
        '-j',
        '--jobs',
        type=int,
        default=multiprocessing.cpu_count(),
        help='Number of CPUs to use')
    # required=True replaces the former `assert args.name`: asserts are
    # stripped under `python -O` and give a poor error message, whereas
    # argparse exits with a proper usage error.
    argp.add_argument(
        '-n',
        '--name',
        required=True,
        type=str,
        help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
    )
    argp.add_argument(
        '-r',
        '--regex',
        type=str,
        default="",
        help='Regex to filter benchmarks run')
    argp.add_argument(
        '-l',
        '--loops',
        type=int,
        default=20,
        help='Number of times to loops the benchmarks. More loops cuts down on noise'
    )
    argp.add_argument('--counters', dest='counters', action='store_true')
    argp.add_argument('--no-counters', dest='counters', action='store_false')
    argp.set_defaults(counters=True)
    args = argp.parse_args()
    if args.loops < 3:
        # print() call form (single argument) behaves identically on
        # Python 2 and is forward-compatible with Python 3, unlike the
        # original `print "..."` statement.
        print(
            "WARNING: This run will likely be noisy. Increase loops to at least 3."
        )
    return args
def _collect_bm_data(bm, cfg, name, regex, idx, loops):
    """Build one JobSpec per test exposed by benchmark binary *bm*.

    Asks the binary for its list of tests (filtered by *regex*), then
    creates a job that runs each test individually and writes its JSON
    output to a file keyed by benchmark, test, config, build name and
    loop index.
    """
    binary = 'bm_diff_%s/%s/%s' % (name, cfg, bm)
    listing = subprocess.check_output(
        [binary, '--benchmark_list_tests',
         '--benchmark_filter=%s' % regex])
    specs = []
    for line in listing.splitlines():
        # Sanitize the test name so it is safe to embed in a file name.
        safe_name = line.strip().replace("/", "_").replace("<", "_").replace(
            ">", "_").replace(", ", "_")
        out_file = '%s.%s.%s.%s.%d.json' % (bm, safe_name, cfg, name, idx)
        cmd = [
            binary,
            '--benchmark_filter=^%s$' % line,
            '--benchmark_out=%s' % out_file,
            '--benchmark_out_format=json',
        ]
        specs.append(
            jobset.JobSpec(
                cmd,
                shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
                                                 loops),
                verbose_success=True,
                cpu_cost=2,
                timeout_seconds=60 * 60))  # one hour
    return specs


def create_jobs(name, benchmarks, loops, regex, counters):
    """Assemble the shuffled list of all benchmark jobs across all loops."""
    all_jobs = []
    for loop in range(0, loops):
        for bm in benchmarks:
            all_jobs += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
            if counters:
                all_jobs += _collect_bm_data(bm, 'counters', name, regex,
                                             loop, loops)
    # Shuffle so machine-load noise is spread evenly across configs/loops.
    random.shuffle(all_jobs, random.SystemRandom().random)
    return all_jobs


if __name__ == '__main__':
    args = _args()
    jobs_list = create_jobs(args.name, args.benchmarks, args.loops,
                            args.regex, args.counters)
    jobset.run(jobs_list, maxjobs=args.jobs)