about summary refs log tree commit diff homepage
path: root/tools/profiling/microbenchmarks/bm_diff/bm_run.py
diff options
context:
space:
mode:
Diffstat (limited to 'tools/profiling/microbenchmarks/bm_diff/bm_run.py')
-rwxr-xr-x tools/profiling/microbenchmarks/bm_diff/bm_run.py 142
1 file changed, 71 insertions(+), 71 deletions(-)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 0c2e7e36f6..ba04e879f7 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -40,87 +40,87 @@ import sys
import os
sys.path.append(
- os.path.join(
- os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
- 'python_utils'))
+ os.path.join(
+ os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
+ 'python_utils'))
import jobset
def _args():
- argp = argparse.ArgumentParser(description='Runs microbenchmarks')
- argp.add_argument(
- '-b',
- '--benchmarks',
- nargs='+',
- choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
- help='Benchmarks to run')
- argp.add_argument(
- '-j',
- '--jobs',
- type=int,
- default=multiprocessing.cpu_count(),
- help='Number of CPUs to use')
- argp.add_argument(
- '-n',
- '--name',
- type=str,
- help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
- )
- argp.add_argument(
- '-r',
- '--repetitions',
- type=int,
- default=1,
- help='Number of repetitions to pass to the benchmarks')
- argp.add_argument(
- '-l',
- '--loops',
- type=int,
- default=20,
- help='Number of times to loops the benchmarks. More loops cuts down on noise'
- )
- args = argp.parse_args()
- assert args.name
- if args.loops < 3:
- print "WARNING: This run will likely be noisy. Increase loops to at least 3."
- return args
+ argp = argparse.ArgumentParser(description='Runs microbenchmarks')
+ argp.add_argument(
+ '-b',
+ '--benchmarks',
+ nargs='+',
+ choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+ help='Benchmarks to run')
+ argp.add_argument(
+ '-j',
+ '--jobs',
+ type=int,
+ default=multiprocessing.cpu_count(),
+ help='Number of CPUs to use')
+ argp.add_argument(
+ '-n',
+ '--name',
+ type=str,
+ help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
+ )
+ argp.add_argument(
+ '-r',
+ '--repetitions',
+ type=int,
+ default=1,
+ help='Number of repetitions to pass to the benchmarks')
+ argp.add_argument(
+ '-l',
+ '--loops',
+ type=int,
+ default=20,
+ help='Number of times to loops the benchmarks. More loops cuts down on noise'
+ )
+ args = argp.parse_args()
+ assert args.name
+ if args.loops < 3:
+ print "WARNING: This run will likely be noisy. Increase loops to at least 3."
+ return args
def _collect_bm_data(bm, cfg, name, reps, idx, loops):
- jobs_list = []
- for line in subprocess.check_output(
- ['bm_diff_%s/%s/%s' % (name, cfg, bm),
- '--benchmark_list_tests']).splitlines():
- stripped_line = line.strip().replace("/", "_").replace(
- "<", "_").replace(">", "_").replace(", ", "_")
- cmd = [
- 'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
- line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
- (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
- '--benchmark_repetitions=%d' % (reps)
- ]
- jobs_list.append(
- jobset.JobSpec(
- cmd,
- shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
- loops),
- verbose_success=True,
- timeout_seconds=60 * 2))
- return jobs_list
+ jobs_list = []
+ for line in subprocess.check_output(
+ ['bm_diff_%s/%s/%s' % (name, cfg, bm),
+ '--benchmark_list_tests']).splitlines():
+ stripped_line = line.strip().replace("/", "_").replace(
+ "<", "_").replace(">", "_").replace(", ", "_")
+ cmd = [
+ 'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
+ line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
+ (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
+ '--benchmark_repetitions=%d' % (reps)
+ ]
+ jobs_list.append(
+ jobset.JobSpec(
+ cmd,
+ shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
+ loops),
+ verbose_success=True,
+ timeout_seconds=60 * 2))
+ return jobs_list
def run(name, benchmarks, jobs, loops, reps):
- jobs_list = []
- for loop in range(0, loops):
- for bm in benchmarks:
- jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
- jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
- loops)
- random.shuffle(jobs_list, random.SystemRandom().random)
- jobset.run(jobs_list, maxjobs=jobs)
+ jobs_list = []
+ for loop in range(0, loops):
+ for bm in benchmarks:
+ jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
+ jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
+ loops)
+ random.shuffle(jobs_list, random.SystemRandom().random)
+ jobset.run(jobs_list, maxjobs=jobs)
if __name__ == '__main__':
- args = _args()
- run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions)
+ args = _args()
+ run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions)