diff options
author | mtklein <mtklein@chromium.org> | 2015-02-24 11:45:11 -0800 |
---|---|---|
committer | Commit bot <commit-bot@chromium.org> | 2015-02-24 11:45:11 -0800 |
commit | f73e589c0d3d54371466dcaa0642925824df24d7 (patch) | |
tree | 00b668f49e75d530570e14bd043ec9b4f2e14146 /tools/nanobench_flags.py | |
parent | fe1b180bee3f254cb74b5cb2ae383050fc316937 (diff) |
Add tools/nanobench_flags.py.
This should look suspiciously similar to tools/dm_flags.py. In fact, I
tweaked tools/dm_flags.py a bit to make it even more suspiciously similar.
I'll leave actually deduping this to future me.
I noticed we have an opportunity to make our Valgrind run of nanobench faster,
by not only making it not auto-calibrate (--loops 1) but also take only one
measurement (--samples 1). Should be 5-10x faster than the default.
BUG=skia:
Review URL: https://codereview.chromium.org/957503002
Diffstat (limited to 'tools/nanobench_flags.py')
-rwxr-xr-x | tools/nanobench_flags.py | 91 |
1 file changed, 91 insertions, 0 deletions
diff --git a/tools/nanobench_flags.py b/tools/nanobench_flags.py new file mode 100755 index 0000000000..18668c9dc2 --- /dev/null +++ b/tools/nanobench_flags.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python + +usage = ''' +Write extra flags to outfile for nanobench based on the bot name: + $ python nanobench_flags.py outfile Perf-Android-GalaxyS3-Mali400-Arm7-Release +Or run self-tests: + $ python nanobench_flags.py test +''' + +import inspect +import json +import os +import sys + + +def lineno(): + caller = inspect.stack()[1] # Up one level to our caller. + return inspect.getframeinfo(caller[0]).lineno + + +cov_start = lineno()+1 # We care about coverage starting just past this def. +def get_args(bot): + args = [] + + args.extend(['--scales', '1.0', '1.1']) + + if 'Valgrind' in bot: + # Don't care about Valgrind performance. + args.extend(['--loops', '1']) + args.extend(['--samples', '1']) + + match = [] + if 'Android' in bot: + # Segfaults when run as GPU bench. Very large texture? + match.append('~blurroundrect') + match.append('~patch_grid') # skia:2847 + match.append('~desk_carsvg') + if 'HD2000' in bot: + match.extend(['~gradient', '~etc1bitmap']) # skia:2895 + if 'Nexus7' in bot: + match = ['skp'] # skia:2774 + if match: + args.append('--match') + args.extend(match) + + if ('GalaxyS3' in bot or + 'GalaxyS4' in bot): + args.append('--nocpu') + return args +cov_end = lineno() # Don't care about code coverage past here. + + +def self_test(): + import coverage # This way the bots don't need coverage.py to be installed. 
+ args = {} + cases = [ + 'Perf-Android-GalaxyS3-Mali400-Arm7-Release', + 'Perf-Android-Nexus7-Tegra3-Arm7-Release', + 'Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind', + 'Test-Win7-ShuttleA-HD2000-x86-Debug-ANGLE', + ] + + cov = coverage.coverage() + cov.start() + for case in cases: + args[case] = get_args(case) + cov.stop() + + this_file = os.path.basename(__file__) + _, _, not_run, _ = cov.analysis(this_file) + filtered = [line for line in not_run if line > cov_start and line < cov_end] + if filtered: + print 'Lines not covered by test cases: ', filtered + sys.exit(1) + + golden = this_file.replace('.py', '.json') + with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f: + json.dump(args, f, indent=2, sort_keys=True) + + +if __name__ == '__main__': + if len(sys.argv) == 2 and sys.argv[1] == 'test': + self_test() + sys.exit(0) + + if len(sys.argv) != 3: + print usage + sys.exit(1) + + with open(sys.argv[1], 'w') as out: + json.dump(get_args(sys.argv[2]), out) |