From f964e772afcbe237cc08e7674e41e381e73a9d75 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Thu, 27 Oct 2016 10:45:47 -0700 Subject: Add --inner_jobs args to run_tests_matrix.py --- tools/run_tests/run_tests_matrix.py | 84 ++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 33 deletions(-) (limited to 'tools/run_tests/run_tests_matrix.py') diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index 21d3dd4a0b..af24d75236 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -46,16 +46,16 @@ os.chdir(_ROOT) _RUNTESTS_TIMEOUT = 2*60*60 # Number of jobs assigned to each run_tests.py instance -_INNER_JOBS = 2 +_DEFAULT_INNER_JOBS = 2 -def _docker_jobspec(name, runtests_args=[]): +def _docker_jobspec(name, runtests_args=[], inner_jobs=_DEFAULT_INNER_JOBS): """Run a single instance of run_tests.py in a docker container""" test_job = jobset.JobSpec( cmdline=['python', 'tools/run_tests/run_tests.py', '--use_docker', '-t', - '-j', str(_INNER_JOBS), + '-j', str(inner_jobs), '-x', 'report_%s.xml' % name, '--report_suite_name', '%s' % name] + runtests_args, shortname='run_tests_%s' % name, @@ -63,7 +63,7 @@ def _docker_jobspec(name, runtests_args=[]): return test_job -def _workspace_jobspec(name, runtests_args=[], workspace_name=None): +def _workspace_jobspec(name, runtests_args=[], workspace_name=None, inner_jobs=_DEFAULT_INNER_JOBS): """Run a single instance of run_tests.py in a separate workspace""" if not workspace_name: workspace_name = 'workspace_%s' % name @@ -71,7 +71,7 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None): test_job = jobset.JobSpec( cmdline=['tools/run_tests/run_tests_in_workspace.sh', '-t', - '-j', str(_INNER_JOBS), + '-j', str(inner_jobs), '-x', '../report_%s.xml' % name, '--report_suite_name', '%s' % name] + runtests_args, environ=env, @@ -82,7 +82,8 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None): def _generate_jobs(languages, configs, platforms, arch=None, compiler=None, - labels=[], extra_args=[]): + labels=[], extra_args=[], + inner_jobs=_DEFAULT_INNER_JOBS): result = [] for language in languages: for platform in platforms: @@ -97,60 +98,66 @@ def _generate_jobs(languages, configs, platforms, runtests_args += extra_args if platform == 'linux': - job = _docker_jobspec(name=name, runtests_args=runtests_args) + job = _docker_jobspec(name=name, runtests_args=runtests_args, inner_jobs=inner_jobs) else: - job = _workspace_jobspec(name=name, runtests_args=runtests_args) + job = _workspace_jobspec(name=name, runtests_args=runtests_args, inner_jobs=inner_jobs) job.labels = [platform, config, language] + labels result.append(job) return result -def _create_test_jobs(extra_args=[]): +def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS): test_jobs = [] # supported on linux only test_jobs += _generate_jobs(languages=['sanity', 'php7'], configs=['dbg', 'opt'], platforms=['linux'], labels=['basictests'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + # supported on all platforms. test_jobs += _generate_jobs(languages=['c', 'csharp', 'node', 'python'], configs=['dbg', 'opt'], platforms=['linux', 'macos', 'windows'], labels=['basictests'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + # supported on linux and mac. 
test_jobs += _generate_jobs(languages=['c++', 'ruby', 'php'], configs=['dbg', 'opt'], platforms=['linux', 'macos'], labels=['basictests'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + # supported on mac only. test_jobs += _generate_jobs(languages=['objc'], configs=['dbg', 'opt'], platforms=['macos'], labels=['basictests'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + # sanitizers test_jobs += _generate_jobs(languages=['c'], configs=['msan', 'asan', 'tsan'], platforms=['linux'], labels=['sanitizers'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) test_jobs += _generate_jobs(languages=['c++'], configs=['asan', 'tsan'], platforms=['linux'], labels=['sanitizers'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) return test_jobs - -def _create_portability_test_jobs(extra_args=[]): + +def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS): test_jobs = [] # portability C x86 test_jobs += _generate_jobs(languages=['c'], @@ -159,8 +166,9 @@ def _create_portability_test_jobs(extra_args=[]): arch='x86', compiler='default', labels=['portability'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + # portability C and C++ on x64 for compiler in ['gcc4.4', 'gcc4.6', 'gcc5.3', 'clang3.5', 'clang3.6', 'clang3.7']: @@ -170,8 +178,9 @@ def _create_portability_test_jobs(extra_args=[]): arch='x64', compiler=compiler, labels=['portability'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + # portability C on Windows for arch in ['x86', 'x64']: for compiler in ['vs2013', 'vs2015']: @@ -181,24 +190,27 @@ def _create_portability_test_jobs(extra_args=[]): arch=arch, compiler=compiler, labels=['portability'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + test_jobs += _generate_jobs(languages=['python'], configs=['dbg'], platforms=['linux'], arch='default', compiler='python3.4', labels=['portability'], - extra_args=extra_args) - + extra_args=extra_args, + inner_jobs=inner_jobs) + test_jobs += _generate_jobs(languages=['csharp'], configs=['dbg'], platforms=['linux'], arch='default', compiler='coreclr', labels=['portability'], - extra_args=extra_args) - return test_jobs + extra_args=extra_args, + inner_jobs=inner_jobs) + return test_jobs def _allowed_labels(): @@ -212,7 +224,7 @@ def _allowed_labels(): argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') argp.add_argument('-j', '--jobs', - default=multiprocessing.cpu_count()/_INNER_JOBS, + default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS, type=int, help='Number of concurrent run_tests.py instances.') argp.add_argument('-f', '--filter', @@ -241,15 +253,21 @@ argp.add_argument('--base_branch', default='origin/master', type=str, help='Branch that pull request is requesting to merge into') +argp.add_argument('--inner_jobs', + default=_DEFAULT_INNER_JOBS, + type=int, + help='Number of jobs in each run_tests.py instance') args = argp.parse_args() + extra_args = [] if args.build_only: extra_args.append('--build_only') if args.force_default_poller: extra_args.append('--force_default_poller') -all_jobs = _create_test_jobs(extra_args=extra_args) + _create_portability_test_jobs(extra_args=extra_args) +all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ + _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) jobs = [] for job in all_jobs: -- cgit 
v1.2.3 From 5c691c634de144b50fc1f2e166303219721c19d0 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Thu, 20 Oct 2016 17:11:18 -0700 Subject: Enable pull request test filtering --- tools/run_tests/filter_pull_request_tests.py | 9 +++++-- tools/run_tests/jobset.py | 13 ++++++++++- tools/run_tests/report_utils.py | 2 ++ tools/run_tests/run_tests_matrix.py | 35 +++++++++++++++++----------- 4 files changed, 42 insertions(+), 17 deletions(-) (limited to 'tools/run_tests/run_tests_matrix.py') diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/filter_pull_request_tests.py index e2027a2340..b7ebe2085a 100644 --- a/tools/run_tests/filter_pull_request_tests.py +++ b/tools/run_tests/filter_pull_request_tests.py @@ -77,6 +77,7 @@ _ALL_TEST_SUITES = [_SANITY_TEST_SUITE, _CORE_TEST_SUITE, _CPP_TEST_SUITE, # and the value is a list of tests that should be run. An empty list means that # the changed files should not trigger any tests. Any changed file that does not # match any of these regexes will trigger all tests +# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do) _WHITELIST_DICT = { '^doc/': [], '^examples/': [], @@ -174,9 +175,13 @@ def filter_tests(tests, base_branch): print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch) changed_files = _get_changed_files(base_branch) for changed_file in changed_files: - print(changed_file) + print(" %s" % changed_file) print + # todo(mattkwong): Remove this + # Faking changed files to test test filtering on Jenkins + changed_files = ['src/node/something', 'src/python/something'] + # Regex that combines all keys in _WHITELIST_DICT all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")" # Check if all tests have to be run @@ -188,7 +193,7 @@ def filter_tests(tests, base_branch): for test_suite in _ALL_TEST_SUITES: if _can_skip_tests(changed_files, test_suite.triggers): for label in test_suite.labels: - print(" Filtering %s tests" % label) + print(" %s tests safe to skip" % label) skippable_labels.append(label) tests = _remove_irrelevant_tests(tests, skippable_labels) diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py index b6fb6318e0..b84eb3b5d7 100755 --- a/tools/run_tests/jobset.py +++ b/tools/run_tests/jobset.py @@ -96,6 +96,7 @@ _COLORS = { 'lightgray': [ 37, 0], 'gray': [ 30, 1 ], 'purple': [ 35, 0 ], + 'cyan': [ 36, 0 ] } @@ -114,6 +115,7 @@ _TAG_COLOR = { 'WAITING': 'yellow', 'SUCCESS': 'green', 'IDLE': 'gray', + 'SKIPPED': 'cyan' } @@ -450,7 +452,16 @@ def run(cmdlines, travis=False, infinite_runs=False, stop_on_failure=False, - add_env={}): + add_env={}, + skip_jobs=False): + if skip_jobs: + results = {} + skipped_job_result = JobResult() + skipped_job_result.state = 'SKIPPED' + for job in cmdlines: + message('SKIPPED', job.shortname, do_newline=True) + results[job.shortname] = [skipped_job_result] + return results js = Jobset(check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS, newline_on_success, travis, stop_on_failure, add_env) diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py index 3e18f36510..90055e3530 100644 --- a/tools/run_tests/report_utils.py +++ b/tools/run_tests/report_utils.py @@ -74,6 +74,8 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc', ET.SubElement(xml_test, 'failure', message='Failure') elif result.state == 'TIMEOUT': ET.SubElement(xml_test, 'error', message='Timeout') + elif result.state == 'SKIPPED': + ET.SubElement(xml_test, 'skipped', 
message='Skipped') tree = ET.ElementTree(root) tree.write(xml_report, encoding='UTF-8') diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index 2656f1ac5d..ae8cb8518e 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -292,28 +292,29 @@ print('IMPORTANT: The changes you are testing need to be locally committed') print('because only the committed changes in the current branch will be') print('copied to the docker environment or into subworkspaces.') -print -print 'Will run these tests:' -for job in jobs: - if args.dry_run: - print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)) - else: - print ' %s' % job.shortname -print - +skipped_jobs = [] if args.filter_pr_tests: - print 'IMPORTANT: Test filtering is not active; this is only for testing.' + print 'Looking for irrelevant tests to skip...' relevant_jobs = filter_tests(jobs, args.base_branch) - # todo(mattkwong): add skipped tests to report.xml print if len(relevant_jobs) == len(jobs): - print '(TESTING) No tests will be skipped.' + print 'No tests will be skipped.' else: - print '(TESTING) These tests will be skipped:' - for job in list(set(jobs) - set(relevant_jobs)): + print 'These tests will be skipped:' + skipped_jobs = set(jobs) - set(relevant_jobs) + for job in list(skipped_jobs): print ' %s' % job.shortname + jobs = relevant_jobs print +print 'Will run these tests:' +for job in jobs: + if args.dry_run: + print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)) + else: + print ' %s' % job.shortname +print + if args.dry_run: print '--dry_run was used, exiting' sys.exit(1) @@ -323,9 +324,15 @@ num_failures, resultset = jobset.run(jobs, newline_on_success=True, travis=True, maxjobs=args.jobs) +# Merge skipped tests into results to show skipped tests on report.xml +if skipped_jobs: + skipped_results = jobset.run(skipped_jobs, + skip_jobs=True) + resultset.update(skipped_results) report_utils.render_junit_xml_report(resultset, 'report.xml', suite_name='aggregate_tests') + if num_failures == 0: jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.', do_newline=True) -- cgit v1.2.3 From 7e9bd6ca9a2e167dcb6133c78c05cf27584ee062 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Mon, 24 Oct 2016 17:30:25 -0700 Subject: Add tests for pull request test filtering --- tools/run_tests/filter_pull_request_tests.py | 18 +-- tools/run_tests/run_tests_matrix.py | 208 ++++++++++++------------- tools/run_tests/sanity/check_test_filtering.py | 138 ++++++++++++++++ tools/run_tests/sanity/sanity_tests.yaml | 1 + 4 files changed, 249 insertions(+), 116 deletions(-) create mode 100755 tools/run_tests/sanity/check_test_filtering.py (limited to 'tools/run_tests/run_tests_matrix.py') diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/filter_pull_request_tests.py index b7ebe2085a..981fbe3a92 100644 --- a/tools/run_tests/filter_pull_request_tests.py +++ b/tools/run_tests/filter_pull_request_tests.py @@ -31,7 +31,7 @@ """Filter out tests based on file differences compared to merge target branch""" import re -from subprocess import call, check_output +from subprocess import check_output class TestSuite: @@ -105,7 +105,7 @@ _WHITELIST_DICT = { 'config\.m4$': [_PHP_TEST_SUITE], 'CONTRIBUTING\.md$': [], 'Gemfile$': [_RUBY_TEST_SUITE], - 'grpc.def$': [_WINDOWS_TEST_SUITE], + 'grpc\.def$': [_WINDOWS_TEST_SUITE], 'grpc\.gemspec$': [_RUBY_TEST_SUITE], 'gRPC\.podspec$': [_OBJC_TEST_SUITE], 'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE], @@ -172,15 
+172,11 @@ def filter_tests(tests, base_branch): :param tests: list of all tests generated by run_tests_matrix.py :return: list of relevant tests """ - print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch) + print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch) changed_files = _get_changed_files(base_branch) for changed_file in changed_files: - print(" %s" % changed_file) - print - - # todo(mattkwong): Remove this - # Faking changed files to test test filtering on Jenkins - changed_files = ['src/node/something', 'src/python/something'] + print(' %s' % changed_file) + print('') # Regex that combines all keys in _WHITELIST_DICT all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")" @@ -193,8 +189,8 @@ def filter_tests(tests, base_branch): for test_suite in _ALL_TEST_SUITES: if _can_skip_tests(changed_files, test_suite.triggers): for label in test_suite.labels: - print(" %s tests safe to skip" % label) + print(' %s tests safe to skip' % label) skippable_labels.append(label) - tests = _remove_irrelevant_tests(tests, skippable_labels) return tests + diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index ae8cb8518e..25fbe8110f 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -231,112 +231,110 @@ def _allowed_labels(): return sorted(all_labels) -argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') -argp.add_argument('-j', '--jobs', - default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS, - type=int, - help='Number of concurrent run_tests.py instances.') -argp.add_argument('-f', '--filter', - choices=_allowed_labels(), - nargs='+', - default=[], - help='Filter targets to run by label with AND semantics.') -argp.add_argument('--build_only', - default=False, - action='store_const', - const=True, - help='Pass --build_only flag to run_tests.py instances.') -argp.add_argument('--force_default_poller', default=False, action='store_const', const=True, - help='Pass --force_default_poller to run_tests.py instances.') -argp.add_argument('--dry_run', - default=False, - action='store_const', - const=True, - help='Only print what would be run.') -argp.add_argument('--filter_pr_tests', - default=False, - action='store_const', - const=True, - help='Filters out tests irrelavant to pull request changes.') -argp.add_argument('--base_branch', - default='origin/master', - type=str, - help='Branch that pull request is requesting to merge into') -argp.add_argument('--inner_jobs', - default=_DEFAULT_INNER_JOBS, - type=int, - help='Number of jobs in each run_tests.py instance') -args = argp.parse_args() - - -extra_args = [] -if args.build_only: - extra_args.append('--build_only') -if args.force_default_poller: - extra_args.append('--force_default_poller') - -all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ - _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) - -jobs = [] -for job in all_jobs: - if not args.filter or all(filter in job.labels for filter in args.filter): - jobs.append(job) - -if not jobs: - jobset.message('FAILED', 'No test suites match given criteria.', - do_newline=True) - sys.exit(1) - -print('IMPORTANT: The changes you are testing need to be locally committed') -print('because only the committed changes in the current branch will be') -print('copied to the docker environment or into subworkspaces.') - -skipped_jobs = [] -if 
args.filter_pr_tests: - print 'Looking for irrelevant tests to skip...' - relevant_jobs = filter_tests(jobs, args.base_branch) - print - if len(relevant_jobs) == len(jobs): - print 'No tests will be skipped.' - else: - print 'These tests will be skipped:' - skipped_jobs = set(jobs) - set(relevant_jobs) - for job in list(skipped_jobs): - print ' %s' % job.shortname - jobs = relevant_jobs +if __name__ == "__main__": + argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') + argp.add_argument('-j', '--jobs', + default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS, + type=int, + help='Number of concurrent run_tests.py instances.') + argp.add_argument('-f', '--filter', + choices=_allowed_labels(), + nargs='+', + default=[], + help='Filter targets to run by label with AND semantics.') + argp.add_argument('--build_only', + default=False, + action='store_const', + const=True, + help='Pass --build_only flag to run_tests.py instances.') + argp.add_argument('--force_default_poller', default=False, action='store_const', const=True, + help='Pass --force_default_poller to run_tests.py instances.') + argp.add_argument('--dry_run', + default=False, + action='store_const', + const=True, + help='Only print what would be run.') + argp.add_argument('--filter_pr_tests', + default=False, + action='store_const', + const=True, + help='Filters out tests irrelavant to pull request changes.') + argp.add_argument('--base_branch', + default='origin/master', + type=str, + help='Branch that pull request is requesting to merge into') + argp.add_argument('--inner_jobs', + default=_DEFAULT_INNER_JOBS, + type=int, + help='Number of jobs in each run_tests.py instance') + args = argp.parse_args() + + extra_args = [] + if args.build_only: + extra_args.append('--build_only') + if args.force_default_poller: + extra_args.append('--force_default_poller') + + all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ + _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + + jobs = [] + for job in all_jobs: + if not args.filter or all(filter in job.labels for filter in args.filter): + jobs.append(job) + + if not jobs: + jobset.message('FAILED', 'No test suites match given criteria.', + do_newline=True) + sys.exit(1) + + print('IMPORTANT: The changes you are testing need to be locally committed') + print('because only the committed changes in the current branch will be') + print('copied to the docker environment or into subworkspaces.') + + skipped_jobs = [] + + if args.filter_pr_tests: + print('Looking for irrelevant tests to skip...') + relevant_jobs = filter_tests(jobs, args.base_branch) + if len(relevant_jobs) == len(jobs): + print('No tests will be skipped.') + else: + print('These tests will be skipped:') + skipped_jobs = [job for job in jobs if job not in relevant_jobs] + for job in list(skipped_jobs): + print(' %s' % job.shortname) + jobs = relevant_jobs + + print('Will run these tests:') + for job in jobs: + if args.dry_run: + print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline))) + else: + print(' %s' % job.shortname) print -print 'Will run these tests:' -for job in jobs: if args.dry_run: - print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)) + print('--dry_run was used, exiting') + sys.exit(1) + + jobset.message('START', 'Running test matrix.', do_newline=True) + num_failures, resultset = jobset.run(jobs, + newline_on_success=True, + travis=True, + maxjobs=args.jobs) + # Merge skipped tests into results to show skipped tests on 
report.xml + if skipped_jobs: + skipped_results = jobset.run(skipped_jobs, + skip_jobs=True) + resultset.update(skipped_results) + report_utils.render_junit_xml_report(resultset, 'report.xml', + suite_name='aggregate_tests') + + if num_failures == 0: + jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.', + do_newline=True) else: - print ' %s' % job.shortname -print - -if args.dry_run: - print '--dry_run was used, exiting' - sys.exit(1) - -jobset.message('START', 'Running test matrix.', do_newline=True) -num_failures, resultset = jobset.run(jobs, - newline_on_success=True, - travis=True, - maxjobs=args.jobs) -# Merge skipped tests into results to show skipped tests on report.xml -if skipped_jobs: - skipped_results = jobset.run(skipped_jobs, - skip_jobs=True) - resultset.update(skipped_results) -report_utils.render_junit_xml_report(resultset, 'report.xml', - suite_name='aggregate_tests') - - -if num_failures == 0: - jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.', - do_newline=True) -else: - jobset.message('FAILED', 'Some run_tests.py instance have failed.', - do_newline=True) - sys.exit(1) + jobset.message('FAILED', 'Some run_tests.py instance have failed.', + do_newline=True) + sys.exit(1) diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py new file mode 100755 index 0000000000..adcdd338d2 --- /dev/null +++ b/tools/run_tests/sanity/check_test_filtering.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python2.7 + +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +import os +import sys +import unittest +import re + +# hack import paths to pick up extra code +sys.path.insert(0, os.path.abspath('tools/run_tests/')) +from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs +import filter_pull_request_tests + +_LIST_OF_LANGUAGE_LABELS = ['sanity', 'c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] +_LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows'] + +class TestFilteringTest(unittest.TestCase): + + def generate_all_tests(self): + all_jobs = _create_test_jobs() + _create_portability_test_jobs() + self.assertIsNotNone(all_jobs) + return all_jobs + + def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS): + """ + Default args should filter no tests because changed_files is empty and + default labels should be able to match all jobs + :param changed_files: mock list of changed_files from pull request + :param labels: list of job labels that should be skipped + """ + all_jobs = self.generate_all_tests() + # Replacing _get_changed_files function to allow specifying changed files in filter_tests function + def _get_changed_files(foo): + return changed_files + filter_pull_request_tests._get_changed_files = _get_changed_files + print + filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test") + + for label in labels: + for job in filtered_jobs: + self.assertNotIn(label, job.labels) + + jobs_matching_labels = 0 + for label in labels: + for job in all_jobs: + if (label in job.labels): + jobs_matching_labels += 1 + self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels) + + def test_individual_language_filters(self): + # Changing unlisted file should trigger all languages + self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS]) + # Changing core should trigger all tests + self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS]) + # Testing individual languages + self.test_filtering(['templates/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._SANITY_TEST_SUITE.labels]) + self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._CORE_TEST_SUITE.labels]) + self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._CPP_TEST_SUITE.labels]) + self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._CSHARP_TEST_SUITE.labels]) + self.test_filtering(['src/node/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._NODE_TEST_SUITE.labels]) + self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._OBJC_TEST_SUITE.labels]) + self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._PHP_TEST_SUITE.labels]) + self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._PYTHON_TEST_SUITE.labels]) + self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._RUBY_TEST_SUITE.labels]) + + def test_combined_language_filters(self): + self.test_filtering(['templates/foo.bar', 'test/core/foo.bar'], + [label for label in _LIST_OF_LANGUAGE_LABELS 
if label not in + filter_pull_request_tests._SANITY_TEST_SUITE.labels and label not in + filter_pull_request_tests._CORE_TEST_SUITE.labels]) + self.test_filtering(['src/node/foo.bar', 'src/cpp/foo.bar', "src/csharp/foo.bar"], + [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._NODE_TEST_SUITE.labels and label not in + filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in + filter_pull_request_tests._CSHARP_TEST_SUITE.labels]) + self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"], + [label for label in _LIST_OF_LANGUAGE_LABELS if label not in + filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in + filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in + filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in + filter_pull_request_tests._RUBY_TEST_SUITE.labels]) + + def test_platform_filter(self): + self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in + filter_pull_request_tests._WINDOWS_TEST_SUITE.labels]) + + def test_whitelist(self): + whitelist = filter_pull_request_tests._WHITELIST_DICT + files_that_should_trigger_all_tests = ['src/core/foo.bar', + 'some_file_not_on_the_white_list', + 'BUILD', + 'etc/roots.pem', + 'Makefile', + 'tools/foo'] + for key in whitelist.keys(): + for file_name in files_that_should_trigger_all_tests: + self.assertFalse(re.match(key, file_name)) + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/tools/run_tests/sanity/sanity_tests.yaml b/tools/run_tests/sanity/sanity_tests.yaml index e699c5194d..5c2bedca8f 100644 --- a/tools/run_tests/sanity/sanity_tests.yaml +++ b/tools/run_tests/sanity/sanity_tests.yaml @@ -2,6 +2,7 @@ - script: tools/run_tests/sanity/check_cache_mk.sh - script: tools/run_tests/sanity/check_sources_and_headers.py - script: tools/run_tests/sanity/check_submodules.sh +- script: tools/run_tests/sanity/check_test_filtering.py - script: tools/buildgen/generate_projects.sh -j 3 cpu_cost: 3 - script: tools/distrib/check_copyright.py -- cgit v1.2.3 From aa6c94cb26cf7b5301da3adbee068e6ae10fee19 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Wed, 9 Nov 2016 15:53:23 -0800 Subject: Fix skipped tests not showing up in Jenkins UI --- tools/run_tests/run_tests_matrix.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tools/run_tests/run_tests_matrix.py') diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index 25fbe8110f..544b6bc57d 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -301,7 +301,9 @@ if __name__ == "__main__": print('No tests will be skipped.') else: print('These tests will be skipped:') - skipped_jobs = [job for job in jobs if job not in relevant_jobs] + skipped_jobs = list(set(jobs) - set(relevant_jobs)) + # Sort by shortnames to make printing of skipped tests consistent + skipped_jobs.sort(key=lambda job: job.shortname) for job in list(skipped_jobs): print(' %s' % job.shortname) jobs = relevant_jobs -- cgit v1.2.3
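
Taken together, the patches above wire --inner_jobs through the job-spec helpers, add pull-request test filtering, and report skipped jobs as SKIPPED in report.xml. As a rough illustration of the filtering idea in filter_pull_request_tests.py, the sketch below condenses the whitelist lookup into plain label lists; the regexes, the label list, and the skippable_labels helper are simplified stand-ins for this note, not the actual gRPC module (which maps regexes to TestSuite objects and calls _can_skip_tests per suite).

import re

# Hypothetical, trimmed-down whitelist: path regex -> labels whose tests a
# change under that path can trigger. An empty list means "triggers nothing".
_WHITELIST = {
    r'^doc/': [],
    r'^src/node/': ['node'],
    r'^src/python/': ['python'],
    r'^src/ruby/': ['ruby'],
}

_ALL_LABELS = ['c', 'c++', 'node', 'python', 'ruby']


def skippable_labels(changed_files):
    """Return labels whose tests none of the changed files can trigger."""
    all_triggers = '(' + ')|('.join(_WHITELIST.keys()) + ')'
    # Any changed file outside the whitelist triggers everything, so nothing
    # can be skipped.
    if any(not re.match(all_triggers, f) for f in changed_files):
        return []
    # Collect the labels actually triggered by the changed files; everything
    # else is safe to skip.
    triggered = set()
    for f in changed_files:
        for trigger, labels in _WHITELIST.items():
            if re.match(trigger, f):
                triggered.update(labels)
    return [label for label in _ALL_LABELS if label not in triggered]


if __name__ == '__main__':
    # A doc-only change makes every language skippable; a Node change keeps
    # the 'node' jobs in the matrix.
    print(skippable_labels(['doc/interop-test-descriptions.md']))
    print(skippable_labels(['src/node/src/client.js']))

In the real script, filter_tests() drops the jobs whose labels are all skippable, and the later commits in this series feed those dropped jobs back through jobset.run(..., skip_jobs=True) so they still appear, marked SKIPPED, in the aggregated report.xml.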