Diffstat (limited to 'tools')
-rw-r--r--  tools/run_tests/generated/tests.json    | 23
-rwxr-xr-x  tools/run_tests/python_utils/jobset.py  | 14
-rwxr-xr-x  tools/run_tests/run_tests.py            |  5
-rwxr-xr-x  tools/run_tests/run_tests_matrix.py     |  5
4 files changed, 42 insertions, 5 deletions
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 0cc270f52c..8b7bc1f14d 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -85167,6 +85167,29 @@
},
{
"args": [
+ "test/core/end2end/fuzzers/api_fuzzer_corpus/clusterfuzz-testcase-6462055064272896"
+ ],
+ "ci_platforms": [
+ "linux"
+ ],
+ "cpu_cost": 0.1,
+ "exclude_configs": [
+ "tsan"
+ ],
+ "exclude_iomgrs": [
+ "uv"
+ ],
+ "flaky": false,
+ "language": "c",
+ "name": "api_fuzzer_one_entry",
+ "platforms": [
+ "mac",
+ "linux"
+ ],
+ "uses_polling": false
+ },
+ {
+ "args": [
"test/core/end2end/fuzzers/api_fuzzer_corpus/clusterfuzz-testcase-6499902139924480"
],
"ci_platforms": [
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 5d812f28ee..460f359cf3 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -348,7 +348,7 @@ class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
- stop_on_failure, add_env, quiet_success):
+ stop_on_failure, add_env, quiet_success, max_time):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
@@ -360,6 +360,7 @@ class Jobset(object):
self._stop_on_failure = stop_on_failure
self._add_env = add_env
self._quiet_success = quiet_success
+ self._max_time = max_time
self.resultset = {}
self._remaining = None
self._start_time = time.time()
@@ -379,6 +380,12 @@ class Jobset(object):
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while True:
+ if self._max_time > 0 and time.time() - self._start_time > self._max_time:
+ skipped_job_result = JobResult()
+ skipped_job_result.state = 'SKIPPED'
+ message('SKIPPED', spec.shortname, do_newline=True)
+ self.resultset[spec.shortname] = [skipped_job_result]
+ return True
if self.cancelled(): return False
current_cpu_cost = self.cpu_cost()
if current_cpu_cost == 0: break
@@ -474,7 +481,8 @@ def run(cmdlines,
stop_on_failure=False,
add_env={},
skip_jobs=False,
- quiet_success=False):
+ quiet_success=False,
+ max_time=-1):
if skip_jobs:
resultset = {}
skipped_job_result = JobResult()
@@ -486,7 +494,7 @@ def run(cmdlines,
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env,
- quiet_success)
+ quiet_success, max_time)
for cmdline, remaining in tag_remaining(cmdlines):
if not js.start(cmdline):
break
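The hunk above makes the deadline check the first thing Jobset.start() does: once max_time seconds of wall time have elapsed since the jobset was created, every remaining job is recorded as SKIPPED instead of being launched. A minimal standalone sketch of the same pattern, independent of the jobset module and using hypothetical names, would be:

    import time

    def run_with_deadline(jobs, max_time):
        # Mirrors the deadline added to Jobset.start() above: jobs whose
        # start would fall past max_time are marked SKIPPED, not run.
        start = time.time()
        results = {}
        for name, work in jobs:
            if max_time > 0 and time.time() - start > max_time:
                results[name] = 'SKIPPED'
                continue
            work()
            results[name] = 'PASSED'
        return results

    # e.g. run_with_deadline([('quick_check', lambda: None)], max_time=60)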
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 9130bc960a..a1ec1b2f45 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -1210,6 +1210,7 @@ argp.add_argument('--quiet_success',
'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Dont try to iterate over many polling strategies when they exist')
+argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
args = argp.parse_args()
if args.force_default_poller:
@@ -1465,7 +1466,7 @@ def _build_and_run(
not re.search(args.regex_exclude, spec.shortname))))
# When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
- if args.travis:
+ if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
@@ -1493,7 +1494,7 @@ def _build_and_run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
- quiet_success=args.quiet_success)
+ quiet_success=args.quiet_success, max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
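The condition change above ("args.travis and args.max_time <= 0") presumably keeps the deterministic sorted ordering only when no deadline is set: with --max_time in effect, a fixed order would always skip the same alphabetical tail of tests, so the run falls through to the shuffled branch instead. A standalone sketch of that ordering choice (the helper name is illustrative, not part of the script):

    import random

    def order_runs(one_run, deterministic):
        # Keep a reproducible order only when nothing will be skipped;
        # otherwise shuffle so a --max_time cutoff skips a different
        # subset of tests on each invocation.
        if deterministic:
            return sorted(one_run, key=lambda spec: spec.shortname)
        shuffled = list(one_run)
        random.shuffle(shuffled)
        return shuffled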
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 1da754d9f8..02f0ec5eff 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -377,6 +377,9 @@ if __name__ == "__main__":
argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type,
help='How many times to run each test. >1 runs implies ' +
'omitting passing test from the output & reports.')
+ argp.add_argument('--max_time', default=-1, type=int,
+ help='Maximum amount of time to run tests for ' +
+ '(other tests will be skipped)')
args = argp.parse_args()
extra_args = []
@@ -388,6 +391,8 @@ if __name__ == "__main__":
extra_args.append('-n')
extra_args.append('%s' % args.runs_per_test)
extra_args.append('--quiet_success')
+ if args.max_time > 0:
+ extra_args.extend(('--max_time', '%d' % args.max_time))
all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
_create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
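run_tests_matrix.py itself only forwards the deadline: when --max_time is positive it appends the flag to extra_args, which every inner run_tests.py invocation then receives. A rough sketch of the resulting argument list, written as a hypothetical helper rather than the script's actual inline code:

    def forwarded_args(runs_per_test=1, max_time=-1):
        # Hypothetical helper mirroring the inline extra_args handling above.
        extra_args = []
        if runs_per_test > 1:
            extra_args.append('-n')
            extra_args.append('%s' % runs_per_test)
            extra_args.append('--quiet_success')
        if max_time > 0:
            extra_args.extend(('--max_time', '%d' % max_time))
        return extra_args

    # forwarded_args(max_time=3600) -> ['--max_time', '3600']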