-rwxr-xr-x  tools/run_tests/jobset.py            | 11 ++++++++---
-rwxr-xr-x  tools/run_tests/run_interop_tests.py |  7 +++++--
-rwxr-xr-x  tools/run_tests/run_tests.py         | 32 ++++++++++++++++++++++++++++++--
 3 files changed, 43 insertions(+), 7 deletions(-)
diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py
index 8343441a18..a8ff9f613f 100755
--- a/tools/run_tests/jobset.py
+++ b/tools/run_tests/jobset.py
@@ -182,6 +182,7 @@ class JobResult(object):
     self.state = 'UNKNOWN'
     self.returncode = -1
     self.elapsed_time = 0
+    self.num_failures = 0
     self.retries = 0
     self.message = ''
 
@@ -243,6 +244,7 @@ class Job(object):
               self._spec.shortname, self._process.returncode, self._process.pid),
               stdout, do_newline=True)
           self._retries += 1
+          self.result.num_failures += 1
           self.result.retries = self._timeout_retries + self._retries
           self.start()
         else:
@@ -252,6 +254,7 @@ class Job(object):
               self._spec.shortname, self._process.returncode, self._process.pid),
               stdout, do_newline=True)
           self.result.state = 'FAILED'
+          self.result.num_failures += 1
           self.result.returncode = self._process.returncode
           if self._xml_test is not None:
             ET.SubElement(self._xml_test, 'failure', message='Failure')
@@ -271,6 +274,7 @@ class Job(object):
       if self._timeout_retries < self._spec.timeout_retries:
         message('TIMEOUT_FLAKE', self._spec.shortname, stdout, do_newline=True)
         self._timeout_retries += 1
+        self.result.num_failures += 1
         self.result.retries = self._timeout_retries + self._retries
         if self._spec.kill_handler:
           self._spec.kill_handler(self)
@@ -280,6 +284,7 @@ class Job(object):
         message('TIMEOUT', self._spec.shortname, stdout, do_newline=True)
         self.kill()
         self.result.state = 'TIMEOUT'
+        self.result.num_failures += 1
         if self._xml_test is not None:
           ET.SubElement(self._xml_test, 'system-out').text = filtered_stdout
           ET.SubElement(self._xml_test, 'error', message='Timeout')
@@ -294,7 +299,7 @@ class Job(object):
 
   def suppress_failure_message(self):
     self._suppress_failure_message = True
-  
+
 
 class Jobset(object):
   """Manages one run of jobs."""
@@ -347,7 +352,7 @@ class Jobset(object):
                 self._add_env,
                 self._xml_report)
       self._running.add(job)
-      self.resultset[job.GetSpec().shortname] = None
+      self.resultset[job.GetSpec().shortname] = []
       return True
 
   def reap(self):
@@ -367,7 +372,7 @@ class Jobset(object):
           break
      for job in dead:
        self._completed += 1
-        self.resultset[job.GetSpec().shortname] = job.result
+        self.resultset[job.GetSpec().shortname].append(job.result)
        self._running.remove(job)
      if dead: return
      if (not self._travis):
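
The jobset.py change above turns each resultset entry from a single JobResult into a list of them, one per run, with the new num_failures field accumulating across flake and timeout retries instead of being lost when a retry overwrote the old result. Below is a minimal standalone sketch of that shape; the JobResult stub, the start/reap helpers, and the test name are illustrative, not grpc's actual jobset module:

class JobResult(object):
    """Stub mirroring only the fields this diff touches."""
    def __init__(self):
        self.state = 'UNKNOWN'
        self.num_failures = 0  # new field: failed attempts, incl. retries
        self.retries = 0

resultset = {}

def start(shortname):
    # Was: resultset[shortname] = None -- a second run erased the first.
    resultset[shortname] = []

def reap(shortname, result):
    # Was: resultset[shortname] = result -- now every run is kept.
    resultset[shortname].append(result)

# Two runs of the same (hypothetical) test: one flaked once, one was clean.
start('end2end_test')
flaky = JobResult()
flaky.state, flaky.num_failures, flaky.retries = 'PASSED', 1, 1
clean = JobResult()
clean.state = 'PASSED'
reap('end2end_test', flaky)
reap('end2end_test', clean)

assert len(resultset['end2end_test']) == 2
assert sum(r.num_failures for r in resultset['end2end_test']) == 1
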
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index e1d60b2de3..729f962bb1 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -474,7 +474,9 @@ def build_interop_image_jobspec(language, tag=None):
 # TODO(adelez): Use mako template.
 def fill_one_test_result(shortname, resultset, html_str):
   if shortname in resultset:
-    result = resultset[shortname]
+    # Because interop tests do not have a runs_per_test flag, each test is run
+    # once, so there should only be one element for each result.
+    result = resultset[shortname][0]
     if result.state == 'PASSED':
       html_str = '%s<td bgcolor=\"green\">PASS</td>\n' % html_str
     else:
@@ -483,7 +485,8 @@ def fill_one_test_result(shortname, resultset, html_str):
       if result.returncode > 0:
         tooltip = 'returncode: %d ' % result.returncode
       if result.message:
-        tooltip = '%smessage: %s' % (tooltip, result.message)
+        escaped_msg = result.message.replace('"', '&quot;')
+        tooltip = '%smessage: %s' % (tooltip, escaped_msg)
       if result.state == 'FAILED':
         html_str = '%s<td bgcolor=\"red\">' % html_str
         if tooltip:
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 8482b2fd52..4232637c7f 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -812,6 +812,23 @@ def _start_port_server(port_server_port):
     raise
 
 
+def _calculate_num_runs_failures(list_of_results):
+  """Calculate the number of runs and failures for a particular test.
+
+  Args:
+    list_of_results: (List) of JobResult objects.
+  Returns:
+    A tuple of total number of runs and failures.
+  """
+  num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
+  num_failures = 0
+  for jobresult in list_of_results:
+    if jobresult.retries > 0:
+      num_runs += jobresult.retries
+    if jobresult.num_failures > 0:
+      num_failures += jobresult.num_failures
+  return num_runs, num_failures
+
 def _build_and_run(
     check_cancelled, newline_on_success, travis, cache, xml_report=None):
   """Do one pass of building & running tests."""
@@ -853,13 +870,24 @@ def _build_and_run(
 
     root = ET.Element('testsuites') if xml_report else None
     testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc',
                               name='tests') if xml_report else None
-    number_failures, _ = jobset.run(
-        all_runs, check_cancelled, newline_on_success=newline_on_success,
+    number_failures, resultset = jobset.run(
+        all_runs, check_cancelled, newline_on_success=newline_on_success,
         travis=travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
         stop_on_failure=args.stop_on_failure,
         cache=cache if not xml_report else None, xml_report=testsuite,
         add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    if resultset:
+      for k, v in resultset.iteritems():
+        num_runs, num_failures = _calculate_num_runs_failures(v)
+        if num_failures == num_runs:  # what about infinite_runs???
+          jobset.message('FAILED', k, do_newline=True)
+        elif num_failures > 0:
+          jobset.message(
+              'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+              do_newline=True)
+        else:
+          jobset.message('PASSED', k, do_newline=True)
     if number_failures:
       return 2
   finally:
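
Taken together, run_tests.py can now tell a flaky test from a hard failure: _calculate_num_runs_failures folds each JobResult's retries into the total run count and sums num_failures, and the reporting loop labels a shortname FAILED only when every run failed. The sketch below reproduces that classification in isolation; the JobResult stub and the classify helper (which condenses the loop in _build_and_run) are hypothetical stand-ins, not the script's actual API:

class JobResult(object):
    def __init__(self, num_failures=0, retries=0):
        self.num_failures = num_failures
        self.retries = retries

def _calculate_num_runs_failures(list_of_results):
    num_runs = len(list_of_results)  # by default, 1 run per JobResult
    num_failures = 0
    for jobresult in list_of_results:
        if jobresult.retries > 0:
            num_runs += jobresult.retries
        if jobresult.num_failures > 0:
            num_failures += jobresult.num_failures
    return num_runs, num_failures

def classify(list_of_results):
    num_runs, num_failures = _calculate_num_runs_failures(list_of_results)
    if num_failures == num_runs:
        return 'FAILED'
    elif num_failures > 0:
        return 'FLAKE [%d/%d runs flaked]' % (num_failures, num_runs)
    return 'PASSED'

# One JobResult that needed a retry before passing: 2 runs, 1 failure.
assert classify([JobResult(num_failures=1, retries=1)]) == 'FLAKE [1/2 runs flaked]'
# Every run failed -> hard failure; no failures at all -> pass.
assert classify([JobResult(num_failures=1)]) == 'FAILED'
assert classify([JobResult()]) == 'PASSED'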