author    | Mark D. Roth <roth@google.com> | 2016-07-21 11:17:50 -0700
committer | Mark D. Roth <roth@google.com> | 2016-07-21 11:17:50 -0700
commit    | daf626c3bba0266a090661530f264d5b6870b2a3 (patch)
tree      | 7705f7d0ecf2241a2124730a0c63cdde9e3055bb /tools/run_tests
parent    | 8d1d2ee1d771d70d24932c552bccfe1bd7d51066 (diff)
parent    | 898d84d309ede54c19c29e18a3d78546b3168323 (diff)
Merge branch 'filter_call_init_failure' into filter_api
Diffstat (limited to 'tools/run_tests')
-rw-r--r-- | tools/run_tests/perf_html_report.template | 21
-rwxr-xr-x | tools/run_tests/performance/bq_upload_result.py | 2
-rw-r--r-- | tools/run_tests/performance/scenario_result_schema.json | 10
-rw-r--r-- | tools/run_tests/report_utils.py | 37
-rwxr-xr-x | tools/run_tests/run_interop_tests.py | 2
-rwxr-xr-x | tools/run_tests/run_performance_tests.py | 11
6 files changed, 81 insertions, 2 deletions
diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
new file mode 100644
index 0000000000..c219fa888a
--- /dev/null
+++ b/tools/run_tests/perf_html_report.template
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html lang="en">
+<head><title>Performance Test Result</title></head>
+<body>
+  <h2>Performance Test Result</h2>
+  <table style="width:50%" border="1">
+  <% sorted_test_cases = sorted(resultset.keys()) %>
+  % for test_case in sorted_test_cases:
+    <tr><td bgcolor="#00BFFF" style="width:30%"><b>${test_case}</b></td>
+    <% result = resultset[test_case] %>
+    <td>
+    % for k, v in result.iteritems():
+      ${k}: ${v}<br>
+    % endfor
+    </td>
+    </tr>
+  % endfor
+  </table>
+
+</body>
+</html>
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index fbccf3bdca..2a99499843 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -118,6 +118,8 @@ def _flatten_result_inplace(scenario_result):
   for stats in scenario_result['clientStats']:
     stats['latencies'] = json.dumps(stats['latencies'])
   scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+  scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
+  scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
 
 
 def _populate_metadata_inplace(scenario_result):
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index 0325414757..6bec21df39 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -198,5 +198,15 @@
         "mode": "NULLABLE"
       }
     ]
+  },
+  {
+    "name": "clientSuccess",
+    "type": "STRING",
+    "mode": "NULLABLE"
+  },
+  {
+    "name": "serverSuccess",
+    "type": "STRING",
+    "mode": "NULLABLE"
   }
 ]
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 5648a694cd..7188d3dcd7 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -37,6 +37,8 @@ try:
   from mako import exceptions
 except (ImportError):
   pass  # Mako not installed but it is ok.
+import glob
+import json
 import os
 import string
 import xml.etree.cElementTree as ET
@@ -120,3 +122,38 @@ def render_interop_html_report(
     print(exceptions.text_error_template().render())
     raise
 
+
+def render_perf_html_report(report_dir):
+  """Generate a simple HTML report for the perf tests."""
+  template_file = 'tools/run_tests/perf_html_report.template'
+  try:
+    mytemplate = Template(filename=template_file, format_exceptions=True)
+  except NameError:
+    print('Mako template is not installed. Skipping HTML report generation.')
+    return
+  except IOError as e:
+    print('Failed to find the template %s: %s' % (template_file, e))
+    return
+
+  resultset = {}
+  for result_file in glob.glob(os.path.join(report_dir, '*.json')):
+    with open(result_file, 'r') as f:
+      scenario_result = json.loads(f.read())
+      test_case = scenario_result['scenario']['name']
+      if 'ping_pong' in test_case:
+        latency50 = round(scenario_result['summary']['latency50'], 2)
+        latency99 = round(scenario_result['summary']['latency99'], 2)
+        summary = {'latency50': latency50, 'latency99': latency99}
+      else:
+        summary = {'qps': round(scenario_result['summary']['qps'], 2)}
+      resultset[test_case] = summary
+
+  args = {'resultset': resultset}
+
+  html_file_path = os.path.join(report_dir, 'index.html')
+  try:
+    with open(html_file_path, 'w') as output_file:
+      mytemplate.render_context(Context(output_file, **args))
+  except:
+    print(exceptions.text_error_template().render())
+    raise
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index f9065c5bfd..2e5a2f7721 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -288,7 +288,7 @@ class RubyLanguage:
     return {}
 
   def unimplemented_test_cases(self):
-    return _SKIP_ADVANCED + _SKIP_COMPRESSION
+    return _SKIP_ADVANCED + _SKIP_SERVER_COMPRESSION
 
   def unimplemented_test_cases_server(self):
     return _SKIP_ADVANCED + _SKIP_COMPRESSION
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5fdf7a407d..5ff9696808 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,6 +40,7 @@ import multiprocessing
 import os
 import pipes
 import re
+import report_utils
 import subprocess
 import sys
 import tempfile
@@ -54,6 +55,7 @@ os.chdir(_ROOT)
 
 _REMOTE_HOST_USERNAME = 'jenkins'
 
+_REPORT_DIR = 'perf_reports'
 
 
 class QpsWorkerJob:
@@ -103,7 +105,11 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
  cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json'
+  if not os.path.isdir(_REPORT_DIR):
+    os.makedirs(_REPORT_DIR)
+  report_path = os.path.join(_REPORT_DIR,
+                             '%s-scenario_result.json' % scenario_json['name'])
+  cmd += '--scenario_result_file=%s' % report_path
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -436,6 +442,9 @@ try:
   jobset.message('START', 'Running scenarios.', do_newline=True)
   num_failures, _ = jobset.run(
       scenarios, newline_on_success=True, maxjobs=1)
+
+  report_utils.render_perf_html_report(_REPORT_DIR)
+
   if num_failures == 0:
     jobset.message('SUCCESS',
                    'All scenarios finished successfully.',
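
Not part of the commit: a minimal usage sketch of the new perf report hook. It assumes Mako is installed, the working directory is the grpc repository root (so the relative template path resolves), and tools/run_tests is added to sys.path so report_utils can be imported. The sample scenario result file is hypothetical and only illustrates the shape of the qps driver output the template expects.

# Illustrative sketch only; mirrors the calls added to run_performance_tests.py.
import json
import os
import sys

# Assumption: run from the grpc repo root; make report_utils importable.
sys.path.append('tools/run_tests')
import report_utils

_REPORT_DIR = 'perf_reports'

if not os.path.isdir(_REPORT_DIR):
  os.makedirs(_REPORT_DIR)

# Hypothetical scenario result with the fields render_perf_html_report()
# reads: a scenario name plus a summary section.
sample = {
    'scenario': {'name': 'cpp_protobuf_async_unary_ping_pong'},
    'summary': {'latency50': 123.45, 'latency99': 678.9, 'qps': 1000.0},
}
with open(os.path.join(_REPORT_DIR, 'sample-scenario_result.json'), 'w') as f:
  json.dump(sample, f)

# Reads every *.json file in the directory and writes perf_reports/index.html.
report_utils.render_perf_html_report(_REPORT_DIR)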