From 0589e533cd65a2ca9e0e610cc1b284d016986572 Mon Sep 17 00:00:00 2001
From: siddharthshukla
Date: Thu, 7 Jul 2016 16:08:01 +0200
Subject: Make testing toolchain python 3.x compliant

six is necessary for making these scripts cross-compatible between
python 2.x and 3.x.

Changes:
- Add six to python_deps.include
- Include python_deps.include in all Dockerfile templates in the test directory
- Include python_deps.include in all Dockerfile templates in the stress_test directory
- Include python_deps.include in all Dockerfile templates in the interop_test directory
- Replace print statements with print() function calls (from __future__)
- Replace .iteritems() with .items() wherever necessary
- Use six.moves to import BaseHTTPServer
- Generate new Dockerfiles using generate_projects.sh
---
 tools/run_tests/run_performance_tests.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'tools/run_tests/run_performance_tests.py')

diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 14901caf07..5fdf7a407d 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -30,6 +30,8 @@

 """Run performance tests locally or remotely."""

+from __future__ import print_function
+
 import argparse
 import itertools
 import jobset
@@ -310,7 +312,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
                         'in the same scenario')
       if custom_server_lang:
         if not workers_by_lang.get(custom_server_lang, []):
-          print 'Warning: Skipping scenario %s as' % scenario_json['name']
+          print('Warning: Skipping scenario %s as' % scenario_json['name'])
           print('SERVER_LANGUAGE is set to %s yet the language has '
                 'not been selected with -l' % custom_server_lang)
           continue
@@ -319,7 +321,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
           workers[idx] = workers_by_lang[custom_server_lang][idx]
       if custom_client_lang:
         if not workers_by_lang.get(custom_client_lang, []):
-          print 'Warning: Skipping scenario %s as' % scenario_json['name']
+          print('Warning: Skipping scenario %s as' % scenario_json['name'])
           print('CLIENT_LANGUAGE is set to %s yet the language has '
                 'not been selected with -l' % custom_client_lang)
           continue
@@ -344,14 +346,14 @@ def finish_qps_workers(jobs):
   while any(job.is_running() for job in jobs):
     for job in qpsworker_jobs:
       if job.is_running():
-        print 'QPS worker "%s" is still running.' % job.host_and_port
+        print('QPS worker "%s" is still running.' % job.host_and_port)
     if retries > 10:
-      print 'Killing all QPS workers.'
+      print('Killing all QPS workers.')
       for job in jobs:
         job.kill()
     retries += 1
     time.sleep(3)
-  print 'All QPS workers finished.'
+  print('All QPS workers finished.')


 argp = argparse.ArgumentParser(description='Run performance tests.')
--
cgit v1.2.3
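
The patch above leans on three standard 2.x/3.x bridge idioms: a __future__ import that makes print a function, dict.items() in place of the 2.x-only dict.iteritems(), and six.moves for stdlib modules renamed in 3.x. A minimal sketch of all three together; the dump_stats helper and the sample dict are illustrative, not code from the gRPC tree:

    # Runs unchanged on Python 2.x and 3.x.
    from __future__ import print_function  # print() as a function on both

    from six.moves import BaseHTTPServer  # py2: BaseHTTPServer, py3: http.server

    Handler = BaseHTTPServer.BaseHTTPRequestHandler  # same class under both names


    def dump_stats(stats):
      # dict.items() exists on both versions; dict.iteritems() is 2.x-only.
      for key, value in stats.items():
        print('%s: %s' % (key, value))


    dump_stats({'qps': 12345.6, 'latency50': 30.59})
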
From 7d4373bc6e63be216e8895bcb494e6bda88372a0 Mon Sep 17 00:00:00 2001
From: Adele Zhou
Date: Thu, 16 Jun 2016 16:10:49 -0700
Subject: Generate html report for perf tests

---
 tools/run_tests/perf_html_report.template | 21 ++++++++++++++++++
 tools/run_tests/report_utils.py           | 37 +++++++++++++++++++++++++++++++
 tools/run_tests/run_performance_tests.py  | 11 ++++++++-
 3 files changed, 68 insertions(+), 1 deletion(-)
 create mode 100644 tools/run_tests/perf_html_report.template

(limited to 'tools/run_tests/run_performance_tests.py')

diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
new file mode 100644
index 0000000000..c219fa888a
--- /dev/null
+++ b/tools/run_tests/perf_html_report.template
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html lang="en">
+<head><title>Performance Test Result</title></head>
+<body>
+  <h2>Performance Test Result</h2>
+  <table style="width:50%" border="1">
+  <% sorted_test_cases = sorted(resultset.keys()) %>
+  % for test_case in sorted_test_cases:
+    <tr>
+      <% result = resultset[test_case] %>
+      <td>${test_case}</td>
+      <td>
+        % for k, v in result.iteritems():
+          ${k}: ${v}<br>
+        % endfor
+      </td>
+    </tr>
+  % endfor
+  </table>
+</body>
+</html>
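
The template mixes Mako control lines (% for, <% ... %>) with ${...} substitutions; keyword arguments passed at render time become template variables, so the page expects a resultset dict of per-test-case summaries. Note that result.iteritems() keeps the template Python-2-only. A minimal rendering sketch, with an invented resultset (real values come from the scenario JSON files):

    # Render the report template by hand (Python 2.x, since the template
    # itself still calls iteritems()); sample numbers are invented.
    from mako.template import Template

    resultset = {
        'cpp_protobuf_async_ping_pong': {'latency50': 30.59, 'latency99': 96.23},
    }
    page = Template(filename='tools/run_tests/perf_html_report.template')
    print(page.render(resultset=resultset))  # kwargs become template variables
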
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 5648a694cd..7188d3dcd7 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -37,6 +37,8 @@ try:
   from mako import exceptions
 except (ImportError):
   pass  # Mako not installed but it is ok.
+import glob
+import json
 import os
 import string
 import xml.etree.cElementTree as ET
@@ -120,3 +122,38 @@ def render_interop_html_report(
     print(exceptions.text_error_template().render())
     raise
+
+def render_perf_html_report(report_dir):
+  """Generate a simple HTML report for the perf tests."""
+  template_file = 'tools/run_tests/perf_html_report.template'
+  try:
+    mytemplate = Template(filename=template_file, format_exceptions=True)
+  except NameError:
+    print('Mako template is not installed. Skipping HTML report generation.')
+    return
+  except IOError as e:
+    print('Failed to find the template %s: %s' % (template_file, e))
+    return
+
+  resultset = {}
+  for result_file in glob.glob(os.path.join(report_dir, '*.json')):
+    with open(result_file, 'r') as f:
+      scenario_result = json.loads(f.read())
+      test_case = scenario_result['scenario']['name']
+      if 'ping_pong' in test_case:
+        latency50 = round(scenario_result['summary']['latency50'], 2)
+        latency99 = round(scenario_result['summary']['latency99'], 2)
+        summary = {'latency50': latency50, 'latency99': latency99}
+      else:
+        summary = {'qps': round(scenario_result['summary']['qps'], 2)}
+      resultset[test_case] = summary
+
+  args = {'resultset': resultset}
+
+  html_file_path = os.path.join(report_dir, 'index.html')
+  try:
+    with open(html_file_path, 'w') as output_file:
+      mytemplate.render_context(Context(output_file, **args))
+  except:
+    print(exceptions.text_error_template().render())
+    raise
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5fdf7a407d..5ff9696808 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,6 +40,7 @@ import multiprocessing
 import os
 import pipes
 import re
+import report_utils
 import subprocess
 import sys
 import tempfile
@@ -54,6 +55,7 @@ os.chdir(_ROOT)

 _REMOTE_HOST_USERNAME = 'jenkins'
+_REPORT_DIR = 'perf_reports'


 class QpsWorkerJob:
@@ -103,7 +105,11 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
   cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json'
+  if not os.path.isdir(_REPORT_DIR):
+    os.makedirs(_REPORT_DIR)
+  report_path = os.path.join(_REPORT_DIR,
+                             '%s-scenario_result.json' % scenario_json['name'])
+  cmd += '--scenario_result_file=%s' % report_path
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -436,6 +442,9 @@ try:
   jobset.message('START', 'Running scenarios.', do_newline=True)
   num_failures, _ = jobset.run(
       scenarios, newline_on_success=True, maxjobs=1)
+
+  report_utils.render_perf_html_report(_REPORT_DIR)
+
   if num_failures == 0:
     jobset.message('SUCCESS',
                    'All scenarios finished successfully.',
--
cgit v1.2.3
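
End to end, the commit works like this: each scenario job writes its result to perf_reports/<scenario name>-scenario_result.json, and after the run render_perf_html_report globs those files, keeps the latency percentiles for ping-pong scenarios and QPS for everything else, then renders perf_reports/index.html. A sketch of driving the report by hand with a fabricated result file; only the fields the report actually reads are shown, and the sys.path line is an assumption about running from the repo root:

    # Hypothetical standalone driver for render_perf_html_report; run from
    # the gRPC repo root so the template path resolves.
    import json
    import os
    import sys

    sys.path.insert(0, 'tools/run_tests')  # assumption: makes report_utils importable
    import report_utils

    if not os.path.isdir('perf_reports'):
      os.makedirs('perf_reports')

    # 'ping_pong' in the name selects the latency branch of the report.
    fake_result = {
        'scenario': {'name': 'cpp_protobuf_async_ping_pong'},
        'summary': {'latency50': 30.594, 'latency99': 96.228},
    }
    path = 'perf_reports/cpp_protobuf_async_ping_pong-scenario_result.json'
    with open(path, 'w') as f:
      json.dump(fake_result, f)

    # Globs perf_reports/*.json and writes perf_reports/index.html.
    report_utils.render_perf_html_report('perf_reports')
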
From c23d33b29ba6971713da5128687cba4984e0468a Mon Sep 17 00:00:00 2001
From: Vijay Pai
Date: Tue, 19 Jul 2016 11:19:12 -0700
Subject: Revert "Generate html report for perf tests"

---
 tools/run_tests/perf_html_report.template | 21 ------------------
 tools/run_tests/report_utils.py           | 37 -------------------------------
 tools/run_tests/run_performance_tests.py  | 11 +--------
 3 files changed, 1 insertion(+), 68 deletions(-)
 delete mode 100644 tools/run_tests/perf_html_report.template

(limited to 'tools/run_tests/run_performance_tests.py')

diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
deleted file mode 100644
index c219fa888a..0000000000
--- a/tools/run_tests/perf_html_report.template
+++ /dev/null
@@ -1,21 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-<head><title>Performance Test Result</title></head>
-<body>
-  <h2>Performance Test Result</h2>
-  <table style="width:50%" border="1">
-  <% sorted_test_cases = sorted(resultset.keys()) %>
-  % for test_case in sorted_test_cases:
-    <tr>
-      <% result = resultset[test_case] %>
-      <td>${test_case}</td>
-      <td>
-        % for k, v in result.iteritems():
-          ${k}: ${v}<br>
-        % endfor
-      </td>
-    </tr>
-  % endfor
-  </table>
-</body>
-</html>
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 7188d3dcd7..5648a694cd 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -37,8 +37,6 @@ try:
   from mako import exceptions
 except (ImportError):
   pass  # Mako not installed but it is ok.
-import glob
-import json
 import os
 import string
 import xml.etree.cElementTree as ET
@@ -122,38 +120,3 @@ def render_interop_html_report(
     print(exceptions.text_error_template().render())
     raise
-
-def render_perf_html_report(report_dir):
-  """Generate a simple HTML report for the perf tests."""
-  template_file = 'tools/run_tests/perf_html_report.template'
-  try:
-    mytemplate = Template(filename=template_file, format_exceptions=True)
-  except NameError:
-    print('Mako template is not installed. Skipping HTML report generation.')
-    return
-  except IOError as e:
-    print('Failed to find the template %s: %s' % (template_file, e))
-    return
-
-  resultset = {}
-  for result_file in glob.glob(os.path.join(report_dir, '*.json')):
-    with open(result_file, 'r') as f:
-      scenario_result = json.loads(f.read())
-      test_case = scenario_result['scenario']['name']
-      if 'ping_pong' in test_case:
-        latency50 = round(scenario_result['summary']['latency50'], 2)
-        latency99 = round(scenario_result['summary']['latency99'], 2)
-        summary = {'latency50': latency50, 'latency99': latency99}
-      else:
-        summary = {'qps': round(scenario_result['summary']['qps'], 2)}
-      resultset[test_case] = summary
-
-  args = {'resultset': resultset}
-
-  html_file_path = os.path.join(report_dir, 'index.html')
-  try:
-    with open(html_file_path, 'w') as output_file:
-      mytemplate.render_context(Context(output_file, **args))
-  except:
-    print(exceptions.text_error_template().render())
-    raise
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5ff9696808..5fdf7a407d 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,7 +40,6 @@ import multiprocessing
 import os
 import pipes
 import re
-import report_utils
 import subprocess
 import sys
 import tempfile
@@ -55,7 +54,6 @@ os.chdir(_ROOT)

 _REMOTE_HOST_USERNAME = 'jenkins'
-_REPORT_DIR = 'perf_reports'


 class QpsWorkerJob:
@@ -105,11 +103,7 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
   cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  if not os.path.isdir(_REPORT_DIR):
-    os.makedirs(_REPORT_DIR)
-  report_path = os.path.join(_REPORT_DIR,
-                             '%s-scenario_result.json' % scenario_json['name'])
-  cmd += '--scenario_result_file=%s' % report_path
+  cmd += '--scenario_result_file=scenario_result.json'
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -442,9 +436,6 @@ try:
   jobset.message('START', 'Running scenarios.', do_newline=True)
   num_failures, _ = jobset.run(
       scenarios, newline_on_success=True, maxjobs=1)
-
-  report_utils.render_perf_html_report(_REPORT_DIR)
-
   if num_failures == 0:
     jobset.message('SUCCESS',
                    'All scenarios finished successfully.',
--
cgit v1.2.3
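
The revert returns create_scenario_jobspec to a single fixed output path, so every scenario overwrites scenario_result.json rather than accumulating per-scenario files for a report. For reference, a small sketch of how the restored lines assemble the driver command; the scenario name is a made-up sample:

    # Mirrors the restored create_scenario_jobspec lines.
    import json
    import pipes

    scenario_json = {'name': 'cpp_protobuf_async_ping_pong'}  # sample name
    cmd = 'tools/run_tests/performance/run_qps_driver.sh '
    # pipes.quote shell-escapes the embedded JSON into a single argument.
    cmd += '--scenarios_json=%s ' % pipes.quote(
        json.dumps({'scenarios': [scenario_json]}))
    cmd += '--scenario_result_file=scenario_result.json'  # fixed path again
    print(cmd)
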