From cdff92f02ea04ef238b6393a843e3605060efde1 Mon Sep 17 00:00:00 2001
From: Alexander Polcyn
Date: Tue, 12 Jul 2016 12:30:30 -0700
Subject: added ruby client compression interop tests

---
 tools/run_tests/run_interop_tests.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 13a4a49325..b71627a9c8 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -286,7 +286,7 @@ class RubyLanguage:
     return {}
 
   def unimplemented_test_cases(self):
-    return _SKIP_ADVANCED + _SKIP_COMPRESSION
+    return _SKIP_ADVANCED + _SKIP_SERVER_COMPRESSION
 
   def unimplemented_test_cases_server(self):
     return _SKIP_ADVANCED + _SKIP_COMPRESSION
-- cgit v1.2.3


From d28fb6df8f31405eb7a6e9baaf577e052a300d70 Mon Sep 17 00:00:00 2001
From: yang-g
Date: Thu, 14 Jul 2016 18:32:45 -0700
Subject: regenerate projects

---
 tools/dockerfile/test/python_pyenv_x64/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/dockerfile/test/python_pyenv_x64/Dockerfile b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
index e2355c4443..abb5f3c89b 100644
--- a/tools/dockerfile/test/python_pyenv_x64/Dockerfile
+++ b/tools/dockerfile/test/python_pyenv_x64/Dockerfile
@@ -76,7 +76,7 @@ RUN apt-get update && apt-get install -y \
 # Install Python packages from PyPI
 RUN pip install pip --upgrade
 RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.0.0a2 six==1.10.0
 
 # Install dependencies for pyenv
 RUN apt-get update && apt-get install -y \
-- cgit v1.2.3


From 6dbfce0a9de06c4aad3ba044b87413abfb25e9f3 Mon Sep 17 00:00:00 2001
From: Sree Kuchibhotla
Date: Fri, 15 Jul 2016 11:05:24 -0700
Subject: Add clientSuccess and serverSuccess to BQ schema

---
 tools/run_tests/performance/bq_upload_result.py          |  2 ++
 tools/run_tests/performance/scenario_result_schema.json  | 10 ++++++++++
 2 files changed, 12 insertions(+)

(limited to 'tools')

diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index fbccf3bdca..2a99499843 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -118,6 +118,8 @@ def _flatten_result_inplace(scenario_result):
   for stats in scenario_result['clientStats']:
     stats['latencies'] = json.dumps(stats['latencies'])
   scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+  scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
+  scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
 
 
 def _populate_metadata_inplace(scenario_result):
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index 0325414757..6bec21df39 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -198,5 +198,15 @@
         "mode": "NULLABLE"
       }
     ]
+  },
+  {
+    "name": "clientSuccess",
+    "type": "STRING",
+    "mode": "NULLABLE"
+  },
+  {
+    "name": "serverSuccess",
+    "type": "STRING",
+    "mode": "NULLABLE"
   }
 ]
-- cgit v1.2.3
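
The flattening step in the patch above mirrors how latencies and serverCores are already handled: repeated values are JSON-encoded into the new STRING/NULLABLE columns so each BigQuery row stays flat. A minimal sketch of that pattern in isolation (the sample values are invented for illustration, not taken from a real run):

import json

def flatten_result_inplace(scenario_result):
    # Encode repeated success flags as JSON strings to fit STRING columns.
    scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
    scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])

result = {'clientSuccess': [True, True, False], 'serverSuccess': [True]}
flatten_result_inplace(result)
print(result['clientSuccess'])  # -> '[true, true, false]', a single string cell
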
From 7d4373bc6e63be216e8895bcb494e6bda88372a0 Mon Sep 17 00:00:00 2001
From: Adele Zhou
Date: Thu, 16 Jun 2016 16:10:49 -0700
Subject: Generate html report for perf tests

---
 tools/run_tests/perf_html_report.template | 21 ++++++++++++++++++
 tools/run_tests/report_utils.py           | 37 +++++++++++++++++++++++++++++++
 tools/run_tests/run_performance_tests.py  | 11 ++++++++-
 3 files changed, 68 insertions(+), 1 deletion(-)
 create mode 100644 tools/run_tests/perf_html_report.template

(limited to 'tools')

diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
new file mode 100644
index 0000000000..c219fa888a
--- /dev/null
+++ b/tools/run_tests/perf_html_report.template
@@ -0,0 +1,21 @@
+<html>
+<head>
+<title>Performance Test Result</title>
+<body>
+  <h2>Performance Test Result</h2>
+  <table border="1">
+  <% sorted_test_cases = sorted(resultset.keys()) %>
+  % for test_case in sorted_test_cases:
+    <tr>
+    <% result = resultset[test_case] %>
+    <td>${test_case}</td>
+    <td>
+      % for k, v in result.iteritems():
+        ${k}: ${v}<br>
+      % endfor
+    </td>
+    </tr>
+  % endfor
+  </table>
+</body>
+</html>
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 5648a694cd..7188d3dcd7 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -37,6 +37,8 @@ try:
   from mako import exceptions
 except (ImportError):
   pass # Mako not installed but it is ok.
+import glob
+import json
 import os
 import string
 import xml.etree.cElementTree as ET
@@ -120,3 +122,38 @@ def render_interop_html_report(
     print(exceptions.text_error_template().render())
     raise
 
+
+def render_perf_html_report(report_dir):
+  """Generate a simple HTML report for the perf tests."""
+  template_file = 'tools/run_tests/perf_html_report.template'
+  try:
+    mytemplate = Template(filename=template_file, format_exceptions=True)
+  except NameError:
+    print('Mako template is not installed. Skipping HTML report generation.')
+    return
+  except IOError as e:
+    print('Failed to find the template %s: %s' % (template_file, e))
+    return
+
+  resultset = {}
+  for result_file in glob.glob(os.path.join(report_dir, '*.json')):
+    with open(result_file, 'r') as f:
+      scenario_result = json.loads(f.read())
+      test_case = scenario_result['scenario']['name']
+      if 'ping_pong' in test_case:
+        latency50 = round(scenario_result['summary']['latency50'], 2)
+        latency99 = round(scenario_result['summary']['latency99'], 2)
+        summary = {'latency50': latency50, 'latency99': latency99}
+      else:
+        summary = {'qps': round(scenario_result['summary']['qps'], 2)}
+      resultset[test_case] = summary
+
+  args = {'resultset': resultset}
+
+  html_file_path = os.path.join(report_dir, 'index.html')
+  try:
+    with open(html_file_path, 'w') as output_file:
+      mytemplate.render_context(Context(output_file, **args))
+  except:
+    print(exceptions.text_error_template().render())
+    raise
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5fdf7a407d..5ff9696808 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,6 +40,7 @@ import multiprocessing
 import os
 import pipes
 import re
+import report_utils
 import subprocess
 import sys
 import tempfile
@@ -54,6 +55,7 @@ os.chdir(_ROOT)
 
 
 _REMOTE_HOST_USERNAME = 'jenkins'
+_REPORT_DIR = 'perf_reports'
 
 
 class QpsWorkerJob:
@@ -103,7 +105,11 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
   cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json'
+  if not os.path.isdir(_REPORT_DIR):
+    os.makedirs(_REPORT_DIR)
+  report_path = os.path.join(_REPORT_DIR,
+                             '%s-scenario_result.json' % scenario_json['name'])
+  cmd += '--scenario_result_file=%s' % report_path
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -436,6 +442,9 @@
   jobset.message('START', 'Running scenarios.', do_newline=True)
   num_failures, _ = jobset.run(
       scenarios, newline_on_success=True, maxjobs=1)
+
+  report_utils.render_perf_html_report(_REPORT_DIR)
+
   if num_failures == 0:
     jobset.message('SUCCESS',
                    'All scenarios finished successfully.',
-- cgit v1.2.3
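
With the patch above, each scenario's driver output lands in perf_reports/<scenario name>-scenario_result.json, and render_perf_html_report() summarizes every file it finds there: latency percentiles for ping_pong scenarios, QPS for everything else. A standalone sketch of just that selection logic (the scenario name and numbers below are made up):

def summarize(scenario_result):
    # ping_pong scenarios are latency-oriented; all others report QPS.
    name = scenario_result['scenario']['name']
    summary = scenario_result['summary']
    if 'ping_pong' in name:
        return {'latency50': round(summary['latency50'], 2),
                'latency99': round(summary['latency99'], 2)}
    return {'qps': round(summary['qps'], 2)}

fake = {'scenario': {'name': 'cpp_async_streaming_ping_pong'},
        'summary': {'latency50': 123.456, 'latency99': 789.012}}
print(summarize(fake))  # {'latency50': 123.46, 'latency99': 789.01}
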
From 6660c110dfc842314ba1ef063d8fcbe3f79dd180 Mon Sep 17 00:00:00 2001
From: thinkerou
Date: Sun, 17 Jul 2016 18:27:23 +0800
Subject: fix spell error by pyflakes found

---
 tools/gcp/stress_test/stress_test_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/gcp/stress_test/stress_test_utils.py b/tools/gcp/stress_test/stress_test_utils.py
index b821fc8fcc..be50af3184 100755
--- a/tools/gcp/stress_test/stress_test_utils.py
+++ b/tools/gcp/stress_test/stress_test_utils.py
@@ -121,7 +121,7 @@ class BigQueryHelper:
     if not page['jobComplete']:
       print('TIMEOUT ERROR: The query %s timed out. Current timeout value is'
             ' %d msec. Returning False (i.e assuming there are no failures)'
-           ) % (query, timeoout_msec)
+           ) % (query, timeout_msec)
       return False
 
     num_failures = int(page['totalRows'])
-- cgit v1.2.3


From 268a47a2b1402d92d924889b52d9b4266f53483c Mon Sep 17 00:00:00 2001
From: Craig Tiller
Date: Tue, 19 Jul 2016 12:57:18 -0700
Subject: Add latency profiling from 'important' marks

Allows seeing directly how long was spent:
- running up to putting bytes on the wire
- between sending and receiving
- between receiving and finishing
---
 .../profiling/latency_profile/profile_analyzer.py | 28 ++++++++++++++++------
 1 file changed, 21 insertions(+), 7 deletions(-)

(limited to 'tools')

diff --git a/tools/profiling/latency_profile/profile_analyzer.py b/tools/profiling/latency_profile/profile_analyzer.py
index dad0712d40..48b8e9b950 100755
--- a/tools/profiling/latency_profile/profile_analyzer.py
+++ b/tools/profiling/latency_profile/profile_analyzer.py
@@ -43,6 +43,7 @@ TIME_FROM_SCOPE_START = object()
 TIME_TO_SCOPE_END = object()
 TIME_FROM_STACK_START = object()
 TIME_TO_STACK_END = object()
+TIME_FROM_LAST_IMPORTANT = object()
 
 
 argp = argparse.ArgumentParser(description='Process output of basic_prof builds')
@@ -78,10 +79,14 @@ class ScopeBuilder(object):
     self.call_stack_builder.lines.append(line_item)
 
   def finish(self, line):
-    assert line['tag'] == self.top_line.tag, 'expected %s, got %s; thread=%s; t0=%f t1=%f' % (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time, line['t'])
+    assert line['tag'] == self.top_line.tag, (
+        'expected %s, got %s; thread=%s; t0=%f t1=%f' %
+        (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time,
+         line['t']))
     final_time_stamp = line['t']
     assert self.top_line.end_time is None
     self.top_line.end_time = final_time_stamp
+    self.top_line.important = self.top_line.important or line['imp']
     assert SELF_TIME not in self.top_line.times
     self.top_line.times[SELF_TIME] = final_time_stamp - self.top_line.start_time
     for line in self.call_stack_builder.lines[self.first_child_pos:]:
@@ -101,9 +106,14 @@ class CallStackBuilder(object):
     start_time = self.lines[0].start_time
     end_time = self.lines[0].end_time
     self.signature = self.signature.hexdigest()
+    last_important = start_time
     for line in self.lines:
       line.times[TIME_FROM_STACK_START] = line.start_time - start_time
       line.times[TIME_TO_STACK_END] = end_time - line.end_time
+      line.times[TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
+      if line.important:
+        last_important = line.end_time
+    last_important = end_time
 
   def add(self, line):
     line_type = line['type']
@@ -113,7 +123,9 @@
       self.stk.append(ScopeBuilder(self, line))
       return False
     elif line_type == '}':
-      assert self.stk, 'expected non-empty stack for closing %s; thread=%s; t=%f' % (line['tag'], line['thd'], line['t'])
+      assert self.stk, (
+          'expected non-empty stack for closing %s; thread=%s; t=%f' %
+          (line['tag'], line['thd'], line['t']))
       self.stk.pop().finish(line)
       if not self.stk:
         self.finish()
@@ -216,9 +228,16 @@ def time_format(idx):
       return ''
   return ent
 
+BANNER = {
+  'simple': 'Count: %(count)d',
+  'html': '<h1>Count: %(count)d</h1>'
+}
+
 FORMAT = [
   ('TAG', lambda line: '..'*line.indent + tidy_tag(line.tag)),
   ('LOC', lambda line: '%s:%d' % (line.filename[line.filename.rfind('/')+1:], line.fileline)),
+  ('IMP', lambda line: '*' if line.important else ''),
+  ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
   ('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
   ('SELF', time_format(SELF_TIME)),
   ('TO_STACK_END', time_format(TIME_TO_STACK_END)),
   ('FROM_SCOPE_START', time_format(TIME_FROM_SCOPE_START)),
   ('SELF', time_format(SELF_TIME)),
   ('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
 ]
 
-BANNER = {
-  'simple': 'Count: %(count)d',
-  'html': '<h1>Count: %(count)d</h1>'
-}
-
 if args.fmt == 'html':
   print '<html>'
   print '<head>'
-- cgit v1.2.3
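
The FROM_IMP column introduced above records, for each profile line, how long after the end of the previous 'important' mark that line started, which is what lets the report answer questions like "how long between sending and receiving". A toy rendering of the same bookkeeping outside the full CallStackBuilder (the event timings below are fabricated):

# Each event: (start_time, end_time, important)
events = [(0.0, 1.0, True),   # e.g. bytes handed to the wire
          (1.0, 4.0, False),  # intermediate work
          (4.0, 9.0, True)]   # e.g. response received

last_important = events[0][0]  # the stack start seeds the first mark
for start, end, important in events:
    # Time elapsed since the last important mark finished.
    from_imp = start - last_important
    print('start=%.1f FROM_IMP=%.1f' % (start, from_imp))
    if important:
        last_important = end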