path: root/tools
author     Mark D. Roth <roth@google.com>  2016-07-21 11:17:50 -0700
committer  Mark D. Roth <roth@google.com>  2016-07-21 11:17:50 -0700
commit     daf626c3bba0266a090661530f264d5b6870b2a3 (patch)
tree       7705f7d0ecf2241a2124730a0c63cdde9e3055bb /tools
parent     8d1d2ee1d771d70d24932c552bccfe1bd7d51066 (diff)
parent     898d84d309ede54c19c29e18a3d78546b3168323 (diff)
Merge branch 'filter_call_init_failure' into filter_api
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/gcp/stress_test/stress_test_utils.py                2
-rwxr-xr-x  tools/profiling/latency_profile/profile_analyzer.py      28
-rw-r--r--  tools/run_tests/perf_html_report.template                21
-rwxr-xr-x  tools/run_tests/performance/bq_upload_result.py           2
-rw-r--r--  tools/run_tests/performance/scenario_result_schema.json  10
-rw-r--r--  tools/run_tests/report_utils.py                           37
-rwxr-xr-x  tools/run_tests/run_interop_tests.py                       2
-rwxr-xr-x  tools/run_tests/run_performance_tests.py                  11
8 files changed, 103 insertions, 10 deletions
diff --git a/tools/gcp/stress_test/stress_test_utils.py b/tools/gcp/stress_test/stress_test_utils.py
index b821fc8fcc..be50af3184 100755
--- a/tools/gcp/stress_test/stress_test_utils.py
+++ b/tools/gcp/stress_test/stress_test_utils.py
@@ -121,7 +121,7 @@ class BigQueryHelper:
if not page['jobComplete']:
print('TIMEOUT ERROR: The query %s timed out. Current timeout value is'
' %d msec. Returning False (i.e assuming there are no failures)'
- ) % (query, timeoout_msec)
+ ) % (query, timeout_msec)
return False
num_failures = int(page['totalRows'])
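The one-character fix above is worth a note: the misspelled name lives inside a %-format expression in the timeout branch, so Python never resolves it at import time and nothing fails until a query actually times out. A minimal sketch of the failure mode, with hypothetical names rather than the real BigQueryHelper:

    def query_completed(page, query, timeout_msec):
        # Python resolves names only when a line executes, so a typo such
        # as 'timeoout_msec' here survives import and every run that never
        # takes the timeout branch, then raises NameError in production.
        if not page['jobComplete']:
            print('TIMEOUT ERROR: The query %s timed out. Current timeout value'
                  ' is %d msec. Returning False (i.e. assuming no failures)'
                  % (query, timeout_msec))
            return False
        return True

    query_completed({'jobComplete': False}, 'SELECT 1', 30000)  # exercises the branch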
diff --git a/tools/profiling/latency_profile/profile_analyzer.py b/tools/profiling/latency_profile/profile_analyzer.py
index dad0712d40..48b8e9b950 100755
--- a/tools/profiling/latency_profile/profile_analyzer.py
+++ b/tools/profiling/latency_profile/profile_analyzer.py
@@ -43,6 +43,7 @@ TIME_FROM_SCOPE_START = object()
TIME_TO_SCOPE_END = object()
TIME_FROM_STACK_START = object()
TIME_TO_STACK_END = object()
+TIME_FROM_LAST_IMPORTANT = object()
argp = argparse.ArgumentParser(description='Process output of basic_prof builds')
@@ -78,10 +79,14 @@ class ScopeBuilder(object):
self.call_stack_builder.lines.append(line_item)
def finish(self, line):
- assert line['tag'] == self.top_line.tag, 'expected %s, got %s; thread=%s; t0=%f t1=%f' % (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time, line['t'])
+ assert line['tag'] == self.top_line.tag, (
+ 'expected %s, got %s; thread=%s; t0=%f t1=%f' %
+ (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time,
+ line['t']))
final_time_stamp = line['t']
assert self.top_line.end_time is None
self.top_line.end_time = final_time_stamp
+ self.top_line.important = self.top_line.important or line['imp']
assert SELF_TIME not in self.top_line.times
self.top_line.times[SELF_TIME] = final_time_stamp - self.top_line.start_time
for line in self.call_stack_builder.lines[self.first_child_pos:]:
@@ -101,9 +106,14 @@ class CallStackBuilder(object):
start_time = self.lines[0].start_time
end_time = self.lines[0].end_time
self.signature = self.signature.hexdigest()
+ last_important = start_time
for line in self.lines:
line.times[TIME_FROM_STACK_START] = line.start_time - start_time
line.times[TIME_TO_STACK_END] = end_time - line.end_time
+ line.times[TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
+ if line.important:
+ last_important = line.end_time
+ last_important = end_time
def add(self, line):
line_type = line['type']
@@ -113,7 +123,9 @@ class CallStackBuilder(object):
self.stk.append(ScopeBuilder(self, line))
return False
elif line_type == '}':
- assert self.stk, 'expected non-empty stack for closing %s; thread=%s; t=%f' % (line['tag'], line['thd'], line['t'])
+ assert self.stk, (
+ 'expected non-empty stack for closing %s; thread=%s; t=%f' %
+ (line['tag'], line['thd'], line['t']))
self.stk.pop().finish(line)
if not self.stk:
self.finish()
@@ -216,9 +228,16 @@ def time_format(idx):
return ''
return ent
+BANNER = {
+ 'simple': 'Count: %(count)d',
+ 'html': '<h1>Count: %(count)d</h1>'
+}
+
FORMAT = [
('TAG', lambda line: '..'*line.indent + tidy_tag(line.tag)),
('LOC', lambda line: '%s:%d' % (line.filename[line.filename.rfind('/')+1:], line.fileline)),
+ ('IMP', lambda line: '*' if line.important else ''),
+ ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
('SELF', time_format(SELF_TIME)),
('TO_STACK_END', time_format(TIME_TO_STACK_END)),
@@ -227,11 +246,6 @@ FORMAT = [
('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
]
-BANNER = {
- 'simple': 'Count: %(count)d',
- 'html': '<h1>Count: %(count)d</h1>'
-}
-
if args.fmt == 'html':
print '<html>'
print '<head>'
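Taken together, the profile_analyzer.py changes thread an "important" flag from the raw log records (line['imp']) through ScopeBuilder.finish into a new per-line metric and the new IMP/FROM_IMP report columns. A standalone sketch of the TIME_FROM_LAST_IMPORTANT bookkeeping, using a hypothetical Line class in place of the analyzer's line items:

    TIME_FROM_LAST_IMPORTANT = object()

    class Line(object):
        def __init__(self, start_time, end_time, important=False):
            self.start_time = start_time
            self.end_time = end_time
            self.important = important
            self.times = {}

    def annotate(lines):
        # One pass in stack order: each line records how long after the most
        # recently finished "important" line it started. Until one is seen,
        # the stack's own start time is the baseline.
        last_important = lines[0].start_time
        for line in lines:
            line.times[TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
            if line.important:
                last_important = line.end_time

    stack = [Line(0.0, 9.0), Line(1.0, 4.0, important=True), Line(5.0, 8.0)]
    annotate(stack)  # the third line gets 5.0 - 4.0 = 1.0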
diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
new file mode 100644
index 0000000000..c219fa888a
--- /dev/null
+++ b/tools/run_tests/perf_html_report.template
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html lang="en">
+<head><title>Performance Test Result</title></head>
+<body>
+ <h2>Performance Test Result</h2>
+ <table style="width:50%" border="1">
+ <% sorted_test_cases = sorted(resultset.keys()) %>
+ % for test_case in sorted_test_cases:
+ <tr><td bgcolor="#00BFFF" style="width:30%"><b>${test_case}</b></td>
+ <% result = resultset[test_case] %>
+ <td>
+ % for k, v in result.iteritems():
+ ${k}: ${v}<br>
+ % endfor
+ </td>
+ </tr>
+ % endfor
+ </table>
+
+</body>
+</html>
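The template takes a single argument, resultset: a dict mapping test-case names to flat metric dicts. Note result.iteritems(), so this is a Python 2 template like the rest of the tooling. A minimal sketch of rendering it by hand with made-up numbers (render_perf_html_report below is the real call site):

    from mako.template import Template

    # Hypothetical data shaped like what render_perf_html_report collects.
    resultset = {
        'cpp_async_unary_ping_pong': {'latency50': 85.21, 'latency99': 134.67},
        'cpp_async_streaming_qps': {'qps': 123456.78},
    }
    template = Template(filename='tools/run_tests/perf_html_report.template')
    print template.render(resultset=resultset)  # Python 2, like the template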
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index fbccf3bdca..2a99499843 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -118,6 +118,8 @@ def _flatten_result_inplace(scenario_result):
for stats in scenario_result['clientStats']:
stats['latencies'] = json.dumps(stats['latencies'])
scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+ scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
+ scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
def _populate_metadata_inplace(scenario_result):
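clientSuccess and serverSuccess are lists with one entry per client or server process, and the schema change below stores them as NULLABLE STRING columns, so they get the same stringify-before-upload treatment as latencies and serverCores. The flatten step, sketched on invented data:

    import json

    # Invented scenario result holding only the fields flattened above.
    scenario_result = {
        'serverCores': [8, 8],
        'clientSuccess': [True, True],
        'serverSuccess': [True],
    }
    for field in ('serverCores', 'clientSuccess', 'serverSuccess'):
        # BigQuery sees a plain STRING column, so each nested list is
        # collapsed in place into its JSON text, e.g. "[true, true]".
        scenario_result[field] = json.dumps(scenario_result[field])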
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index 0325414757..6bec21df39 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -198,5 +198,15 @@
"mode": "NULLABLE"
}
]
+ },
+ {
+ "name": "clientSuccess",
+ "type": "STRING",
+ "mode": "NULLABLE"
+ },
+ {
+ "name": "serverSuccess",
+ "type": "STRING",
+ "mode": "NULLABLE"
}
]
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 5648a694cd..7188d3dcd7 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -37,6 +37,8 @@ try:
from mako import exceptions
except (ImportError):
pass # Mako not installed but it is ok.
+import glob
+import json
import os
import string
import xml.etree.cElementTree as ET
@@ -120,3 +122,38 @@ def render_interop_html_report(
print(exceptions.text_error_template().render())
raise
+
+def render_perf_html_report(report_dir):
+ """Generate a simple HTML report for the perf tests."""
+ template_file = 'tools/run_tests/perf_html_report.template'
+ try:
+ mytemplate = Template(filename=template_file, format_exceptions=True)
+ except NameError:
+ print('Mako template is not installed. Skipping HTML report generation.')
+ return
+ except IOError as e:
+ print('Failed to find the template %s: %s' % (template_file, e))
+ return
+
+ resultset = {}
+ for result_file in glob.glob(os.path.join(report_dir, '*.json')):
+ with open(result_file, 'r') as f:
+ scenario_result = json.loads(f.read())
+ test_case = scenario_result['scenario']['name']
+ if 'ping_pong' in test_case:
+ latency50 = round(scenario_result['summary']['latency50'], 2)
+ latency99 = round(scenario_result['summary']['latency99'], 2)
+ summary = {'latency50': latency50, 'latency99': latency99}
+ else:
+ summary = {'qps': round(scenario_result['summary']['qps'], 2)}
+ resultset[test_case] = summary
+
+ args = {'resultset': resultset}
+
+ html_file_path = os.path.join(report_dir, 'index.html')
+ try:
+ with open(html_file_path, 'w') as output_file:
+ mytemplate.render_context(Context(output_file, **args))
+ except:
+ print(exceptions.text_error_template().render())
+ raise
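One detail worth noticing in render_perf_html_report: the except NameError branch works because the Mako import at the top of the file is wrapped in try/except, so a missing Mako leaves Template undefined instead of aborting the module. The same degrade-gracefully pattern in isolation (a sketch, not the gRPC code):

    try:
        from mako.template import Template
    except ImportError:
        pass  # Mako not installed, which is fine until a report is requested.

    def render_report(template_file):
        try:
            template = Template(filename=template_file)
        except NameError:
            # Template was never imported; skip reporting rather than crash
            # the whole test run over an optional dependency.
            print('Mako is not installed. Skipping HTML report generation.')
            return None
        return template.render()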
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index f9065c5bfd..2e5a2f7721 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -288,7 +288,7 @@ class RubyLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_ADVANCED + _SKIP_COMPRESSION
+ return _SKIP_ADVANCED + _SKIP_SERVER_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5fdf7a407d..5ff9696808 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,6 +40,7 @@ import multiprocessing
import os
import pipes
import re
+import report_utils
import subprocess
import sys
import tempfile
@@ -54,6 +55,7 @@ os.chdir(_ROOT)
_REMOTE_HOST_USERNAME = 'jenkins'
+_REPORT_DIR = 'perf_reports'
class QpsWorkerJob:
@@ -103,7 +105,11 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
cmd += 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
- cmd += '--scenario_result_file=scenario_result.json'
+ if not os.path.isdir(_REPORT_DIR):
+ os.makedirs(_REPORT_DIR)
+ report_path = os.path.join(_REPORT_DIR,
+ '%s-scenario_result.json' % scenario_json['name'])
+ cmd += '--scenario_result_file=%s' % report_path
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
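Switching from a single shared scenario_result.json to one file per scenario is what makes the HTML report work: render_perf_html_report's glob over *.json (above) would otherwise only ever see the last scenario run. The resulting path shape, sketched with a hypothetical helper:

    import os

    _REPORT_DIR = 'perf_reports'

    def scenario_result_path(scenario_name):
        # e.g. perf_reports/cpp_async_unary_ping_pong-scenario_result.json
        if not os.path.isdir(_REPORT_DIR):
            os.makedirs(_REPORT_DIR)
        return os.path.join(_REPORT_DIR, '%s-scenario_result.json' % scenario_name)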
@@ -436,6 +442,9 @@ try:
jobset.message('START', 'Running scenarios.', do_newline=True)
num_failures, _ = jobset.run(
scenarios, newline_on_success=True, maxjobs=1)
+
+ report_utils.render_perf_html_report(_REPORT_DIR)
+
if num_failures == 0:
jobset.message('SUCCESS',
'All scenarios finished successfully.',