Diffstat (limited to 'tools')
-rw-r--r--  tools/run_tests/perf_html_report.template  | 21
-rw-r--r--  tools/run_tests/report_utils.py            | 37
-rwxr-xr-x  tools/run_tests/run_performance_tests.py   | 11
-rw-r--r--  tools/run_tests/sources_and_headers.json   |  9
4 files changed, 8 insertions(+), 70 deletions(-)
diff --git a/tools/run_tests/perf_html_report.template b/tools/run_tests/perf_html_report.template
deleted file mode 100644
index c219fa888a..0000000000
--- a/tools/run_tests/perf_html_report.template
+++ /dev/null
@@ -1,21 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-<head><title>Performance Test Result</title></head>
-<body>
- <h2>Performance Test Result</h2>
- <table style="width:50%" border="1">
- <% sorted_test_cases = sorted(resultset.keys()) %>
- % for test_case in sorted_test_cases:
- <tr><td bgcolor="#00BFFF" style="width:30%"><b>${test_case}</b></td>
- <% result = resultset[test_case] %>
- <td>
- % for k, v in result.iteritems():
- ${k}: ${v}<br>
- % endfor
- </td>
- </tr>
- % endfor
- </table>
-
-</body>
-</html>
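The deleted template above renders a resultset dict of per-test-case summaries via Mako; note that iteritems() ties it to Python 2. Below is a minimal standalone sketch of the same rendering pattern, with sample data and Python 3's items() substituted, assuming Mako is installed:

# Minimal sketch of the rendering pattern used by the deleted template.
# The resultset here is sample data, not real results; items() replaces
# the Python 2-only iteritems() from the original template.
from mako.template import Template

_TEMPLATE = """\
<table border="1">
% for test_case in sorted(resultset):
  <tr><td><b>${test_case}</b></td>
  <td>
  % for k, v in resultset[test_case].items():
    ${k}: ${v}<br>
  % endfor
  </td></tr>
% endfor
</table>
"""

resultset = {'cpp_protobuf_async_unary_ping_pong':
                 {'latency50': 123.45, 'latency99': 678.9}}
print(Template(_TEMPLATE).render(resultset=resultset))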
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 7188d3dcd7..5648a694cd 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -37,8 +37,6 @@ try:
from mako import exceptions
except (ImportError):
pass # Mako not installed but it is ok.
-import glob
-import json
import os
import string
import xml.etree.cElementTree as ET
@@ -122,38 +120,3 @@ def render_interop_html_report(
print(exceptions.text_error_template().render())
raise
-
-def render_perf_html_report(report_dir):
- """Generate a simple HTML report for the perf tests."""
- template_file = 'tools/run_tests/perf_html_report.template'
- try:
- mytemplate = Template(filename=template_file, format_exceptions=True)
- except NameError:
- print('Mako template is not installed. Skipping HTML report generation.')
- return
- except IOError as e:
- print('Failed to find the template %s: %s' % (template_file, e))
- return
-
- resultset = {}
- for result_file in glob.glob(os.path.join(report_dir, '*.json')):
- with open(result_file, 'r') as f:
- scenario_result = json.loads(f.read())
- test_case = scenario_result['scenario']['name']
- if 'ping_pong' in test_case:
- latency50 = round(scenario_result['summary']['latency50'], 2)
- latency99 = round(scenario_result['summary']['latency99'], 2)
- summary = {'latency50': latency50, 'latency99': latency99}
- else:
- summary = {'qps': round(scenario_result['summary']['qps'], 2)}
- resultset[test_case] = summary
-
- args = {'resultset': resultset}
-
- html_file_path = os.path.join(report_dir, 'index.html')
- try:
- with open(html_file_path, 'w') as output_file:
- mytemplate.render_context(Context(output_file, **args))
- except:
- print(exceptions.text_error_template().render())
- raise
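The removed render_perf_html_report helper combined two steps: collecting per-scenario JSON summaries and rendering them through the template deleted above. A standalone sketch of just the collection step, assuming the scenario_result layout the removed code reads:

# Sketch of the summary-collection loop from the removed helper: ping_pong
# scenarios report 50th/99th percentile latency, all others report QPS.
import glob
import json
import os

def collect_perf_summaries(report_dir):
    resultset = {}
    for result_file in glob.glob(os.path.join(report_dir, '*.json')):
        with open(result_file, 'r') as f:
            scenario_result = json.load(f)
        test_case = scenario_result['scenario']['name']
        summary = scenario_result['summary']
        if 'ping_pong' in test_case:
            resultset[test_case] = {'latency50': round(summary['latency50'], 2),
                                    'latency99': round(summary['latency99'], 2)}
        else:
            resultset[test_case] = {'qps': round(summary['qps'], 2)}
    return resultset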
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5ff9696808..5fdf7a407d 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,7 +40,6 @@ import multiprocessing
import os
import pipes
import re
-import report_utils
import subprocess
import sys
import tempfile
@@ -55,7 +54,6 @@ os.chdir(_ROOT)
_REMOTE_HOST_USERNAME = 'jenkins'
-_REPORT_DIR = 'perf_reports'
class QpsWorkerJob:
@@ -105,11 +103,7 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
cmd += 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
- if not os.path.isdir(_REPORT_DIR):
- os.makedirs(_REPORT_DIR)
- report_path = os.path.join(_REPORT_DIR,
- '%s-scenario_result.json' % scenario_json['name'])
- cmd += '--scenario_result_file=%s' % report_path
+ cmd += '--scenario_result_file=scenario_result.json'
if remote_host:
user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -442,9 +436,6 @@ try:
jobset.message('START', 'Running scenarios.', do_newline=True)
num_failures, _ = jobset.run(
scenarios, newline_on_success=True, maxjobs=1)
-
- report_utils.render_perf_html_report(_REPORT_DIR)
-
if num_failures == 0:
jobset.message('SUCCESS',
'All scenarios finished successfully.',
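The jobspec change above drops the per-scenario report path in favor of a fixed scenario_result.json. The surrounding command construction relies on pipes.quote to embed the scenarios JSON as a single shell word, and again when the whole command is wrapped for ssh; a small sketch of that quoting pattern, with a hypothetical scenario name and host:

# Sketch of the shell-quoting pattern used by create_scenario_jobspec above.
# The scenario name and remote host are hypothetical placeholders.
import json
import pipes  # shlex.quote on Python 3

scenario_json = {'name': 'qps_unconstrained'}
cmd = 'tools/run_tests/performance/run_qps_driver.sh '
cmd += '--scenarios_json=%s ' % pipes.quote(
    json.dumps({'scenarios': [scenario_json]}))
cmd += '--scenario_result_file=scenario_result.json'

user_at_host = 'jenkins@perf-host'
ssh_cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
    user_at_host, pipes.quote(cmd))
print(ssh_cmd)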
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index 2d8264d6c8..65959da203 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -2133,6 +2133,7 @@
"gpr_test_util",
"grpc",
"grpc++",
+ "grpc++_reflection",
"grpc++_test_config",
"grpc++_test_util",
"grpc_cli_libs",
@@ -4460,11 +4461,13 @@
{
"deps": [
"grpc++",
+ "grpc++_reflection",
"grpc_plugin_support"
],
"headers": [
"test/cpp/util/cli_call.h",
- "test/cpp/util/proto_file_parser.h"
+ "test/cpp/util/proto_file_parser.h",
+ "test/cpp/util/proto_reflection_descriptor_database.h"
],
"language": "c++",
"name": "grpc_cli_libs",
@@ -4472,7 +4475,9 @@
"test/cpp/util/cli_call.cc",
"test/cpp/util/cli_call.h",
"test/cpp/util/proto_file_parser.cc",
- "test/cpp/util/proto_file_parser.h"
+ "test/cpp/util/proto_file_parser.h",
+ "test/cpp/util/proto_reflection_descriptor_database.cc",
+ "test/cpp/util/proto_reflection_descriptor_database.h"
],
"third_party": false,
"type": "lib"