author     kpayson64 <kpayson@google.com>           2016-07-18 10:31:43 -0700
committer  GitHub <noreply@github.com>              2016-07-18 10:31:43 -0700
commit     3b043935119666397996829eb4aae031bc1ceadf (patch)
tree       3a0a94e2fcd7f75380d034ea3251c429698f1866 /tools/run_tests/run_performance_tests.py
parent     bbf2fcd29b4e768e8cbe264e7b017c8922730272 (diff)
parent     7d4373bc6e63be216e8895bcb494e6bda88372a0 (diff)
Merge pull request #6942 from adelez/perf_html_report
Generate html report for perf tests
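
In short, each scenario's driver output is now written to its own JSON file under a new perf_reports/ directory, and an HTML report is rendered from that directory once all scenarios have run. A worked example of the resulting path, using a hypothetical scenario name chosen purely for illustration:

import os

_REPORT_DIR = 'perf_reports'
scenario_name = 'cpp_async_unary_qps'  # hypothetical scenario name, for illustration only
report_path = os.path.join(_REPORT_DIR, '%s-scenario_result.json' % scenario_name)
# report_path == 'perf_reports/cpp_async_unary_qps-scenario_result.json'
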
Diffstat (limited to 'tools/run_tests/run_performance_tests.py')
-rwxr-xr-x  tools/run_tests/run_performance_tests.py | 11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 5fdf7a407d..5ff9696808 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -40,6 +40,7 @@ import multiprocessing
 import os
 import pipes
 import re
+import report_utils
 import subprocess
 import sys
 import tempfile
@@ -54,6 +55,7 @@ os.chdir(_ROOT)
 _REMOTE_HOST_USERNAME = 'jenkins'
+_REPORT_DIR = 'perf_reports'
 class QpsWorkerJob:
@@ -103,7 +105,11 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
   cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json'
+  if not os.path.isdir(_REPORT_DIR):
+    os.makedirs(_REPORT_DIR)
+  report_path = os.path.join(_REPORT_DIR,
+                             '%s-scenario_result.json' % scenario_json['name'])
+  cmd += '--scenario_result_file=%s' % report_path
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
@@ -436,6 +442,9 @@ try:
   jobset.message('START', 'Running scenarios.', do_newline=True)
   num_failures, _ = jobset.run(
       scenarios, newline_on_success=True, maxjobs=1)
+
+  report_utils.render_perf_html_report(_REPORT_DIR)
+
   if num_failures == 0:
     jobset.message('SUCCESS',
                    'All scenarios finished successfully.',
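
For context, report_utils.render_perf_html_report(_REPORT_DIR) aggregates the per-scenario *-scenario_result.json files written above into an HTML page. Its actual implementation lives in tools/run_tests/report_utils.py and is not shown in this diff; the following is only a minimal sketch of such a function, assuming it simply walks the report directory and dumps each result into a table (the real helper may use templating and richer formatting):

import glob
import json
import os


def render_perf_html_report(report_dir):
  """Illustrative sketch: collect per-scenario JSON results into one HTML page."""
  rows = []
  # Pick up every per-scenario result file written by the qps driver.
  for path in sorted(glob.glob(os.path.join(report_dir, '*-scenario_result.json'))):
    with open(path) as f:
      result = json.load(f)
    scenario = os.path.basename(path).replace('-scenario_result.json', '')
    rows.append('<tr><td>%s</td><td><pre>%s</pre></td></tr>' %
                (scenario, json.dumps(result, indent=2)))
  html = ('<html><body><h1>Performance scenario results</h1>'
          '<table border="1">%s</table></body></html>' % ''.join(rows))
  with open(os.path.join(report_dir, 'index.html'), 'w') as out:
    out.write(html)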