author    Jan Tattermusch <jtattermusch@users.noreply.github.com>  2016-05-10 20:33:02 -0700
committer Jan Tattermusch <jtattermusch@users.noreply.github.com>  2016-05-10 20:33:02 -0700
commit  7363674e11e0dfe0f922a0480a10250db72f6fd9 (patch)
tree    2ae9e2194381d00df04212f747b1581b2d80b1ac /tools/run_tests/run_performance_tests.py
parent  8b382748fd9ce6f5d898e1acf15c97851c4fbd8b (diff)
parent  6de6971bdca299edf4632c978310a8ca08daa672 (diff)
Merge pull request #6477 from jtattermusch/benchmarking_add_netperf
Add support for running netperf as part of benchmarks.
Diffstat (limited to 'tools/run_tests/run_performance_tests.py')
-rwxr-xr-x  tools/run_tests/run_performance_tests.py | 47 +++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 45 insertions(+), 2 deletions(-)
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index b1f5889e54..674d864539 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -131,6 +131,25 @@ def create_quit_jobspec(workers, remote_host=None):
       verbose_success=True)
 
 
+def create_netperf_jobspec(server_host='localhost', client_host=None,
+                           bq_result_table=None):
+  """Runs netperf benchmark."""
+  cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
+  if bq_result_table:
+    cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+  cmd += 'tools/run_tests/performance/run_netperf.sh'
+  if client_host:
+    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
+    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
+
+  return jobset.JobSpec(
+      cmdline=[cmd],
+      shortname='netperf',
+      timeout_seconds=60,
+      shell=True,
+      verbose_success=True)
+
+
 def archive_repo(languages):
   """Archives local version of repo including submodules."""
   cmdline=['tar', '-cf', '../grpc.tar', '../grpc/']
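
A note on the quoting in the ssh branch above: the double-quoted "cd ... && " and the output of pipes.quote(cmd) sit adjacent with no space between them, so the remote shell concatenates them into a single command string rather than two arguments. A minimal sketch of the command this produces (the user and host names here are placeholders, not values from this commit; pipes.quote is spelled shlex.quote on modern Python):

    import pipes  # Python 2 stdlib; use shlex.quote on Python 3

    # Hypothetical values for illustration only.
    cmd = 'NETPERF_SERVER_HOST="10.240.0.2" tools/run_tests/performance/run_netperf.sh'
    user_at_host = 'jenkins@perf-client-host'

    # Adjacent quoted strings concatenate in the shell, so the quoted
    # "cd ..." prefix and the quoted benchmark command become a single
    # argument that the remote shell re-parses and runs.
    ssh_cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
        user_at_host, pipes.quote(cmd))
    print(ssh_cmd)
    # ssh jenkins@perf-client-host "cd ~/performance_workspace/grpc/ && "'NETPERF_SERVER_HOST="10.240.0.2" tools/run_tests/performance/run_netperf.sh'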
@@ -244,12 +263,28 @@ def start_qpsworkers(languages, worker_hosts):
 
 
 def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
-                     category='all', bq_result_table=None):
+                     category='all', bq_result_table=None,
+                     netperf=False, netperf_hosts=[]):
   """Create jobspecs for scenarios to run."""
   all_workers = [worker
                  for workers in workers_by_lang.values()
                  for worker in workers]
   scenarios = []
+
+  if netperf:
+    if not netperf_hosts:
+      netperf_server='localhost'
+      netperf_client=None
+    elif len(netperf_hosts) == 1:
+      netperf_server=netperf_hosts[0]
+      netperf_client=netperf_hosts[0]
+    else:
+      netperf_server=netperf_hosts[0]
+      netperf_client=netperf_hosts[1]
+    scenarios.append(create_netperf_jobspec(server_host=netperf_server,
+                                            client_host=netperf_client,
+                                            bq_result_table=bq_result_table))
+
   for language in languages:
     for scenario_json in language.scenarios():
       if re.search(args.regex, scenario_json['name']):
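
The branch ladder above maps the list of remote worker hosts onto a (server, client) pair: no hosts means a loopback run on the driver machine, a single host plays both roles, and with two or more hosts the first becomes the netperf server and the second the netperf client. The same logic as a small standalone helper (resolve_netperf_hosts is a hypothetical name, not part of the script):

    def resolve_netperf_hosts(netperf_hosts):
        """Mirrors the branch logic above; hypothetical helper.

        Returns (server_host, client_host). A client_host of None means
        the netperf client runs locally, next to the driver.
        """
        if not netperf_hosts:
            return 'localhost', None                   # loopback benchmark
        if len(netperf_hosts) == 1:
            return netperf_hosts[0], netperf_hosts[0]  # one box, both roles
        return netperf_hosts[0], netperf_hosts[1]      # extra hosts are ignored

    assert resolve_netperf_hosts([]) == ('localhost', None)
    assert resolve_netperf_hosts(['h1']) == ('h1', 'h1')
    assert resolve_netperf_hosts(['h1', 'h2', 'h3']) == ('h1', 'h2')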
@@ -316,6 +351,11 @@ argp.add_argument('--category',
                   choices=['smoketest','all'],
                   default='smoketest',
                   help='Select a category of tests to run. Smoketest runs by default.')
+argp.add_argument('--netperf',
+                  default=False,
+                  action='store_const',
+                  const=True,
+                  help='Run netperf benchmark as one of the scenarios.')
 
 
 args = argp.parse_args()
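
An aside on the flag definition: default=False with action='store_const' and const=True behaves the same as argparse's shorthand action='store_true', so --netperf is an ordinary boolean switch. A self-contained demonstration:

    import argparse

    argp = argparse.ArgumentParser()
    argp.add_argument('--netperf', default=False,
                      action='store_const', const=True)

    assert argp.parse_args([]).netperf is False            # flag absent
    assert argp.parse_args(['--netperf']).netperf is True  # flag present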
@@ -360,7 +400,10 @@ try:
                                remote_host=args.remote_driver_host,
                                regex=args.regex,
                                category=args.category,
-                               bq_result_table=args.bq_result_table)
+                               bq_result_table=args.bq_result_table,
+                               netperf=args.netperf,
+                               netperf_hosts=args.remote_worker_host)
+
 
   if not scenarios:
     raise Exception('No scenarios to run')
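
Putting it together, a sketch of driving the script with the new flag (the host names and BigQuery table are placeholders; this assumes --remote_worker_host accepts multiple hosts, consistent with it being passed through as the netperf_hosts list above):

    import subprocess

    # Run from the grpc repository root. With two remote workers, the
    # first becomes the netperf server and the second the netperf client.
    subprocess.check_call([
        'tools/run_tests/run_performance_tests.py',
        '--netperf',
        '--remote_worker_host', 'perf-host-1', 'perf-host-2',
        '--bq_result_table', 'my_project.benchmarks.netperf',  # optional upload
    ])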