Diffstat (limited to 'tools/run_tests')
-rw-r--r--  tools/run_tests/generated/tests.json    | 26
-rwxr-xr-x  tools/run_tests/python_utils/jobset.py  |  7
-rwxr-xr-x  tools/run_tests/run_interop_tests.py    |  3
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py   | 45
-rwxr-xr-x  tools/run_tests/run_tests.py            |  4
5 files changed, 70 insertions, 15 deletions
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index eca65ac533..ab7938d0ca 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -3005,7 +3005,9 @@
     ]
   },
   {
-    "args": [],
+    "args": [
+      "--generated_file_path=gens/src/proto/grpc/testing/compiler_test.grpc.pb.h"
+    ],
     "ci_platforms": [
       "linux",
       "mac",
@@ -80538,6 +80540,28 @@
   },
   {
     "args": [
+      "test/core/end2end/fuzzers/api_fuzzer_corpus/clusterfuzz-testcase-5834320218423296"
+    ],
+    "ci_platforms": [
+      "linux"
+    ],
+    "cpu_cost": 0.1,
+    "exclude_configs": [
+      "tsan"
+    ],
+    "exclude_iomgrs": [
+      "uv"
+    ],
+    "flaky": false,
+    "language": "c",
+    "name": "api_fuzzer_one_entry",
+    "platforms": [
+      "linux"
+    ],
+    "uses_polling": false
+  },
+  {
+    "args": [
       "test/core/end2end/fuzzers/api_fuzzer_corpus/crash-0597bbdd657fa4ed14443994c9147a1a7bbc205f"
     ],
     "ci_platforms": [
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 7b2c62d1a2..f3047431e2 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -31,6 +31,7 @@
 from __future__ import print_function

+import logging
 import multiprocessing
 import os
 import platform
@@ -128,6 +129,8 @@ _TAG_COLOR = {
   'SKIPPED': 'cyan'
 }

+_FORMAT = '%(asctime)-15s %(message)s'
+logging.basicConfig(level=logging.INFO, format=_FORMAT)

 def message(tag, msg, explanatory_text=None, do_newline=False):
   if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
@@ -137,8 +140,8 @@ def message(tag, msg, explanatory_text=None, do_newline=False):
   try:
     if platform_string() == 'windows' or not sys.stdout.isatty():
       if explanatory_text:
-        print(explanatory_text)
-      print('%s: %s' % (tag, msg))
+        logging.info(explanatory_text)
+      logging.info('%s: %s', tag, msg)
     else:
       sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
           _BEGINNING_OF_LINE,
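
With this patch, non-tty output goes through the logging module and picks up a timestamp from the '%(asctime)-15s %(message)s' format. A small standalone reproduction of the configured behavior (tag and message values invented):

import logging

_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)

# Same call shape as the patched non-tty branch of message().
logging.info('%s: %s', 'SUCCESS', 'bm_fullstack [time=1.2s]')
# prints something like: 2017-02-28 14:03:11,042 SUCCESS: bm_fullstack [time=1.2s]

Passing tag and msg as arguments instead of pre-formatting the string also lets logging skip the interpolation entirely when the level is filtered out.
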
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index eccfe41dc0..d29bfa9b0c 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -474,8 +474,7 @@ _HTTP2_TEST_CASES = ['tls', 'framing']
 _HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
                                'goaway', 'ping', 'max_streams']

-# TODO: Add python once the tests are fixed.
-_LANGUAGES_FOR_HTTP2_BADSERVER_TESTS = ['java', 'go']
+_LANGUAGES_FOR_HTTP2_BADSERVER_TESTS = ['java', 'go', 'python']

 DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 4307906a7e..2da52e5d49 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -170,20 +170,25 @@ def collect_perf(bm_name, args):
   jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
   jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())

-def collect_summary(bm_name, args):
-  heading('Summary: %s' % bm_name)
+def run_summary(bm_name, cfg, base_json_name):
   subprocess.check_call(
       ['make', bm_name,
-       'CONFIG=counters', '-j', '%d' % multiprocessing.cpu_count()])
-  cmd = ['bins/counters/%s' % bm_name,
-         '--benchmark_out=out.json',
+       'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
+  cmd = ['bins/%s/%s' % (cfg, bm_name),
+         '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
          '--benchmark_out_format=json']
   if args.summary_time is not None:
     cmd += ['--benchmark_min_time=%d' % args.summary_time]
-  text(subprocess.check_output(cmd))
+  return subprocess.check_output(cmd)
+
+def collect_summary(bm_name, args):
+  heading('Summary: %s [no counters]' % bm_name)
+  text(run_summary(bm_name, 'opt', 'out'))
+  heading('Summary: %s [with counters]' % bm_name)
+  text(run_summary(bm_name, 'counters', 'out'))
   if args.bigquery_upload:
     with open('out.csv', 'w') as f:
-      f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.json']))
+      f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', 'out.counters.json', 'out.opt.json']))
     subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', 'out.csv'])

 collectors = {
@@ -195,7 +200,7 @@ collectors = {
 argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
 argp.add_argument('-c', '--collect',
                   choices=sorted(collectors.keys()),
-                  nargs='+',
+                  nargs='*',
                   default=sorted(collectors.keys()),
                   help='Which collectors should be run against each benchmark')
 argp.add_argument('-b', '--benchmarks',
@@ -209,6 +214,10 @@ argp.add_argument('-b', '--benchmarks',
                   nargs='+',
                   type=str,
                   help='Which microbenchmarks should be run')
+argp.add_argument('--diff_perf',
+                  default=None,
+                  type=str,
+                  help='Diff microbenchmarks against this git revision')
 argp.add_argument('--bigquery_upload',
                   default=False,
                   action='store_const',
@@ -223,6 +232,26 @@ args = argp.parse_args()
 for bm_name in args.benchmarks:
   for collect in args.collect:
     collectors[collect](bm_name, args)
+if args.diff_perf:
+  for bm_name in args.benchmarks:
+    run_summary(bm_name, 'opt', '%s.new' % bm_name)
+  where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+  subprocess.check_call(['git', 'checkout', args.diff_perf])
+  comparables = []
+  subprocess.check_call(['make', 'clean'])
+  try:
+    for bm_name in args.benchmarks:
+      try:
+        run_summary(bm_name, 'opt', '%s.old' % bm_name)
+        comparables.append(bm_name)
+      except subprocess.CalledProcessError:
+        pass
+  finally:
+    subprocess.check_call(['git', 'checkout', where_am_i])
+  for bm_name in comparables:
+    subprocess.check_call(['third_party/benchmark/tools/compare_bench.py',
+                           '%s.new.opt.json' % bm_name,
+                           '%s.old.opt.json' % bm_name])

 index_html += "</body>\n</html>\n"
 with open('reports/index.html', 'w') as f:
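
Because --collect now accepts zero values (nargs='*' instead of '+'), a comparison run can skip every collector and only build, benchmark, and diff the two revisions. Assuming a benchmark target named bm_fullstack exists, an invocation might look like:

  tools/run_tests/run_microbenchmark.py -c -b bm_fullstack --diff_perf origin/master

This benchmarks the working tree into bm_fullstack.new.opt.json, checks out origin/master and benchmarks it into bm_fullstack.old.opt.json, restores the original branch in the finally block even if a build fails, and hands each surviving pair of JSON files to third_party/benchmark/tools/compare_bench.py.
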
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 9e9af59c25..9741624c4f 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -307,9 +307,9 @@ class CLanguage(object):
           assert base is not None
           assert line[1] == ' '
           test = base + line.strip()
-          cmdline = [binary] + ['--gtest_filter=%s' % test]
+          cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
           out.append(self.config.job_spec(cmdline,
-                                          shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext),
+                                          shortname='%s %s' % (' '.join(cmdline), shortname_ext),
                                           cpu_cost=cpu_cost,
                                           timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
                                           environ=env))
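
To make the run_tests.py change concrete: for a gtest binary whose tests.json entry carries the --generated_file_path argument added above, the command line and shortname would now come out as below (binary and test-case names are hypothetical; the args value is the one from the tests.json diff):

# Hypothetical values illustrating the construction above.
binary = 'bins/opt/codegen_test_full'
test = 'CodegenTest.GeneratesHeader'
args = ['--generated_file_path=gens/src/proto/grpc/testing/compiler_test.grpc.pb.h']

cmdline = [binary, '--gtest_filter=%s' % test] + args
# The shortname used to drop the extra args; now it is the full command
# line, so per-target flags are visible in test reports.
shortname = ' '.join(cmdline)
print(shortname)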