Diffstat (limited to 'tools/run_tests')
-rw-r--r--  tools/run_tests/artifacts/build_artifact_node.bat       |   2
-rwxr-xr-x  tools/run_tests/artifacts/build_artifact_node.sh        |   2
-rw-r--r--  tools/run_tests/generated/sources_and_headers.json      |  45
-rwxr-xr-x  tools/run_tests/performance/bq_upload_result.py         |   8
-rw-r--r--  tools/run_tests/performance/scenario_result_schema.json |  22
-rwxr-xr-x  tools/run_tests/run_interop_tests.py                    | 136
-rwxr-xr-x  tools/run_tests/run_performance_tests.py                |  20
7 files changed, 203 insertions(+), 32 deletions(-)
diff --git a/tools/run_tests/artifacts/build_artifact_node.bat b/tools/run_tests/artifacts/build_artifact_node.bat
index 2e0ecd21d0..59644c8896 100644
--- a/tools/run_tests/artifacts/build_artifact_node.bat
+++ b/tools/run_tests/artifacts/build_artifact_node.bat
@@ -27,7 +27,7 @@
 @rem (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 @rem OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-set node_versions=0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0
+set node_versions=1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0
 
 set PATH=%PATH%;C:\Program Files\nodejs\;%APPDATA%\npm
 
diff --git a/tools/run_tests/artifacts/build_artifact_node.sh b/tools/run_tests/artifacts/build_artifact_node.sh
index 1066ebde19..1deb33ba86 100755
--- a/tools/run_tests/artifacts/build_artifact_node.sh
+++ b/tools/run_tests/artifacts/build_artifact_node.sh
@@ -42,7 +42,7 @@ mkdir -p artifacts
 
 npm update
 
-node_versions=( 0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0 )
+node_versions=( 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0 )
 
 for version in ${node_versions[@]}
 do
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 8849dcc600..278915e0f0 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -2774,6 +2774,23 @@
   },
   {
     "deps": [
+      "grpc",
+      "grpc++",
+      "grpc++_test_config",
+      "grpc++_test_util",
+      "grpc_test_util",
+      "http2_client_main"
+    ],
+    "headers": [],
+    "is_filegroup": false,
+    "language": "c++",
+    "name": "http2_client",
+    "src": [],
+    "third_party": false,
+    "type": "target"
+  },
+  {
+    "deps": [
       "gpr",
       "gpr_test_util",
       "grpc",
@@ -5390,6 +5407,33 @@
   },
   {
     "deps": [
+      "grpc",
+      "grpc++",
+      "grpc++_test_config",
+      "grpc++_test_util",
+      "grpc_test_util"
+    ],
+    "headers": [
+      "src/proto/grpc/testing/empty.grpc.pb.h",
+      "src/proto/grpc/testing/empty.pb.h",
+      "src/proto/grpc/testing/messages.grpc.pb.h",
+      "src/proto/grpc/testing/messages.pb.h",
+      "src/proto/grpc/testing/test.grpc.pb.h",
+      "src/proto/grpc/testing/test.pb.h",
+      "test/cpp/interop/http2_client.h"
+    ],
+    "is_filegroup": false,
+    "language": "c++",
+    "name": "http2_client_main",
+    "src": [
+      "test/cpp/interop/http2_client.cc",
+      "test/cpp/interop/http2_client.h"
+    ],
+    "third_party": false,
+    "type": "lib"
+  },
+  {
+    "deps": [
       "gpr",
       "grpc",
       "grpc++",
@@ -5447,6 +5491,7 @@
       "gpr",
       "grpc",
       "grpc++",
+      "grpc++_test_util",
       "grpc_test_util"
     ],
     "headers": [
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index ddcf053ae5..89d2a9b320 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -115,9 +115,11 @@ def _flatten_result_inplace(scenario_result):
   scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
   scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
   scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+  scenario_result['serverCpuStats'] = []
   for stats in scenario_result['serverStats']:
-    stats.pop('totalCpuTime', None)
-    stats.pop('idleCpuTime', None)
+    scenario_result['serverCpuStats'].append(dict())
+    scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop('totalCpuTime', None)
+    scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop('idleCpuTime', None)
   for stats in scenario_result['clientStats']:
     stats['latencies'] = json.dumps(stats['latencies'])
     stats.pop('requestResults', None)
@@ -125,7 +127,7 @@ def _flatten_result_inplace(scenario_result):
   scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
   scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
   scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
-  scenario_result['summary'].pop('serverCpuUsage', None)
+  scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)
   scenario_result['summary'].pop('successfulRequestsPerSecond', None)
   scenario_result['summary'].pop('failedRequestsPerSecond', None)
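Note: the bq_upload_result.py change inverts the old behavior. Instead of discarding the per-snapshot CPU counters, it collects them into a new repeated serverCpuStats field, and promotes serverCpuUsage out of the flattened summary, so both survive the BigQuery upload. A minimal standalone sketch of the transformation; the input dict below is hypothetical and only mimics the shape the script consumes, with illustrative values:

# Hypothetical scenario_result fragment (shape only; values illustrative).
scenario_result = {
    'serverStats': [
        {'timeElapsed': 10.0, 'totalCpuTime': 400, 'idleCpuTime': 100},
        {'timeElapsed': 20.0, 'totalCpuTime': 800, 'idleCpuTime': 150},
    ],
    'summary': {'qps': 12345.0, 'serverCpuUsage': 62.5},
}

# Collect the CPU counters into the new top-level repeated record instead
# of dropping them, mirroring the diff above.
scenario_result['serverCpuStats'] = []
for stats in scenario_result['serverStats']:
    scenario_result['serverCpuStats'].append({
        'totalCpuTime': stats.pop('totalCpuTime', None),
        'idleCpuTime': stats.pop('idleCpuTime', None),
    })

# Promote serverCpuUsage from the summary to its own top-level column.
scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)

assert scenario_result['serverCpuStats'][0] == {'totalCpuTime': 400, 'idleCpuTime': 100}
assert 'totalCpuTime' not in scenario_result['serverStats'][0]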
diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json
index 3285f212d7..8ec41c377c 100644
--- a/tools/run_tests/performance/scenario_result_schema.json
+++ b/tools/run_tests/performance/scenario_result_schema.json
@@ -213,5 +213,27 @@
     "name": "requestResults",
     "type": "STRING",
     "mode": "NULLABLE"
+  },
+  {
+    "name": "serverCpuStats",
+    "type": "RECORD",
+    "mode": "REPEATED",
+    "fields": [
+      {
+        "name": "totalCpuTime",
+        "type": "INTEGER",
+        "mode": "NULLABLE"
+      },
+      {
+        "name": "idleCpuTime",
+        "type": "INTEGER",
+        "mode": "NULLABLE"
+      }
+    ]
+  },
+  {
+    "name": "serverCpuUsage",
+    "type": "FLOAT",
+    "mode": "NULLABLE"
   }
 ]
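Note: the schema stores raw counters rather than only a precomputed ratio, so consumers can derive utilization after the fact. A sketch of one plausible derivation, assuming totalCpuTime and idleCpuTime are cumulative counters sampled at the start and end of a run (an assumption; the diff itself does not define their semantics):

def server_cpu_usage_percent(first, last):
    """Derive percent CPU utilization from two cumulative counter samples."""
    total = last['totalCpuTime'] - first['totalCpuTime']
    idle = last['idleCpuTime'] - first['idleCpuTime']
    if total <= 0:
        return None  # no CPU time elapsed between samples
    return 100.0 * (total - idle) / total

# Illustrative numbers: 400 total ticks elapsed between samples, 50 idle.
samples = [{'totalCpuTime': 400, 'idleCpuTime': 100},
           {'totalCpuTime': 800, 'idleCpuTime': 150}]
print(server_cpu_usage_percent(samples[0], samples[-1]))  # 87.5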
+ """ + def __init__(self): + self.server_cwd = None + self.safename = str(self) + + def server_cmd(self, args): + return ['python test/http2_test/http2_test_server.py'] + + def cloud_to_prod_env(self): + return {} + + def global_env(self): + return {} + + def unimplemented_test_cases(self): + return _TEST_CASES + + def unimplemented_test_cases_server(self): + return _TEST_CASES + + def __str__(self): + return 'http2' class Http2Client: """Represents the HTTP/2 Interop Test @@ -350,10 +380,10 @@ class RubyLanguage: return {} def unimplemented_test_cases(self): - return _SKIP_ADVANCED + _SKIP_SERVER_COMPRESSION + return _SKIP_SERVER_COMPRESSION def unimplemented_test_cases_server(self): - return _SKIP_ADVANCED + _SKIP_COMPRESSION + return _SKIP_COMPRESSION def __str__(self): return 'ruby' @@ -375,6 +405,11 @@ class PythonLanguage: '--args="{}"'.format(' '.join(args)) ] + def client_cmd_http2interop(self, args): + return [ 'py27/bin/python', + 'src/python/grpcio_tests/tests/http2/_negative_http2_client.py', + ] + args + def cloud_to_prod_env(self): return {} @@ -429,7 +464,10 @@ _TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong', _AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token', 'per_rpc_creds'] -_HTTP2_TEST_CASES = ["tls", "framing"] +_HTTP2_TEST_CASES = ['tls', 'framing'] + +_HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data', + 'goaway', 'ping', 'max_streams'] DOCKER_WORKDIR_ROOT = '/var/local/git/grpc' @@ -550,13 +588,25 @@ def cloud_to_prod_jobspec(language, test_case, server_host_name, def cloud_to_cloud_jobspec(language, test_case, server_name, server_host, server_port, docker_image=None): """Creates jobspec for cloud-to-cloud interop test""" - cmdline = bash_cmdline(language.client_cmd([ + interop_only_options = [ '--server_host_override=foo.test.google.fr', '--use_tls=true', '--use_test_ca=true', + ] + common_options = [ '--test_case=%s' % test_case, '--server_host=%s' % server_host, - '--server_port=%s' % server_port])) + ] + if test_case in _HTTP2_BADSERVER_TEST_CASES: + # We are running the http2_badserver_interop test. Adjust command line accordingly. + offset = sorted(_HTTP2_BADSERVER_TEST_CASES).index(test_case) + client_options = common_options + ['--server_port=%s' % + (int(server_port)+offset)] + cmdline = bash_cmdline(language.client_cmd_http2interop(client_options)) + else: + client_options = interop_only_options + common_options + ['--server_port=%s' % server_port] + cmdline = bash_cmdline(language.client_cmd(client_options)) + cwd = language.client_cwd environ = language.global_env() if docker_image: @@ -590,13 +640,30 @@ def server_jobspec(language, docker_image): cmdline = bash_cmdline( language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT])) environ = language.global_env() + if language.safename == 'http2': + # we are running the http2 interop server. Open next N ports beginning + # with the server port. These ports are used for http2 interop test + # (one test case per port). 
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index b7b742d7af..d6eed3f5bd 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -113,7 +113,7 @@ def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None,
 
 def create_scenario_jobspec(scenario_json, workers, remote_host=None,
-                            bq_result_table=None):
+                            bq_result_table=None, server_cpu_load=0):
   """Runs one scenario using QPS driver."""
   # setting QPS_WORKERS env variable here makes sure it works with SSH too.
   cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
@@ -121,7 +121,9 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
   cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json'
+  cmd += '--scenario_result_file=scenario_result.json '
+  if server_cpu_load != 0:
+    cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
 
   return jobset.JobSpec(
       cmdline=[cmd],
       shortname='qps_json_driver.%s' % scenario_json['name'],
-      timeout_seconds=3*60,
+      timeout_seconds=12*60,
       shell=True,
       verbose_success=True)
 
@@ -318,7 +320,7 @@ Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
 
 def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
                      category='all', bq_result_table=None,
-                     netperf=False, netperf_hosts=[]):
+                     netperf=False, netperf_hosts=[], server_cpu_load=0):
   """Create jobspecs for scenarios to run."""
   all_workers = [worker
                  for workers in workers_by_lang.values()
@@ -379,7 +381,8 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
               create_scenario_jobspec(scenario_json,
                                       [w.host_and_port for w in workers],
                                       remote_host=remote_host,
-                                      bq_result_table=bq_result_table),
+                                      bq_result_table=bq_result_table,
+                                      server_cpu_load=server_cpu_load),
               workers,
               scenario_json['name'])
           scenarios.append(scenario)
@@ -461,6 +464,9 @@ argp.add_argument('--netperf',
                   action='store_const',
                   const=True,
                   help='Run netperf benchmark as one of the scenarios.')
+argp.add_argument('--server_cpu_load',
+                  default=0, type=int,
+                  help='Select a targeted server cpu load to run. 0 means ignore this flag')
 argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
                   help='Name of XML report file to generate.')
 argp.add_argument('--perf_args',
@@ -490,7 +496,6 @@ argp.add_argument('--skip_generate_flamegraphs',
                         'May be useful if "perf_args" arguments do not make sense for '
                         'generating flamegraphs (e.g., "--perf_args=stat ...")'))
 
-
 args = argp.parse_args()
 
 languages = set(scenario_config.LANGUAGES[l]
@@ -540,7 +545,8 @@ scenarios = create_scenarios(languages,
                              category=args.category,
                              bq_result_table=args.bq_result_table,
                              netperf=args.netperf,
-                             netperf_hosts=args.remote_worker_host)
+                             netperf_hosts=args.remote_worker_host,
+                             server_cpu_load=args.server_cpu_load)
 
 if not scenarios:
   raise Exception('No scenarios to run')
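Note: a nonzero --server_cpu_load turns a fixed-load scenario run into a search. The extra driver flags ask qps_json_driver to vary offered_load until the measured server CPU load matches the target, which is presumably why the per-scenario timeout grows from 3 to 12 minutes (the search strategy described in the comment below is inferred from the flag names, not specified by this diff). A condensed sketch of how the new flag threads through to the driver command; names mirror the diff, the scenario name is illustrative:

import argparse

argp = argparse.ArgumentParser()
argp.add_argument('--server_cpu_load', default=0, type=int,
                  help='Targeted server cpu load; 0 disables the search.')

def build_driver_cmd(scenario_name, server_cpu_load=0):
    # Mirrors create_scenario_jobspec above, minus the SSH/BQ handling.
    cmd = 'tools/run_tests/performance/run_qps_driver.sh '
    cmd += '--scenario_result_file=scenario_result.json '
    if server_cpu_load != 0:
        # Presumably the driver sweeps offered_load from the initial value
        # in steps of the given stride until the measured server CPU load
        # is within error_tolerance of the target (inferred, see above).
        cmd += ('--search_param=offered_load --initial_search_value=1000 '
                '--targeted_cpu_load=%d --stride=500 --error_tolerance=0.01'
                % server_cpu_load)
    return cmd

args = argp.parse_args(['--server_cpu_load', '70'])
print(build_driver_cmd('cpp_generic_async_streaming_qps_unconstrained',
                       server_cpu_load=args.server_cpu_load))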