author    Alexander Polcyn <apolcyn@google.com>    2017-02-09 21:06:08 -0800
committer Alexander Polcyn <apolcyn@google.com>    2017-03-22 13:46:51 -0700
commit    50fdc8ab19ea97e67d9fb1d30ae5016cffab1f43 (patch)
tree      157d26c4e675ceac27e3bb8a7140100beb4e0876
parent    40a947ef93aeddca3b606613f628d4d09f094e77 (diff)
add http2 server test for unary calls that uses small frames and padding
-rw-r--r--  doc/http2-interop-test-descriptions.md (renamed from doc/negative-http2-interop-test-descriptions.md)  76
-rw-r--r--  test/http2_test/http2_base_server.py  35
-rw-r--r--  test/http2_test/http2_test_server.py  10
-rw-r--r--  test/http2_test/test_data_frame_padding.py  94
-rw-r--r--  tools/doxygen/Doxyfile.c++  2
-rw-r--r--  tools/doxygen/Doxyfile.c++.internal  2
-rw-r--r--  tools/doxygen/Doxyfile.core  2
-rw-r--r--  tools/doxygen/Doxyfile.core.internal  2
-rwxr-xr-x  tools/internal_ci/linux/grpc_interop_badserver_java.sh  2
-rwxr-xr-x  tools/internal_ci/linux/grpc_interop_badserver_python.sh  2
-rwxr-xr-x  tools/jenkins/run_interop.sh  2
-rw-r--r--  tools/run_tests/interop/interop_html_report.template  10
-rw-r--r--  tools/run_tests/python_utils/report_utils.py  11
-rwxr-xr-x  tools/run_tests/run_interop_tests.py  134
14 files changed, 307 insertions, 77 deletions
diff --git a/doc/negative-http2-interop-test-descriptions.md b/doc/http2-interop-test-descriptions.md
index b64fe6a024..435a8de709 100644
--- a/doc/negative-http2-interop-test-descriptions.md
+++ b/doc/http2-interop-test-descriptions.md
@@ -193,3 +193,79 @@ Server Procedure:
1. Sets MAX_CONCURRENT_STREAMS to one after the connection is made.
*The assertion that the MAX_CONCURRENT_STREAMS limit is upheld occurs in the http2 library we used.*
+
+### data_frame_padding
+
+This test verifies that the client can correctly receive padded http2 data
+frames. It also stresses the client's flow control (there is a high chance
+that the sender will deadlock if the client's flow control logic doesn't
+correctly account for padding).
+
+Client Procedure:
+(Note: this is the same procedure as in the "large_unary" gRPC interop test;
+clients should reuse their "large_unary" gRPC interop test implementations.)
+
+ 1. Client calls UnaryCall with:
+
+ ```
+ {
+ response_size: 314159
+ payload:{
+ body: 271828 bytes of zeros
+ }
+ }
+ ```
+
+Client asserts:
+* call was successful
+* response payload body is 314159 bytes in size
+* clients are free to assert that the response payload body contents are all
+ zeros, or to compare the entire response message against a golden response
+
+Server Procedure:
+ 1. Reply to the client's request with a `SimpleResponse` whose payload
+ body length is `SimpleRequest.response_size`, but send it across http2
+ data frames shaped as follows:
+ * Each http2 data frame contains a 5-byte payload and 255 bytes of padding.
+
+ * Note that the 5-byte payload and 255-byte padding are somewhat arbitrary,
+ and other numbers are also ok. With 255 bytes of padding for every 5 bytes of
+ payload carrying the actual gRPC message, the roughly 300KB response
+ multiplies into around 15 megabytes of flow-control debt, which should stress
+ the client's flow-control accounting (see the rough estimate below).
+
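+*As a rough, illustrative estimate (not part of the required procedure), and
+assuming that the padding bytes and the one-byte Pad Length field count toward
+the flow-control window just like the data itself:*
+
+```
+# Back-of-the-envelope estimate of the flow-control cost of the padded response.
+frames         = 314159 // 5 + 1   # ~62,832 DATA frames, 5 payload bytes each
+cost_per_frame = 5 + 1 + 255       # data + Pad Length byte + padding bytes
+total_cost     = frames * cost_per_frame   # roughly 15-16 MB of window consumed
+```
+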
+### no_df_padding_sanity_test
+
+This test verifies that the client can correctly receive a series of small
+data frames. Note that this test is intentionally a slight variation of
+"data_frame_padding", the only difference being that the response is sent
+without data frame padding. This test is primarily meant to prove
+correctness of the http2 server implementation and to help isolate failures
+of the "data_frame_padding" test.
+
+Client Procedure:
+(Note: this is the same procedure as in the "large_unary" gRPC interop test;
+clients should reuse their "large_unary" gRPC interop test implementations.)
+
+ 1. Client calls UnaryCall with:
+
+ ```
+ {
+ response_size: 314159
+ payload:{
+ body: 271828 bytes of zeros
+ }
+ }
+ ```
+
+Client asserts:
+* call was successful
+* response payload body is 314159 bytes in size
+* clients are free to assert that the response payload body contents are all
+ zeros, or to compare the entire response message against a golden response
+
+Server Procedure:
+ 1. Reply to the client's request with a `SimpleResponse` whose payload
+ body length is `SimpleRequest.response_size`, but send it across a series
+ of http2 data frames that each contain 5 bytes of "payload" and zero bytes
+ of "padding" (the PADDED flag on the data frames should not be set), as
+ sketched below.
diff --git a/test/http2_test/http2_base_server.py b/test/http2_test/http2_base_server.py
index 8de028ceb1..e158e9b703 100644
--- a/test/http2_test/http2_base_server.py
+++ b/test/http2_test/http2_base_server.py
@@ -39,6 +39,7 @@ import twisted.internet.protocol
_READ_CHUNK_SIZE = 16384
_GRPC_HEADER_SIZE = 5
+_MIN_SETTINGS_MAX_FRAME_SIZE = 16384
class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
def __init__(self):
@@ -121,38 +122,46 @@ class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
)
self.transport.write(self._conn.data_to_send())
- def on_window_update_default(self, event):
- # send pending data, if any
- self.default_send(event.stream_id)
+ def on_window_update_default(self, _, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
+ # try to resume sending on all active streams (update might be for connection)
+ for stream_id in self._send_remaining:
+ self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
def send_reset_stream(self):
self._conn.reset_stream(self._stream_id)
self.transport.write(self._conn.data_to_send())
- def setup_send(self, data_to_send, stream_id):
+ def setup_send(self, data_to_send, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
logging.info('Setting up data to send for stream_id: %d' % stream_id)
self._send_remaining[stream_id] = len(data_to_send)
self._send_offset = 0
self._data_to_send = data_to_send
- self.default_send(stream_id)
+ self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
- def default_send(self, stream_id):
+ def default_send(self, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
if not self._send_remaining.has_key(stream_id):
# not setup to send data yet
return
while self._send_remaining[stream_id] > 0:
lfcw = self._conn.local_flow_control_window(stream_id)
- if lfcw == 0:
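+ # Padding counts against the flow-control window: a padded frame consumes
+ # pad_length padding bytes plus one Pad Length byte on top of its data.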
+ padding_bytes = pad_length + 1 if pad_length is not None else 0
+ if lfcw - padding_bytes <= 0:
+ logging.info('Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet' % (stream_id, lfcw, padding_bytes))
break
- chunk_size = min(lfcw, _READ_CHUNK_SIZE)
+ chunk_size = min(lfcw - padding_bytes, read_chunk_size)
bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
- logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d' %
- (lfcw, self._send_offset, self._send_offset + bytes_to_send,
- stream_id))
+ logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes' %
+ (lfcw, self._send_offset, self._send_offset + bytes_to_send + padding_bytes,
+ stream_id, padding_bytes))
+ # The receiver might allow frames larger than the http2 minimum
+ # SETTINGS_MAX_FRAME_SIZE (16384), but for simplicity this test never sends
+ # more than 16384 bytes in a single frame (which is always legal).
+ if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
+ raise ValueError("overload: sending %d" % (bytes_to_send + padding_bytes))
data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send]
try:
- self._conn.send_data(stream_id, data, False)
+ self._conn.send_data(stream_id, data, end_stream=False, pad_length=pad_length)
except h2.exceptions.ProtocolError:
logging.info('Stream %d is closed' % stream_id)
break
@@ -200,5 +209,5 @@ class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
req_proto_str = recv_buffer[5:5+grpc_msg_size]
sr = messages_pb2.SimpleRequest()
sr.ParseFromString(req_proto_str)
- logging.info('Parsed request for stream %d: response_size=%s' % (stream_id, sr.response_size))
+ logging.info('Parsed simple request for stream %d' % stream_id)
return sr
diff --git a/test/http2_test/http2_test_server.py b/test/http2_test/http2_test_server.py
index 46c3e00d18..6a7849b94a 100644
--- a/test/http2_test/http2_test_server.py
+++ b/test/http2_test/http2_test_server.py
@@ -44,6 +44,7 @@ import test_ping
import test_rst_after_data
import test_rst_after_header
import test_rst_during_data
+import test_data_frame_padding
_TEST_CASE_MAPPING = {
'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader,
@@ -52,6 +53,10 @@ _TEST_CASE_MAPPING = {
'goaway': test_goaway.TestcaseGoaway,
'ping': test_ping.TestcasePing,
'max_streams': test_max_streams.TestcaseSettingsMaxStreams,
+
+ # Positive tests below:
+ 'data_frame_padding': test_data_frame_padding.TestDataFramePadding,
+ 'no_df_padding_sanity_test': test_data_frame_padding.TestDataFramePadding,
}
_exit_code = 0
@@ -73,6 +78,8 @@ class H2Factory(twisted.internet.protocol.Factory):
if self._testcase == 'goaway':
return t(self._num_streams).get_base_server()
+ elif self._testcase == 'no_df_padding_sanity_test':
+ return t(use_padding=False).get_base_server()
else:
return t().get_base_server()
@@ -81,7 +88,8 @@ def parse_arguments():
parser.add_argument('--base_port', type=int, default=8080,
help='base port to run the servers (default: 8080). One test server is '
'started on each incrementing port, beginning with base_port, in the '
- 'following order: goaway,max_streams,ping,rst_after_data,rst_after_header,'
+ 'following order: data_frame_padding,goaway,max_streams,'
+ 'no_df_padding_sanity_test,ping,rst_after_data,rst_after_header,'
'rst_during_data'
)
return parser.parse_args()
diff --git a/test/http2_test/test_data_frame_padding.py b/test/http2_test/test_data_frame_padding.py
new file mode 100644
index 0000000000..e1db28faed
--- /dev/null
+++ b/test/http2_test/test_data_frame_padding.py
@@ -0,0 +1,94 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import http2_base_server
+import logging
+import messages_pb2
+
+# Set the number of padding bytes per data frame to be very large
+# relative to the number of data bytes for each data frame sent.
+_LARGE_PADDING_LENGTH = 255
+_SMALL_READ_CHUNK_SIZE = 5
+
+class TestDataFramePadding(object):
+ """
+ In response to an incoming request, this test sends the requested response
+ payload split across many small http2 data frames, optionally adding a large
+ amount of padding to each frame. The client must deliver the complete
+ response message to the application layer.
+ """
+ def __init__(self, use_padding=True):
+ self._base_server = http2_base_server.H2ProtocolBaseServer()
+ self._base_server._handlers['DataReceived'] = self.on_data_received
+ self._base_server._handlers['WindowUpdated'] = self.on_window_update
+ self._base_server._handlers['RequestReceived'] = self.on_request_received
+
+ # _total_updates maps stream ids to total flow control updates received
+ self._total_updates = {}
+ # zero window updates so far for connection window (stream id '0')
+ self._total_updates[0] = 0
+ self._read_chunk_size = _SMALL_READ_CHUNK_SIZE
+
+ if use_padding:
+ self._pad_length = _LARGE_PADDING_LENGTH
+ else:
+ self._pad_length = None
+
+ def get_base_server(self):
+ return self._base_server
+
+ def on_data_received(self, event):
+ logging.info('on data received. Stream id: %d. Data length: %d' % (event.stream_id, len(event.data)))
+ self._base_server.on_data_received_default(event)
+ if len(event.data) == 0:
+ return
+ sr = self._base_server.parse_received_data(event.stream_id)
+ stream_bytes = ''
+ # Check if full grpc msg has been read into the recv buffer yet
+ if sr:
+ response_data = self._base_server.default_response_data(sr.response_size)
+ logging.info('Stream id: %d. total resp size: %d' % (event.stream_id, len(response_data)))
+ # Begin sending the response. Add ``self._pad_length`` padding to each
+ # data frame and split the whole message into data frames each carrying
+ # only self._read_chunk_size of data.
+ # The purpose is to have the majority of the data frame response bytes
+ # be padding bytes, since ``self._pad_length`` >> ``self._read_chunk_size``.
+ self._base_server.setup_send(response_data, event.stream_id, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)
+
+ def on_request_received(self, event):
+ self._base_server.on_request_received_default(event)
+ logging.info('on request received. Stream id: %s.' % event.stream_id)
+ self._total_updates[event.stream_id] = 0
+
+ # Log debug info and try to resume sending on all currently active streams.
+ def on_window_update(self, event):
+ logging.info('on window update. Stream id: %s. Delta: %s' % (event.stream_id, event.delta))
+ self._total_updates[event.stream_id] += event.delta
+ total = self._total_updates[event.stream_id]
+ logging.info('... - total updates for stream %d : %d' % (event.stream_id, total))
+ self._base_server.on_window_update_default(event, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)
diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++
index b5021ede15..d21b48cabf 100644
--- a/tools/doxygen/Doxyfile.c++
+++ b/tools/doxygen/Doxyfile.c++
@@ -779,11 +779,11 @@ doc/fail_fast.md \
doc/g_stands_for.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
doc/internationalization.md \
doc/interop-test-descriptions.md \
doc/load-balancing.md \
doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
doc/server-reflection.md \
doc/server_reflection_tutorial.md \
doc/server_side_auth.md \
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 758ba2402a..8d6c2838bb 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -779,11 +779,11 @@ doc/fail_fast.md \
doc/g_stands_for.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
doc/internationalization.md \
doc/interop-test-descriptions.md \
doc/load-balancing.md \
doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
doc/server-reflection.md \
doc/server_reflection_tutorial.md \
doc/server_side_auth.md \
diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core
index cd3f2af44c..b4fdcddbcf 100644
--- a/tools/doxygen/Doxyfile.core
+++ b/tools/doxygen/Doxyfile.core
@@ -778,11 +778,11 @@ doc/fail_fast.md \
doc/g_stands_for.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
doc/internationalization.md \
doc/interop-test-descriptions.md \
doc/load-balancing.md \
doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
doc/server-reflection.md \
doc/server_reflection_tutorial.md \
doc/server_side_auth.md \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index b273350082..3e679e3a2b 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -778,11 +778,11 @@ doc/fail_fast.md \
doc/g_stands_for.md \
doc/health-checking.md \
doc/http-grpc-status-mapping.md \
+doc/http2-interop-test-descriptions.md \
doc/internationalization.md \
doc/interop-test-descriptions.md \
doc/load-balancing.md \
doc/naming.md \
-doc/negative-http2-interop-test-descriptions.md \
doc/server-reflection.md \
doc/server_reflection_tutorial.md \
doc/server_side_auth.md \
diff --git a/tools/internal_ci/linux/grpc_interop_badserver_java.sh b/tools/internal_ci/linux/grpc_interop_badserver_java.sh
index 0985e657c6..c309c623e0 100755
--- a/tools/internal_ci/linux/grpc_interop_badserver_java.sh
+++ b/tools/internal_ci/linux/grpc_interop_badserver_java.sh
@@ -37,5 +37,5 @@ cd $(dirname $0)/../../..
git submodule update --init
-tools/run_tests/run_interop_tests.py -l java --use_docker --http2_badserver_interop $@
+tools/run_tests/run_interop_tests.py -l java --use_docker --http2_server_interop $@
diff --git a/tools/internal_ci/linux/grpc_interop_badserver_python.sh b/tools/internal_ci/linux/grpc_interop_badserver_python.sh
index 3fff537d2b..c3bb92f33d 100755
--- a/tools/internal_ci/linux/grpc_interop_badserver_python.sh
+++ b/tools/internal_ci/linux/grpc_interop_badserver_python.sh
@@ -37,5 +37,5 @@ cd $(dirname $0)/../../..
git submodule update --init
-tools/run_tests/run_interop_tests.py -l python --use_docker --http2_badserver_interop $@
+tools/run_tests/run_interop_tests.py -l python --use_docker --http2_server_interop $@
diff --git a/tools/jenkins/run_interop.sh b/tools/jenkins/run_interop.sh
index 2a9fc662a9..13c208e97c 100755
--- a/tools/jenkins/run_interop.sh
+++ b/tools/jenkins/run_interop.sh
@@ -36,4 +36,4 @@ export LANG=en_US.UTF-8
# Enter the gRPC repo root
cd $(dirname $0)/../..
-tools/run_tests/run_interop_tests.py -l all -s all --cloud_to_prod --cloud_to_prod_auth --use_docker --http2_interop --http2_badserver_interop -t -j 12 $@ || true
+tools/run_tests/run_interop_tests.py -l all -s all --cloud_to_prod --cloud_to_prod_auth --use_docker --http2_interop --http2_server_interop -t -j 12 $@ || true
diff --git a/tools/run_tests/interop/interop_html_report.template b/tools/run_tests/interop/interop_html_report.template
index 88ecd4e4db..6d9de5c62e 100644
--- a/tools/run_tests/interop/interop_html_report.template
+++ b/tools/run_tests/interop/interop_html_report.template
@@ -106,19 +106,19 @@
% endfor
% endif
-% if http2_badserver_cases:
- <h2>HTTP/2 Bad Server Tests</h2>
+% if http2_server_cases:
+ <h2>HTTP/2 Server Tests</h2>
## Each column header is the client language.
<table style="width:100%" border="1">
<tr bgcolor="#00BFFF">
<th>Client languages &#9658;<br/>Test Cases &#9660;</th>
- % for client_lang in client_langs_http2_badserver_cases:
+ % for client_lang in client_langs:
<th>${client_lang}</th>
% endfor
</tr>
- % for test_case in http2_badserver_cases:
+ % for test_case in http2_server_cases:
<tr><td><b>${test_case}</b></td>
- % for client_lang in client_langs_http2_badserver_cases:
+ % for client_lang in client_langs:
<%
shortname = 'cloud_to_cloud:%s:http2_server:%s' % (client_lang,
test_case)
diff --git a/tools/run_tests/python_utils/report_utils.py b/tools/run_tests/python_utils/report_utils.py
index 131772f55f..3b2b4f8712 100644
--- a/tools/run_tests/python_utils/report_utils.py
+++ b/tools/run_tests/python_utils/report_utils.py
@@ -80,10 +80,9 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
tree = ET.ElementTree(root)
tree.write(xml_report, encoding='UTF-8')
-
def render_interop_html_report(
client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
- http2_badserver_cases, client_langs_http2_badserver_cases, resultset,
+ http2_server_cases, resultset,
num_failures, cloud_to_prod, prod_servers, http2_interop):
"""Generate HTML report for interop tests."""
template_file = 'tools/run_tests/interop/interop_html_report.template'
@@ -99,9 +98,7 @@ def render_interop_html_report(
sorted_test_cases = sorted(test_cases)
sorted_auth_test_cases = sorted(auth_test_cases)
sorted_http2_cases = sorted(http2_cases)
- sorted_http2_badserver_cases = sorted(http2_badserver_cases)
- sorted_client_langs_http2_badserver_cases = sorted(
- client_langs_http2_badserver_cases)
+ sorted_http2_server_cases = sorted(http2_server_cases)
sorted_client_langs = sorted(client_langs)
sorted_server_langs = sorted(server_langs)
sorted_prod_servers = sorted(prod_servers)
@@ -111,9 +108,7 @@ def render_interop_html_report(
'test_cases': sorted_test_cases,
'auth_test_cases': sorted_auth_test_cases,
'http2_cases': sorted_http2_cases,
- 'http2_badserver_cases': sorted_http2_badserver_cases,
- 'client_langs_http2_badserver_cases': (
- sorted_client_langs_http2_badserver_cases),
+ 'http2_server_cases': sorted_http2_server_cases,
'resultset': resultset,
'num_failures': num_failures,
'cloud_to_prod': cloud_to_prod,
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index ce4dfb863e..2d7f4a625d 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -45,6 +45,7 @@ import tempfile
import time
import uuid
import six
+import traceback
import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
@@ -73,6 +74,10 @@ _SKIP_ADVANCED = ['status_code_and_message',
_TEST_TIMEOUT = 3*60
+# disable this test on core-based languages,
+# see https://github.com/grpc/grpc/issues/9779
+_SKIP_DATA_FRAME_PADDING = ['data_frame_padding']
+
class CXXLanguage:
def __init__(self):
@@ -97,7 +102,7 @@ class CXXLanguage:
return {}
def unimplemented_test_cases(self):
- return []
+ return _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return []
@@ -126,7 +131,7 @@ class CSharpLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
@@ -155,7 +160,7 @@ class CSharpCoreCLRLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
@@ -250,7 +255,7 @@ class Http2Server:
return {}
def unimplemented_test_cases(self):
- return _TEST_CASES
+ return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _TEST_CASES
@@ -281,7 +286,7 @@ class Http2Client:
return _TEST_CASES
def unimplemented_test_cases_server(self):
- return []
+ return _TEST_CASES
def __str__(self):
return 'http2'
@@ -308,7 +313,7 @@ class NodeLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
@@ -333,7 +338,7 @@ class PHPLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return []
@@ -358,7 +363,7 @@ class PHP7Language:
return {}
def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return []
@@ -389,7 +394,7 @@ class RubyLanguage:
return {}
def unimplemented_test_cases(self):
- return _SKIP_SERVER_COMPRESSION
+ return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
@@ -437,7 +442,7 @@ class PythonLanguage:
'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
def unimplemented_test_cases(self):
- return _SKIP_COMPRESSION
+ return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
@@ -476,10 +481,14 @@ _AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
_HTTP2_TEST_CASES = ['tls', 'framing']
-_HTTP2_BADSERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
- 'goaway', 'ping', 'max_streams']
+_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
+ 'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
+
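+# Maps HTTP/2 server test cases that reuse standard gRPC interop clients to
+# the gRPC client test case each one runs.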
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
-_LANGUAGES_FOR_HTTP2_BADSERVER_TESTS = ['java', 'go', 'python', 'c++']
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
+
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
@@ -631,14 +640,28 @@ def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
'--use_tls=%s' % ('false' if insecure else 'true'),
'--use_test_ca=true',
]
+
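+ # Server test cases that reuse gRPC interop clients are run under the
+ # mapped gRPC test case name (e.g. "large_unary").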
+ client_test_case = test_case
+ if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
+ if client_test_case in language.unimplemented_test_cases():
+ print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
+ sys.exit(1)
+
common_options = [
- '--test_case=%s' % test_case,
+ '--test_case=%s' % client_test_case,
'--server_host=%s' % server_host,
'--server_port=%s' % server_port,
]
- if test_case in _HTTP2_BADSERVER_TEST_CASES:
- cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
- cwd = language.http2_cwd
+
+ if test_case in _HTTP2_SERVER_TEST_CASES:
+ if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ client_options = interop_only_options + common_options
+ cmdline = bash_cmdline(language.client_cmd(client_options))
+ cwd = language.client_cwd
+ else:
+ cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
+ cwd = language.http2_cwd
else:
cmdline = bash_cmdline(language.client_cmd(common_options+interop_only_options))
cwd = language.client_cwd
@@ -686,7 +709,7 @@ def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
docker_args += list(
itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
for i in range(
- len(_HTTP2_BADSERVER_TEST_CASES))))
+ len(_HTTP2_SERVER_TEST_CASES))))
# Enable docker's healthcheck mechanism.
# This runs a Python script inside the container every second. The script
# pings the http2 server to verify it is ready. The 'health-retries' flag
@@ -856,11 +879,11 @@ argp.add_argument('--http2_interop',
action='store_const',
const=True,
help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
-argp.add_argument('--http2_badserver_interop',
+argp.add_argument('--http2_server_interop',
default=False,
action='store_const',
const=True,
- help='Enable HTTP/2 server edge case testing. (Good client, bad server)')
+ help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)')
argp.add_argument('--insecure',
default=False,
action='store_const',
@@ -895,26 +918,26 @@ languages = set(_LANGUAGES[l]
six.iterkeys(_LANGUAGES) if x == 'all' else [x]
for x in args.language))
-languages_http2_badserver_interop = set()
-if args.http2_badserver_interop:
- languages_http2_badserver_interop = set(
- _LANGUAGES[l] for l in _LANGUAGES_FOR_HTTP2_BADSERVER_TESTS
+languages_http2_clients_for_http2_server_interop = set()
+if args.http2_server_interop:
+ languages_http2_clients_for_http2_server_interop = set(
+ _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
if 'all' in args.language or l in args.language)
http2Interop = Http2Client() if args.http2_interop else None
-http2InteropServer = Http2Server() if args.http2_badserver_interop else None
+http2InteropServer = Http2Server() if args.http2_server_interop else None
docker_images={}
if args.use_docker:
# languages for which to build docker images
languages_to_build = set(
_LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers]))
- languages_to_build = languages_to_build | languages_http2_badserver_interop
+ languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
if args.http2_interop:
languages_to_build.add(http2Interop)
- if args.http2_badserver_interop:
+ if args.http2_server_interop:
languages_to_build.add(http2InteropServer)
build_jobs = []
@@ -943,7 +966,6 @@ client_manual_cmd_log = [] if args.manual_run else None
# Start interop servers.
server_jobs = {}
server_addresses = {}
-http2_badserver_ports = ()
try:
for s in servers:
lang = str(s)
@@ -957,15 +979,15 @@ try:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ('localhost', '${SERVER_PORT}')
- http2_badserver_job = None
- if args.http2_badserver_interop:
+ http2_server_job = None
+ if args.http2_server_interop:
# launch a HTTP2 server emulator that creates edge cases
lang = str(http2InteropServer)
spec = server_jobspec(http2InteropServer, docker_images.get(lang),
manual_cmd_log=server_manual_cmd_log)
if not args.manual_run:
- http2_badserver_job = dockerjob.DockerJob(spec)
- server_jobs[lang] = http2_badserver_job
+ http2_server_job = dockerjob.DockerJob(spec)
+ server_jobs[lang] = http2_server_job
else:
# don't run the server, set server port to a placeholder value
server_addresses[lang] = ('localhost', '${SERVER_PORT}')
@@ -1049,15 +1071,15 @@ try:
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
- if args.http2_badserver_interop:
+ if args.http2_server_interop:
if not args.manual_run:
- http2_badserver_job.wait_for_healthy(timeout_seconds=600)
- for language in languages_http2_badserver_interop:
- for test_case in _HTTP2_BADSERVER_TEST_CASES:
- offset = sorted(_HTTP2_BADSERVER_TEST_CASES).index(test_case)
+ http2_server_job.wait_for_healthy(timeout_seconds=600)
+ for language in languages_http2_clients_for_http2_server_interop:
+ for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
server_port = _DEFAULT_SERVER_PORT+offset
if not args.manual_run:
- server_port = http2_badserver_job.mapped_port(server_port)
+ server_port = http2_server_job.mapped_port(server_port)
test_job = cloud_to_cloud_jobspec(language,
test_case,
str(http2InteropServer),
@@ -1066,6 +1088,31 @@ try:
docker_image=docker_images.get(str(language)),
manual_cmd_log=client_manual_cmd_log)
jobs.append(test_job)
+ for language in languages:
+ # _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+ # _HTTP2_SERVER_TEST_CASES in which clients use their gRPC interop clients
+ # rather than specialized http2 clients, reusing existing test implementations.
+ # For example, in the "data_frame_padding" test, use each language's gRPC
+ # interop client and make it think that it's running the "large_unary"
+ # test case. This avoids implementing a new test case in each language.
+ for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+ if test_case not in language.unimplemented_test_cases():
+ offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+ server_port = _DEFAULT_SERVER_PORT+offset
+ if not args.manual_run:
+ server_port = http2_server_job.mapped_port(server_port)
+ if not args.insecure:
+ print(('Creating grpc client for http2 server test case with an insecure connection, even though'
+ ' args.insecure is False. The http2 test server only supports insecure connections.'))
+ test_job = cloud_to_cloud_jobspec(language,
+ test_case,
+ str(http2InteropServer),
+ 'localhost',
+ server_port,
+ docker_image=docker_images.get(str(language)),
+ insecure=True,
+ manual_cmd_log=client_manual_cmd_log)
+ jobs.append(test_job)
if not jobs:
print('No jobs to run.')
@@ -1093,16 +1140,17 @@ try:
if "http2" in name:
job[0].http2results = aggregate_http2_results(job[0].message)
- http2_badserver_test_cases = (
- _HTTP2_BADSERVER_TEST_CASES if args.http2_badserver_interop else [])
+ http2_server_test_cases = (
+ _HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])
report_utils.render_interop_html_report(
set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
- _HTTP2_TEST_CASES, http2_badserver_test_cases,
- _LANGUAGES_FOR_HTTP2_BADSERVER_TESTS, resultset, num_failures,
+ _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
args.http2_interop)
-
+except Exception as e:
+ print('exception occurred:')
+ traceback.print_exc(file=sys.stdout)
finally:
# Check if servers are still running.
for server, job in server_jobs.items():