author    Alexander Polcyn <apolcyn@google.com>  2017-02-09 21:06:08 -0800
committer Alexander Polcyn <apolcyn@google.com>  2017-03-22 13:46:51 -0700
commit    50fdc8ab19ea97e67d9fb1d30ae5016cffab1f43
tree      157d26c4e675ceac27e3bb8a7140100beb4e0876 /test/http2_test
parent    40a947ef93aeddca3b606613f628d4d09f094e77
add http2 server test for unary calls that uses small frames and padding
Diffstat (limited to 'test/http2_test')
-rw-r--r--  test/http2_test/http2_base_server.py        35
-rw-r--r--  test/http2_test/http2_test_server.py         10
-rw-r--r--  test/http2_test/test_data_frame_padding.py   94
3 files changed, 125 insertions(+), 14 deletions(-)
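
The new test exercises HTTP/2 DATA frame padding: when a frame is padded, the one-byte pad-length field and the padding itself count against flow control along with the payload. A minimal sketch of that arithmetic (not part of the patch; constants taken from the new test file below):

_LARGE_PADDING_LENGTH = 255   # padding bytes per DATA frame, from test_data_frame_padding.py
_SMALL_READ_CHUNK_SIZE = 5    # data bytes per DATA frame, from test_data_frame_padding.py

def flow_control_cost(data_bytes, pad_length=None):
  # Bytes charged against the flow-control window for one DATA frame:
  # the payload, plus one pad-length byte, plus the padding itself.
  padding_bytes = pad_length + 1 if pad_length is not None else 0
  return data_bytes + padding_bytes

# Each padded frame in the test carries 5 data bytes but costs 261 bytes of quota.
assert flow_control_cost(_SMALL_READ_CHUNK_SIZE, _LARGE_PADDING_LENGTH) == 261
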
diff --git a/test/http2_test/http2_base_server.py b/test/http2_test/http2_base_server.py
index 8de028ceb1..e158e9b703 100644
--- a/test/http2_test/http2_base_server.py
+++ b/test/http2_test/http2_base_server.py
@@ -39,6 +39,7 @@ import twisted.internet.protocol
_READ_CHUNK_SIZE = 16384
_GRPC_HEADER_SIZE = 5
+_MIN_SETTINGS_MAX_FRAME_SIZE = 16384
class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
def __init__(self):
@@ -121,38 +122,46 @@ class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
)
self.transport.write(self._conn.data_to_send())
- def on_window_update_default(self, event):
- # send pending data, if any
- self.default_send(event.stream_id)
+ def on_window_update_default(self, _, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
+ # try to resume sending on all active streams (update might be for connection)
+ for stream_id in self._send_remaining:
+ self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
def send_reset_stream(self):
self._conn.reset_stream(self._stream_id)
self.transport.write(self._conn.data_to_send())
- def setup_send(self, data_to_send, stream_id):
+ def setup_send(self, data_to_send, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
logging.info('Setting up data to send for stream_id: %d' % stream_id)
self._send_remaining[stream_id] = len(data_to_send)
self._send_offset = 0
self._data_to_send = data_to_send
- self.default_send(stream_id)
+ self.default_send(stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size)
- def default_send(self, stream_id):
+ def default_send(self, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE):
if not self._send_remaining.has_key(stream_id):
# not setup to send data yet
return
while self._send_remaining[stream_id] > 0:
lfcw = self._conn.local_flow_control_window(stream_id)
- if lfcw == 0:
+ padding_bytes = pad_length + 1 if pad_length is not None else 0
+ if lfcw - padding_bytes <= 0:
+ logging.info('Stream %d. lfcw: %d. padding bytes: %d. not enough quota yet' % (stream_id, lfcw, padding_bytes))
break
- chunk_size = min(lfcw, _READ_CHUNK_SIZE)
+ chunk_size = min(lfcw - padding_bytes, read_chunk_size)
bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
- logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d' %
- (lfcw, self._send_offset, self._send_offset + bytes_to_send,
- stream_id))
+ logging.info('flow_control_window = %d. sending [%d:%d] stream_id %d. includes %d total padding bytes' %
+ (lfcw, self._send_offset, self._send_offset + bytes_to_send + padding_bytes,
+ stream_id, padding_bytes))
+ # The receiver might allow sending frames larger than the http2 minimum
+ # max frame size (16384), but this test should never send more than 16384
+ # for simplicity (which is always legal).
+ if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
+ raise ValueError("overload: sending %d" % (bytes_to_send + padding_bytes))
data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send]
try:
- self._conn.send_data(stream_id, data, False)
+ self._conn.send_data(stream_id, data, end_stream=False, pad_length=pad_length)
except h2.exceptions.ProtocolError:
logging.info('Stream %d is closed' % stream_id)
break
@@ -200,5 +209,5 @@ class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
req_proto_str = recv_buffer[5:5+grpc_msg_size]
sr = messages_pb2.SimpleRequest()
sr.ParseFromString(req_proto_str)
- logging.info('Parsed request for stream %d: response_size=%s' % (stream_id, sr.response_size))
+ logging.info('Parsed simple request for stream %d' % stream_id)
return sr
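
In short, default_send now reserves window quota for the padding before picking a chunk size, caps each frame at read_chunk_size data bytes, and refuses to build a frame larger than the HTTP/2 minimum SETTINGS_MAX_FRAME_SIZE. A standalone sketch of that per-frame planning (pure arithmetic, no h2 connection, illustrative only):

_MIN_SETTINGS_MAX_FRAME_SIZE = 16384

def plan_frames(total_bytes, window, read_chunk_size, pad_length=None):
  # Yield the data-byte size of each frame until quota or data runs out.
  padding_bytes = pad_length + 1 if pad_length is not None else 0
  remaining = total_bytes
  while remaining > 0:
    if window - padding_bytes <= 0:
      break  # wait for a WINDOW_UPDATE, as the server does
    chunk = min(window - padding_bytes, read_chunk_size, remaining)
    if chunk + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
      raise ValueError('overload: sending %d' % (chunk + padding_bytes))
    yield chunk
    remaining -= chunk
    window -= chunk + padding_bytes  # padding also consumes flow-control quota

# A 30-byte response with pad_length=255 and 5-byte chunks takes 6 frames:
# list(plan_frames(30, 65535, 5, pad_length=255)) == [5, 5, 5, 5, 5, 5]
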
diff --git a/test/http2_test/http2_test_server.py b/test/http2_test/http2_test_server.py
index 46c3e00d18..6a7849b94a 100644
--- a/test/http2_test/http2_test_server.py
+++ b/test/http2_test/http2_test_server.py
@@ -44,6 +44,7 @@ import test_ping
import test_rst_after_data
import test_rst_after_header
import test_rst_during_data
+import test_data_frame_padding
_TEST_CASE_MAPPING = {
'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader,
@@ -52,6 +53,10 @@ _TEST_CASE_MAPPING = {
'goaway': test_goaway.TestcaseGoaway,
'ping': test_ping.TestcasePing,
'max_streams': test_max_streams.TestcaseSettingsMaxStreams,
+
+ # Positive tests below:
+ 'data_frame_padding': test_data_frame_padding.TestDataFramePadding,
+ 'no_df_padding_sanity_test': test_data_frame_padding.TestDataFramePadding,
}
_exit_code = 0
@@ -73,6 +78,8 @@ class H2Factory(twisted.internet.protocol.Factory):
if self._testcase == 'goaway':
return t(self._num_streams).get_base_server()
+ elif self._testcase == 'no_df_padding_sanity_test':
+ return t(use_padding=False).get_base_server()
else:
return t().get_base_server()
@@ -81,7 +88,8 @@ def parse_arguments():
parser.add_argument('--base_port', type=int, default=8080,
help='base port to run the servers (default: 8080). One test server is '
'started on each incrementing port, beginning with base_port, in the '
- 'following order: goaway,max_streams,ping,rst_after_data,rst_after_header,'
+ 'following order: data_frame_padding,goaway,max_streams,'
+ 'no_df_padding_sanity_test,ping,rst_after_data,rst_after_header,'
'rst_during_data'
)
return parser.parse_args()
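
The help text above also fixes the port layout: one test server per case, on consecutive ports starting at --base_port, in the order listed. Assuming the listed order maps directly onto consecutive ports, the two new servers would land as sketched below (illustrative helper, not part of the patch):

_TEST_CASES_IN_PORT_ORDER = [
    'data_frame_padding', 'goaway', 'max_streams', 'no_df_padding_sanity_test',
    'ping', 'rst_after_data', 'rst_after_header', 'rst_during_data',
]

def port_for(testcase, base_port=8080):
  return base_port + _TEST_CASES_IN_PORT_ORDER.index(testcase)

# With the default base_port of 8080:
#   port_for('data_frame_padding')        -> 8080
#   port_for('no_df_padding_sanity_test') -> 8083
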
diff --git a/test/http2_test/test_data_frame_padding.py b/test/http2_test/test_data_frame_padding.py
new file mode 100644
index 0000000000..e1db28faed
--- /dev/null
+++ b/test/http2_test/test_data_frame_padding.py
@@ -0,0 +1,94 @@
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import http2_base_server
+import logging
+import messages_pb2
+
+# Set the number of padding bytes per data frame to be very large
+# relative to the number of data bytes for each data frame sent.
+_LARGE_PADDING_LENGTH = 255
+_SMALL_READ_CHUNK_SIZE = 5
+
+class TestDataFramePadding(object):
+ """
+  In response to an incoming request, this test sends the response message in
+  small data frames, optionally adding a large amount of padding to each
+  frame. The client must deliver the complete message to the application layer.
+ """
+ def __init__(self, use_padding=True):
+ self._base_server = http2_base_server.H2ProtocolBaseServer()
+ self._base_server._handlers['DataReceived'] = self.on_data_received
+ self._base_server._handlers['WindowUpdated'] = self.on_window_update
+ self._base_server._handlers['RequestReceived'] = self.on_request_received
+
+ # _total_updates maps stream ids to total flow control updates received
+ self._total_updates = {}
+ # zero window updates so far for connection window (stream id '0')
+ self._total_updates[0] = 0
+ self._read_chunk_size = _SMALL_READ_CHUNK_SIZE
+
+ if use_padding:
+ self._pad_length = _LARGE_PADDING_LENGTH
+ else:
+ self._pad_length = None
+
+ def get_base_server(self):
+ return self._base_server
+
+ def on_data_received(self, event):
+ logging.info('on data received. Stream id: %d. Data length: %d' % (event.stream_id, len(event.data)))
+ self._base_server.on_data_received_default(event)
+ if len(event.data) == 0:
+ return
+ sr = self._base_server.parse_received_data(event.stream_id)
+ stream_bytes = ''
+ # Check if full grpc msg has been read into the recv buffer yet
+ if sr:
+ response_data = self._base_server.default_response_data(sr.response_size)
+ logging.info('Stream id: %d. total resp size: %d' % (event.stream_id, len(response_data)))
+ # Begin sending the response. Add ``self._pad_length`` padding to each
+ # data frame and split the whole message into data frames each carrying
+ # only self._read_chunk_size of data.
+ # The purpose is to have the majority of the data frame response bytes
+ # be padding bytes, since ``self._pad_length`` >> ``self._read_chunk_size``.
+ self._base_server.setup_send(response_data , event.stream_id, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)
+
+ def on_request_received(self, event):
+ self._base_server.on_request_received_default(event)
+ logging.info('on request received. Stream id: %s.' % event.stream_id)
+ self._total_updates[event.stream_id] = 0
+
+ # Log debug info and try to resume sending on all currently active streams.
+ def on_window_update(self, event):
+ logging.info('on window update. Stream id: %s. Delta: %s' % (event.stream_id, event.delta))
+ self._total_updates[event.stream_id] += event.delta
+ total = self._total_updates[event.stream_id]
+ logging.info('... - total updates for stream %d : %d' % (event.stream_id, total))
+ self._base_server.on_window_update_default(event, pad_length=self._pad_length, read_chunk_size=self._read_chunk_size)
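
For scale, a back-of-the-envelope calculation (not part of the test) of how much of each frame is padding under this configuration; per the comment in on_data_received, the point is that padding makes up the large majority of the response bytes the client has to account for:

data_per_frame = 5           # _SMALL_READ_CHUNK_SIZE
padding_per_frame = 255 + 1  # _LARGE_PADDING_LENGTH plus the one-byte pad-length field

frame_cost = data_per_frame + padding_per_frame        # 261 bytes of flow-control quota per frame
padding_share = padding_per_frame / float(frame_cost)  # roughly 98% of each frame is padding

print('each frame: %d quota bytes, %.0f%% padding' % (frame_cost, 100 * padding_share))
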