author    | Alistair Veitch <aveitch@google.com> | 2016-07-21 17:29:04 -0700
committer | Alistair Veitch <aveitch@google.com> | 2016-07-21 17:29:04 -0700
commit    | 3abd27e5312540e2efd63e94ca024a2e8d42af39
tree      | 9d580b71d6e81fd42f7ef9cb72e6b74321a9b55e /src/python
parent    | ef1c860197742c87819070e6906b96d1e42451b3
parent    | b8e26c021718758c2a0b2e73757580476d995498
merge to upstream/master
Diffstat (limited to 'src/python')
199 files changed, 4144 insertions, 14349 deletions
diff --git a/src/python/grpcio/.gitignore b/src/python/grpcio/.gitignore index 6e5e65096c..7cd8fab273 100644 --- a/src/python/grpcio/.gitignore +++ b/src/python/grpcio/.gitignore @@ -9,7 +9,6 @@ dist/ .coverage .coverage.* .cache/ -.tox/ nosetests.xml doc/ _grpcio_metadata.py diff --git a/src/python/grpcio/README.rst b/src/python/grpcio/README.rst index afc4fe6a37..3fc318539e 100644 --- a/src/python/grpcio/README.rst +++ b/src/python/grpcio/README.rst @@ -46,7 +46,7 @@ package named :code:`python-dev`). :: $ export REPO_ROOT=grpc # REPO_ROOT can be any directory of your choice - $ git clone https://github.com/grpc/grpc.git $REPO_ROOT + $ git clone -b $(curl -L http://grpc.io/release) https://github.com/grpc/grpc $REPO_ROOT $ cd $REPO_ROOT $ git submodule update --init diff --git a/src/python/grpcio/_unixccompiler_patch.py b/src/python/grpcio/_unixccompiler_patch.py new file mode 100644 index 0000000000..0ce5d63e98 --- /dev/null +++ b/src/python/grpcio/_unixccompiler_patch.py @@ -0,0 +1,73 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Covers inadequacies in distutils.""" + +from distutils import ccompiler +from distutils import errors +from distutils import unixccompiler +import os +import os.path +import shutil +import sys +import tempfile + +def _unix_commandfile_spawn(self, command): + """Wrapper around distutils.util.spawn that attempts to use command files. + + Meant to replace the CCompiler method `spawn` on UnixCCompiler and its + derivatives (e.g. the MinGW32 compiler). + + Some commands like `gcc` (and friends like `clang`) support command files to + work around shell command length limits. 
+ """ + command_base = os.path.basename(command[0].strip()) + if command_base == 'ccache': + command_base = command[:2] + command_args = command[2:] + elif command_base.startswith('ccache') or command_base in ['gcc', 'clang', 'clang++', 'g++']: + command_base = command[:1] + command_args = command[1:] + else: + return ccompiler.CCompiler.spawn(self, command) + temporary_directory = tempfile.mkdtemp() + command_filename = os.path.abspath(os.path.join(temporary_directory, 'command')) + with open(command_filename, 'w') as command_file: + escaped_args = [arg.replace('\\', '\\\\') for arg in command_args] + command_file.write(' '.join(escaped_args)) + modified_command = command_base + ['@{}'.format(command_filename)] + result = ccompiler.CCompiler.spawn(self, modified_command) + shutil.rmtree(temporary_directory) + return result + + +def monkeypatch_unix_compiler(): + """Monkeypatching is dumb, but it's either that or we become maintainers of + something much, much bigger.""" + unixccompiler.UnixCCompiler.spawn = _unix_commandfile_spawn diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py index 295dab2d27..86a73fa836 100644 --- a/src/python/grpcio/commands.py +++ b/src/python/grpcio/commands.py @@ -58,10 +58,31 @@ CONF_PY_ADDENDUM = """ extensions.append('sphinx.ext.napoleon') napoleon_google_docstring = True napoleon_numpy_docstring = True +napoleon_include_special_with_doc = True html_theme = 'sphinx_rtd_theme' """ +API_GLOSSARY = """ + +Glossary +================ + +.. glossary:: + + metadatum + A key-value pair included in the HTTP header. It is a + 2-tuple where the first entry is the key and the + second is the value, i.e. (key, value). The metadata key is an ASCII str, + and must be a valid HTTP header name. The metadata value can be + either a valid HTTP ASCII str, or bytes. If bytes are provided, + the key must end with '-bin', i.e. + ``('binary-metadata-bin', b'\\x00\\xFF')`` + + metadata + A sequence of metadatum. 
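[Editor's note] The glossary entry above pins down the shape gRPC Python expects for :term:`metadata`. A minimal, purely illustrative sketch of that shape (not part of the commit; the key names are invented):

    # Metadata per the glossary above: a sequence of metadatum 2-tuples.
    # Keys are ASCII strings; values are ASCII strings, or bytes when the
    # key ends in '-bin'.
    metadata = (
        ('client-id', 'interop-test'),           # textual metadatum
        ('trace-context-bin', b'\x00\xff\x10'),  # binary metadatum: '-bin' key
    )

    for key, value in metadata:
        if isinstance(value, bytes):
            assert key.endswith('-bin')
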
+""" + class CommandError(Exception): """Simple exception class for GRPC custom commands.""" @@ -131,76 +152,12 @@ class SphinxDocumentation(setuptools.Command): conf_filepath = os.path.join('doc', 'src', 'conf.py') with open(conf_filepath, 'a') as conf_file: conf_file.write(CONF_PY_ADDENDUM) + glossary_filepath = os.path.join('doc', 'src', 'grpc.rst') + with open(glossary_filepath, 'a') as glossary_filepath: + glossary_filepath.write(API_GLOSSARY) sphinx.main(['', os.path.join('doc', 'src'), os.path.join('doc', 'build')]) -class BuildProtoModules(setuptools.Command): - """Command to generate project *_pb2.py modules from proto files.""" - - description = 'build protobuf modules' - user_options = [ - ('include=', None, 'path patterns to include in protobuf generation'), - ('exclude=', None, 'path patterns to exclude from protobuf generation') - ] - - def initialize_options(self): - self.exclude = None - self.include = r'.*\.proto$' - self.protoc_command = None - self.grpc_python_plugin_command = None - - def finalize_options(self): - self.protoc_command = distutils.spawn.find_executable('protoc') - self.grpc_python_plugin_command = distutils.spawn.find_executable( - 'grpc_python_plugin') - - def run(self): - if not self.protoc_command: - raise CommandError('could not find protoc') - if not self.grpc_python_plugin_command: - raise CommandError('could not find grpc_python_plugin ' - '(protoc plugin for GRPC Python)') - - if not os.path.exists(PROTO_GEN_STEM): - os.makedirs(PROTO_GEN_STEM) - - include_regex = re.compile(self.include) - exclude_regex = re.compile(self.exclude) if self.exclude else None - paths = [] - for walk_root, directories, filenames in os.walk(PROTO_STEM): - for filename in filenames: - path = os.path.join(walk_root, filename) - if include_regex.match(path) and not ( - exclude_regex and exclude_regex.match(path)): - paths.append(path) - - # TODO(kpayson): It would be nice to do this in a batch command, - # but we currently have name conflicts in src/proto - for path in paths: - command = [ - self.protoc_command, - '--plugin=protoc-gen-python-grpc={}'.format( - self.grpc_python_plugin_command), - '-I {}'.format(GRPC_STEM), - '--python_out={}'.format(PROTO_GEN_STEM), - '--python-grpc_out={}'.format(PROTO_GEN_STEM), - ] + [path] - try: - subprocess.check_output(' '.join(command), cwd=PYTHON_STEM, shell=True, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - sys.stderr.write( - 'warning: Command:\n{}\nMessage:\n{}\nOutput:\n{}'.format( - command, e.message, e.output)) - - # Generated proto directories dont include __init__.py, but - # these are needed for python package resolution - for walk_root, _, _ in os.walk(PROTO_GEN_STEM): - if walk_root != PROTO_GEN_STEM: - path = os.path.join(walk_root, '__init__.py') - open(path, 'a').close() - - class BuildProjectMetadata(setuptools.Command): """Command to generate project metadata in a module.""" @@ -223,10 +180,6 @@ class BuildPy(build_py.build_py): """Custom project build command.""" def run(self): - try: - self.run_command('build_proto_modules') - except CommandError as error: - sys.stderr.write('warning: %s\n' % error.message) self.run_command('build_project_metadata') build_py.build_py.run(self) @@ -279,76 +232,3 @@ class Gather(setuptools.Command): self.distribution.fetch_build_eggs(self.distribution.install_requires) if self.test and self.distribution.tests_require: self.distribution.fetch_build_eggs(self.distribution.tests_require) - - -class TestLite(setuptools.Command): - """Command to run tests 
without fetching or building anything.""" - - description = 'run tests without fetching or building anything.' - user_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - # distutils requires this override. - pass - - def run(self): - self._add_eggs_to_path() - - import tests - loader = tests.Loader() - loader.loadTestsFromNames(['tests']) - runner = tests.Runner() - result = runner.run(loader.suite) - if not result.wasSuccessful(): - sys.exit('Test failure') - - def _add_eggs_to_path(self): - """Fetch install and test requirements""" - self.distribution.fetch_build_eggs(self.distribution.install_requires) - self.distribution.fetch_build_eggs(self.distribution.tests_require) - - -class RunInterop(test.test): - - description = 'run interop test client/server' - user_options = [ - ('args=', 'a', 'pass-thru arguments for the client/server'), - ('client', 'c', 'flag indicating to run the client'), - ('server', 's', 'flag indicating to run the server') - ] - - def initialize_options(self): - self.args = '' - self.client = False - self.server = False - - def finalize_options(self): - if self.client and self.server: - raise DistutilsOptionError('you may only specify one of client or server') - - def run(self): - if self.distribution.install_requires: - self.distribution.fetch_build_eggs(self.distribution.install_requires) - if self.distribution.tests_require: - self.distribution.fetch_build_eggs(self.distribution.tests_require) - if self.client: - self.run_client() - elif self.server: - self.run_server() - - def run_server(self): - # We import here to ensure that our setuptools parent has had a chance to - # edit the Python system path. - from tests.interop import server - sys.argv[1:] = self.args.split() - server.serve() - - def run_client(self): - # We import here to ensure that our setuptools parent has had a chance to - # edit the Python system path. - from tests.interop import client - sys.argv[1:] = self.args.split() - client.test_interoperability() diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py index 5ba5a4e1fd..fd015129f0 100644 --- a/src/python/grpcio/grpc/__init__.py +++ b/src/python/grpcio/grpc/__init__.py @@ -29,8 +29,6 @@ """gRPC's Python API.""" -__import__('pkg_resources').declare_namespace(__name__) - import abc import enum @@ -212,14 +210,14 @@ class ChannelConnectivity(enum.Enum): READY: The channel is ready to conduct RPCs. TRANSIENT_FAILURE: The channel has seen a failure from which it expects to recover. - FATAL_FAILURE: The channel has seen a failure from which it cannot recover. + SHUTDOWN: The channel has seen a failure from which it cannot recover. """ IDLE = (_cygrpc.ConnectivityState.idle, 'idle') CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting') READY = (_cygrpc.ConnectivityState.ready, 'ready') TRANSIENT_FAILURE = ( _cygrpc.ConnectivityState.transient_failure, 'transient failure') - FATAL_FAILURE = (_cygrpc.ConnectivityState.fatal_failure, 'fatal failure') + SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown') @enum.unique @@ -314,7 +312,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)): This method blocks until the value is available. Returns: - The initial metadata as a sequence of pairs of bytes. + The initial :term:`metadata`. """ raise NotImplementedError() @@ -325,7 +323,7 @@ class Call(six.with_metaclass(abc.ABCMeta, RpcContext)): This method blocks until the value is available. Returns: - The trailing metadata as a sequence of pairs of bytes. 
+ The trailing :term:`metadata`. """ raise NotImplementedError() @@ -396,8 +394,7 @@ class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)): """Inform the gRPC runtime of the metadata to construct a CallCredentials. Args: - metadata: An iterable of 2-sequences (e.g. tuples) of metadata key/value - pairs. + metadata: The :term:`metadata` used to construct the CallCredentials. error: An Exception to indicate error or None to indicate success. """ raise NotImplementedError() @@ -438,23 +435,39 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): """Affords invoking a unary-unary RPC.""" @abc.abstractmethod - def __call__( - self, request, timeout=None, metadata=None, credentials=None, - with_call=False): + def __call__(self, request, timeout=None, metadata=None, credentials=None): """Synchronously invokes the underlying RPC. Args: request: The request value for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. - with_call: Whether or not to include return a Call for the RPC in addition - to the response. Returns: - The response value for the RPC, and a Call for the RPC if with_call was - set to True at invocation. + The response value for the RPC. + + Raises: + RpcError: Indicating that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + raise NotImplementedError() + + @abc.abstractmethod + def with_call(self, request, timeout=None, metadata=None, credentials=None): + """Synchronously invokes the underlying RPC. + + Args: + request: The request value for the RPC. + timeout: An optional durating of time in seconds to allow for the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. + + Returns: + The response value for the RPC and a Call value for the RPC. Raises: RpcError: Indicating that the RPC terminated with non-OK status. The @@ -470,7 +483,7 @@ class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request: The request value for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -493,7 +506,7 @@ class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request: The request value for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: An optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -510,18 +523,15 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): @abc.abstractmethod def __call__( - self, request_iterator, timeout=None, metadata=None, credentials=None, - with_call=False): + self, request_iterator, timeout=None, metadata=None, credentials=None): """Synchronously invokes the underlying RPC. Args: request_iterator: An iterator that yields request values for the RPC. 
timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. - with_call: Whether or not to include return a Call for the RPC in addition - to the response. Returns: The response value for the RPC, and a Call for the RPC if with_call was @@ -535,6 +545,28 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() @abc.abstractmethod + def with_call( + self, request_iterator, timeout=None, metadata=None, credentials=None): + """Synchronously invokes the underlying RPC. + + Args: + request_iterator: An iterator that yields request values for the RPC. + timeout: An optional duration of time in seconds to allow for the RPC. + metadata: Optional :term:`metadata` to be transmitted to the + service-side of the RPC. + credentials: An optional CallCredentials for the RPC. + + Returns: + The response value for the RPC and a Call for the RPC. + + Raises: + RpcError: Indicating that the RPC terminated with non-OK status. The + raised RpcError will also be a Call for the RPC affording the RPC's + metadata, status code, and details. + """ + raise NotImplementedError() + + @abc.abstractmethod def future( self, request_iterator, timeout=None, metadata=None, credentials=None): """Asynchronously invokes the underlying RPC. @@ -542,7 +574,7 @@ class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request_iterator: An iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -566,7 +598,7 @@ class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)): Args: request_iterator: An iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. - metadata: An optional sequence of pairs of bytes to be transmitted to the + metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. @@ -674,7 +706,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): """Accesses the metadata from the invocation-side of the RPC. Returns: - The invocation metadata object as a sequence of pairs of bytes. + The invocation :term:`metadata`. """ raise NotImplementedError() @@ -695,8 +727,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): service-side initial metadata to transmit. Args: - initial_metadata: The initial metadata of the RPC as a sequence of pairs - of bytes. + initial_metadata: The initial :term:`metadata`. """ raise NotImplementedError() @@ -708,8 +739,7 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): service-side trailing metadata to transmit. Args: - trailing_metadata: The trailing metadata of the RPC as a sequence of pairs - of bytes. + trailing_metadata: The trailing :term:`metadata`. """ raise NotImplementedError() @@ -782,7 +812,7 @@ class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)): """Describes an RPC that has just arrived for service. Attributes: method: The method name of the RPC. - invocation_metadata: The metadata from the invocation side of the RPC. 
+ invocation_metadata: The :term:`metadata` from the invocation side of the RPC. """ @@ -900,6 +930,102 @@ class Server(six.with_metaclass(abc.ABCMeta)): ################################# Functions ################################ +def unary_unary_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None): + """Creates an RpcMethodHandler for a unary-unary RPC method. + + Args: + behavior: The implementation of an RPC method as a callable behavior taking + a single request value and returning a single response value. + request_deserializer: An optional request deserialization behavior. + response_serializer: An optional response serialization behavior. + + Returns: + An RpcMethodHandler for a unary-unary RPC method constructed from the given + parameters. + """ + from grpc import _utilities + return _utilities.RpcMethodHandler( + False, False, request_deserializer, response_serializer, behavior, None, + None, None) + + +def unary_stream_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None): + """Creates an RpcMethodHandler for a unary-stream RPC method. + + Args: + behavior: The implementation of an RPC method as a callable behavior taking + a single request value and returning an iterator of response values. + request_deserializer: An optional request deserialization behavior. + response_serializer: An optional response serialization behavior. + + Returns: + An RpcMethodHandler for a unary-stream RPC method constructed from the + given parameters. + """ + from grpc import _utilities + return _utilities.RpcMethodHandler( + False, True, request_deserializer, response_serializer, None, behavior, + None, None) + + +def stream_unary_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None): + """Creates an RpcMethodHandler for a stream-unary RPC method. + + Args: + behavior: The implementation of an RPC method as a callable behavior taking + an iterator of request values and returning a single response value. + request_deserializer: An optional request deserialization behavior. + response_serializer: An optional response serialization behavior. + + Returns: + An RpcMethodHandler for a stream-unary RPC method constructed from the + given parameters. + """ + from grpc import _utilities + return _utilities.RpcMethodHandler( + True, False, request_deserializer, response_serializer, None, None, + behavior, None) + + +def stream_stream_rpc_method_handler( + behavior, request_deserializer=None, response_serializer=None): + """Creates an RpcMethodHandler for a stream-stream RPC method. + + Args: + behavior: The implementation of an RPC method as a callable behavior taking + an iterator of request values and returning an iterator of response + values. + request_deserializer: An optional request deserialization behavior. + response_serializer: An optional response serialization behavior. + + Returns: + An RpcMethodHandler for a stream-stream RPC method constructed from the + given parameters. + """ + from grpc import _utilities + return _utilities.RpcMethodHandler( + True, True, request_deserializer, response_serializer, None, None, None, + behavior) + + +def method_handlers_generic_handler(service, method_handlers): + """Creates a grpc.GenericRpcHandler from RpcMethodHandlers. + + Args: + service: A service name to be used for the given method handlers. + method_handlers: A dictionary from method name to RpcMethodHandler + implementing the named method. 
+ + Returns: + A GenericRpcHandler constructed from the given parameters. + """ + from grpc import _utilities + return _utilities.DictionaryGenericHandler(service, method_handlers) + + def ssl_channel_credentials( root_certificates=None, private_key=None, certificate_chain=None): """Creates a ChannelCredentials for use with an SSL-enabled Channel. @@ -962,37 +1088,41 @@ def access_token_call_credentials(access_token): _auth.AccessTokenCallCredentials(access_token)) -def composite_call_credentials(call_credentials, additional_call_credentials): - """Compose two CallCredentials to make a new one. +def composite_call_credentials(*call_credentials): + """Compose multiple CallCredentials to make a new CallCredentials. Args: - call_credentials: A CallCredentials object. - additional_call_credentials: Another CallCredentials object to compose on - top of call_credentials. + *call_credentials: At least two CallCredentials objects. Returns: - A new CallCredentials composed of the two given CallCredentials. + A CallCredentials object composed of the given CallCredentials objects. """ + from grpc import _credential_composition + cygrpc_call_credentials = tuple( + single_call_credentials._credentials + for single_call_credentials in call_credentials) return CallCredentials( - _cygrpc.call_credentials_composite( - call_credentials._credentials, - additional_call_credentials._credentials)) + _credential_composition.call(cygrpc_call_credentials)) -def composite_channel_credentials(channel_credentials, call_credentials): - """Compose a ChannelCredentials and a CallCredentials. +def composite_channel_credentials(channel_credentials, *call_credentials): + """Compose a ChannelCredentials and one or more CallCredentials objects. Args: channel_credentials: A ChannelCredentials. - call_credentials: A CallCredentials. + *call_credentials: One or more CallCredentials objects. Returns: A ChannelCredentials composed of the given ChannelCredentials and - CallCredentials. + CallCredentials objects. """ + from grpc import _credential_composition + cygrpc_call_credentials = tuple( + single_call_credentials._credentials + for single_call_credentials in call_credentials) return ChannelCredentials( - _cygrpc.channel_credentials_composite( - channel_credentials._credentials, call_credentials._credentials)) + _credential_composition.channel( + channel_credentials._credentials, cygrpc_call_credentials)) def ssl_server_credentials( @@ -1059,7 +1189,7 @@ def insecure_channel(target, options=None): A Channel to the target through which RPCs may be conducted. """ from grpc import _channel - return _channel.Channel(target, None, options) + return _channel.Channel(target, options, None) def secure_channel(target, credentials, options=None): @@ -1075,25 +1205,69 @@ def secure_channel(target, credentials, options=None): A Channel to the target through which RPCs may be conducted. """ from grpc import _channel - return _channel.Channel(target, credentials, options) + return _channel.Channel(target, options, credentials._credentials) -def server(generic_rpc_handlers, thread_pool, options=None): +def server(thread_pool, handlers=None): """Creates a Server with which RPCs can be serviced. - The GenericRpcHandlers passed to this function needn't be the only - GenericRpcHandlers that will be used to serve RPCs; others may be added later - by calling add_generic_rpc_handlers any time before the returned server is - started. 
- Args: - generic_rpc_handlers: Some number of GenericRpcHandlers that will be used - to service RPCs after the returned Server is started. thread_pool: A futures.ThreadPoolExecutor to be used by the returned Server to service RPCs. + handlers: An optional sequence of GenericRpcHandlers to be used to service + RPCs after the returned Server is started. These handlers need not be the + only handlers the server will use to service RPCs; other handlers may + later be added by calling add_generic_rpc_handlers any time before the + returned Server is started. Returns: A Server with which RPCs can be serviced. """ from grpc import _server - return _server.Server(generic_rpc_handlers, thread_pool) + return _server.Server(thread_pool, () if handlers is None else handlers) + + +################################### __all__ ################################# + + +__all__ = ( + 'FutureTimeoutError', + 'FutureCancelledError', + 'Future', + 'ChannelConnectivity', + 'StatusCode', + 'RpcError', + 'RpcContext', + 'Call', + 'ChannelCredentials', + 'CallCredentials', + 'AuthMetadataContext', + 'AuthMetadataPluginCallback', + 'AuthMetadataPlugin', + 'ServerCredentials', + 'UnaryUnaryMultiCallable', + 'UnaryStreamMultiCallable', + 'StreamUnaryMultiCallable', + 'StreamStreamMultiCallable', + 'Channel', + 'ServicerContext', + 'RpcMethodHandler', + 'HandlerCallDetails', + 'GenericRpcHandler', + 'Server', + 'unary_unary_rpc_method_handler', + 'unary_stream_rpc_method_handler', + 'stream_unary_rpc_method_handler', + 'stream_stream_rpc_method_handler', + 'method_handlers_generic_handler', + 'ssl_channel_credentials', + 'metadata_call_credentials', + 'access_token_call_credentials', + 'composite_call_credentials', + 'composite_channel_credentials', + 'ssl_server_credentials', + 'channel_ready_future', + 'insecure_channel', + 'secure_channel', + 'server', +) diff --git a/src/python/grpcio/grpc/_adapter/.gitignore b/src/python/grpcio/grpc/_adapter/.gitignore deleted file mode 100644 index a6f96cd6db..0000000000 --- a/src/python/grpcio/grpc/_adapter/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.a -*.so -*.dll -*.pyc -*.pyd diff --git a/src/python/grpcio/grpc/_adapter/_common.py b/src/python/grpcio/grpc/_adapter/_common.py deleted file mode 100644 index 492849f4cb..0000000000 --- a/src/python/grpcio/grpc/_adapter/_common.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
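[Editor's note] As an aside on the API introduced earlier in this diff: grpc.server now takes the thread pool first with handlers optional, and method_handlers_generic_handler groups callables (wrapped by the *_rpc_method_handler helpers) under a service name. A hedged sketch only, with an invented service name and a behavior signature of (request, context):

    from concurrent import futures

    import grpc

    def echo(request, servicer_context):
        # Unary-unary behavior: return the request unchanged.
        return request

    # Wrap the callable and group handlers under a (hypothetical) service name.
    handler = grpc.method_handlers_generic_handler(
        'example.Echo',
        {'Echo': grpc.unary_unary_rpc_method_handler(echo)})

    # New signature from this commit: thread pool first, handlers optional.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4), (handler,))
    server.add_insecure_port('[::]:50051')
    server.start()
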
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State used by both invocation-side and service-side code.""" - -import enum - - -@enum.unique -class HighWrite(enum.Enum): - """The possible categories of high-level write state.""" - - OPEN = 'OPEN' - CLOSED = 'CLOSED' - - -class WriteState(object): - """A description of the state of writing to an RPC. - - Attributes: - low: A side-specific value describing the low-level state of writing. - high: A HighWrite value describing the high-level state of writing. - pending: A list of bytestrings for the RPC waiting to be written to the - other side of the RPC. - """ - - def __init__(self, low, high, pending): - self.low = low - self.high = high - self.pending = pending - - -class CommonRPCState(object): - """A description of an RPC's state. - - Attributes: - write: A WriteState describing the state of writing to the RPC. - sequence_number: The lowest-unused sequence number for use in generating - tickets locally describing the progress of the RPC. - deserializer: The behavior to be used to deserialize payload bytestreams - taken off the wire. - serializer: The behavior to be used to serialize payloads to be sent on the - wire. - """ - - def __init__(self, write, sequence_number, deserializer, serializer): - self.write = write - self.sequence_number = sequence_number - self.deserializer = deserializer - self.serializer = serializer diff --git a/src/python/grpcio/grpc/_adapter/_intermediary_low.py b/src/python/grpcio/grpc/_adapter/_intermediary_low.py deleted file mode 100644 index 9698ffeabf..0000000000 --- a/src/python/grpcio/grpc/_adapter/_intermediary_low.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Temporary old _low-like layer. - -Eases refactoring burden while we overhaul the Python framework. - -Plan: - The layers used to look like: - ... # outside _adapter - fore.py + rear.py # visible outside _adapter - _low - _c - The layers currently look like: - ... # outside _adapter - fore.py + rear.py # visible outside _adapter - _low_intermediary # adapter for new '_low' to old '_low' - _low # new '_low' - _c # new '_c' - We will later remove _low_intermediary after refactoring of fore.py and - rear.py according to the ticket system refactoring and get: - ... # outside _adapter, refactored - fore.py + rear.py # visible outside _adapter, refactored - _low # new '_low' - _c # new '_c' -""" - -import collections -import enum - -from grpc._adapter import _low -from grpc._adapter import _types - -_IGNORE_ME_TAG = object() -Code = _types.StatusCode -WriteFlags = _types.OpWriteFlags - - -class Status(collections.namedtuple('Status', ['code', 'details'])): - """Describes an RPC's overall status.""" - - -class ServiceAcceptance( - collections.namedtuple( - 'ServiceAcceptance', ['call', 'method', 'host', 'deadline'])): - """Describes an RPC on the service side at the start of service.""" - - -class Event( - collections.namedtuple( - 'Event', - ['kind', 'tag', 'write_accepted', 'complete_accepted', - 'service_acceptance', 'bytes', 'status', 'metadata'])): - """Describes an event emitted from a completion queue.""" - - @enum.unique - class Kind(enum.Enum): - """Describes the kind of an event.""" - - STOP = object() - WRITE_ACCEPTED = object() - COMPLETE_ACCEPTED = object() - SERVICE_ACCEPTED = object() - READ_ACCEPTED = object() - METADATA_ACCEPTED = object() - FINISH = object() - - -class _TagAdapter(collections.namedtuple('_TagAdapter', [ - 'user_tag', - 'kind' - ])): - pass - - -class Call(object): - """Adapter from old _low.Call interface to new _low.Call.""" - - def __init__(self, channel, completion_queue, method, host, deadline): - self._internal = channel._internal.create_call( - completion_queue._internal, method, host, deadline) - self._metadata = [] - - @staticmethod - def _from_internal(internal): - call = Call.__new__(Call) - call._internal = internal - call._metadata = [] - return call - - def invoke(self, completion_queue, metadata_tag, finish_tag): - err = self._internal.start_batch([ - _types.OpArgs.send_initial_metadata(self._metadata) - ], _IGNORE_ME_TAG) - if err != _types.CallError.OK: - return err - err = self._internal.start_batch([ - _types.OpArgs.recv_initial_metadata() - ], _TagAdapter(metadata_tag, Event.Kind.METADATA_ACCEPTED)) - if err != _types.CallError.OK: - return err - err = self._internal.start_batch([ - _types.OpArgs.recv_status_on_client() - ], _TagAdapter(finish_tag, Event.Kind.FINISH)) - return err - - def write(self, message, tag, flags): - return self._internal.start_batch([ - _types.OpArgs.send_message(message, flags) - ], _TagAdapter(tag, Event.Kind.WRITE_ACCEPTED)) - - def complete(self, tag): - return 
self._internal.start_batch([ - _types.OpArgs.send_close_from_client() - ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED)) - - def accept(self, completion_queue, tag): - return self._internal.start_batch([ - _types.OpArgs.recv_close_on_server() - ], _TagAdapter(tag, Event.Kind.FINISH)) - - def add_metadata(self, key, value): - self._metadata.append((key, value)) - - def premetadata(self): - result = self._internal.start_batch([ - _types.OpArgs.send_initial_metadata(self._metadata) - ], _IGNORE_ME_TAG) - self._metadata = [] - return result - - def read(self, tag): - return self._internal.start_batch([ - _types.OpArgs.recv_message() - ], _TagAdapter(tag, Event.Kind.READ_ACCEPTED)) - - def status(self, status, tag): - return self._internal.start_batch([ - _types.OpArgs.send_status_from_server( - self._metadata, status.code, status.details) - ], _TagAdapter(tag, Event.Kind.COMPLETE_ACCEPTED)) - - def cancel(self): - return self._internal.cancel() - - def peer(self): - return self._internal.peer() - - def set_credentials(self, creds): - return self._internal.set_credentials(creds) - - -class Channel(object): - """Adapter from old _low.Channel interface to new _low.Channel.""" - - def __init__(self, hostport, channel_credentials, server_host_override=None): - args = [] - if server_host_override: - args.append((_types.GrpcChannelArgumentKeys.SSL_TARGET_NAME_OVERRIDE.value, server_host_override)) - self._internal = _low.Channel(hostport, args, channel_credentials) - - -class CompletionQueue(object): - """Adapter from old _low.CompletionQueue interface to new _low.CompletionQueue.""" - - def __init__(self): - self._internal = _low.CompletionQueue() - - def get(self, deadline=None): - if deadline is None: - ev = self._internal.next(float('+inf')) - else: - ev = self._internal.next(deadline) - if ev is None: - return None - elif ev.tag is _IGNORE_ME_TAG: - return self.get(deadline) - elif ev.type == _types.EventType.QUEUE_SHUTDOWN: - kind = Event.Kind.STOP - tag = None - write_accepted = None - complete_accepted = None - service_acceptance = None - message_bytes = None - status = None - metadata = None - elif ev.type == _types.EventType.OP_COMPLETE: - kind = ev.tag.kind - tag = ev.tag.user_tag - write_accepted = ev.success if kind == Event.Kind.WRITE_ACCEPTED else None - complete_accepted = ev.success if kind == Event.Kind.COMPLETE_ACCEPTED else None - service_acceptance = ServiceAcceptance(Call._from_internal(ev.call), ev.call_details.method, ev.call_details.host, ev.call_details.deadline) if kind == Event.Kind.SERVICE_ACCEPTED else None - message_bytes = ev.results[0].message if kind == Event.Kind.READ_ACCEPTED else None - status = Status(ev.results[0].status.code, ev.results[0].status.details) if (kind == Event.Kind.FINISH and ev.results[0].status) else Status(_types.StatusCode.CANCELLED if ev.results[0].cancelled else _types.StatusCode.OK, '') if len(ev.results) > 0 and ev.results[0].cancelled is not None else None - metadata = ev.results[0].initial_metadata if (kind in [Event.Kind.SERVICE_ACCEPTED, Event.Kind.METADATA_ACCEPTED]) else (ev.results[0].trailing_metadata if kind == Event.Kind.FINISH else None) - else: - raise RuntimeError('unknown event') - result_ev = Event(kind=kind, tag=tag, write_accepted=write_accepted, complete_accepted=complete_accepted, service_acceptance=service_acceptance, bytes=message_bytes, status=status, metadata=metadata) - return result_ev - - def stop(self): - self._internal.shutdown() - - -class Server(object): - """Adapter from old _low.Server interface to new 
_low.Server.""" - - def __init__(self, completion_queue): - self._internal = _low.Server(completion_queue._internal, []) - self._internal_cq = completion_queue._internal - - def add_http2_addr(self, addr): - return self._internal.add_http2_port(addr) - - def add_secure_http2_addr(self, addr, server_credentials): - if server_credentials is None: - return self._internal.add_http2_port(addr, None) - else: - return self._internal.add_http2_port(addr, server_credentials) - - def start(self): - return self._internal.start() - - def service(self, tag): - return self._internal.request_call(self._internal_cq, _TagAdapter(tag, Event.Kind.SERVICE_ACCEPTED)) - - def cancel_all_calls(self): - self._internal.cancel_all_calls() - - def stop(self): - return self._internal.shutdown(_TagAdapter(None, Event.Kind.STOP)) - diff --git a/src/python/grpcio/grpc/_adapter/_low.py b/src/python/grpcio/grpc/_adapter/_low.py deleted file mode 100644 index 48410167a0..0000000000 --- a/src/python/grpcio/grpc/_adapter/_low.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import threading - -from grpc import _grpcio_metadata -from grpc import _plugin_wrapping -from grpc._cython import cygrpc -from grpc._adapter import _types - -_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__) - -ChannelCredentials = cygrpc.ChannelCredentials -CallCredentials = cygrpc.CallCredentials -ServerCredentials = cygrpc.ServerCredentials - -channel_credentials_composite = cygrpc.channel_credentials_composite -call_credentials_composite = cygrpc.call_credentials_composite - -def server_credentials_ssl(root_credentials, pair_sequence, force_client_auth): - return cygrpc.server_credentials_ssl( - root_credentials, - [cygrpc.SslPemKeyCertPair(key, pem) for key, pem in pair_sequence], - force_client_auth) - -def channel_credentials_ssl( - root_certificates, private_key, certificate_chain): - pair = None - if private_key is not None or certificate_chain is not None: - pair = cygrpc.SslPemKeyCertPair(private_key, certificate_chain) - return cygrpc.channel_credentials_ssl(root_certificates, pair) - - -call_credentials_metadata_plugin = ( - _plugin_wrapping.call_credentials_metadata_plugin) - - -class CompletionQueue(_types.CompletionQueue): - - def __init__(self): - self.completion_queue = cygrpc.CompletionQueue() - - def next(self, deadline=float('+inf')): - raw_event = self.completion_queue.poll(cygrpc.Timespec(deadline)) - if raw_event.type == cygrpc.CompletionType.queue_timeout: - return None - event_type = raw_event.type - event_tag = raw_event.tag - event_call = Call(raw_event.operation_call) - if raw_event.request_call_details: - event_call_details = _types.CallDetails( - raw_event.request_call_details.method, - raw_event.request_call_details.host, - float(raw_event.request_call_details.deadline)) - else: - event_call_details = None - event_success = raw_event.success - event_results = [] - if raw_event.is_new_request: - event_results.append(_types.OpResult( - _types.OpType.RECV_INITIAL_METADATA, raw_event.request_metadata, - None, None, None, None)) - else: - if raw_event.batch_operations: - for operation in raw_event.batch_operations: - result_type = operation.type - result_initial_metadata = operation.received_metadata_or_none - result_trailing_metadata = operation.received_metadata_or_none - result_message = operation.received_message_or_none - if result_message is not None: - result_message = result_message.bytes() - result_cancelled = operation.received_cancelled_or_none - if operation.has_status: - result_status = _types.Status( - operation.received_status_code_or_none, - operation.received_status_details_or_none) - else: - result_status = None - event_results.append( - _types.OpResult(result_type, result_initial_metadata, - result_trailing_metadata, result_message, - result_status, result_cancelled)) - return _types.Event(event_type, event_tag, event_call, event_call_details, - event_results, event_success) - - def shutdown(self): - self.completion_queue.shutdown() - - -class Call(_types.Call): - - def __init__(self, call): - self.call = call - - def start_batch(self, ops, tag): - translated_ops = [] - for op in ops: - if op.type == _types.OpType.SEND_INITIAL_METADATA: - translated_op = cygrpc.operation_send_initial_metadata( - cygrpc.Metadata( - cygrpc.Metadatum(key, value) - for key, value in op.initial_metadata), - op.flags) - elif op.type == _types.OpType.SEND_MESSAGE: - translated_op = cygrpc.operation_send_message(op.message, op.flags) - elif op.type == _types.OpType.SEND_CLOSE_FROM_CLIENT: - translated_op = 
cygrpc.operation_send_close_from_client(op.flags) - elif op.type == _types.OpType.SEND_STATUS_FROM_SERVER: - translated_op = cygrpc.operation_send_status_from_server( - cygrpc.Metadata( - cygrpc.Metadatum(key, value) - for key, value in op.trailing_metadata), - op.status.code, - op.status.details, - op.flags) - elif op.type == _types.OpType.RECV_INITIAL_METADATA: - translated_op = cygrpc.operation_receive_initial_metadata( - op.flags) - elif op.type == _types.OpType.RECV_MESSAGE: - translated_op = cygrpc.operation_receive_message(op.flags) - elif op.type == _types.OpType.RECV_STATUS_ON_CLIENT: - translated_op = cygrpc.operation_receive_status_on_client( - op.flags) - elif op.type == _types.OpType.RECV_CLOSE_ON_SERVER: - translated_op = cygrpc.operation_receive_close_on_server(op.flags) - else: - raise ValueError('unexpected operation type {}'.format(op.type)) - translated_ops.append(translated_op) - return self.call.start_batch(cygrpc.Operations(translated_ops), tag) - - def cancel(self, code=None, details=None): - if code is None and details is None: - return self.call.cancel() - else: - return self.call.cancel(code, details) - - def peer(self): - return self.call.peer() - - def set_credentials(self, creds): - return self.call.set_credentials(creds) - - -class Channel(_types.Channel): - - def __init__(self, target, args, creds=None): - args = list(args) + [ - (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)] - args = cygrpc.ChannelArgs( - cygrpc.ChannelArg(key, value) for key, value in args) - if creds is None: - self.channel = cygrpc.Channel(target, args) - else: - self.channel = cygrpc.Channel(target, args, creds) - - def create_call(self, completion_queue, method, host, deadline=None): - internal_call = self.channel.create_call( - None, 0, completion_queue.completion_queue, method, host, - cygrpc.Timespec(deadline)) - return Call(internal_call) - - def check_connectivity_state(self, try_to_connect): - return self.channel.check_connectivity_state(try_to_connect) - - def watch_connectivity_state(self, last_observed_state, deadline, - completion_queue, tag): - self.channel.watch_connectivity_state( - last_observed_state, cygrpc.Timespec(deadline), - completion_queue.completion_queue, tag) - - def target(self): - return self.channel.target() - - -_NO_TAG = object() - -class Server(_types.Server): - - def __init__(self, completion_queue, args): - args = cygrpc.ChannelArgs( - cygrpc.ChannelArg(key, value) for key, value in args) - self.server = cygrpc.Server(args) - self.server.register_completion_queue(completion_queue.completion_queue) - self.server_queue = completion_queue - - def add_http2_port(self, addr, creds=None): - if creds is None: - return self.server.add_http2_port(addr) - else: - return self.server.add_http2_port(addr, creds) - - def start(self): - return self.server.start() - - def shutdown(self, tag=None): - return self.server.shutdown(self.server_queue.completion_queue, tag) - - def request_call(self, completion_queue, tag): - return self.server.request_call(completion_queue.completion_queue, - self.server_queue.completion_queue, tag) - - def cancel_all_calls(self): - return self.server.cancel_all_calls() diff --git a/src/python/grpcio/grpc/_adapter/_types.py b/src/python/grpcio/grpc/_adapter/_types.py deleted file mode 100644 index f8405949d4..0000000000 --- a/src/python/grpcio/grpc/_adapter/_types.py +++ /dev/null @@ -1,446 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import abc -import collections -import enum - -import six - -from grpc._cython import cygrpc - - -class GrpcChannelArgumentKeys(enum.Enum): - """Mirrors keys used in grpc_channel_args for GRPC-specific arguments.""" - SSL_TARGET_NAME_OVERRIDE = 'grpc.ssl_target_name_override' - - -@enum.unique -class CallError(enum.IntEnum): - """Mirrors grpc_call_error in the C core.""" - OK = cygrpc.CallError.ok - ERROR = cygrpc.CallError.error - ERROR_NOT_ON_SERVER = cygrpc.CallError.not_on_server - ERROR_NOT_ON_CLIENT = cygrpc.CallError.not_on_client - ERROR_ALREADY_ACCEPTED = cygrpc.CallError.already_accepted - ERROR_ALREADY_INVOKED = cygrpc.CallError.already_invoked - ERROR_NOT_INVOKED = cygrpc.CallError.not_invoked - ERROR_ALREADY_FINISHED = cygrpc.CallError.already_finished - ERROR_TOO_MANY_OPERATIONS = cygrpc.CallError.too_many_operations - ERROR_INVALID_FLAGS = cygrpc.CallError.invalid_flags - ERROR_INVALID_METADATA = cygrpc.CallError.invalid_metadata - - -@enum.unique -class StatusCode(enum.IntEnum): - """Mirrors grpc_status_code in the C core.""" - OK = cygrpc.StatusCode.ok - CANCELLED = cygrpc.StatusCode.cancelled - UNKNOWN = cygrpc.StatusCode.unknown - INVALID_ARGUMENT = cygrpc.StatusCode.invalid_argument - DEADLINE_EXCEEDED = cygrpc.StatusCode.deadline_exceeded - NOT_FOUND = cygrpc.StatusCode.not_found - ALREADY_EXISTS = cygrpc.StatusCode.already_exists - PERMISSION_DENIED = cygrpc.StatusCode.permission_denied - RESOURCE_EXHAUSTED = cygrpc.StatusCode.resource_exhausted - FAILED_PRECONDITION = cygrpc.StatusCode.failed_precondition - ABORTED = cygrpc.StatusCode.aborted - OUT_OF_RANGE = cygrpc.StatusCode.out_of_range - UNIMPLEMENTED = cygrpc.StatusCode.unimplemented - INTERNAL = cygrpc.StatusCode.internal - UNAVAILABLE = cygrpc.StatusCode.unavailable - DATA_LOSS = cygrpc.StatusCode.data_loss - UNAUTHENTICATED = cygrpc.StatusCode.unauthenticated - - -@enum.unique -class OpWriteFlags(enum.IntEnum): - """Mirrors defined write-flag constants in the C core.""" - WRITE_BUFFER_HINT = cygrpc.WriteFlag.buffer_hint - WRITE_NO_COMPRESS = 
cygrpc.WriteFlag.no_compress - - -@enum.unique -class OpType(enum.IntEnum): - """Mirrors grpc_op_type in the C core.""" - SEND_INITIAL_METADATA = cygrpc.OperationType.send_initial_metadata - SEND_MESSAGE = cygrpc.OperationType.send_message - SEND_CLOSE_FROM_CLIENT = cygrpc.OperationType.send_close_from_client - SEND_STATUS_FROM_SERVER = cygrpc.OperationType.send_status_from_server - RECV_INITIAL_METADATA = cygrpc.OperationType.receive_initial_metadata - RECV_MESSAGE = cygrpc.OperationType.receive_message - RECV_STATUS_ON_CLIENT = cygrpc.OperationType.receive_status_on_client - RECV_CLOSE_ON_SERVER = cygrpc.OperationType.receive_close_on_server - - -@enum.unique -class EventType(enum.IntEnum): - """Mirrors grpc_completion_type in the C core.""" - QUEUE_SHUTDOWN = cygrpc.CompletionType.queue_shutdown - QUEUE_TIMEOUT = cygrpc.CompletionType.queue_timeout - OP_COMPLETE = cygrpc.CompletionType.operation_complete - - -@enum.unique -class ConnectivityState(enum.IntEnum): - """Mirrors grpc_connectivity_state in the C core.""" - IDLE = cygrpc.ConnectivityState.idle - CONNECTING = cygrpc.ConnectivityState.connecting - READY = cygrpc.ConnectivityState.ready - TRANSIENT_FAILURE = cygrpc.ConnectivityState.transient_failure - FATAL_FAILURE = cygrpc.ConnectivityState.fatal_failure - - -class Status(collections.namedtuple( - 'Status', [ - 'code', - 'details', - ])): - """The end status of a GRPC call. - - Attributes: - code (StatusCode): ... - details (str): ... - """ - - -class CallDetails(collections.namedtuple( - 'CallDetails', [ - 'method', - 'host', - 'deadline', - ])): - """Provides information to the server about the client's call. - - Attributes: - method (str): ... - host (str): ... - deadline (float): ... - """ - - -class OpArgs(collections.namedtuple( - 'OpArgs', [ - 'type', - 'initial_metadata', - 'trailing_metadata', - 'message', - 'status', - 'flags', - ])): - """Arguments passed into a GRPC operation. - - Attributes: - type (OpType): ... - initial_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.SEND_INITIAL_METADATA, else is None. - trailing_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.SEND_STATUS_FROM_SERVER, else is None. - message (bytes): Only valid if type == OpType.SEND_MESSAGE, else is None. - status (Status): Only valid if type == OpType.SEND_STATUS_FROM_SERVER, else - is None. - flags (int): a bitwise OR'ing of 0 or more OpWriteFlags values. 
- """ - - @staticmethod - def send_initial_metadata(initial_metadata): - return OpArgs(OpType.SEND_INITIAL_METADATA, initial_metadata, None, None, None, 0) - - @staticmethod - def send_message(message, flags): - return OpArgs(OpType.SEND_MESSAGE, None, None, message, None, flags) - - @staticmethod - def send_close_from_client(): - return OpArgs(OpType.SEND_CLOSE_FROM_CLIENT, None, None, None, None, 0) - - @staticmethod - def send_status_from_server(trailing_metadata, status_code, status_details): - return OpArgs(OpType.SEND_STATUS_FROM_SERVER, None, trailing_metadata, None, Status(status_code, status_details), 0) - - @staticmethod - def recv_initial_metadata(): - return OpArgs(OpType.RECV_INITIAL_METADATA, None, None, None, None, 0); - - @staticmethod - def recv_message(): - return OpArgs(OpType.RECV_MESSAGE, None, None, None, None, 0) - - @staticmethod - def recv_status_on_client(): - return OpArgs(OpType.RECV_STATUS_ON_CLIENT, None, None, None, None, 0) - - @staticmethod - def recv_close_on_server(): - return OpArgs(OpType.RECV_CLOSE_ON_SERVER, None, None, None, None, 0) - - -class OpResult(collections.namedtuple( - 'OpResult', [ - 'type', - 'initial_metadata', - 'trailing_metadata', - 'message', - 'status', - 'cancelled', - ])): - """Results received from a GRPC operation. - - Attributes: - type (OpType): ... - initial_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.RECV_INITIAL_METADATA, else is None. - trailing_metadata (sequence of 2-sequence of str): Only valid if type == - OpType.RECV_STATUS_ON_CLIENT, else is None. - message (bytes): Only valid if type == OpType.RECV_MESSAGE, else is None. - status (Status): Only valid if type == OpType.RECV_STATUS_ON_CLIENT, else - is None. - cancelled (bool): Only valid if type == OpType.RECV_CLOSE_ON_SERVER, else - is None. - """ - - -class Event(collections.namedtuple( - 'Event', [ - 'type', - 'tag', - 'call', - 'call_details', - 'results', - 'success', - ])): - """An event received from a GRPC completion queue. - - Attributes: - type (EventType): ... - tag (object): ... - call (Call): The Call object associated with this event (if there is one, - else None). - call_details (CallDetails): The call details associated with the - server-side call (if there is such information, else None). - results (list of OpResult): ... - success (bool): ... - """ - - -class CompletionQueue(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def __init__(self): - pass - - def __iter__(self): - """This class may be iterated over. - - This is the equivalent of calling next() repeatedly with an absolute - deadline of None (i.e. no deadline). - """ - return self - - def __next__(self): - return self.next() - - @abc.abstractmethod - def next(self, deadline=float('+inf')): - """Get the next event on this completion queue. - - Args: - deadline (float): absolute deadline in seconds from the Python epoch, or - None for no deadline. - - Returns: - Event: ... - """ - pass - - @abc.abstractmethod - def shutdown(self): - """Begin the shutdown process of this completion queue. - - Note that this does not immediately destroy the completion queue. - Nevertheless, user code should not pass it around after invoking this. - """ - return None - - -class Call(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def start_batch(self, ops, tag): - """Start a batch of operations. - - Args: - ops (sequence of OpArgs): ... - tag (object): ... - - Returns: - CallError: ... 
- """ - return CallError.ERROR - - @abc.abstractmethod - def cancel(self, code=None, details=None): - """Cancel the call. - - Args: - code (int): Status code to cancel with (on the server side). If - specified, so must `details`. - details (str): Status details to cancel with (on the server side). If - specified, so must `code`. - - Returns: - CallError: ... - """ - return CallError.ERROR - - @abc.abstractmethod - def peer(self): - """Get the peer of this call. - - Returns: - str: the peer of this call. - """ - return None - - def set_credentials(self, creds): - """Set per-call credentials. - - Args: - creds (CallCredentials): Credentials to be set for this call. - """ - return None - - -class Channel(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def __init__(self, target, args, credentials=None): - """Initialize a Channel. - - Args: - target (str): ... - args (sequence of 2-sequence of str, (str|integer)): ... - credentials (ChannelCredentials): If None, create an insecure channel, - else create a secure channel using the client credentials. - """ - - @abc.abstractmethod - def create_call(self, completion_queue, method, host, deadline=float('+inf')): - """Create a call from this channel. - - Args: - completion_queue (CompletionQueue): ... - method (str): ... - host (str): ... - deadline (float): absolute deadline in seconds from the Python epoch, or - None for no deadline. - - Returns: - Call: call object associated with this Channel and passed parameters. - """ - return None - - @abc.abstractmethod - def check_connectivity_state(self, try_to_connect): - """Check and optionally repair the connectivity state of the channel. - - Args: - try_to_connect (bool): whether or not to try to connect the channel if - disconnected. - - Returns: - ConnectivityState: state of the channel at the time of this invocation. - """ - return None - - @abc.abstractmethod - def watch_connectivity_state(self, last_observed_state, deadline, - completion_queue, tag): - """Watch for connectivity state changes from the last_observed_state. - - Args: - last_observed_state (ConnectivityState): ... - deadline (float): ... - completion_queue (CompletionQueue): ... - tag (object) ... - """ - - @abc.abstractmethod - def target(self): - """Get the target of this channel. - - Returns: - str: the target of this channel. - """ - return None - - -class Server(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def __init__(self, completion_queue, args): - """Initialize a server. - - Args: - completion_queue (CompletionQueue): ... - args (sequence of 2-sequence of str, (str|integer)): ... - """ - - @abc.abstractmethod - def add_http2_port(self, address, credentials=None): - """Adds an HTTP/2 address+port to the server. - - Args: - address (str): ... - credentials (ServerCredentials): If None, create an insecure port, else - create a secure port using the server credentials. - """ - - @abc.abstractmethod - def start(self): - """Starts the server.""" - - @abc.abstractmethod - def shutdown(self, tag=None): - """Shuts down the server. Does not immediately destroy the server. - - Args: - tag (object): if not None, have the server place an event on its - completion queue notifying it when this server has completely shut down. - """ - - @abc.abstractmethod - def request_call(self, completion_queue, tag): - """Requests a call from the server on the server's completion queue. - - Args: - completion_queue (CompletionQueue): Completion queue for the call. May be - the same as the server's completion queue. 
- tag (object) ... - """ diff --git a/src/python/grpcio/grpc/_auth.py b/src/python/grpcio/grpc/_auth.py index 3ae00ca23a..dea3221c9d 100644 --- a/src/python/grpcio/grpc/_auth.py +++ b/src/python/grpcio/grpc/_auth.py @@ -29,6 +29,7 @@ """GRPCAuthMetadataPlugins for standard authentication.""" +import inspect from concurrent import futures import grpc @@ -46,9 +47,21 @@ class GoogleCallCredentials(grpc.AuthMetadataPlugin): self._credentials = credentials self._pool = futures.ThreadPoolExecutor(max_workers=1) + # Hack to determine if these are JWT creds and we need to pass + # additional_claims when getting a token + if 'additional_claims' in inspect.getargspec( + credentials.get_access_token).args: + self._is_jwt = True + else: + self._is_jwt = False + def __call__(self, context, callback): # MetadataPlugins cannot block (see grpc.beta.interfaces.py) - future = self._pool.submit(self._credentials.get_access_token) + if self._is_jwt: + future = self._pool.submit(self._credentials.get_access_token, + additional_claims={'aud': context.service_url}) + else: + future = self._pool.submit(self._credentials.get_access_token) future.add_done_callback(lambda x: self._get_token_callback(callback, x)) def _get_token_callback(self, callback, future): diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py index d9eb5a4b77..29dbc3a668 100644 --- a/src/python/grpcio/grpc/_channel.py +++ b/src/python/grpcio/grpc/_channel.py @@ -85,7 +85,7 @@ def _deadline(timeout): def _unknown_code_details(unknown_cygrpc_code, details): - return b'Server sent unknown code {} and details "{}"'.format( + return 'Server sent unknown code {} and details "{}"'.format( unknown_cygrpc_code, details) @@ -134,19 +134,19 @@ def _handle_event(event, state, response_deserializer): for batch_operation in event.batch_operations: operation_type = batch_operation.type state.due.remove(operation_type) - if operation_type is cygrpc.OperationType.receive_initial_metadata: + if operation_type == cygrpc.OperationType.receive_initial_metadata: state.initial_metadata = batch_operation.received_metadata - elif operation_type is cygrpc.OperationType.receive_message: + elif operation_type == cygrpc.OperationType.receive_message: serialized_response = batch_operation.received_message.bytes() if serialized_response is not None: response = _common.deserialize( serialized_response, response_deserializer) if response is None: - details = b'Exception deserializing response!' + details = 'Exception deserializing response!' _abort(state, grpc.StatusCode.INTERNAL, details) else: state.response = response - elif operation_type is cygrpc.OperationType.receive_status_on_client: + elif operation_type == cygrpc.OperationType.receive_status_on_client: state.trailing_metadata = batch_operation.received_metadata if state.code is None: code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get( @@ -179,6 +179,7 @@ def _event_handler(state, call, response_deserializer): def _consume_request_iterator( request_iterator, state, call, request_serializer): event_handler = _event_handler(state, call, None) + def consume_request_iterator(): for request in request_iterator: serialized_request = _common.serialize(request, request_serializer) @@ -186,7 +187,7 @@ def _consume_request_iterator( if state.code is None and not state.cancelled: if serialized_request is None: call.cancel() - details = b'Exception serializing request!' + details = 'Exception serializing request!' 
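[Editor's aside] The grpc/_auth.py change above special-cases JWT credentials by inspecting whether the credentials object's get_access_token() accepts an additional_claims argument. The following is a minimal standalone sketch of that signature check only; FakeJwtCredentials is a hypothetical stand-in added solely to make the snippet runnable and is not part of the diff.

    import inspect

    class FakeJwtCredentials(object):
        # Hypothetical stand-in for a JWT-style credentials object.
        def get_access_token(self, additional_claims=None):
            return 'token-with-claims'

    def is_jwt(credentials):
        # Same signature probe as the GoogleCallCredentials.__init__ hack above.
        return 'additional_claims' in inspect.getargspec(
            credentials.get_access_token).args

    print(is_jwt(FakeJwtCredentials()))  # True

inspect.getargspec is what the 2016-era code relies on; on modern Python one would reach for inspect.signature instead.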
_abort(state, grpc.StatusCode.INTERNAL, details) return else: @@ -194,7 +195,8 @@ def _consume_request_iterator( cygrpc.operation_send_message( serialized_request, _EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), + event_handler) state.due.add(cygrpc.OperationType.send_message) while True: state.condition.wait() @@ -210,10 +212,20 @@ def _consume_request_iterator( operations = ( cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) state.due.add(cygrpc.OperationType.send_close_from_client) - thread = threading.Thread(target=consume_request_iterator) - thread.start() + + def stop_consumption_thread(timeout): + with state.condition: + if state.code is None: + call.cancel() + state.cancelled = True + _abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!') + state.condition.notify_all() + + consumption_thread = _common.CleanupThread( + stop_consumption_thread, target=consume_request_iterator) + consumption_thread.start() class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): @@ -230,7 +242,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): if self._state.code is None: self._call.cancel() self._state.cancelled = True - _abort(self._state, grpc.StatusCode.CANCELLED, b'Cancelled!') + _abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!') self._state.condition.notify_all() return False @@ -301,7 +313,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): if self._state.code is None: event_handler = _event_handler( self._state, self._call, self._response_deserializer) - self._call.start_batch( + self._call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_message(_EMPTY_FLAGS),)), event_handler) @@ -353,13 +365,13 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): with self._state.condition: while self._state.initial_metadata is None: self._state.condition.wait() - return self._state.initial_metadata + return _common.application_metadata(self._state.initial_metadata) def trailing_metadata(self): with self._state.condition: while self._state.trailing_metadata is None: self._state.condition.wait() - return self._state.trailing_metadata + return _common.application_metadata(self._state.trailing_metadata) def code(self): with self._state.condition: @@ -371,7 +383,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): with self._state.condition: while self._state.details is None: self._state.condition.wait() - return self._state.details + return _common.decode(self._state.details) def _repr(self): with self._state.condition: @@ -379,7 +391,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): return '<_Rendezvous object of in-flight RPC>' else: return '<_Rendezvous of RPC that terminated with ({}, {})>'.format( - self._state.code, self._state.details) + self._state.code, _common.decode(self._state.details)) def __repr__(self): return self._repr() @@ -402,7 +414,7 @@ def _start_unary_request(request, timeout, request_serializer): if serialized_request is None: state = _RPCState( (), _EMPTY_METADATA, _EMPTY_METADATA, grpc.StatusCode.INTERNAL, - b'Exception serializing request!') + 'Exception serializing request!') rendezvous = _Rendezvous(state, None, None, deadline) return deadline, deadline_timespec, None, rendezvous else: @@ -440,7 +452,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): state 
= _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None) operations = ( cygrpc.operation_send_initial_metadata( - _common.metadata(metadata), _EMPTY_FLAGS), + _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS), cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS), @@ -449,9 +461,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): ) return state, operations, deadline, deadline_timespec, None - def __call__( - self, request, timeout=None, metadata=None, credentials=None, - with_call=False): + def _blocking(self, request, timeout, metadata, credentials): state, operations, deadline, deadline_timespec, rendezvous = self._prepare( request, timeout, metadata) if rendezvous: @@ -462,9 +472,17 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): None, 0, completion_queue, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) - call.start_batch(cygrpc.Operations(operations), None) + call.start_client_batch(cygrpc.Operations(operations), None) _handle_event(completion_queue.poll(), state, self._response_deserializer) - return _end_unary_response_blocking(state, with_call, deadline) + return state, deadline + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + state, deadline, = self._blocking(request, timeout, metadata, credentials) + return _end_unary_response_blocking(state, False, deadline) + + def with_call(self, request, timeout=None, metadata=None, credentials=None): + state, deadline, = self._blocking(request, timeout, metadata, credentials) + return _end_unary_response_blocking(state, True, deadline) def future(self, request, timeout=None, metadata=None, credentials=None): state, operations, deadline, deadline_timespec, rendezvous = self._prepare( @@ -478,7 +496,7 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -506,18 +524,18 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), event_handler) operations = ( cygrpc.operation_send_initial_metadata( - _common.metadata(metadata), _EMPTY_FLAGS), + _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS), cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -532,9 +550,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): self._request_serializer = request_serializer self._response_deserializer = response_deserializer - def __call__( - self, request_iterator, timeout=None, metadata=None, credentials=None, - with_call=False): + def 
_blocking(self, request_iterator, timeout, metadata, credentials): deadline, deadline_timespec = _deadline(timeout) state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None) completion_queue = cygrpc.CompletionQueue() @@ -543,17 +559,17 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): if credentials is not None: call.set_credentials(credentials._credentials) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), None) operations = ( cygrpc.operation_send_initial_metadata( - _common.metadata(metadata), _EMPTY_FLAGS), + _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), None) + call.start_client_batch(cygrpc.Operations(operations), None) _consume_request_iterator( request_iterator, state, call, self._request_serializer) while True: @@ -563,7 +579,19 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): state.condition.notify_all() if not state.due: break - return _end_unary_response_blocking(state, with_call, deadline) + return state, deadline + + def __call__( + self, request_iterator, timeout=None, metadata=None, credentials=None): + state, deadline, = self._blocking( + request_iterator, timeout, metadata, credentials) + return _end_unary_response_blocking(state, False, deadline) + + def with_call( + self, request_iterator, timeout=None, metadata=None, credentials=None): + state, deadline, = self._blocking( + request_iterator, timeout, metadata, credentials) + return _end_unary_response_blocking(state, True, deadline) def future( self, request_iterator, timeout=None, metadata=None, credentials=None): @@ -575,17 +603,17 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), event_handler) operations = ( cygrpc.operation_send_initial_metadata( - _common.metadata(metadata), _EMPTY_FLAGS), + _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) _consume_request_iterator( request_iterator, state, call, self._request_serializer) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -612,16 +640,16 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_batch( + call.start_client_batch( cygrpc.Operations( (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)), event_handler) operations = ( cygrpc.operation_send_initial_metadata( - _common.metadata(metadata), _EMPTY_FLAGS), + _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_batch(cygrpc.Operations(operations), event_handler) + call.start_client_batch(cygrpc.Operations(operations), event_handler) _consume_request_iterator( request_iterator, state, call, self._request_serializer) return 
_Rendezvous(state, call, self._response_deserializer, deadline) @@ -636,16 +664,27 @@ class _ChannelCallState(object): self.managed_calls = None -def _call_spin(state): - while True: - event = state.completion_queue.poll() - completed_call = event.tag(event) - if completed_call is not None: - with state.lock: - state.managed_calls.remove(completed_call) - if not state.managed_calls: - state.managed_calls = None - return +def _run_channel_spin_thread(state): + def channel_spin(): + while True: + event = state.completion_queue.poll() + completed_call = event.tag(event) + if completed_call is not None: + with state.lock: + state.managed_calls.remove(completed_call) + if not state.managed_calls: + state.managed_calls = None + return + + def stop_channel_spin(timeout): + with state.lock: + if state.managed_calls is not None: + for call in state.managed_calls: + call.cancel() + + channel_spin_thread = _common.CleanupThread( + stop_channel_spin, target=channel_spin) + channel_spin_thread.start() def _create_channel_managed_call(state): @@ -674,8 +713,7 @@ def _create_channel_managed_call(state): parent, flags, state.completion_queue, method, host, deadline) if state.managed_calls is None: state.managed_calls = set((call,)) - spin_thread = threading.Thread(target=_call_spin, args=(state,)) - spin_thread.start() + _run_channel_spin_thread(state) else: state.managed_calls.add(call) return call @@ -768,11 +806,18 @@ def _poll_connectivity(state, channel, initial_try_to_connect): _spawn_delivery(state, callbacks) +def _moot(state): + with state.lock: + del state.callbacks_and_connectivities[:] + + def _subscribe(state, callback, try_to_connect): with state.lock: if not state.callbacks_and_connectivities and not state.polling: - polling_thread = threading.Thread( - target=_poll_connectivity, + def cancel_all_subscriptions(timeout): + _moot(state) + polling_thread = _common.CleanupThread( + cancel_all_subscriptions, target=_poll_connectivity, args=(state, state.channel, bool(try_to_connect))) polling_thread.start() state.polling = True @@ -796,25 +841,33 @@ def _unsubscribe(state, callback): break -def _moot(state): - with state.lock: - del state.callbacks_and_connectivities[:] - - def _options(options): if options is None: pairs = ((cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT),) else: pairs = list(options) + [ (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)] - return cygrpc.ChannelArgs( - cygrpc.ChannelArg(arg_name, arg_value) for arg_name, arg_value in pairs) + encoded_pairs = [ + (_common.encode(arg_name), arg_value) if isinstance(arg_value, int) + else (_common.encode(arg_name), _common.encode(arg_value)) + for arg_name, arg_value in pairs] + return cygrpc.ChannelArgs([ + cygrpc.ChannelArg(arg_name, arg_value) + for arg_name, arg_value in encoded_pairs]) class Channel(grpc.Channel): def __init__(self, target, options, credentials): - self._channel = cygrpc.Channel(target, _options(options), credentials) + """Constructor. + + Args: + target: The target to which to connect. + options: Configuration options for the channel. + credentials: A cygrpc.ChannelCredentials or None. 
+ """ + self._channel = cygrpc.Channel( + _common.encode(target), _options(options), credentials) self._call_state = _ChannelCallState(self._channel) self._connectivity_state = _ChannelConnectivityState(self._channel) @@ -827,26 +880,26 @@ class Channel(grpc.Channel): def unary_unary( self, method, request_serializer=None, response_deserializer=None): return _UnaryUnaryMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), method, - request_serializer, response_deserializer) + self._channel, _create_channel_managed_call(self._call_state), + _common.encode(method), request_serializer, response_deserializer) def unary_stream( self, method, request_serializer=None, response_deserializer=None): return _UnaryStreamMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), method, - request_serializer, response_deserializer) + self._channel, _create_channel_managed_call(self._call_state), + _common.encode(method), request_serializer, response_deserializer) def stream_unary( self, method, request_serializer=None, response_deserializer=None): return _StreamUnaryMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), method, - request_serializer, response_deserializer) + self._channel, _create_channel_managed_call(self._call_state), + _common.encode(method), request_serializer, response_deserializer) def stream_stream( self, method, request_serializer=None, response_deserializer=None): return _StreamStreamMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), method, - request_serializer, response_deserializer) + self._channel, _create_channel_managed_call(self._call_state), + _common.encode(method), request_serializer, response_deserializer) def __del__(self): _moot(self._connectivity_state) diff --git a/src/python/grpcio/grpc/_common.py b/src/python/grpcio/grpc/_common.py index a3fb66cd07..4d7d521419 100644 --- a/src/python/grpcio/grpc/_common.py +++ b/src/python/grpcio/grpc/_common.py @@ -30,6 +30,8 @@ """Shared implementation.""" import logging +import threading +import time import six @@ -44,8 +46,8 @@ CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = { cygrpc.ConnectivityState.ready: grpc.ChannelConnectivity.READY, cygrpc.ConnectivityState.transient_failure: grpc.ChannelConnectivity.TRANSIENT_FAILURE, - cygrpc.ConnectivityState.fatal_failure: - grpc.ChannelConnectivity.FATAL_FAILURE, + cygrpc.ConnectivityState.shutdown: + grpc.ChannelConnectivity.SHUTDOWN, } CYGRPC_STATUS_CODE_TO_STATUS_CODE = { @@ -74,9 +76,37 @@ STATUS_CODE_TO_CYGRPC_STATUS_CODE = { } -def metadata(application_metadata): +def encode(s): + if isinstance(s, bytes): + return s + else: + return s.encode('ascii') + + +def decode(b): + if isinstance(b, str): + return b + else: + try: + return b.decode('utf8') + except UnicodeDecodeError: + logging.exception('Invalid encoding on {}'.format(b)) + return b.decode('latin1') + + +def cygrpc_metadata(application_metadata): return _EMPTY_METADATA if application_metadata is None else cygrpc.Metadata( - cygrpc.Metadatum(key, value) for key, value in application_metadata) + cygrpc.Metadatum(encode(key), encode(value)) + for key, value in application_metadata) + + +def application_metadata(cygrpc_metadata): + if cygrpc_metadata is None: + return () + else: + return tuple( + (decode(key), value if key[-4:] == b'-bin' else decode(value)) + for key, value in cygrpc_metadata) def _transform(message, transformer, exception_message): @@ -97,3 +127,47 @@ def serialize(message, serializer): def 
deserialize(serialized_message, deserializer): return _transform(serialized_message, deserializer, 'Exception deserializing message!') + + +def fully_qualified_method(group, method): + return '/{}/{}'.format(group, method) + + +class CleanupThread(threading.Thread): + """A threading.Thread subclass supporting custom behavior on join(). + + On Python Interpreter exit, Python will attempt to join outstanding threads + prior to garbage collection. We may need to do additional cleanup, and + we accomplish this by overriding the join() method. + """ + + def __init__(self, behavior, group=None, target=None, name=None, + args=(), kwargs={}): + """Constructor. + + Args: + behavior (function): Function called on join() with a single + argument, timeout, indicating the maximum duration of + `behavior`, or None indicating `behavior` has no deadline. + `behavior` must be idempotent. + group (None): should be None. Reseved for future extensions + when ThreadGroup is implemented. + target (function): The function to invoke when this thread is + run. Defaults to None. + name (str): The name of this thread. Defaults to None. + args (tuple[object]): A tuple of arguments to pass to `target`. + kwargs (dict[str,object]): A dictionary of keyword arguments to + pass to `target`. + """ + super(CleanupThread, self).__init__(group=group, target=target, + name=name, args=args, kwargs=kwargs) + self._behavior = behavior + + def join(self, timeout=None): + start_time = time.time() + self._behavior(timeout) + end_time = time.time() + if timeout is not None: + timeout -= end_time - start_time + timeout = max(timeout, 0) + super(CleanupThread, self).join(timeout) diff --git a/src/python/grpcio/tests/unit/_cython/test_utilities.py b/src/python/grpcio/grpc/_credential_composition.py index 6a09739643..9cb5508e27 100644 --- a/src/python/grpcio/tests/unit/_cython/test_utilities.py +++ b/src/python/grpcio/grpc/_credential_composition.py @@ -1,4 +1,4 @@ -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,20 +27,22 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
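[Editor's aside] The CleanupThread helper added to grpc/_common.py above runs a caller-supplied cleanup behavior before joining, so that interpreter shutdown (which joins outstanding threads) can first unblock the worker. Below is a self-contained sketch of the same pattern using only the standard library; CleanupThreadSketch and the Event-based worker are illustrative, not library code.

    import threading
    import time

    class CleanupThreadSketch(threading.Thread):
        def __init__(self, behavior, target):
            super(CleanupThreadSketch, self).__init__(target=target)
            self._behavior = behavior

        def join(self, timeout=None):
            start = time.time()
            self._behavior(timeout)  # e.g. cancel an RPC or set a stop flag
            if timeout is not None:
                timeout = max(timeout - (time.time() - start), 0)
            super(CleanupThreadSketch, self).join(timeout)

    stop = threading.Event()
    t = CleanupThreadSketch(lambda timeout: stop.set(),
                            target=lambda: stop.wait(10))
    t.start()
    t.join()  # returns promptly: the cleanup behavior unblocks the worker

The time accounting mirrors the _common.py version: whatever the behavior consumes is subtracted from the timeout passed to the underlying join().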
-import threading - from grpc._cython import cygrpc -class CompletionQueuePollFuture: +def _call(call_credentialses): + call_credentials_iterator = iter(call_credentialses) + composition = next(call_credentials_iterator) + for additional_call_credentials in call_credentials_iterator: + composition = cygrpc.call_credentials_composite( + composition, additional_call_credentials) + return composition + + +def call(call_credentialses): + return _call(call_credentialses) - def __init__(self, completion_queue, deadline): - def poller_function(): - self._event_result = completion_queue.poll(deadline) - self._event_result = None - self._thread = threading.Thread(target=poller_function) - self._thread.start() - def result(self): - self._thread.join() - return self._event_result +def channel(channel_credentials, call_credentialses): + return cygrpc.channel_credentials_composite( + channel_credentials, _call(call_credentialses)) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi index 1bfe6344e0..ba60986143 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi @@ -37,13 +37,16 @@ cdef class Call: self.c_call = NULL self.references = [] - def start_batch(self, operations, tag): + def _start_batch(self, operations, tag, retain_self): if not self.is_valid: raise ValueError("invalid call object cannot be used from Python") cdef grpc_call_error result cdef Operations cy_operations = Operations(operations) cdef OperationTag operation_tag = OperationTag(tag) - operation_tag.operation_call = self + if retain_self: + operation_tag.operation_call = self + else: + operation_tag.operation_call = None operation_tag.batch_operations = cy_operations cpython.Py_INCREF(operation_tag) with nogil: @@ -52,9 +55,18 @@ cdef class Call: <cpython.PyObject *>operation_tag, NULL) return result + def start_client_batch(self, operations, tag): + # We don't reference this call in the operations tag because + # it should be cancelled when it goes out of scope + return self._start_batch(operations, tag, False) + + def start_server_batch(self, operations, tag): + return self._start_batch(operations, tag, True) + def cancel( self, grpc_status_code error_code=GRPC_STATUS__DO_NOT_USE, details=None): + details = str_to_bytes(details) if not self.is_valid: raise ValueError("invalid call object cannot be used from Python") if (details is None) != (error_code == GRPC_STATUS__DO_NOT_USE): @@ -63,12 +75,6 @@ cdef class Call: cdef grpc_call_error result cdef char *c_details = NULL if error_code != GRPC_STATUS__DO_NOT_USE: - if isinstance(details, bytes): - pass - elif isinstance(details, basestring): - details = details.encode() - else: - raise TypeError("expected details to be str or bytes") self.references.append(details) c_details = details with nogil: @@ -99,8 +105,7 @@ cdef class Call: def __dealloc__(self): if self.c_call != NULL: - with nogil: - grpc_call_destroy(self.c_call) + grpc_call_destroy(self.c_call) # The object *should* always be valid from Python. Used for debugging. 
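[Editor's aside] The new _credential_composition module above folds an iterable of call credentials into a single composite by repeated pairwise composition (and requires at least one element, since it starts from next()). A hedged sketch of that fold, with plain tuples standing in for cygrpc credential objects; compose_pairwise is illustrative only.

    def compose_pairwise(items):
        iterator = iter(items)
        composition = next(iterator)          # raises StopIteration if empty
        for additional in iterator:
            composition = ('composite', composition, additional)
        return composition

    print(compose_pairwise(['a', 'b', 'c']))
    # ('composite', ('composite', 'a', 'b'), 'c')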
@property diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi index c26bc083cf..5416401431 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi @@ -32,7 +32,7 @@ cimport cpython cdef class Channel: - def __cinit__(self, target, ChannelArgs arguments=None, + def __cinit__(self, bytes target, ChannelArgs arguments=None, ChannelCredentials channel_credentials=None): cdef grpc_channel_args *c_arguments = NULL cdef char *c_target = NULL @@ -40,12 +40,6 @@ cdef class Channel: self.references = [] if arguments is not None: c_arguments = &arguments.c_args - if isinstance(target, bytes): - pass - elif isinstance(target, basestring): - target = target.encode() - else: - raise TypeError("expected target to be str or bytes") c_target = target if channel_credentials is None: with nogil: @@ -64,23 +58,10 @@ cdef class Channel: method, host, Timespec deadline not None): if queue.is_shutting_down: raise ValueError("queue must not be shutting down or shutdown") - if isinstance(method, bytes): - pass - elif isinstance(method, basestring): - method = method.encode() - else: - raise TypeError("expected method to be str or bytes") cdef char *method_c_string = method cdef char *host_c_string = NULL - if host is None: - pass - elif isinstance(host, bytes): - host_c_string = host - elif isinstance(host, basestring): - host = host.encode() + if host is not None: host_c_string = host - else: - raise TypeError("expected host to be str, bytes, or None") cdef Call operation_call = Call() operation_call.references = [self, method, host, queue] cdef grpc_call *parent_call = NULL @@ -121,5 +102,4 @@ cdef class Channel: def __dealloc__(self): if self.c_channel != NULL: - with nogil: - grpc_channel_destroy(self.c_channel) + grpc_channel_destroy(self.c_channel) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi index a67c963684..01089c3dc0 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pxd.pxi @@ -31,9 +31,6 @@ cdef class CompletionQueue: cdef grpc_completion_queue *c_completion_queue - cdef object pluck_condition - cdef int num_plucking - cdef int num_polling cdef bint is_shutting_down cdef bint is_shutdown diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi index cdae39d519..5955021ceb 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi @@ -32,6 +32,8 @@ cimport cpython import threading import time +cdef int _INTERRUPT_CHECK_PERIOD_MS = 200 + cdef class CompletionQueue: @@ -40,9 +42,6 @@ cdef class CompletionQueue: self.c_completion_queue = grpc_completion_queue_create(NULL) self.is_shutting_down = False self.is_shutdown = False - self.pluck_condition = threading.Condition() - self.num_plucking = 0 - self.num_polling = 0 cdef _interpret_event(self, grpc_event event): cdef OperationTag tag = None @@ -83,45 +82,27 @@ cdef class CompletionQueue: def poll(self, Timespec deadline=None): # We name this 'poll' to avoid problems with CPython's expectations for # 'special' methods (like next and __next__). 
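[Editor's aside] The CompletionQueue.poll() rewrite that follows waits in short _INTERRUPT_CHECK_PERIOD_MS increments instead of blocking until the full deadline, so pending Python signals (e.g. KeyboardInterrupt) can be handled between attempts. Below is a rough Python 3 analogue of that loop, with queue.Queue standing in for grpc_completion_queue_next; poll() and _INTERRUPT_CHECK_PERIOD_SECONDS are illustrative names, not the library API.

    import queue
    import time

    _INTERRUPT_CHECK_PERIOD_SECONDS = 0.2

    def poll(q, deadline=None):
        while True:
            remaining = None if deadline is None else deadline - time.time()
            if remaining is not None and remaining <= 0:
                return None                   # analogous to GRPC_QUEUE_TIMEOUT
            timeout = _INTERRUPT_CHECK_PERIOD_SECONDS
            if remaining is not None:
                timeout = min(timeout, remaining)
            try:
                return q.get(timeout=timeout)  # short wait, then loop again
            except queue.Empty:
                pass                           # signal handlers can run between waits

    q = queue.Queue()
    q.put('event')
    print(poll(q, deadline=time.time() + 1))   # 'event'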
+ cdef gpr_timespec c_increment + cdef gpr_timespec c_timeout cdef gpr_timespec c_deadline with nogil: + c_increment = gpr_time_from_millis(_INTERRUPT_CHECK_PERIOD_MS, GPR_TIMESPAN) c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME) - if deadline is not None: - c_deadline = deadline.c_time - cdef grpc_event event - - # Poll within a critical section to detect contention - with self.pluck_condition: - assert self.num_plucking == 0, 'cannot simultaneously pluck and poll' - self.num_polling += 1 - with nogil: - event = grpc_completion_queue_next( - self.c_completion_queue, c_deadline, NULL) - with self.pluck_condition: - self.num_polling -= 1 - return self._interpret_event(event) - - def pluck(self, OperationTag tag, Timespec deadline=None): - # Plucking a 'None' tag is equivalent to passing control to GRPC core until - # the deadline. - cdef gpr_timespec c_deadline = gpr_inf_future( - GPR_CLOCK_REALTIME) - if deadline is not None: - c_deadline = deadline.c_time - cdef grpc_event event - - # Pluck within a critical section to detect contention - with self.pluck_condition: - assert self.num_polling == 0, 'cannot simultaneously pluck and poll' - assert self.num_plucking < GRPC_MAX_COMPLETION_QUEUE_PLUCKERS, ( - 'cannot pluck more than {} times simultaneously'.format( - GRPC_MAX_COMPLETION_QUEUE_PLUCKERS)) - self.num_plucking += 1 - with nogil: - event = grpc_completion_queue_pluck( - self.c_completion_queue, <cpython.PyObject *>tag, c_deadline, NULL) - with self.pluck_condition: - self.num_plucking -= 1 + if deadline is not None: + c_deadline = deadline.c_time + + while True: + c_timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c_increment) + if gpr_time_cmp(c_timeout, c_deadline) > 0: + c_timeout = c_deadline + event = grpc_completion_queue_next( + self.c_completion_queue, c_timeout, NULL) + if event.type != GRPC_QUEUE_TIMEOUT or gpr_time_cmp(c_timeout, c_deadline) == 0: + break; + + # Handle any signals + with gil: + cpython.PyErr_CheckSignals() return self._interpret_event(event) def shutdown(self): @@ -137,18 +118,14 @@ cdef class CompletionQueue: def __dealloc__(self): cdef gpr_timespec c_deadline - with nogil: - c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME) + c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME) if self.c_completion_queue != NULL: # Ensure shutdown if not self.is_shutting_down: - with nogil: - grpc_completion_queue_shutdown(self.c_completion_queue) - # Pump the queue + grpc_completion_queue_shutdown(self.c_completion_queue) + # Pump the queue (All outstanding calls should have been cancelled) while not self.is_shutdown: - with nogil: - event = grpc_completion_queue_next( - self.c_completion_queue, c_deadline, NULL) + event = grpc_completion_queue_next( + self.c_completion_queue, c_deadline, NULL) self._interpret_event(event) - with nogil: - grpc_completion_queue_destroy(self.c_completion_queue) + grpc_completion_queue_destroy(self.c_completion_queue) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi index 19a59e08f3..d377a67520 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi @@ -54,7 +54,7 @@ cdef class ServerCredentials: cdef class CredentialsMetadataPlugin: cdef object plugin_callback - cdef str plugin_name + cdef bytes plugin_name cdef grpc_metadata_credentials_plugin make_c_plugin(self) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi 
b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index 1ba86457af..035ac49a8b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -46,8 +46,7 @@ cdef class ChannelCredentials: def __dealloc__(self): if self.c_credentials != NULL: - with nogil: - grpc_channel_credentials_release(self.c_credentials) + grpc_channel_credentials_release(self.c_credentials) cdef class CallCredentials: @@ -64,8 +63,7 @@ cdef class CallCredentials: def __dealloc__(self): if self.c_credentials != NULL: - with nogil: - grpc_call_credentials_release(self.c_credentials) + grpc_call_credentials_release(self.c_credentials) cdef class ServerCredentials: @@ -76,13 +74,12 @@ cdef class ServerCredentials: def __dealloc__(self): if self.c_credentials != NULL: - with nogil: - grpc_server_credentials_release(self.c_credentials) + grpc_server_credentials_release(self.c_credentials) cdef class CredentialsMetadataPlugin: - def __cinit__(self, object plugin_callback, str name): + def __cinit__(self, object plugin_callback, bytes name): """ Args: plugin_callback (callable): Callback accepting a service URL (str/bytes) @@ -91,7 +88,7 @@ cdef class CredentialsMetadataPlugin: when called should be non-blocking and eventually call the callback object with the appropriate status code/details and metadata (if successful). - name (str): Plugin name. + name (bytes): Plugin name. """ if not callable(plugin_callback): raise ValueError('expected callable plugin_callback') @@ -129,7 +126,7 @@ cdef void plugin_get_metadata( grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil: def python_callback( Metadata metadata, grpc_status_code status, - const char *error_details): + bytes error_details): cb(user_data, metadata.c_metadata_array.metadata, metadata.c_metadata_array.count, status, error_details) cdef CredentialsMetadataPlugin self = <CredentialsMetadataPlugin>state @@ -148,14 +145,7 @@ def channel_credentials_google_default(): def channel_credentials_ssl(pem_root_certificates, SslPemKeyCertPair ssl_pem_key_cert_pair): - if pem_root_certificates is None: - pass - elif isinstance(pem_root_certificates, bytes): - pass - elif isinstance(pem_root_certificates, basestring): - pem_root_certificates = pem_root_certificates.encode() - else: - raise TypeError("expected str or bytes for pem_root_certificates") + pem_root_certificates = str_to_bytes(pem_root_certificates) cdef ChannelCredentials credentials = ChannelCredentials() cdef const char *c_pem_root_certificates = NULL if pem_root_certificates is not None: @@ -207,12 +197,7 @@ def call_credentials_google_compute_engine(): def call_credentials_service_account_jwt_access( json_key, Timespec token_lifetime not None): - if isinstance(json_key, bytes): - pass - elif isinstance(json_key, basestring): - json_key = json_key.encode() - else: - raise TypeError("expected json_key to be str or bytes") + json_key = str_to_bytes(json_key) cdef CallCredentials credentials = CallCredentials() cdef char *json_key_c_string = json_key with nogil: @@ -223,12 +208,7 @@ def call_credentials_service_account_jwt_access( return credentials def call_credentials_google_refresh_token(json_refresh_token): - if isinstance(json_refresh_token, bytes): - pass - elif isinstance(json_refresh_token, basestring): - json_refresh_token = json_refresh_token.encode() - else: - raise TypeError("expected json_refresh_token to be str or bytes") + json_refresh_token = str_to_bytes(json_refresh_token) cdef CallCredentials 
credentials = CallCredentials() cdef char *json_refresh_token_c_string = json_refresh_token with nogil: @@ -238,18 +218,8 @@ def call_credentials_google_refresh_token(json_refresh_token): return credentials def call_credentials_google_iam(authorization_token, authority_selector): - if isinstance(authorization_token, bytes): - pass - elif isinstance(authorization_token, basestring): - authorization_token = authorization_token.encode() - else: - raise TypeError("expected authorization_token to be str or bytes") - if isinstance(authority_selector, bytes): - pass - elif isinstance(authority_selector, basestring): - authority_selector = authority_selector.encode() - else: - raise TypeError("expected authority_selector to be str or bytes") + authorization_token = str_to_bytes(authorization_token) + authority_selector = str_to_bytes(authority_selector) cdef CallCredentials credentials = CallCredentials() cdef char *authorization_token_c_string = authorization_token cdef char *authority_selector_c_string = authority_selector @@ -272,16 +242,10 @@ def call_credentials_metadata_plugin(CredentialsMetadataPlugin plugin): def server_credentials_ssl(pem_root_certs, pem_key_cert_pairs, bint force_client_auth): + pem_root_certs = str_to_bytes(pem_root_certs) cdef char *c_pem_root_certs = NULL - if pem_root_certs is None: - pass - elif isinstance(pem_root_certs, bytes): - c_pem_root_certs = pem_root_certs - elif isinstance(pem_root_certs, basestring): - pem_root_certs = pem_root_certs.encode() + if pem_root_certs is not None: c_pem_root_certs = pem_root_certs - else: - raise TypeError("expected pem_root_certs to be str or bytes") pem_key_cert_pairs = list(pem_key_cert_pairs) for pair in pem_key_cert_pairs: if not isinstance(pair, SslPemKeyCertPair): diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index 05b8886df7..7714504d1b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -37,6 +37,7 @@ cdef extern from "grpc/_cython/loader.h": ctypedef long int64_t int pygrpc_load_core(char*) + int pygrpc_initialize_core() void *gpr_malloc(size_t size) nogil void gpr_free(void *ptr) nogil @@ -80,6 +81,12 @@ cdef extern from "grpc/_cython/loader.h": gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock) nogil + gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) nogil + + gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) nogil + + int gpr_time_cmp(gpr_timespec a, gpr_timespec b) nogil + ctypedef enum grpc_status_code: GRPC_STATUS_OK GRPC_STATUS_CANCELLED @@ -125,8 +132,8 @@ cdef extern from "grpc/_cython/loader.h": size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) nogil void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer) nogil - void grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, - grpc_byte_buffer *buffer) nogil + int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, + grpc_byte_buffer *buffer) nogil int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, gpr_slice *slice) nogil void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) nogil diff --git a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi index 6e4fd76d93..274f038e0a 100644 --- a/src/python/grpcio/grpc/framework/interfaces/links/utilities.py +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc_string.pyx.pxi @@ -1,4 +1,4 @@ -# Copyright 2015, Google 
Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,18 +27,13 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Utilities provided as part of the links interface.""" -from grpc.framework.interfaces.links import links - - -class _NullLink(links.Link): - """A do-nothing links.Link.""" - - def accept_ticket(self, ticket): - pass - - def join_link(self, link): - pass - -NULL_LINK = _NullLink() +# This function will ascii encode unicode string inputs if neccesary. +# In Python3, unicode strings are the default str type. +cdef bytes str_to_bytes(object s): + if s is None or isinstance(s, bytes): + return s + elif isinstance(s, unicode): + return s.encode('ascii') + else: + raise TypeError('Expected bytes, str, or unicode, not {}'.format(type(s))) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi index 0474697af8..96c5b02bc2 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi @@ -58,14 +58,14 @@ cdef class Event: cdef readonly bint success cdef readonly object tag - # For operations with calls - cdef readonly Call operation_call - # For Server.request_call cdef readonly bint is_new_request cdef readonly CallDetails request_call_details cdef readonly Metadata request_metadata + # For server calls + cdef readonly Call operation_call + # For Call.start_batch cdef readonly Operations batch_operations diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi index e0219b0086..54b3d00dfc 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi @@ -33,7 +33,7 @@ class ConnectivityState: connecting = GRPC_CHANNEL_CONNECTING ready = GRPC_CHANNEL_READY transient_failure = GRPC_CHANNEL_TRANSIENT_FAILURE - fatal_failure = GRPC_CHANNEL_SHUTDOWN + shutdown = GRPC_CHANNEL_SHUTDOWN class ChannelArgKey: @@ -231,22 +231,10 @@ cdef class Event: cdef class ByteBuffer: - def __cinit__(self, data): + def __cinit__(self, bytes data): if data is None: self.c_byte_buffer = NULL return - if isinstance(data, bytes): - pass - elif isinstance(data, basestring): - data = data.encode() - elif isinstance(data, ByteBuffer): - data = (<ByteBuffer>data).bytes() - if data is None: - self.c_byte_buffer = NULL - return - else: - raise TypeError("expected value to be of type str, bytes, or " - "ByteBuffer, not {}".format(type(data))) cdef char *c_data = data cdef gpr_slice data_slice @@ -264,9 +252,13 @@ cdef class ByteBuffer: cdef gpr_slice data_slice cdef size_t data_slice_length cdef void *data_slice_pointer + cdef bint reader_status if self.c_byte_buffer != NULL: with nogil: - grpc_byte_buffer_reader_init(&reader, self.c_byte_buffer) + reader_status = grpc_byte_buffer_reader_init( + &reader, self.c_byte_buffer) + if not reader_status: + return None result = bytearray() with nogil: while grpc_byte_buffer_reader_next(&reader, &data_slice): @@ -295,54 +287,33 @@ cdef class ByteBuffer: def __dealloc__(self): if self.c_byte_buffer != NULL: - with nogil: - grpc_byte_buffer_destroy(self.c_byte_buffer) + grpc_byte_buffer_destroy(self.c_byte_buffer) cdef class SslPemKeyCertPair: - def __cinit__(self, private_key, certificate_chain): - if isinstance(private_key, bytes): - 
self.private_key = private_key - elif isinstance(private_key, basestring): - self.private_key = private_key.encode() - else: - raise TypeError("expected private_key to be of type str or bytes") - if isinstance(certificate_chain, bytes): - self.certificate_chain = certificate_chain - elif isinstance(certificate_chain, basestring): - self.certificate_chain = certificate_chain.encode() - else: - raise TypeError("expected certificate_chain to be of type str or bytes " - "or int") + def __cinit__(self, bytes private_key, bytes certificate_chain): + self.private_key = private_key + self.certificate_chain = certificate_chain self.c_pair.private_key = self.private_key self.c_pair.certificate_chain = self.certificate_chain cdef class ChannelArg: - def __cinit__(self, key, value): - if isinstance(key, bytes): - self.key = key - elif isinstance(key, basestring): - self.key = key.encode() - else: - raise TypeError("expected key to be of type str or bytes") - if isinstance(value, bytes): + def __cinit__(self, bytes key, value): + self.key = key + self.c_arg.key = self.key + if isinstance(value, int): self.value = value - self.c_arg.type = GRPC_ARG_STRING - self.c_arg.value.string = self.value - elif isinstance(value, basestring): - self.value = value.encode() - self.c_arg.type = GRPC_ARG_STRING - self.c_arg.value.string = self.value - elif isinstance(value, int): - self.value = int(value) self.c_arg.type = GRPC_ARG_INTEGER self.c_arg.value.integer = self.value + elif isinstance(value, bytes): + self.value = value + self.c_arg.type = GRPC_ARG_STRING + self.c_arg.value.string = self.value else: - raise TypeError("expected value to be of type str or bytes or int") - self.c_arg.key = self.key + raise TypeError('Expected int or bytes, got {}'.format(type(value))) cdef class ChannelArgs: @@ -374,19 +345,9 @@ cdef class ChannelArgs: cdef class Metadatum: - def __cinit__(self, key, value): - if isinstance(key, bytes): - self._key = key - elif isinstance(key, basestring): - self._key = key.encode() - else: - raise TypeError("expected key to be of type str or bytes") - if isinstance(value, bytes): - self._value = value - elif isinstance(value, basestring): - self._value = value.encode() - else: - raise TypeError("expected value to be of type str or bytes") + def __cinit__(self, bytes key, bytes value): + self._key = key + self._value = value self.c_metadata.key = self._key self.c_metadata.value = self._value self.c_metadata.value_length = len(self._value) @@ -458,8 +419,7 @@ cdef class Metadata: # this frees the allocated memory for the grpc_metadata_array (although # it'd be nice if that were documented somewhere...) # TODO(atash): document this in the C core - with nogil: - grpc_metadata_array_destroy(&self.c_metadata_array) + grpc_metadata_array_destroy(&self.c_metadata_array) def __len__(self): return self.c_metadata_array.count @@ -568,8 +528,7 @@ cdef class Operation: # Python. The remaining one(s) are primitive fields filled in by GRPC core. # This means that we need to clean up after receive_status_on_client. 
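[Editor's aside] With the records.pyx.pxi changes above, ChannelArg accepts only a bytes key and an int or bytes value; text is expected to be encoded one layer up, as _options() in grpc/_channel.py now does. A hedged sketch of that two-step flow follows; encode() and channel_arg() are illustrative helpers, and the arg names are ordinary gRPC channel-arg keys used purely as examples.

    def encode(s):
        return s if isinstance(s, bytes) else s.encode('ascii')

    def channel_arg(key, value):
        key = encode(key)
        if isinstance(value, int):
            return {'key': key, 'type': 'integer', 'value': value}
        elif isinstance(value, bytes):
            return {'key': key, 'type': 'string', 'value': value}
        else:
            raise TypeError('Expected int or bytes, got {}'.format(type(value)))

    print(channel_arg('grpc.primary_user_agent', encode('my-agent/1.0')))
    print(channel_arg('grpc.max_message_length', 4 * 1024 * 1024))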
if self.c_op.type == GRPC_OP_RECV_STATUS_ON_CLIENT: - with nogil: - gpr_free(self._received_status_details) + gpr_free(self._received_status_details) def operation_send_initial_metadata(Metadata metadata, int flags): cdef Operation op = Operation() @@ -600,13 +559,7 @@ def operation_send_close_from_client(int flags): return op def operation_send_status_from_server( - Metadata metadata, grpc_status_code code, details, int flags): - if isinstance(details, bytes): - pass - elif isinstance(details, basestring): - details = details.encode() - else: - raise TypeError("expected a str or bytes object for details") + Metadata metadata, grpc_status_code code, bytes details, int flags): cdef Operation op = Operation() op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER op.c_op.flags = flags diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi index 55948755b2..4f2d51b03f 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi @@ -99,16 +99,11 @@ cdef class Server: with nogil: grpc_server_start(self.c_server) # Ensure the core has gotten a chance to do the start-up work - self.backup_shutdown_queue.pluck(None, Timespec(None)) + self.backup_shutdown_queue.poll(Timespec(None)) - def add_http2_port(self, address, + def add_http2_port(self, bytes address, ServerCredentials server_credentials=None): - if isinstance(address, bytes): - pass - elif isinstance(address, basestring): - address = address.encode() - else: - raise TypeError("expected address to be a str or bytes") + address = str_to_bytes(address) self.references.append(address) cdef int result cdef char *address_c_string = address @@ -176,5 +171,4 @@ cdef class Server: # much but repeatedly release the GIL and wait while not self.is_shutdown: time.sleep(0) - with nogil: - grpc_server_destroy(self.c_server) + grpc_server_destroy(self.c_server) diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx index 8823ea5ef5..e055d321bc 100644 --- a/src/python/grpcio/grpc/_cython/cygrpc.pyx +++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx @@ -35,6 +35,7 @@ import sys # TODO(atash): figure out why the coverage tool gets confused about the Cython # coverage plugin when the following files don't have a '.pxi' suffix. 
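[Editor's aside] Several of the Cython changes above (Call.cancel(), Server.add_http2_port(), the credentials constructors) now normalize text arguments through the new str_to_bytes helper instead of repeating isinstance checks at every call site. A pure-Python sketch of its behavior, assuming Python 3 where str is the unicode type:

    def str_to_bytes(s):
        if s is None or isinstance(s, bytes):
            return s
        elif isinstance(s, str):  # 'unicode' on Python 2
            return s.encode('ascii')
        else:
            raise TypeError(
                'Expected bytes, str, or unicode, not {}'.format(type(s)))

    print(str_to_bytes('[::]:50051'))  # b'[::]:50051'
    print(str_to_bytes(None))          # None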
+include "grpc/_cython/_cygrpc/grpc_string.pyx.pxi" include "grpc/_cython/_cygrpc/call.pyx.pxi" include "grpc/_cython/_cygrpc/channel.pyx.pxi" include "grpc/_cython/_cygrpc/credentials.pyx.pxi" @@ -44,30 +45,15 @@ include "grpc/_cython/_cygrpc/security.pyx.pxi" include "grpc/_cython/_cygrpc/server.pyx.pxi" # -# Global state +# initialize gRPC # -cdef class _ModuleState: - cdef bint is_loaded +def _initialize(): + if not pygrpc_initialize_core(): + raise ImportError('failed to initialize core gRPC library') - def __cinit__(self): - if 'win32' in sys.platform: - filename = pkg_resources.resource_filename( - 'grpc._cython', '_windows/grpc_c.64.python') - if not pygrpc_load_core(filename): - raise ImportError('failed to load core gRPC library') - with nogil: - grpc_init() - self.is_loaded = True - with nogil: - grpc_set_ssl_roots_override_callback( + grpc_set_ssl_roots_override_callback( <grpc_ssl_roots_override_callback>ssl_roots_override_callback) - def __dealloc__(self): - if self.is_loaded: - with nogil: - grpc_shutdown() - -_module_state = _ModuleState() - +_initialize() diff --git a/src/python/grpcio/grpc/_cython/imports.generated.c b/src/python/grpcio/grpc/_cython/imports.generated.c index d811ec68da..c0080b5a47 100644 --- a/src/python/grpcio/grpc/_cython/imports.generated.c +++ b/src/python/grpcio/grpc/_cython/imports.generated.c @@ -31,542 +31,7 @@ * */ +/* TODO(atash) remove cruft */ #include <grpc/support/port_platform.h> #include "imports.generated.h" - -#ifdef GPR_WINDOWS - -census_initialize_type census_initialize_import; -census_shutdown_type census_shutdown_import; -census_supported_type census_supported_import; -census_enabled_type census_enabled_import; -census_context_create_type census_context_create_import; -census_context_destroy_type census_context_destroy_import; -census_context_get_status_type census_context_get_status_import; -census_context_initialize_iterator_type census_context_initialize_iterator_import; -census_context_next_tag_type census_context_next_tag_import; -census_context_get_tag_type census_context_get_tag_import; -census_context_encode_type census_context_encode_import; -census_context_decode_type census_context_decode_import; -census_trace_mask_type census_trace_mask_import; -census_set_trace_mask_type census_set_trace_mask_import; -census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import; -census_start_client_rpc_op_type census_start_client_rpc_op_import; -census_set_rpc_client_peer_type census_set_rpc_client_peer_import; -census_start_server_rpc_op_type census_start_server_rpc_op_import; -census_start_op_type census_start_op_import; -census_end_op_type census_end_op_import; -census_trace_print_type census_trace_print_import; -census_trace_scan_start_type census_trace_scan_start_import; -census_get_trace_record_type census_get_trace_record_import; -census_trace_scan_end_type census_trace_scan_end_import; -census_define_resource_type census_define_resource_import; -census_delete_resource_type census_delete_resource_import; -census_resource_id_type census_resource_id_import; -census_record_values_type census_record_values_import; -grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import; -grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import; -grpc_compression_algorithm_for_level_type grpc_compression_algorithm_for_level_import; -grpc_compression_options_init_type grpc_compression_options_init_import; -grpc_compression_options_enable_algorithm_type 
grpc_compression_options_enable_algorithm_import; -grpc_compression_options_disable_algorithm_type grpc_compression_options_disable_algorithm_import; -grpc_compression_options_is_algorithm_enabled_type grpc_compression_options_is_algorithm_enabled_import; -grpc_metadata_array_init_type grpc_metadata_array_init_import; -grpc_metadata_array_destroy_type grpc_metadata_array_destroy_import; -grpc_call_details_init_type grpc_call_details_init_import; -grpc_call_details_destroy_type grpc_call_details_destroy_import; -grpc_register_plugin_type grpc_register_plugin_import; -grpc_init_type grpc_init_import; -grpc_shutdown_type grpc_shutdown_import; -grpc_version_string_type grpc_version_string_import; -grpc_completion_queue_create_type grpc_completion_queue_create_import; -grpc_completion_queue_next_type grpc_completion_queue_next_import; -grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import; -grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import; -grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import; -grpc_alarm_create_type grpc_alarm_create_import; -grpc_alarm_cancel_type grpc_alarm_cancel_import; -grpc_alarm_destroy_type grpc_alarm_destroy_import; -grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import; -grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import; -grpc_channel_create_call_type grpc_channel_create_call_import; -grpc_channel_ping_type grpc_channel_ping_import; -grpc_channel_register_call_type grpc_channel_register_call_import; -grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import; -grpc_call_start_batch_type grpc_call_start_batch_import; -grpc_call_get_peer_type grpc_call_get_peer_import; -grpc_census_call_set_context_type grpc_census_call_set_context_import; -grpc_census_call_get_context_type grpc_census_call_get_context_import; -grpc_channel_get_target_type grpc_channel_get_target_import; -grpc_insecure_channel_create_type grpc_insecure_channel_create_import; -grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import; -grpc_channel_destroy_type grpc_channel_destroy_import; -grpc_call_cancel_type grpc_call_cancel_import; -grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import; -grpc_call_destroy_type grpc_call_destroy_import; -grpc_server_request_call_type grpc_server_request_call_import; -grpc_server_register_method_type grpc_server_register_method_import; -grpc_server_request_registered_call_type grpc_server_request_registered_call_import; -grpc_server_create_type grpc_server_create_import; -grpc_server_register_completion_queue_type grpc_server_register_completion_queue_import; -grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import; -grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import; -grpc_server_start_type grpc_server_start_import; -grpc_server_shutdown_and_notify_type grpc_server_shutdown_and_notify_import; -grpc_server_cancel_all_calls_type grpc_server_cancel_all_calls_import; -grpc_server_destroy_type grpc_server_destroy_import; -grpc_tracer_set_enabled_type grpc_tracer_set_enabled_import; -grpc_header_key_is_legal_type grpc_header_key_is_legal_import; -grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import; -grpc_is_binary_header_type grpc_is_binary_header_import; -grpc_call_error_to_string_type grpc_call_error_to_string_import; -grpc_auth_property_iterator_next_type 
grpc_auth_property_iterator_next_import; -grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import; -grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import; -grpc_auth_context_find_properties_by_name_type grpc_auth_context_find_properties_by_name_import; -grpc_auth_context_peer_identity_property_name_type grpc_auth_context_peer_identity_property_name_import; -grpc_auth_context_peer_is_authenticated_type grpc_auth_context_peer_is_authenticated_import; -grpc_call_auth_context_type grpc_call_auth_context_import; -grpc_auth_context_release_type grpc_auth_context_release_import; -grpc_auth_context_add_property_type grpc_auth_context_add_property_import; -grpc_auth_context_add_cstring_property_type grpc_auth_context_add_cstring_property_import; -grpc_auth_context_set_peer_identity_property_name_type grpc_auth_context_set_peer_identity_property_name_import; -grpc_channel_credentials_release_type grpc_channel_credentials_release_import; -grpc_google_default_credentials_create_type grpc_google_default_credentials_create_import; -grpc_set_ssl_roots_override_callback_type grpc_set_ssl_roots_override_callback_import; -grpc_ssl_credentials_create_type grpc_ssl_credentials_create_import; -grpc_call_credentials_release_type grpc_call_credentials_release_import; -grpc_composite_channel_credentials_create_type grpc_composite_channel_credentials_create_import; -grpc_composite_call_credentials_create_type grpc_composite_call_credentials_create_import; -grpc_google_compute_engine_credentials_create_type grpc_google_compute_engine_credentials_create_import; -grpc_max_auth_token_lifetime_type grpc_max_auth_token_lifetime_import; -grpc_service_account_jwt_access_credentials_create_type grpc_service_account_jwt_access_credentials_create_import; -grpc_google_refresh_token_credentials_create_type grpc_google_refresh_token_credentials_create_import; -grpc_access_token_credentials_create_type grpc_access_token_credentials_create_import; -grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_import; -grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import; -grpc_secure_channel_create_type grpc_secure_channel_create_import; -grpc_server_credentials_release_type grpc_server_credentials_release_import; -grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import; -grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import; -grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import; -grpc_call_set_credentials_type grpc_call_set_credentials_import; -grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import; -gpr_malloc_type gpr_malloc_import; -gpr_free_type gpr_free_import; -gpr_realloc_type gpr_realloc_import; -gpr_malloc_aligned_type gpr_malloc_aligned_import; -gpr_free_aligned_type gpr_free_aligned_import; -gpr_set_allocation_functions_type gpr_set_allocation_functions_import; -gpr_get_allocation_functions_type gpr_get_allocation_functions_import; -grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import; -grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import; -grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import; -grpc_byte_buffer_length_type grpc_byte_buffer_length_import; -grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import; -grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import; 
-grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import; -grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import; -grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import; -grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import; -gpr_log_type gpr_log_import; -gpr_log_message_type gpr_log_message_import; -gpr_set_log_verbosity_type gpr_set_log_verbosity_import; -gpr_log_verbosity_init_type gpr_log_verbosity_init_import; -gpr_set_log_function_type gpr_set_log_function_import; -gpr_slice_ref_type gpr_slice_ref_import; -gpr_slice_unref_type gpr_slice_unref_import; -gpr_slice_new_type gpr_slice_new_import; -gpr_slice_new_with_len_type gpr_slice_new_with_len_import; -gpr_slice_malloc_type gpr_slice_malloc_import; -gpr_slice_from_copied_string_type gpr_slice_from_copied_string_import; -gpr_slice_from_copied_buffer_type gpr_slice_from_copied_buffer_import; -gpr_slice_from_static_string_type gpr_slice_from_static_string_import; -gpr_slice_sub_type gpr_slice_sub_import; -gpr_slice_sub_no_ref_type gpr_slice_sub_no_ref_import; -gpr_slice_split_tail_type gpr_slice_split_tail_import; -gpr_slice_split_head_type gpr_slice_split_head_import; -gpr_empty_slice_type gpr_empty_slice_import; -gpr_slice_cmp_type gpr_slice_cmp_import; -gpr_slice_str_cmp_type gpr_slice_str_cmp_import; -gpr_slice_buffer_init_type gpr_slice_buffer_init_import; -gpr_slice_buffer_destroy_type gpr_slice_buffer_destroy_import; -gpr_slice_buffer_add_type gpr_slice_buffer_add_import; -gpr_slice_buffer_add_indexed_type gpr_slice_buffer_add_indexed_import; -gpr_slice_buffer_addn_type gpr_slice_buffer_addn_import; -gpr_slice_buffer_tiny_add_type gpr_slice_buffer_tiny_add_import; -gpr_slice_buffer_pop_type gpr_slice_buffer_pop_import; -gpr_slice_buffer_reset_and_unref_type gpr_slice_buffer_reset_and_unref_import; -gpr_slice_buffer_swap_type gpr_slice_buffer_swap_import; -gpr_slice_buffer_move_into_type gpr_slice_buffer_move_into_import; -gpr_slice_buffer_trim_end_type gpr_slice_buffer_trim_end_import; -gpr_slice_buffer_move_first_type gpr_slice_buffer_move_first_import; -gpr_slice_buffer_take_first_type gpr_slice_buffer_take_first_import; -gpr_mu_init_type gpr_mu_init_import; -gpr_mu_destroy_type gpr_mu_destroy_import; -gpr_mu_lock_type gpr_mu_lock_import; -gpr_mu_unlock_type gpr_mu_unlock_import; -gpr_mu_trylock_type gpr_mu_trylock_import; -gpr_cv_init_type gpr_cv_init_import; -gpr_cv_destroy_type gpr_cv_destroy_import; -gpr_cv_wait_type gpr_cv_wait_import; -gpr_cv_signal_type gpr_cv_signal_import; -gpr_cv_broadcast_type gpr_cv_broadcast_import; -gpr_once_init_type gpr_once_init_import; -gpr_event_init_type gpr_event_init_import; -gpr_event_set_type gpr_event_set_import; -gpr_event_get_type gpr_event_get_import; -gpr_event_wait_type gpr_event_wait_import; -gpr_ref_init_type gpr_ref_init_import; -gpr_ref_type gpr_ref_import; -gpr_ref_non_zero_type gpr_ref_non_zero_import; -gpr_refn_type gpr_refn_import; -gpr_unref_type gpr_unref_import; -gpr_stats_init_type gpr_stats_init_import; -gpr_stats_inc_type gpr_stats_inc_import; -gpr_stats_read_type gpr_stats_read_import; -gpr_time_0_type gpr_time_0_import; -gpr_inf_future_type gpr_inf_future_import; -gpr_inf_past_type gpr_inf_past_import; -gpr_time_init_type gpr_time_init_import; -gpr_now_type gpr_now_import; -gpr_convert_clock_type_type gpr_convert_clock_type_import; -gpr_time_cmp_type gpr_time_cmp_import; -gpr_time_max_type gpr_time_max_import; -gpr_time_min_type gpr_time_min_import; -gpr_time_add_type 
gpr_time_add_import; -gpr_time_sub_type gpr_time_sub_import; -gpr_time_from_micros_type gpr_time_from_micros_import; -gpr_time_from_nanos_type gpr_time_from_nanos_import; -gpr_time_from_millis_type gpr_time_from_millis_import; -gpr_time_from_seconds_type gpr_time_from_seconds_import; -gpr_time_from_minutes_type gpr_time_from_minutes_import; -gpr_time_from_hours_type gpr_time_from_hours_import; -gpr_time_to_millis_type gpr_time_to_millis_import; -gpr_time_similar_type gpr_time_similar_import; -gpr_sleep_until_type gpr_sleep_until_import; -gpr_timespec_to_micros_type gpr_timespec_to_micros_import; -gpr_avl_create_type gpr_avl_create_import; -gpr_avl_ref_type gpr_avl_ref_import; -gpr_avl_unref_type gpr_avl_unref_import; -gpr_avl_add_type gpr_avl_add_import; -gpr_avl_remove_type gpr_avl_remove_import; -gpr_avl_get_type gpr_avl_get_import; -gpr_cmdline_create_type gpr_cmdline_create_import; -gpr_cmdline_add_int_type gpr_cmdline_add_int_import; -gpr_cmdline_add_flag_type gpr_cmdline_add_flag_import; -gpr_cmdline_add_string_type gpr_cmdline_add_string_import; -gpr_cmdline_on_extra_arg_type gpr_cmdline_on_extra_arg_import; -gpr_cmdline_set_survive_failure_type gpr_cmdline_set_survive_failure_import; -gpr_cmdline_parse_type gpr_cmdline_parse_import; -gpr_cmdline_destroy_type gpr_cmdline_destroy_import; -gpr_cmdline_usage_string_type gpr_cmdline_usage_string_import; -gpr_cpu_num_cores_type gpr_cpu_num_cores_import; -gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import; -gpr_histogram_create_type gpr_histogram_create_import; -gpr_histogram_destroy_type gpr_histogram_destroy_import; -gpr_histogram_add_type gpr_histogram_add_import; -gpr_histogram_merge_type gpr_histogram_merge_import; -gpr_histogram_percentile_type gpr_histogram_percentile_import; -gpr_histogram_mean_type gpr_histogram_mean_import; -gpr_histogram_stddev_type gpr_histogram_stddev_import; -gpr_histogram_variance_type gpr_histogram_variance_import; -gpr_histogram_maximum_type gpr_histogram_maximum_import; -gpr_histogram_minimum_type gpr_histogram_minimum_import; -gpr_histogram_count_type gpr_histogram_count_import; -gpr_histogram_sum_type gpr_histogram_sum_import; -gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import; -gpr_histogram_get_contents_type gpr_histogram_get_contents_import; -gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import; -gpr_join_host_port_type gpr_join_host_port_import; -gpr_split_host_port_type gpr_split_host_port_import; -gpr_format_message_type gpr_format_message_import; -gpr_strdup_type gpr_strdup_import; -gpr_asprintf_type gpr_asprintf_import; -gpr_subprocess_binary_extension_type gpr_subprocess_binary_extension_import; -gpr_subprocess_create_type gpr_subprocess_create_import; -gpr_subprocess_destroy_type gpr_subprocess_destroy_import; -gpr_subprocess_join_type gpr_subprocess_join_import; -gpr_subprocess_interrupt_type gpr_subprocess_interrupt_import; -gpr_thd_new_type gpr_thd_new_import; -gpr_thd_options_default_type gpr_thd_options_default_import; -gpr_thd_options_set_detached_type gpr_thd_options_set_detached_import; -gpr_thd_options_set_joinable_type gpr_thd_options_set_joinable_import; -gpr_thd_options_is_detached_type gpr_thd_options_is_detached_import; -gpr_thd_options_is_joinable_type gpr_thd_options_is_joinable_import; -gpr_thd_currentid_type gpr_thd_currentid_import; -gpr_thd_join_type gpr_thd_join_import; - -#ifdef __cplusplus -extern "C" { -#endif /* __cpluslus */ - -void pygrpc_load_imports(HMODULE library) { - census_initialize_import = (census_initialize_type) 
GetProcAddress(library, "census_initialize"); - census_shutdown_import = (census_shutdown_type) GetProcAddress(library, "census_shutdown"); - census_supported_import = (census_supported_type) GetProcAddress(library, "census_supported"); - census_enabled_import = (census_enabled_type) GetProcAddress(library, "census_enabled"); - census_context_create_import = (census_context_create_type) GetProcAddress(library, "census_context_create"); - census_context_destroy_import = (census_context_destroy_type) GetProcAddress(library, "census_context_destroy"); - census_context_get_status_import = (census_context_get_status_type) GetProcAddress(library, "census_context_get_status"); - census_context_initialize_iterator_import = (census_context_initialize_iterator_type) GetProcAddress(library, "census_context_initialize_iterator"); - census_context_next_tag_import = (census_context_next_tag_type) GetProcAddress(library, "census_context_next_tag"); - census_context_get_tag_import = (census_context_get_tag_type) GetProcAddress(library, "census_context_get_tag"); - census_context_encode_import = (census_context_encode_type) GetProcAddress(library, "census_context_encode"); - census_context_decode_import = (census_context_decode_type) GetProcAddress(library, "census_context_decode"); - census_trace_mask_import = (census_trace_mask_type) GetProcAddress(library, "census_trace_mask"); - census_set_trace_mask_import = (census_set_trace_mask_type) GetProcAddress(library, "census_set_trace_mask"); - census_start_rpc_op_timestamp_import = (census_start_rpc_op_timestamp_type) GetProcAddress(library, "census_start_rpc_op_timestamp"); - census_start_client_rpc_op_import = (census_start_client_rpc_op_type) GetProcAddress(library, "census_start_client_rpc_op"); - census_set_rpc_client_peer_import = (census_set_rpc_client_peer_type) GetProcAddress(library, "census_set_rpc_client_peer"); - census_start_server_rpc_op_import = (census_start_server_rpc_op_type) GetProcAddress(library, "census_start_server_rpc_op"); - census_start_op_import = (census_start_op_type) GetProcAddress(library, "census_start_op"); - census_end_op_import = (census_end_op_type) GetProcAddress(library, "census_end_op"); - census_trace_print_import = (census_trace_print_type) GetProcAddress(library, "census_trace_print"); - census_trace_scan_start_import = (census_trace_scan_start_type) GetProcAddress(library, "census_trace_scan_start"); - census_get_trace_record_import = (census_get_trace_record_type) GetProcAddress(library, "census_get_trace_record"); - census_trace_scan_end_import = (census_trace_scan_end_type) GetProcAddress(library, "census_trace_scan_end"); - census_define_resource_import = (census_define_resource_type) GetProcAddress(library, "census_define_resource"); - census_delete_resource_import = (census_delete_resource_type) GetProcAddress(library, "census_delete_resource"); - census_resource_id_import = (census_resource_id_type) GetProcAddress(library, "census_resource_id"); - census_record_values_import = (census_record_values_type) GetProcAddress(library, "census_record_values"); - grpc_compression_algorithm_parse_import = (grpc_compression_algorithm_parse_type) GetProcAddress(library, "grpc_compression_algorithm_parse"); - grpc_compression_algorithm_name_import = (grpc_compression_algorithm_name_type) GetProcAddress(library, "grpc_compression_algorithm_name"); - grpc_compression_algorithm_for_level_import = (grpc_compression_algorithm_for_level_type) GetProcAddress(library, "grpc_compression_algorithm_for_level"); - 
grpc_compression_options_init_import = (grpc_compression_options_init_type) GetProcAddress(library, "grpc_compression_options_init"); - grpc_compression_options_enable_algorithm_import = (grpc_compression_options_enable_algorithm_type) GetProcAddress(library, "grpc_compression_options_enable_algorithm"); - grpc_compression_options_disable_algorithm_import = (grpc_compression_options_disable_algorithm_type) GetProcAddress(library, "grpc_compression_options_disable_algorithm"); - grpc_compression_options_is_algorithm_enabled_import = (grpc_compression_options_is_algorithm_enabled_type) GetProcAddress(library, "grpc_compression_options_is_algorithm_enabled"); - grpc_metadata_array_init_import = (grpc_metadata_array_init_type) GetProcAddress(library, "grpc_metadata_array_init"); - grpc_metadata_array_destroy_import = (grpc_metadata_array_destroy_type) GetProcAddress(library, "grpc_metadata_array_destroy"); - grpc_call_details_init_import = (grpc_call_details_init_type) GetProcAddress(library, "grpc_call_details_init"); - grpc_call_details_destroy_import = (grpc_call_details_destroy_type) GetProcAddress(library, "grpc_call_details_destroy"); - grpc_register_plugin_import = (grpc_register_plugin_type) GetProcAddress(library, "grpc_register_plugin"); - grpc_init_import = (grpc_init_type) GetProcAddress(library, "grpc_init"); - grpc_shutdown_import = (grpc_shutdown_type) GetProcAddress(library, "grpc_shutdown"); - grpc_version_string_import = (grpc_version_string_type) GetProcAddress(library, "grpc_version_string"); - grpc_completion_queue_create_import = (grpc_completion_queue_create_type) GetProcAddress(library, "grpc_completion_queue_create"); - grpc_completion_queue_next_import = (grpc_completion_queue_next_type) GetProcAddress(library, "grpc_completion_queue_next"); - grpc_completion_queue_pluck_import = (grpc_completion_queue_pluck_type) GetProcAddress(library, "grpc_completion_queue_pluck"); - grpc_completion_queue_shutdown_import = (grpc_completion_queue_shutdown_type) GetProcAddress(library, "grpc_completion_queue_shutdown"); - grpc_completion_queue_destroy_import = (grpc_completion_queue_destroy_type) GetProcAddress(library, "grpc_completion_queue_destroy"); - grpc_alarm_create_import = (grpc_alarm_create_type) GetProcAddress(library, "grpc_alarm_create"); - grpc_alarm_cancel_import = (grpc_alarm_cancel_type) GetProcAddress(library, "grpc_alarm_cancel"); - grpc_alarm_destroy_import = (grpc_alarm_destroy_type) GetProcAddress(library, "grpc_alarm_destroy"); - grpc_channel_check_connectivity_state_import = (grpc_channel_check_connectivity_state_type) GetProcAddress(library, "grpc_channel_check_connectivity_state"); - grpc_channel_watch_connectivity_state_import = (grpc_channel_watch_connectivity_state_type) GetProcAddress(library, "grpc_channel_watch_connectivity_state"); - grpc_channel_create_call_import = (grpc_channel_create_call_type) GetProcAddress(library, "grpc_channel_create_call"); - grpc_channel_ping_import = (grpc_channel_ping_type) GetProcAddress(library, "grpc_channel_ping"); - grpc_channel_register_call_import = (grpc_channel_register_call_type) GetProcAddress(library, "grpc_channel_register_call"); - grpc_channel_create_registered_call_import = (grpc_channel_create_registered_call_type) GetProcAddress(library, "grpc_channel_create_registered_call"); - grpc_call_start_batch_import = (grpc_call_start_batch_type) GetProcAddress(library, "grpc_call_start_batch"); - grpc_call_get_peer_import = (grpc_call_get_peer_type) GetProcAddress(library, "grpc_call_get_peer"); - 
grpc_census_call_set_context_import = (grpc_census_call_set_context_type) GetProcAddress(library, "grpc_census_call_set_context"); - grpc_census_call_get_context_import = (grpc_census_call_get_context_type) GetProcAddress(library, "grpc_census_call_get_context"); - grpc_channel_get_target_import = (grpc_channel_get_target_type) GetProcAddress(library, "grpc_channel_get_target"); - grpc_insecure_channel_create_import = (grpc_insecure_channel_create_type) GetProcAddress(library, "grpc_insecure_channel_create"); - grpc_lame_client_channel_create_import = (grpc_lame_client_channel_create_type) GetProcAddress(library, "grpc_lame_client_channel_create"); - grpc_channel_destroy_import = (grpc_channel_destroy_type) GetProcAddress(library, "grpc_channel_destroy"); - grpc_call_cancel_import = (grpc_call_cancel_type) GetProcAddress(library, "grpc_call_cancel"); - grpc_call_cancel_with_status_import = (grpc_call_cancel_with_status_type) GetProcAddress(library, "grpc_call_cancel_with_status"); - grpc_call_destroy_import = (grpc_call_destroy_type) GetProcAddress(library, "grpc_call_destroy"); - grpc_server_request_call_import = (grpc_server_request_call_type) GetProcAddress(library, "grpc_server_request_call"); - grpc_server_register_method_import = (grpc_server_register_method_type) GetProcAddress(library, "grpc_server_register_method"); - grpc_server_request_registered_call_import = (grpc_server_request_registered_call_type) GetProcAddress(library, "grpc_server_request_registered_call"); - grpc_server_create_import = (grpc_server_create_type) GetProcAddress(library, "grpc_server_create"); - grpc_server_register_completion_queue_import = (grpc_server_register_completion_queue_type) GetProcAddress(library, "grpc_server_register_completion_queue"); - grpc_server_register_non_listening_completion_queue_import = (grpc_server_register_non_listening_completion_queue_type) GetProcAddress(library, "grpc_server_register_non_listening_completion_queue"); - grpc_server_add_insecure_http2_port_import = (grpc_server_add_insecure_http2_port_type) GetProcAddress(library, "grpc_server_add_insecure_http2_port"); - grpc_server_start_import = (grpc_server_start_type) GetProcAddress(library, "grpc_server_start"); - grpc_server_shutdown_and_notify_import = (grpc_server_shutdown_and_notify_type) GetProcAddress(library, "grpc_server_shutdown_and_notify"); - grpc_server_cancel_all_calls_import = (grpc_server_cancel_all_calls_type) GetProcAddress(library, "grpc_server_cancel_all_calls"); - grpc_server_destroy_import = (grpc_server_destroy_type) GetProcAddress(library, "grpc_server_destroy"); - grpc_tracer_set_enabled_import = (grpc_tracer_set_enabled_type) GetProcAddress(library, "grpc_tracer_set_enabled"); - grpc_header_key_is_legal_import = (grpc_header_key_is_legal_type) GetProcAddress(library, "grpc_header_key_is_legal"); - grpc_header_nonbin_value_is_legal_import = (grpc_header_nonbin_value_is_legal_type) GetProcAddress(library, "grpc_header_nonbin_value_is_legal"); - grpc_is_binary_header_import = (grpc_is_binary_header_type) GetProcAddress(library, "grpc_is_binary_header"); - grpc_call_error_to_string_import = (grpc_call_error_to_string_type) GetProcAddress(library, "grpc_call_error_to_string"); - grpc_auth_property_iterator_next_import = (grpc_auth_property_iterator_next_type) GetProcAddress(library, "grpc_auth_property_iterator_next"); - grpc_auth_context_property_iterator_import = (grpc_auth_context_property_iterator_type) GetProcAddress(library, "grpc_auth_context_property_iterator"); - 
grpc_auth_context_peer_identity_import = (grpc_auth_context_peer_identity_type) GetProcAddress(library, "grpc_auth_context_peer_identity"); - grpc_auth_context_find_properties_by_name_import = (grpc_auth_context_find_properties_by_name_type) GetProcAddress(library, "grpc_auth_context_find_properties_by_name"); - grpc_auth_context_peer_identity_property_name_import = (grpc_auth_context_peer_identity_property_name_type) GetProcAddress(library, "grpc_auth_context_peer_identity_property_name"); - grpc_auth_context_peer_is_authenticated_import = (grpc_auth_context_peer_is_authenticated_type) GetProcAddress(library, "grpc_auth_context_peer_is_authenticated"); - grpc_call_auth_context_import = (grpc_call_auth_context_type) GetProcAddress(library, "grpc_call_auth_context"); - grpc_auth_context_release_import = (grpc_auth_context_release_type) GetProcAddress(library, "grpc_auth_context_release"); - grpc_auth_context_add_property_import = (grpc_auth_context_add_property_type) GetProcAddress(library, "grpc_auth_context_add_property"); - grpc_auth_context_add_cstring_property_import = (grpc_auth_context_add_cstring_property_type) GetProcAddress(library, "grpc_auth_context_add_cstring_property"); - grpc_auth_context_set_peer_identity_property_name_import = (grpc_auth_context_set_peer_identity_property_name_type) GetProcAddress(library, "grpc_auth_context_set_peer_identity_property_name"); - grpc_channel_credentials_release_import = (grpc_channel_credentials_release_type) GetProcAddress(library, "grpc_channel_credentials_release"); - grpc_google_default_credentials_create_import = (grpc_google_default_credentials_create_type) GetProcAddress(library, "grpc_google_default_credentials_create"); - grpc_set_ssl_roots_override_callback_import = (grpc_set_ssl_roots_override_callback_type) GetProcAddress(library, "grpc_set_ssl_roots_override_callback"); - grpc_ssl_credentials_create_import = (grpc_ssl_credentials_create_type) GetProcAddress(library, "grpc_ssl_credentials_create"); - grpc_call_credentials_release_import = (grpc_call_credentials_release_type) GetProcAddress(library, "grpc_call_credentials_release"); - grpc_composite_channel_credentials_create_import = (grpc_composite_channel_credentials_create_type) GetProcAddress(library, "grpc_composite_channel_credentials_create"); - grpc_composite_call_credentials_create_import = (grpc_composite_call_credentials_create_type) GetProcAddress(library, "grpc_composite_call_credentials_create"); - grpc_google_compute_engine_credentials_create_import = (grpc_google_compute_engine_credentials_create_type) GetProcAddress(library, "grpc_google_compute_engine_credentials_create"); - grpc_max_auth_token_lifetime_import = (grpc_max_auth_token_lifetime_type) GetProcAddress(library, "grpc_max_auth_token_lifetime"); - grpc_service_account_jwt_access_credentials_create_import = (grpc_service_account_jwt_access_credentials_create_type) GetProcAddress(library, "grpc_service_account_jwt_access_credentials_create"); - grpc_google_refresh_token_credentials_create_import = (grpc_google_refresh_token_credentials_create_type) GetProcAddress(library, "grpc_google_refresh_token_credentials_create"); - grpc_access_token_credentials_create_import = (grpc_access_token_credentials_create_type) GetProcAddress(library, "grpc_access_token_credentials_create"); - grpc_google_iam_credentials_create_import = (grpc_google_iam_credentials_create_type) GetProcAddress(library, "grpc_google_iam_credentials_create"); - grpc_metadata_credentials_create_from_plugin_import = 
(grpc_metadata_credentials_create_from_plugin_type) GetProcAddress(library, "grpc_metadata_credentials_create_from_plugin"); - grpc_secure_channel_create_import = (grpc_secure_channel_create_type) GetProcAddress(library, "grpc_secure_channel_create"); - grpc_server_credentials_release_import = (grpc_server_credentials_release_type) GetProcAddress(library, "grpc_server_credentials_release"); - grpc_ssl_server_credentials_create_import = (grpc_ssl_server_credentials_create_type) GetProcAddress(library, "grpc_ssl_server_credentials_create"); - grpc_ssl_server_credentials_create_ex_import = (grpc_ssl_server_credentials_create_ex_type) GetProcAddress(library, "grpc_ssl_server_credentials_create_ex"); - grpc_server_add_secure_http2_port_import = (grpc_server_add_secure_http2_port_type) GetProcAddress(library, "grpc_server_add_secure_http2_port"); - grpc_call_set_credentials_import = (grpc_call_set_credentials_type) GetProcAddress(library, "grpc_call_set_credentials"); - grpc_server_credentials_set_auth_metadata_processor_import = (grpc_server_credentials_set_auth_metadata_processor_type) GetProcAddress(library, "grpc_server_credentials_set_auth_metadata_processor"); - gpr_malloc_import = (gpr_malloc_type) GetProcAddress(library, "gpr_malloc"); - gpr_free_import = (gpr_free_type) GetProcAddress(library, "gpr_free"); - gpr_realloc_import = (gpr_realloc_type) GetProcAddress(library, "gpr_realloc"); - gpr_malloc_aligned_import = (gpr_malloc_aligned_type) GetProcAddress(library, "gpr_malloc_aligned"); - gpr_free_aligned_import = (gpr_free_aligned_type) GetProcAddress(library, "gpr_free_aligned"); - gpr_set_allocation_functions_import = (gpr_set_allocation_functions_type) GetProcAddress(library, "gpr_set_allocation_functions"); - gpr_get_allocation_functions_import = (gpr_get_allocation_functions_type) GetProcAddress(library, "gpr_get_allocation_functions"); - grpc_raw_byte_buffer_create_import = (grpc_raw_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_byte_buffer_create"); - grpc_raw_compressed_byte_buffer_create_import = (grpc_raw_compressed_byte_buffer_create_type) GetProcAddress(library, "grpc_raw_compressed_byte_buffer_create"); - grpc_byte_buffer_copy_import = (grpc_byte_buffer_copy_type) GetProcAddress(library, "grpc_byte_buffer_copy"); - grpc_byte_buffer_length_import = (grpc_byte_buffer_length_type) GetProcAddress(library, "grpc_byte_buffer_length"); - grpc_byte_buffer_destroy_import = (grpc_byte_buffer_destroy_type) GetProcAddress(library, "grpc_byte_buffer_destroy"); - grpc_byte_buffer_reader_init_import = (grpc_byte_buffer_reader_init_type) GetProcAddress(library, "grpc_byte_buffer_reader_init"); - grpc_byte_buffer_reader_destroy_import = (grpc_byte_buffer_reader_destroy_type) GetProcAddress(library, "grpc_byte_buffer_reader_destroy"); - grpc_byte_buffer_reader_next_import = (grpc_byte_buffer_reader_next_type) GetProcAddress(library, "grpc_byte_buffer_reader_next"); - grpc_byte_buffer_reader_readall_import = (grpc_byte_buffer_reader_readall_type) GetProcAddress(library, "grpc_byte_buffer_reader_readall"); - grpc_raw_byte_buffer_from_reader_import = (grpc_raw_byte_buffer_from_reader_type) GetProcAddress(library, "grpc_raw_byte_buffer_from_reader"); - gpr_log_import = (gpr_log_type) GetProcAddress(library, "gpr_log"); - gpr_log_message_import = (gpr_log_message_type) GetProcAddress(library, "gpr_log_message"); - gpr_set_log_verbosity_import = (gpr_set_log_verbosity_type) GetProcAddress(library, "gpr_set_log_verbosity"); - gpr_log_verbosity_init_import = 
(gpr_log_verbosity_init_type) GetProcAddress(library, "gpr_log_verbosity_init"); - gpr_set_log_function_import = (gpr_set_log_function_type) GetProcAddress(library, "gpr_set_log_function"); - gpr_slice_ref_import = (gpr_slice_ref_type) GetProcAddress(library, "gpr_slice_ref"); - gpr_slice_unref_import = (gpr_slice_unref_type) GetProcAddress(library, "gpr_slice_unref"); - gpr_slice_new_import = (gpr_slice_new_type) GetProcAddress(library, "gpr_slice_new"); - gpr_slice_new_with_len_import = (gpr_slice_new_with_len_type) GetProcAddress(library, "gpr_slice_new_with_len"); - gpr_slice_malloc_import = (gpr_slice_malloc_type) GetProcAddress(library, "gpr_slice_malloc"); - gpr_slice_from_copied_string_import = (gpr_slice_from_copied_string_type) GetProcAddress(library, "gpr_slice_from_copied_string"); - gpr_slice_from_copied_buffer_import = (gpr_slice_from_copied_buffer_type) GetProcAddress(library, "gpr_slice_from_copied_buffer"); - gpr_slice_from_static_string_import = (gpr_slice_from_static_string_type) GetProcAddress(library, "gpr_slice_from_static_string"); - gpr_slice_sub_import = (gpr_slice_sub_type) GetProcAddress(library, "gpr_slice_sub"); - gpr_slice_sub_no_ref_import = (gpr_slice_sub_no_ref_type) GetProcAddress(library, "gpr_slice_sub_no_ref"); - gpr_slice_split_tail_import = (gpr_slice_split_tail_type) GetProcAddress(library, "gpr_slice_split_tail"); - gpr_slice_split_head_import = (gpr_slice_split_head_type) GetProcAddress(library, "gpr_slice_split_head"); - gpr_empty_slice_import = (gpr_empty_slice_type) GetProcAddress(library, "gpr_empty_slice"); - gpr_slice_cmp_import = (gpr_slice_cmp_type) GetProcAddress(library, "gpr_slice_cmp"); - gpr_slice_str_cmp_import = (gpr_slice_str_cmp_type) GetProcAddress(library, "gpr_slice_str_cmp"); - gpr_slice_buffer_init_import = (gpr_slice_buffer_init_type) GetProcAddress(library, "gpr_slice_buffer_init"); - gpr_slice_buffer_destroy_import = (gpr_slice_buffer_destroy_type) GetProcAddress(library, "gpr_slice_buffer_destroy"); - gpr_slice_buffer_add_import = (gpr_slice_buffer_add_type) GetProcAddress(library, "gpr_slice_buffer_add"); - gpr_slice_buffer_add_indexed_import = (gpr_slice_buffer_add_indexed_type) GetProcAddress(library, "gpr_slice_buffer_add_indexed"); - gpr_slice_buffer_addn_import = (gpr_slice_buffer_addn_type) GetProcAddress(library, "gpr_slice_buffer_addn"); - gpr_slice_buffer_tiny_add_import = (gpr_slice_buffer_tiny_add_type) GetProcAddress(library, "gpr_slice_buffer_tiny_add"); - gpr_slice_buffer_pop_import = (gpr_slice_buffer_pop_type) GetProcAddress(library, "gpr_slice_buffer_pop"); - gpr_slice_buffer_reset_and_unref_import = (gpr_slice_buffer_reset_and_unref_type) GetProcAddress(library, "gpr_slice_buffer_reset_and_unref"); - gpr_slice_buffer_swap_import = (gpr_slice_buffer_swap_type) GetProcAddress(library, "gpr_slice_buffer_swap"); - gpr_slice_buffer_move_into_import = (gpr_slice_buffer_move_into_type) GetProcAddress(library, "gpr_slice_buffer_move_into"); - gpr_slice_buffer_trim_end_import = (gpr_slice_buffer_trim_end_type) GetProcAddress(library, "gpr_slice_buffer_trim_end"); - gpr_slice_buffer_move_first_import = (gpr_slice_buffer_move_first_type) GetProcAddress(library, "gpr_slice_buffer_move_first"); - gpr_slice_buffer_take_first_import = (gpr_slice_buffer_take_first_type) GetProcAddress(library, "gpr_slice_buffer_take_first"); - gpr_mu_init_import = (gpr_mu_init_type) GetProcAddress(library, "gpr_mu_init"); - gpr_mu_destroy_import = (gpr_mu_destroy_type) GetProcAddress(library, "gpr_mu_destroy"); - gpr_mu_lock_import = 
(gpr_mu_lock_type) GetProcAddress(library, "gpr_mu_lock"); - gpr_mu_unlock_import = (gpr_mu_unlock_type) GetProcAddress(library, "gpr_mu_unlock"); - gpr_mu_trylock_import = (gpr_mu_trylock_type) GetProcAddress(library, "gpr_mu_trylock"); - gpr_cv_init_import = (gpr_cv_init_type) GetProcAddress(library, "gpr_cv_init"); - gpr_cv_destroy_import = (gpr_cv_destroy_type) GetProcAddress(library, "gpr_cv_destroy"); - gpr_cv_wait_import = (gpr_cv_wait_type) GetProcAddress(library, "gpr_cv_wait"); - gpr_cv_signal_import = (gpr_cv_signal_type) GetProcAddress(library, "gpr_cv_signal"); - gpr_cv_broadcast_import = (gpr_cv_broadcast_type) GetProcAddress(library, "gpr_cv_broadcast"); - gpr_once_init_import = (gpr_once_init_type) GetProcAddress(library, "gpr_once_init"); - gpr_event_init_import = (gpr_event_init_type) GetProcAddress(library, "gpr_event_init"); - gpr_event_set_import = (gpr_event_set_type) GetProcAddress(library, "gpr_event_set"); - gpr_event_get_import = (gpr_event_get_type) GetProcAddress(library, "gpr_event_get"); - gpr_event_wait_import = (gpr_event_wait_type) GetProcAddress(library, "gpr_event_wait"); - gpr_ref_init_import = (gpr_ref_init_type) GetProcAddress(library, "gpr_ref_init"); - gpr_ref_import = (gpr_ref_type) GetProcAddress(library, "gpr_ref"); - gpr_ref_non_zero_import = (gpr_ref_non_zero_type) GetProcAddress(library, "gpr_ref_non_zero"); - gpr_refn_import = (gpr_refn_type) GetProcAddress(library, "gpr_refn"); - gpr_unref_import = (gpr_unref_type) GetProcAddress(library, "gpr_unref"); - gpr_stats_init_import = (gpr_stats_init_type) GetProcAddress(library, "gpr_stats_init"); - gpr_stats_inc_import = (gpr_stats_inc_type) GetProcAddress(library, "gpr_stats_inc"); - gpr_stats_read_import = (gpr_stats_read_type) GetProcAddress(library, "gpr_stats_read"); - gpr_time_0_import = (gpr_time_0_type) GetProcAddress(library, "gpr_time_0"); - gpr_inf_future_import = (gpr_inf_future_type) GetProcAddress(library, "gpr_inf_future"); - gpr_inf_past_import = (gpr_inf_past_type) GetProcAddress(library, "gpr_inf_past"); - gpr_time_init_import = (gpr_time_init_type) GetProcAddress(library, "gpr_time_init"); - gpr_now_import = (gpr_now_type) GetProcAddress(library, "gpr_now"); - gpr_convert_clock_type_import = (gpr_convert_clock_type_type) GetProcAddress(library, "gpr_convert_clock_type"); - gpr_time_cmp_import = (gpr_time_cmp_type) GetProcAddress(library, "gpr_time_cmp"); - gpr_time_max_import = (gpr_time_max_type) GetProcAddress(library, "gpr_time_max"); - gpr_time_min_import = (gpr_time_min_type) GetProcAddress(library, "gpr_time_min"); - gpr_time_add_import = (gpr_time_add_type) GetProcAddress(library, "gpr_time_add"); - gpr_time_sub_import = (gpr_time_sub_type) GetProcAddress(library, "gpr_time_sub"); - gpr_time_from_micros_import = (gpr_time_from_micros_type) GetProcAddress(library, "gpr_time_from_micros"); - gpr_time_from_nanos_import = (gpr_time_from_nanos_type) GetProcAddress(library, "gpr_time_from_nanos"); - gpr_time_from_millis_import = (gpr_time_from_millis_type) GetProcAddress(library, "gpr_time_from_millis"); - gpr_time_from_seconds_import = (gpr_time_from_seconds_type) GetProcAddress(library, "gpr_time_from_seconds"); - gpr_time_from_minutes_import = (gpr_time_from_minutes_type) GetProcAddress(library, "gpr_time_from_minutes"); - gpr_time_from_hours_import = (gpr_time_from_hours_type) GetProcAddress(library, "gpr_time_from_hours"); - gpr_time_to_millis_import = (gpr_time_to_millis_type) GetProcAddress(library, "gpr_time_to_millis"); - gpr_time_similar_import = 
(gpr_time_similar_type) GetProcAddress(library, "gpr_time_similar"); - gpr_sleep_until_import = (gpr_sleep_until_type) GetProcAddress(library, "gpr_sleep_until"); - gpr_timespec_to_micros_import = (gpr_timespec_to_micros_type) GetProcAddress(library, "gpr_timespec_to_micros"); - gpr_avl_create_import = (gpr_avl_create_type) GetProcAddress(library, "gpr_avl_create"); - gpr_avl_ref_import = (gpr_avl_ref_type) GetProcAddress(library, "gpr_avl_ref"); - gpr_avl_unref_import = (gpr_avl_unref_type) GetProcAddress(library, "gpr_avl_unref"); - gpr_avl_add_import = (gpr_avl_add_type) GetProcAddress(library, "gpr_avl_add"); - gpr_avl_remove_import = (gpr_avl_remove_type) GetProcAddress(library, "gpr_avl_remove"); - gpr_avl_get_import = (gpr_avl_get_type) GetProcAddress(library, "gpr_avl_get"); - gpr_cmdline_create_import = (gpr_cmdline_create_type) GetProcAddress(library, "gpr_cmdline_create"); - gpr_cmdline_add_int_import = (gpr_cmdline_add_int_type) GetProcAddress(library, "gpr_cmdline_add_int"); - gpr_cmdline_add_flag_import = (gpr_cmdline_add_flag_type) GetProcAddress(library, "gpr_cmdline_add_flag"); - gpr_cmdline_add_string_import = (gpr_cmdline_add_string_type) GetProcAddress(library, "gpr_cmdline_add_string"); - gpr_cmdline_on_extra_arg_import = (gpr_cmdline_on_extra_arg_type) GetProcAddress(library, "gpr_cmdline_on_extra_arg"); - gpr_cmdline_set_survive_failure_import = (gpr_cmdline_set_survive_failure_type) GetProcAddress(library, "gpr_cmdline_set_survive_failure"); - gpr_cmdline_parse_import = (gpr_cmdline_parse_type) GetProcAddress(library, "gpr_cmdline_parse"); - gpr_cmdline_destroy_import = (gpr_cmdline_destroy_type) GetProcAddress(library, "gpr_cmdline_destroy"); - gpr_cmdline_usage_string_import = (gpr_cmdline_usage_string_type) GetProcAddress(library, "gpr_cmdline_usage_string"); - gpr_cpu_num_cores_import = (gpr_cpu_num_cores_type) GetProcAddress(library, "gpr_cpu_num_cores"); - gpr_cpu_current_cpu_import = (gpr_cpu_current_cpu_type) GetProcAddress(library, "gpr_cpu_current_cpu"); - gpr_histogram_create_import = (gpr_histogram_create_type) GetProcAddress(library, "gpr_histogram_create"); - gpr_histogram_destroy_import = (gpr_histogram_destroy_type) GetProcAddress(library, "gpr_histogram_destroy"); - gpr_histogram_add_import = (gpr_histogram_add_type) GetProcAddress(library, "gpr_histogram_add"); - gpr_histogram_merge_import = (gpr_histogram_merge_type) GetProcAddress(library, "gpr_histogram_merge"); - gpr_histogram_percentile_import = (gpr_histogram_percentile_type) GetProcAddress(library, "gpr_histogram_percentile"); - gpr_histogram_mean_import = (gpr_histogram_mean_type) GetProcAddress(library, "gpr_histogram_mean"); - gpr_histogram_stddev_import = (gpr_histogram_stddev_type) GetProcAddress(library, "gpr_histogram_stddev"); - gpr_histogram_variance_import = (gpr_histogram_variance_type) GetProcAddress(library, "gpr_histogram_variance"); - gpr_histogram_maximum_import = (gpr_histogram_maximum_type) GetProcAddress(library, "gpr_histogram_maximum"); - gpr_histogram_minimum_import = (gpr_histogram_minimum_type) GetProcAddress(library, "gpr_histogram_minimum"); - gpr_histogram_count_import = (gpr_histogram_count_type) GetProcAddress(library, "gpr_histogram_count"); - gpr_histogram_sum_import = (gpr_histogram_sum_type) GetProcAddress(library, "gpr_histogram_sum"); - gpr_histogram_sum_of_squares_import = (gpr_histogram_sum_of_squares_type) GetProcAddress(library, "gpr_histogram_sum_of_squares"); - gpr_histogram_get_contents_import = (gpr_histogram_get_contents_type) 
GetProcAddress(library, "gpr_histogram_get_contents"); - gpr_histogram_merge_contents_import = (gpr_histogram_merge_contents_type) GetProcAddress(library, "gpr_histogram_merge_contents"); - gpr_join_host_port_import = (gpr_join_host_port_type) GetProcAddress(library, "gpr_join_host_port"); - gpr_split_host_port_import = (gpr_split_host_port_type) GetProcAddress(library, "gpr_split_host_port"); - gpr_format_message_import = (gpr_format_message_type) GetProcAddress(library, "gpr_format_message"); - gpr_strdup_import = (gpr_strdup_type) GetProcAddress(library, "gpr_strdup"); - gpr_asprintf_import = (gpr_asprintf_type) GetProcAddress(library, "gpr_asprintf"); - gpr_subprocess_binary_extension_import = (gpr_subprocess_binary_extension_type) GetProcAddress(library, "gpr_subprocess_binary_extension"); - gpr_subprocess_create_import = (gpr_subprocess_create_type) GetProcAddress(library, "gpr_subprocess_create"); - gpr_subprocess_destroy_import = (gpr_subprocess_destroy_type) GetProcAddress(library, "gpr_subprocess_destroy"); - gpr_subprocess_join_import = (gpr_subprocess_join_type) GetProcAddress(library, "gpr_subprocess_join"); - gpr_subprocess_interrupt_import = (gpr_subprocess_interrupt_type) GetProcAddress(library, "gpr_subprocess_interrupt"); - gpr_thd_new_import = (gpr_thd_new_type) GetProcAddress(library, "gpr_thd_new"); - gpr_thd_options_default_import = (gpr_thd_options_default_type) GetProcAddress(library, "gpr_thd_options_default"); - gpr_thd_options_set_detached_import = (gpr_thd_options_set_detached_type) GetProcAddress(library, "gpr_thd_options_set_detached"); - gpr_thd_options_set_joinable_import = (gpr_thd_options_set_joinable_type) GetProcAddress(library, "gpr_thd_options_set_joinable"); - gpr_thd_options_is_detached_import = (gpr_thd_options_is_detached_type) GetProcAddress(library, "gpr_thd_options_is_detached"); - gpr_thd_options_is_joinable_import = (gpr_thd_options_is_joinable_type) GetProcAddress(library, "gpr_thd_options_is_joinable"); - gpr_thd_currentid_import = (gpr_thd_currentid_type) GetProcAddress(library, "gpr_thd_currentid"); - gpr_thd_join_import = (gpr_thd_join_type) GetProcAddress(library, "gpr_thd_join"); -} - -#ifdef __cplusplus -} -#endif /* __cpluslus */ - -#endif /* !GPR_WINDOWS */ diff --git a/src/python/grpcio/grpc/_cython/imports.generated.h b/src/python/grpcio/grpc/_cython/imports.generated.h index 5e100796cd..8e5c9a8ce2 100644 --- a/src/python/grpcio/grpc/_cython/imports.generated.h +++ b/src/python/grpcio/grpc/_cython/imports.generated.h @@ -31,829 +31,12 @@ * */ +/* TODO(atash) remove cruft */ #ifndef PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ #define PYGRPC_CYTHON_WINDOWS_IMPORTS_H_ #include <grpc/support/port_platform.h> -#ifdef GPR_WINDOWS - -#include <windows.h> - -#include <grpc/census.h> -#include <grpc/compression.h> -#include <grpc/grpc.h> -#include <grpc/grpc_security.h> -#include <grpc/impl/codegen/alloc.h> -#include <grpc/impl/codegen/byte_buffer.h> -#include <grpc/impl/codegen/log.h> -#include <grpc/impl/codegen/slice.h> -#include <grpc/impl/codegen/slice_buffer.h> -#include <grpc/impl/codegen/sync.h> -#include <grpc/impl/codegen/time.h> -#include <grpc/support/avl.h> -#include <grpc/support/cmdline.h> -#include <grpc/support/cpu.h> -#include <grpc/support/histogram.h> -#include <grpc/support/host_port.h> -#include <grpc/support/log_windows.h> -#include <grpc/support/string_util.h> -#include <grpc/support/subprocess.h> -#include <grpc/support/thd.h> - -typedef int(*census_initialize_type)(int features); -extern census_initialize_type 
census_initialize_import; -#define census_initialize census_initialize_import -typedef void(*census_shutdown_type)(void); -extern census_shutdown_type census_shutdown_import; -#define census_shutdown census_shutdown_import -typedef int(*census_supported_type)(void); -extern census_supported_type census_supported_import; -#define census_supported census_supported_import -typedef int(*census_enabled_type)(void); -extern census_enabled_type census_enabled_import; -#define census_enabled census_enabled_import -typedef census_context *(*census_context_create_type)(const census_context *base, const census_tag *tags, int ntags, census_context_status const **status); -extern census_context_create_type census_context_create_import; -#define census_context_create census_context_create_import -typedef void(*census_context_destroy_type)(census_context *context); -extern census_context_destroy_type census_context_destroy_import; -#define census_context_destroy census_context_destroy_import -typedef const census_context_status *(*census_context_get_status_type)(const census_context *context); -extern census_context_get_status_type census_context_get_status_import; -#define census_context_get_status census_context_get_status_import -typedef void(*census_context_initialize_iterator_type)(const census_context *context, census_context_iterator *iterator); -extern census_context_initialize_iterator_type census_context_initialize_iterator_import; -#define census_context_initialize_iterator census_context_initialize_iterator_import -typedef int(*census_context_next_tag_type)(census_context_iterator *iterator, census_tag *tag); -extern census_context_next_tag_type census_context_next_tag_import; -#define census_context_next_tag census_context_next_tag_import -typedef int(*census_context_get_tag_type)(const census_context *context, const char *key, census_tag *tag); -extern census_context_get_tag_type census_context_get_tag_import; -#define census_context_get_tag census_context_get_tag_import -typedef size_t(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size); -extern census_context_encode_type census_context_encode_import; -#define census_context_encode census_context_encode_import -typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size); -extern census_context_decode_type census_context_decode_import; -#define census_context_decode census_context_decode_import -typedef int(*census_trace_mask_type)(const census_context *context); -extern census_trace_mask_type census_trace_mask_import; -#define census_trace_mask census_trace_mask_import -typedef void(*census_set_trace_mask_type)(int trace_mask); -extern census_set_trace_mask_type census_set_trace_mask_import; -#define census_set_trace_mask census_set_trace_mask_import -typedef census_timestamp(*census_start_rpc_op_timestamp_type)(void); -extern census_start_rpc_op_timestamp_type census_start_rpc_op_timestamp_import; -#define census_start_rpc_op_timestamp census_start_rpc_op_timestamp_import -typedef census_context *(*census_start_client_rpc_op_type)(const census_context *context, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, const census_timestamp *start_time); -extern census_start_client_rpc_op_type census_start_client_rpc_op_import; -#define census_start_client_rpc_op census_start_client_rpc_op_import -typedef void(*census_set_rpc_client_peer_type)(census_context *context, const char *peer); -extern census_set_rpc_client_peer_type 
census_set_rpc_client_peer_import; -#define census_set_rpc_client_peer census_set_rpc_client_peer_import -typedef census_context *(*census_start_server_rpc_op_type)(const char *buffer, int64_t rpc_name_id, const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, census_timestamp *start_time); -extern census_start_server_rpc_op_type census_start_server_rpc_op_import; -#define census_start_server_rpc_op census_start_server_rpc_op_import -typedef census_context *(*census_start_op_type)(census_context *context, const char *family, const char *name, int trace_mask); -extern census_start_op_type census_start_op_import; -#define census_start_op census_start_op_import -typedef void(*census_end_op_type)(census_context *context, int status); -extern census_end_op_type census_end_op_import; -#define census_end_op census_end_op_import -typedef void(*census_trace_print_type)(census_context *context, uint32_t type, const char *buffer, size_t n); -extern census_trace_print_type census_trace_print_import; -#define census_trace_print census_trace_print_import -typedef int(*census_trace_scan_start_type)(int consume); -extern census_trace_scan_start_type census_trace_scan_start_import; -#define census_trace_scan_start census_trace_scan_start_import -typedef int(*census_get_trace_record_type)(census_trace_record *trace_record); -extern census_get_trace_record_type census_get_trace_record_import; -#define census_get_trace_record census_get_trace_record_import -typedef void(*census_trace_scan_end_type)(); -extern census_trace_scan_end_type census_trace_scan_end_import; -#define census_trace_scan_end census_trace_scan_end_import -typedef int32_t(*census_define_resource_type)(const uint8_t *resource_pb, size_t resource_pb_size); -extern census_define_resource_type census_define_resource_import; -#define census_define_resource census_define_resource_import -typedef void(*census_delete_resource_type)(int32_t resource_id); -extern census_delete_resource_type census_delete_resource_import; -#define census_delete_resource census_delete_resource_import -typedef int32_t(*census_resource_id_type)(const char *name); -extern census_resource_id_type census_resource_id_import; -#define census_resource_id census_resource_id_import -typedef void(*census_record_values_type)(census_context *context, census_value *values, size_t nvalues); -extern census_record_values_type census_record_values_import; -#define census_record_values census_record_values_import -typedef int(*grpc_compression_algorithm_parse_type)(const char *name, size_t name_length, grpc_compression_algorithm *algorithm); -extern grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import; -#define grpc_compression_algorithm_parse grpc_compression_algorithm_parse_import -typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, char **name); -extern grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import; -#define grpc_compression_algorithm_name grpc_compression_algorithm_name_import -typedef grpc_compression_algorithm(*grpc_compression_algorithm_for_level_type)(grpc_compression_level level, uint32_t accepted_encodings); -extern grpc_compression_algorithm_for_level_type grpc_compression_algorithm_for_level_import; -#define grpc_compression_algorithm_for_level grpc_compression_algorithm_for_level_import -typedef void(*grpc_compression_options_init_type)(grpc_compression_options *opts); -extern grpc_compression_options_init_type grpc_compression_options_init_import; -#define 
grpc_compression_options_init grpc_compression_options_init_import -typedef void(*grpc_compression_options_enable_algorithm_type)(grpc_compression_options *opts, grpc_compression_algorithm algorithm); -extern grpc_compression_options_enable_algorithm_type grpc_compression_options_enable_algorithm_import; -#define grpc_compression_options_enable_algorithm grpc_compression_options_enable_algorithm_import -typedef void(*grpc_compression_options_disable_algorithm_type)(grpc_compression_options *opts, grpc_compression_algorithm algorithm); -extern grpc_compression_options_disable_algorithm_type grpc_compression_options_disable_algorithm_import; -#define grpc_compression_options_disable_algorithm grpc_compression_options_disable_algorithm_import -typedef int(*grpc_compression_options_is_algorithm_enabled_type)(const grpc_compression_options *opts, grpc_compression_algorithm algorithm); -extern grpc_compression_options_is_algorithm_enabled_type grpc_compression_options_is_algorithm_enabled_import; -#define grpc_compression_options_is_algorithm_enabled grpc_compression_options_is_algorithm_enabled_import -typedef void(*grpc_metadata_array_init_type)(grpc_metadata_array *array); -extern grpc_metadata_array_init_type grpc_metadata_array_init_import; -#define grpc_metadata_array_init grpc_metadata_array_init_import -typedef void(*grpc_metadata_array_destroy_type)(grpc_metadata_array *array); -extern grpc_metadata_array_destroy_type grpc_metadata_array_destroy_import; -#define grpc_metadata_array_destroy grpc_metadata_array_destroy_import -typedef void(*grpc_call_details_init_type)(grpc_call_details *details); -extern grpc_call_details_init_type grpc_call_details_init_import; -#define grpc_call_details_init grpc_call_details_init_import -typedef void(*grpc_call_details_destroy_type)(grpc_call_details *details); -extern grpc_call_details_destroy_type grpc_call_details_destroy_import; -#define grpc_call_details_destroy grpc_call_details_destroy_import -typedef void(*grpc_register_plugin_type)(void (*init)(void), void (*destroy)(void)); -extern grpc_register_plugin_type grpc_register_plugin_import; -#define grpc_register_plugin grpc_register_plugin_import -typedef void(*grpc_init_type)(void); -extern grpc_init_type grpc_init_import; -#define grpc_init grpc_init_import -typedef void(*grpc_shutdown_type)(void); -extern grpc_shutdown_type grpc_shutdown_import; -#define grpc_shutdown grpc_shutdown_import -typedef const char *(*grpc_version_string_type)(void); -extern grpc_version_string_type grpc_version_string_import; -#define grpc_version_string grpc_version_string_import -typedef grpc_completion_queue *(*grpc_completion_queue_create_type)(void *reserved); -extern grpc_completion_queue_create_type grpc_completion_queue_create_import; -#define grpc_completion_queue_create grpc_completion_queue_create_import -typedef grpc_event(*grpc_completion_queue_next_type)(grpc_completion_queue *cq, gpr_timespec deadline, void *reserved); -extern grpc_completion_queue_next_type grpc_completion_queue_next_import; -#define grpc_completion_queue_next grpc_completion_queue_next_import -typedef grpc_event(*grpc_completion_queue_pluck_type)(grpc_completion_queue *cq, void *tag, gpr_timespec deadline, void *reserved); -extern grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import; -#define grpc_completion_queue_pluck grpc_completion_queue_pluck_import -typedef void(*grpc_completion_queue_shutdown_type)(grpc_completion_queue *cq); -extern grpc_completion_queue_shutdown_type grpc_completion_queue_shutdown_import; 
-#define grpc_completion_queue_shutdown grpc_completion_queue_shutdown_import -typedef void(*grpc_completion_queue_destroy_type)(grpc_completion_queue *cq); -extern grpc_completion_queue_destroy_type grpc_completion_queue_destroy_import; -#define grpc_completion_queue_destroy grpc_completion_queue_destroy_import -typedef grpc_alarm *(*grpc_alarm_create_type)(grpc_completion_queue *cq, gpr_timespec deadline, void *tag); -extern grpc_alarm_create_type grpc_alarm_create_import; -#define grpc_alarm_create grpc_alarm_create_import -typedef void(*grpc_alarm_cancel_type)(grpc_alarm *alarm); -extern grpc_alarm_cancel_type grpc_alarm_cancel_import; -#define grpc_alarm_cancel grpc_alarm_cancel_import -typedef void(*grpc_alarm_destroy_type)(grpc_alarm *alarm); -extern grpc_alarm_destroy_type grpc_alarm_destroy_import; -#define grpc_alarm_destroy grpc_alarm_destroy_import -typedef grpc_connectivity_state(*grpc_channel_check_connectivity_state_type)(grpc_channel *channel, int try_to_connect); -extern grpc_channel_check_connectivity_state_type grpc_channel_check_connectivity_state_import; -#define grpc_channel_check_connectivity_state grpc_channel_check_connectivity_state_import -typedef void(*grpc_channel_watch_connectivity_state_type)(grpc_channel *channel, grpc_connectivity_state last_observed_state, gpr_timespec deadline, grpc_completion_queue *cq, void *tag); -extern grpc_channel_watch_connectivity_state_type grpc_channel_watch_connectivity_state_import; -#define grpc_channel_watch_connectivity_state grpc_channel_watch_connectivity_state_import -typedef grpc_call *(*grpc_channel_create_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, const char *method, const char *host, gpr_timespec deadline, void *reserved); -extern grpc_channel_create_call_type grpc_channel_create_call_import; -#define grpc_channel_create_call grpc_channel_create_call_import -typedef void(*grpc_channel_ping_type)(grpc_channel *channel, grpc_completion_queue *cq, void *tag, void *reserved); -extern grpc_channel_ping_type grpc_channel_ping_import; -#define grpc_channel_ping grpc_channel_ping_import -typedef void *(*grpc_channel_register_call_type)(grpc_channel *channel, const char *method, const char *host, void *reserved); -extern grpc_channel_register_call_type grpc_channel_register_call_import; -#define grpc_channel_register_call grpc_channel_register_call_import -typedef grpc_call *(*grpc_channel_create_registered_call_type)(grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, grpc_completion_queue *completion_queue, void *registered_call_handle, gpr_timespec deadline, void *reserved); -extern grpc_channel_create_registered_call_type grpc_channel_create_registered_call_import; -#define grpc_channel_create_registered_call grpc_channel_create_registered_call_import -typedef grpc_call_error(*grpc_call_start_batch_type)(grpc_call *call, const grpc_op *ops, size_t nops, void *tag, void *reserved); -extern grpc_call_start_batch_type grpc_call_start_batch_import; -#define grpc_call_start_batch grpc_call_start_batch_import -typedef char *(*grpc_call_get_peer_type)(grpc_call *call); -extern grpc_call_get_peer_type grpc_call_get_peer_import; -#define grpc_call_get_peer grpc_call_get_peer_import -typedef void(*grpc_census_call_set_context_type)(grpc_call *call, struct census_context *context); -extern grpc_census_call_set_context_type grpc_census_call_set_context_import; -#define grpc_census_call_set_context 
grpc_census_call_set_context_import -typedef struct census_context *(*grpc_census_call_get_context_type)(grpc_call *call); -extern grpc_census_call_get_context_type grpc_census_call_get_context_import; -#define grpc_census_call_get_context grpc_census_call_get_context_import -typedef char *(*grpc_channel_get_target_type)(grpc_channel *channel); -extern grpc_channel_get_target_type grpc_channel_get_target_import; -#define grpc_channel_get_target grpc_channel_get_target_import -typedef grpc_channel *(*grpc_insecure_channel_create_type)(const char *target, const grpc_channel_args *args, void *reserved); -extern grpc_insecure_channel_create_type grpc_insecure_channel_create_import; -#define grpc_insecure_channel_create grpc_insecure_channel_create_import -typedef grpc_channel *(*grpc_lame_client_channel_create_type)(const char *target, grpc_status_code error_code, const char *error_message); -extern grpc_lame_client_channel_create_type grpc_lame_client_channel_create_import; -#define grpc_lame_client_channel_create grpc_lame_client_channel_create_import -typedef void(*grpc_channel_destroy_type)(grpc_channel *channel); -extern grpc_channel_destroy_type grpc_channel_destroy_import; -#define grpc_channel_destroy grpc_channel_destroy_import -typedef grpc_call_error(*grpc_call_cancel_type)(grpc_call *call, void *reserved); -extern grpc_call_cancel_type grpc_call_cancel_import; -#define grpc_call_cancel grpc_call_cancel_import -typedef grpc_call_error(*grpc_call_cancel_with_status_type)(grpc_call *call, grpc_status_code status, const char *description, void *reserved); -extern grpc_call_cancel_with_status_type grpc_call_cancel_with_status_import; -#define grpc_call_cancel_with_status grpc_call_cancel_with_status_import -typedef void(*grpc_call_destroy_type)(grpc_call *call); -extern grpc_call_destroy_type grpc_call_destroy_import; -#define grpc_call_destroy grpc_call_destroy_import -typedef grpc_call_error(*grpc_server_request_call_type)(grpc_server *server, grpc_call **call, grpc_call_details *details, grpc_metadata_array *request_metadata, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new); -extern grpc_server_request_call_type grpc_server_request_call_import; -#define grpc_server_request_call grpc_server_request_call_import -typedef void *(*grpc_server_register_method_type)(grpc_server *server, const char *method, const char *host, grpc_server_register_method_payload_handling payload_handling, uint32_t flags); -extern grpc_server_register_method_type grpc_server_register_method_import; -#define grpc_server_register_method grpc_server_register_method_import -typedef grpc_call_error(*grpc_server_request_registered_call_type)(grpc_server *server, void *registered_method, grpc_call **call, gpr_timespec *deadline, grpc_metadata_array *request_metadata, grpc_byte_buffer **optional_payload, grpc_completion_queue *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void *tag_new); -extern grpc_server_request_registered_call_type grpc_server_request_registered_call_import; -#define grpc_server_request_registered_call grpc_server_request_registered_call_import -typedef grpc_server *(*grpc_server_create_type)(const grpc_channel_args *args, void *reserved); -extern grpc_server_create_type grpc_server_create_import; -#define grpc_server_create grpc_server_create_import -typedef void(*grpc_server_register_completion_queue_type)(grpc_server *server, grpc_completion_queue *cq, void *reserved); -extern grpc_server_register_completion_queue_type 
grpc_server_register_completion_queue_import; -#define grpc_server_register_completion_queue grpc_server_register_completion_queue_import -typedef void(*grpc_server_register_non_listening_completion_queue_type)(grpc_server *server, grpc_completion_queue *q, void *reserved); -extern grpc_server_register_non_listening_completion_queue_type grpc_server_register_non_listening_completion_queue_import; -#define grpc_server_register_non_listening_completion_queue grpc_server_register_non_listening_completion_queue_import -typedef int(*grpc_server_add_insecure_http2_port_type)(grpc_server *server, const char *addr); -extern grpc_server_add_insecure_http2_port_type grpc_server_add_insecure_http2_port_import; -#define grpc_server_add_insecure_http2_port grpc_server_add_insecure_http2_port_import -typedef void(*grpc_server_start_type)(grpc_server *server); -extern grpc_server_start_type grpc_server_start_import; -#define grpc_server_start grpc_server_start_import -typedef void(*grpc_server_shutdown_and_notify_type)(grpc_server *server, grpc_completion_queue *cq, void *tag); -extern grpc_server_shutdown_and_notify_type grpc_server_shutdown_and_notify_import; -#define grpc_server_shutdown_and_notify grpc_server_shutdown_and_notify_import -typedef void(*grpc_server_cancel_all_calls_type)(grpc_server *server); -extern grpc_server_cancel_all_calls_type grpc_server_cancel_all_calls_import; -#define grpc_server_cancel_all_calls grpc_server_cancel_all_calls_import -typedef void(*grpc_server_destroy_type)(grpc_server *server); -extern grpc_server_destroy_type grpc_server_destroy_import; -#define grpc_server_destroy grpc_server_destroy_import -typedef int(*grpc_tracer_set_enabled_type)(const char *name, int enabled); -extern grpc_tracer_set_enabled_type grpc_tracer_set_enabled_import; -#define grpc_tracer_set_enabled grpc_tracer_set_enabled_import -typedef int(*grpc_header_key_is_legal_type)(const char *key, size_t length); -extern grpc_header_key_is_legal_type grpc_header_key_is_legal_import; -#define grpc_header_key_is_legal grpc_header_key_is_legal_import -typedef int(*grpc_header_nonbin_value_is_legal_type)(const char *value, size_t length); -extern grpc_header_nonbin_value_is_legal_type grpc_header_nonbin_value_is_legal_import; -#define grpc_header_nonbin_value_is_legal grpc_header_nonbin_value_is_legal_import -typedef int(*grpc_is_binary_header_type)(const char *key, size_t length); -extern grpc_is_binary_header_type grpc_is_binary_header_import; -#define grpc_is_binary_header grpc_is_binary_header_import -typedef const char *(*grpc_call_error_to_string_type)(grpc_call_error error); -extern grpc_call_error_to_string_type grpc_call_error_to_string_import; -#define grpc_call_error_to_string grpc_call_error_to_string_import -typedef const grpc_auth_property *(*grpc_auth_property_iterator_next_type)(grpc_auth_property_iterator *it); -extern grpc_auth_property_iterator_next_type grpc_auth_property_iterator_next_import; -#define grpc_auth_property_iterator_next grpc_auth_property_iterator_next_import -typedef grpc_auth_property_iterator(*grpc_auth_context_property_iterator_type)(const grpc_auth_context *ctx); -extern grpc_auth_context_property_iterator_type grpc_auth_context_property_iterator_import; -#define grpc_auth_context_property_iterator grpc_auth_context_property_iterator_import -typedef grpc_auth_property_iterator(*grpc_auth_context_peer_identity_type)(const grpc_auth_context *ctx); -extern grpc_auth_context_peer_identity_type grpc_auth_context_peer_identity_import; -#define 
grpc_auth_context_peer_identity grpc_auth_context_peer_identity_import -typedef grpc_auth_property_iterator(*grpc_auth_context_find_properties_by_name_type)(const grpc_auth_context *ctx, const char *name); -extern grpc_auth_context_find_properties_by_name_type grpc_auth_context_find_properties_by_name_import; -#define grpc_auth_context_find_properties_by_name grpc_auth_context_find_properties_by_name_import -typedef const char *(*grpc_auth_context_peer_identity_property_name_type)(const grpc_auth_context *ctx); -extern grpc_auth_context_peer_identity_property_name_type grpc_auth_context_peer_identity_property_name_import; -#define grpc_auth_context_peer_identity_property_name grpc_auth_context_peer_identity_property_name_import -typedef int(*grpc_auth_context_peer_is_authenticated_type)(const grpc_auth_context *ctx); -extern grpc_auth_context_peer_is_authenticated_type grpc_auth_context_peer_is_authenticated_import; -#define grpc_auth_context_peer_is_authenticated grpc_auth_context_peer_is_authenticated_import -typedef grpc_auth_context *(*grpc_call_auth_context_type)(grpc_call *call); -extern grpc_call_auth_context_type grpc_call_auth_context_import; -#define grpc_call_auth_context grpc_call_auth_context_import -typedef void(*grpc_auth_context_release_type)(grpc_auth_context *context); -extern grpc_auth_context_release_type grpc_auth_context_release_import; -#define grpc_auth_context_release grpc_auth_context_release_import -typedef void(*grpc_auth_context_add_property_type)(grpc_auth_context *ctx, const char *name, const char *value, size_t value_length); -extern grpc_auth_context_add_property_type grpc_auth_context_add_property_import; -#define grpc_auth_context_add_property grpc_auth_context_add_property_import -typedef void(*grpc_auth_context_add_cstring_property_type)(grpc_auth_context *ctx, const char *name, const char *value); -extern grpc_auth_context_add_cstring_property_type grpc_auth_context_add_cstring_property_import; -#define grpc_auth_context_add_cstring_property grpc_auth_context_add_cstring_property_import -typedef int(*grpc_auth_context_set_peer_identity_property_name_type)(grpc_auth_context *ctx, const char *name); -extern grpc_auth_context_set_peer_identity_property_name_type grpc_auth_context_set_peer_identity_property_name_import; -#define grpc_auth_context_set_peer_identity_property_name grpc_auth_context_set_peer_identity_property_name_import -typedef void(*grpc_channel_credentials_release_type)(grpc_channel_credentials *creds); -extern grpc_channel_credentials_release_type grpc_channel_credentials_release_import; -#define grpc_channel_credentials_release grpc_channel_credentials_release_import -typedef grpc_channel_credentials *(*grpc_google_default_credentials_create_type)(void); -extern grpc_google_default_credentials_create_type grpc_google_default_credentials_create_import; -#define grpc_google_default_credentials_create grpc_google_default_credentials_create_import -typedef void(*grpc_set_ssl_roots_override_callback_type)(grpc_ssl_roots_override_callback cb); -extern grpc_set_ssl_roots_override_callback_type grpc_set_ssl_roots_override_callback_import; -#define grpc_set_ssl_roots_override_callback grpc_set_ssl_roots_override_callback_import -typedef grpc_channel_credentials *(*grpc_ssl_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair, void *reserved); -extern grpc_ssl_credentials_create_type grpc_ssl_credentials_create_import; -#define grpc_ssl_credentials_create grpc_ssl_credentials_create_import -typedef 
void(*grpc_call_credentials_release_type)(grpc_call_credentials *creds); -extern grpc_call_credentials_release_type grpc_call_credentials_release_import; -#define grpc_call_credentials_release grpc_call_credentials_release_import -typedef grpc_channel_credentials *(*grpc_composite_channel_credentials_create_type)(grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds, void *reserved); -extern grpc_composite_channel_credentials_create_type grpc_composite_channel_credentials_create_import; -#define grpc_composite_channel_credentials_create grpc_composite_channel_credentials_create_import -typedef grpc_call_credentials *(*grpc_composite_call_credentials_create_type)(grpc_call_credentials *creds1, grpc_call_credentials *creds2, void *reserved); -extern grpc_composite_call_credentials_create_type grpc_composite_call_credentials_create_import; -#define grpc_composite_call_credentials_create grpc_composite_call_credentials_create_import -typedef grpc_call_credentials *(*grpc_google_compute_engine_credentials_create_type)(void *reserved); -extern grpc_google_compute_engine_credentials_create_type grpc_google_compute_engine_credentials_create_import; -#define grpc_google_compute_engine_credentials_create grpc_google_compute_engine_credentials_create_import -typedef gpr_timespec(*grpc_max_auth_token_lifetime_type)(); -extern grpc_max_auth_token_lifetime_type grpc_max_auth_token_lifetime_import; -#define grpc_max_auth_token_lifetime grpc_max_auth_token_lifetime_import -typedef grpc_call_credentials *(*grpc_service_account_jwt_access_credentials_create_type)(const char *json_key, gpr_timespec token_lifetime, void *reserved); -extern grpc_service_account_jwt_access_credentials_create_type grpc_service_account_jwt_access_credentials_create_import; -#define grpc_service_account_jwt_access_credentials_create grpc_service_account_jwt_access_credentials_create_import -typedef grpc_call_credentials *(*grpc_google_refresh_token_credentials_create_type)(const char *json_refresh_token, void *reserved); -extern grpc_google_refresh_token_credentials_create_type grpc_google_refresh_token_credentials_create_import; -#define grpc_google_refresh_token_credentials_create grpc_google_refresh_token_credentials_create_import -typedef grpc_call_credentials *(*grpc_access_token_credentials_create_type)(const char *access_token, void *reserved); -extern grpc_access_token_credentials_create_type grpc_access_token_credentials_create_import; -#define grpc_access_token_credentials_create grpc_access_token_credentials_create_import -typedef grpc_call_credentials *(*grpc_google_iam_credentials_create_type)(const char *authorization_token, const char *authority_selector, void *reserved); -extern grpc_google_iam_credentials_create_type grpc_google_iam_credentials_create_import; -#define grpc_google_iam_credentials_create grpc_google_iam_credentials_create_import -typedef grpc_call_credentials *(*grpc_metadata_credentials_create_from_plugin_type)(grpc_metadata_credentials_plugin plugin, void *reserved); -extern grpc_metadata_credentials_create_from_plugin_type grpc_metadata_credentials_create_from_plugin_import; -#define grpc_metadata_credentials_create_from_plugin grpc_metadata_credentials_create_from_plugin_import -typedef grpc_channel *(*grpc_secure_channel_create_type)(grpc_channel_credentials *creds, const char *target, const grpc_channel_args *args, void *reserved); -extern grpc_secure_channel_create_type grpc_secure_channel_create_import; -#define grpc_secure_channel_create 
grpc_secure_channel_create_import -typedef void(*grpc_server_credentials_release_type)(grpc_server_credentials *creds); -extern grpc_server_credentials_release_type grpc_server_credentials_release_import; -#define grpc_server_credentials_release grpc_server_credentials_release_import -typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, int force_client_auth, void *reserved); -extern grpc_ssl_server_credentials_create_type grpc_ssl_server_credentials_create_import; -#define grpc_ssl_server_credentials_create grpc_ssl_server_credentials_create_import -typedef grpc_server_credentials *(*grpc_ssl_server_credentials_create_ex_type)(const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, grpc_ssl_client_certificate_request_type client_certificate_request, void *reserved); -extern grpc_ssl_server_credentials_create_ex_type grpc_ssl_server_credentials_create_ex_import; -#define grpc_ssl_server_credentials_create_ex grpc_ssl_server_credentials_create_ex_import -typedef int(*grpc_server_add_secure_http2_port_type)(grpc_server *server, const char *addr, grpc_server_credentials *creds); -extern grpc_server_add_secure_http2_port_type grpc_server_add_secure_http2_port_import; -#define grpc_server_add_secure_http2_port grpc_server_add_secure_http2_port_import -typedef grpc_call_error(*grpc_call_set_credentials_type)(grpc_call *call, grpc_call_credentials *creds); -extern grpc_call_set_credentials_type grpc_call_set_credentials_import; -#define grpc_call_set_credentials grpc_call_set_credentials_import -typedef void(*grpc_server_credentials_set_auth_metadata_processor_type)(grpc_server_credentials *creds, grpc_auth_metadata_processor processor); -extern grpc_server_credentials_set_auth_metadata_processor_type grpc_server_credentials_set_auth_metadata_processor_import; -#define grpc_server_credentials_set_auth_metadata_processor grpc_server_credentials_set_auth_metadata_processor_import -typedef void *(*gpr_malloc_type)(size_t size); -extern gpr_malloc_type gpr_malloc_import; -#define gpr_malloc gpr_malloc_import -typedef void(*gpr_free_type)(void *ptr); -extern gpr_free_type gpr_free_import; -#define gpr_free gpr_free_import -typedef void *(*gpr_realloc_type)(void *p, size_t size); -extern gpr_realloc_type gpr_realloc_import; -#define gpr_realloc gpr_realloc_import -typedef void *(*gpr_malloc_aligned_type)(size_t size, size_t alignment_log); -extern gpr_malloc_aligned_type gpr_malloc_aligned_import; -#define gpr_malloc_aligned gpr_malloc_aligned_import -typedef void(*gpr_free_aligned_type)(void *ptr); -extern gpr_free_aligned_type gpr_free_aligned_import; -#define gpr_free_aligned gpr_free_aligned_import -typedef void(*gpr_set_allocation_functions_type)(gpr_allocation_functions functions); -extern gpr_set_allocation_functions_type gpr_set_allocation_functions_import; -#define gpr_set_allocation_functions gpr_set_allocation_functions_import -typedef gpr_allocation_functions(*gpr_get_allocation_functions_type)(); -extern gpr_get_allocation_functions_type gpr_get_allocation_functions_import; -#define gpr_get_allocation_functions gpr_get_allocation_functions_import -typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_create_type)(gpr_slice *slices, size_t nslices); -extern grpc_raw_byte_buffer_create_type grpc_raw_byte_buffer_create_import; -#define grpc_raw_byte_buffer_create grpc_raw_byte_buffer_create_import -typedef grpc_byte_buffer 
*(*grpc_raw_compressed_byte_buffer_create_type)(gpr_slice *slices, size_t nslices, grpc_compression_algorithm compression); -extern grpc_raw_compressed_byte_buffer_create_type grpc_raw_compressed_byte_buffer_create_import; -#define grpc_raw_compressed_byte_buffer_create grpc_raw_compressed_byte_buffer_create_import -typedef grpc_byte_buffer *(*grpc_byte_buffer_copy_type)(grpc_byte_buffer *bb); -extern grpc_byte_buffer_copy_type grpc_byte_buffer_copy_import; -#define grpc_byte_buffer_copy grpc_byte_buffer_copy_import -typedef size_t(*grpc_byte_buffer_length_type)(grpc_byte_buffer *bb); -extern grpc_byte_buffer_length_type grpc_byte_buffer_length_import; -#define grpc_byte_buffer_length grpc_byte_buffer_length_import -typedef void(*grpc_byte_buffer_destroy_type)(grpc_byte_buffer *byte_buffer); -extern grpc_byte_buffer_destroy_type grpc_byte_buffer_destroy_import; -#define grpc_byte_buffer_destroy grpc_byte_buffer_destroy_import -typedef void(*grpc_byte_buffer_reader_init_type)(grpc_byte_buffer_reader *reader, grpc_byte_buffer *buffer); -extern grpc_byte_buffer_reader_init_type grpc_byte_buffer_reader_init_import; -#define grpc_byte_buffer_reader_init grpc_byte_buffer_reader_init_import -typedef void(*grpc_byte_buffer_reader_destroy_type)(grpc_byte_buffer_reader *reader); -extern grpc_byte_buffer_reader_destroy_type grpc_byte_buffer_reader_destroy_import; -#define grpc_byte_buffer_reader_destroy grpc_byte_buffer_reader_destroy_import -typedef int(*grpc_byte_buffer_reader_next_type)(grpc_byte_buffer_reader *reader, gpr_slice *slice); -extern grpc_byte_buffer_reader_next_type grpc_byte_buffer_reader_next_import; -#define grpc_byte_buffer_reader_next grpc_byte_buffer_reader_next_import -typedef gpr_slice(*grpc_byte_buffer_reader_readall_type)(grpc_byte_buffer_reader *reader); -extern grpc_byte_buffer_reader_readall_type grpc_byte_buffer_reader_readall_import; -#define grpc_byte_buffer_reader_readall grpc_byte_buffer_reader_readall_import -typedef grpc_byte_buffer *(*grpc_raw_byte_buffer_from_reader_type)(grpc_byte_buffer_reader *reader); -extern grpc_raw_byte_buffer_from_reader_type grpc_raw_byte_buffer_from_reader_import; -#define grpc_raw_byte_buffer_from_reader grpc_raw_byte_buffer_from_reader_import -typedef void(*gpr_log_type)(const char *file, int line, gpr_log_severity severity, const char *format, ...); -extern gpr_log_type gpr_log_import; -#define gpr_log gpr_log_import -typedef void(*gpr_log_message_type)(const char *file, int line, gpr_log_severity severity, const char *message); -extern gpr_log_message_type gpr_log_message_import; -#define gpr_log_message gpr_log_message_import -typedef void(*gpr_set_log_verbosity_type)(gpr_log_severity min_severity_to_print); -extern gpr_set_log_verbosity_type gpr_set_log_verbosity_import; -#define gpr_set_log_verbosity gpr_set_log_verbosity_import -typedef void(*gpr_log_verbosity_init_type)(); -extern gpr_log_verbosity_init_type gpr_log_verbosity_init_import; -#define gpr_log_verbosity_init gpr_log_verbosity_init_import -typedef void(*gpr_set_log_function_type)(gpr_log_func func); -extern gpr_set_log_function_type gpr_set_log_function_import; -#define gpr_set_log_function gpr_set_log_function_import -typedef gpr_slice(*gpr_slice_ref_type)(gpr_slice s); -extern gpr_slice_ref_type gpr_slice_ref_import; -#define gpr_slice_ref gpr_slice_ref_import -typedef void(*gpr_slice_unref_type)(gpr_slice s); -extern gpr_slice_unref_type gpr_slice_unref_import; -#define gpr_slice_unref gpr_slice_unref_import -typedef gpr_slice(*gpr_slice_new_type)(void *p, size_t 
len, void (*destroy)(void *)); -extern gpr_slice_new_type gpr_slice_new_import; -#define gpr_slice_new gpr_slice_new_import -typedef gpr_slice(*gpr_slice_new_with_len_type)(void *p, size_t len, void (*destroy)(void *, size_t)); -extern gpr_slice_new_with_len_type gpr_slice_new_with_len_import; -#define gpr_slice_new_with_len gpr_slice_new_with_len_import -typedef gpr_slice(*gpr_slice_malloc_type)(size_t length); -extern gpr_slice_malloc_type gpr_slice_malloc_import; -#define gpr_slice_malloc gpr_slice_malloc_import -typedef gpr_slice(*gpr_slice_from_copied_string_type)(const char *source); -extern gpr_slice_from_copied_string_type gpr_slice_from_copied_string_import; -#define gpr_slice_from_copied_string gpr_slice_from_copied_string_import -typedef gpr_slice(*gpr_slice_from_copied_buffer_type)(const char *source, size_t len); -extern gpr_slice_from_copied_buffer_type gpr_slice_from_copied_buffer_import; -#define gpr_slice_from_copied_buffer gpr_slice_from_copied_buffer_import -typedef gpr_slice(*gpr_slice_from_static_string_type)(const char *source); -extern gpr_slice_from_static_string_type gpr_slice_from_static_string_import; -#define gpr_slice_from_static_string gpr_slice_from_static_string_import -typedef gpr_slice(*gpr_slice_sub_type)(gpr_slice s, size_t begin, size_t end); -extern gpr_slice_sub_type gpr_slice_sub_import; -#define gpr_slice_sub gpr_slice_sub_import -typedef gpr_slice(*gpr_slice_sub_no_ref_type)(gpr_slice s, size_t begin, size_t end); -extern gpr_slice_sub_no_ref_type gpr_slice_sub_no_ref_import; -#define gpr_slice_sub_no_ref gpr_slice_sub_no_ref_import -typedef gpr_slice(*gpr_slice_split_tail_type)(gpr_slice *s, size_t split); -extern gpr_slice_split_tail_type gpr_slice_split_tail_import; -#define gpr_slice_split_tail gpr_slice_split_tail_import -typedef gpr_slice(*gpr_slice_split_head_type)(gpr_slice *s, size_t split); -extern gpr_slice_split_head_type gpr_slice_split_head_import; -#define gpr_slice_split_head gpr_slice_split_head_import -typedef gpr_slice(*gpr_empty_slice_type)(void); -extern gpr_empty_slice_type gpr_empty_slice_import; -#define gpr_empty_slice gpr_empty_slice_import -typedef int(*gpr_slice_cmp_type)(gpr_slice a, gpr_slice b); -extern gpr_slice_cmp_type gpr_slice_cmp_import; -#define gpr_slice_cmp gpr_slice_cmp_import -typedef int(*gpr_slice_str_cmp_type)(gpr_slice a, const char *b); -extern gpr_slice_str_cmp_type gpr_slice_str_cmp_import; -#define gpr_slice_str_cmp gpr_slice_str_cmp_import -typedef void(*gpr_slice_buffer_init_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_init_type gpr_slice_buffer_init_import; -#define gpr_slice_buffer_init gpr_slice_buffer_init_import -typedef void(*gpr_slice_buffer_destroy_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_destroy_type gpr_slice_buffer_destroy_import; -#define gpr_slice_buffer_destroy gpr_slice_buffer_destroy_import -typedef void(*gpr_slice_buffer_add_type)(gpr_slice_buffer *sb, gpr_slice slice); -extern gpr_slice_buffer_add_type gpr_slice_buffer_add_import; -#define gpr_slice_buffer_add gpr_slice_buffer_add_import -typedef size_t(*gpr_slice_buffer_add_indexed_type)(gpr_slice_buffer *sb, gpr_slice slice); -extern gpr_slice_buffer_add_indexed_type gpr_slice_buffer_add_indexed_import; -#define gpr_slice_buffer_add_indexed gpr_slice_buffer_add_indexed_import -typedef void(*gpr_slice_buffer_addn_type)(gpr_slice_buffer *sb, gpr_slice *slices, size_t n); -extern gpr_slice_buffer_addn_type gpr_slice_buffer_addn_import; -#define gpr_slice_buffer_addn gpr_slice_buffer_addn_import -typedef 
uint8_t *(*gpr_slice_buffer_tiny_add_type)(gpr_slice_buffer *sb, size_t len); -extern gpr_slice_buffer_tiny_add_type gpr_slice_buffer_tiny_add_import; -#define gpr_slice_buffer_tiny_add gpr_slice_buffer_tiny_add_import -typedef void(*gpr_slice_buffer_pop_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_pop_type gpr_slice_buffer_pop_import; -#define gpr_slice_buffer_pop gpr_slice_buffer_pop_import -typedef void(*gpr_slice_buffer_reset_and_unref_type)(gpr_slice_buffer *sb); -extern gpr_slice_buffer_reset_and_unref_type gpr_slice_buffer_reset_and_unref_import; -#define gpr_slice_buffer_reset_and_unref gpr_slice_buffer_reset_and_unref_import -typedef void(*gpr_slice_buffer_swap_type)(gpr_slice_buffer *a, gpr_slice_buffer *b); -extern gpr_slice_buffer_swap_type gpr_slice_buffer_swap_import; -#define gpr_slice_buffer_swap gpr_slice_buffer_swap_import -typedef void(*gpr_slice_buffer_move_into_type)(gpr_slice_buffer *src, gpr_slice_buffer *dst); -extern gpr_slice_buffer_move_into_type gpr_slice_buffer_move_into_import; -#define gpr_slice_buffer_move_into gpr_slice_buffer_move_into_import -typedef void(*gpr_slice_buffer_trim_end_type)(gpr_slice_buffer *src, size_t n, gpr_slice_buffer *garbage); -extern gpr_slice_buffer_trim_end_type gpr_slice_buffer_trim_end_import; -#define gpr_slice_buffer_trim_end gpr_slice_buffer_trim_end_import -typedef void(*gpr_slice_buffer_move_first_type)(gpr_slice_buffer *src, size_t n, gpr_slice_buffer *dst); -extern gpr_slice_buffer_move_first_type gpr_slice_buffer_move_first_import; -#define gpr_slice_buffer_move_first gpr_slice_buffer_move_first_import -typedef gpr_slice(*gpr_slice_buffer_take_first_type)(gpr_slice_buffer *src); -extern gpr_slice_buffer_take_first_type gpr_slice_buffer_take_first_import; -#define gpr_slice_buffer_take_first gpr_slice_buffer_take_first_import -typedef void(*gpr_mu_init_type)(gpr_mu *mu); -extern gpr_mu_init_type gpr_mu_init_import; -#define gpr_mu_init gpr_mu_init_import -typedef void(*gpr_mu_destroy_type)(gpr_mu *mu); -extern gpr_mu_destroy_type gpr_mu_destroy_import; -#define gpr_mu_destroy gpr_mu_destroy_import -typedef void(*gpr_mu_lock_type)(gpr_mu *mu); -extern gpr_mu_lock_type gpr_mu_lock_import; -#define gpr_mu_lock gpr_mu_lock_import -typedef void(*gpr_mu_unlock_type)(gpr_mu *mu); -extern gpr_mu_unlock_type gpr_mu_unlock_import; -#define gpr_mu_unlock gpr_mu_unlock_import -typedef int(*gpr_mu_trylock_type)(gpr_mu *mu); -extern gpr_mu_trylock_type gpr_mu_trylock_import; -#define gpr_mu_trylock gpr_mu_trylock_import -typedef void(*gpr_cv_init_type)(gpr_cv *cv); -extern gpr_cv_init_type gpr_cv_init_import; -#define gpr_cv_init gpr_cv_init_import -typedef void(*gpr_cv_destroy_type)(gpr_cv *cv); -extern gpr_cv_destroy_type gpr_cv_destroy_import; -#define gpr_cv_destroy gpr_cv_destroy_import -typedef int(*gpr_cv_wait_type)(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline); -extern gpr_cv_wait_type gpr_cv_wait_import; -#define gpr_cv_wait gpr_cv_wait_import -typedef void(*gpr_cv_signal_type)(gpr_cv *cv); -extern gpr_cv_signal_type gpr_cv_signal_import; -#define gpr_cv_signal gpr_cv_signal_import -typedef void(*gpr_cv_broadcast_type)(gpr_cv *cv); -extern gpr_cv_broadcast_type gpr_cv_broadcast_import; -#define gpr_cv_broadcast gpr_cv_broadcast_import -typedef void(*gpr_once_init_type)(gpr_once *once, void (*init_routine)(void)); -extern gpr_once_init_type gpr_once_init_import; -#define gpr_once_init gpr_once_init_import -typedef void(*gpr_event_init_type)(gpr_event *ev); -extern gpr_event_init_type gpr_event_init_import; -#define 
gpr_event_init gpr_event_init_import -typedef void(*gpr_event_set_type)(gpr_event *ev, void *value); -extern gpr_event_set_type gpr_event_set_import; -#define gpr_event_set gpr_event_set_import -typedef void *(*gpr_event_get_type)(gpr_event *ev); -extern gpr_event_get_type gpr_event_get_import; -#define gpr_event_get gpr_event_get_import -typedef void *(*gpr_event_wait_type)(gpr_event *ev, gpr_timespec abs_deadline); -extern gpr_event_wait_type gpr_event_wait_import; -#define gpr_event_wait gpr_event_wait_import -typedef void(*gpr_ref_init_type)(gpr_refcount *r, int n); -extern gpr_ref_init_type gpr_ref_init_import; -#define gpr_ref_init gpr_ref_init_import -typedef void(*gpr_ref_type)(gpr_refcount *r); -extern gpr_ref_type gpr_ref_import; -#define gpr_ref gpr_ref_import -typedef void(*gpr_ref_non_zero_type)(gpr_refcount *r); -extern gpr_ref_non_zero_type gpr_ref_non_zero_import; -#define gpr_ref_non_zero gpr_ref_non_zero_import -typedef void(*gpr_refn_type)(gpr_refcount *r, int n); -extern gpr_refn_type gpr_refn_import; -#define gpr_refn gpr_refn_import -typedef int(*gpr_unref_type)(gpr_refcount *r); -extern gpr_unref_type gpr_unref_import; -#define gpr_unref gpr_unref_import -typedef void(*gpr_stats_init_type)(gpr_stats_counter *c, intptr_t n); -extern gpr_stats_init_type gpr_stats_init_import; -#define gpr_stats_init gpr_stats_init_import -typedef void(*gpr_stats_inc_type)(gpr_stats_counter *c, intptr_t inc); -extern gpr_stats_inc_type gpr_stats_inc_import; -#define gpr_stats_inc gpr_stats_inc_import -typedef intptr_t(*gpr_stats_read_type)(const gpr_stats_counter *c); -extern gpr_stats_read_type gpr_stats_read_import; -#define gpr_stats_read gpr_stats_read_import -typedef gpr_timespec(*gpr_time_0_type)(gpr_clock_type type); -extern gpr_time_0_type gpr_time_0_import; -#define gpr_time_0 gpr_time_0_import -typedef gpr_timespec(*gpr_inf_future_type)(gpr_clock_type type); -extern gpr_inf_future_type gpr_inf_future_import; -#define gpr_inf_future gpr_inf_future_import -typedef gpr_timespec(*gpr_inf_past_type)(gpr_clock_type type); -extern gpr_inf_past_type gpr_inf_past_import; -#define gpr_inf_past gpr_inf_past_import -typedef void(*gpr_time_init_type)(void); -extern gpr_time_init_type gpr_time_init_import; -#define gpr_time_init gpr_time_init_import -typedef gpr_timespec(*gpr_now_type)(gpr_clock_type clock); -extern gpr_now_type gpr_now_import; -#define gpr_now gpr_now_import -typedef gpr_timespec(*gpr_convert_clock_type_type)(gpr_timespec t, gpr_clock_type target_clock); -extern gpr_convert_clock_type_type gpr_convert_clock_type_import; -#define gpr_convert_clock_type gpr_convert_clock_type_import -typedef int(*gpr_time_cmp_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_cmp_type gpr_time_cmp_import; -#define gpr_time_cmp gpr_time_cmp_import -typedef gpr_timespec(*gpr_time_max_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_max_type gpr_time_max_import; -#define gpr_time_max gpr_time_max_import -typedef gpr_timespec(*gpr_time_min_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_min_type gpr_time_min_import; -#define gpr_time_min gpr_time_min_import -typedef gpr_timespec(*gpr_time_add_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_add_type gpr_time_add_import; -#define gpr_time_add gpr_time_add_import -typedef gpr_timespec(*gpr_time_sub_type)(gpr_timespec a, gpr_timespec b); -extern gpr_time_sub_type gpr_time_sub_import; -#define gpr_time_sub gpr_time_sub_import -typedef gpr_timespec(*gpr_time_from_micros_type)(int64_t x, gpr_clock_type clock_type); -extern 
gpr_time_from_micros_type gpr_time_from_micros_import; -#define gpr_time_from_micros gpr_time_from_micros_import -typedef gpr_timespec(*gpr_time_from_nanos_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_nanos_type gpr_time_from_nanos_import; -#define gpr_time_from_nanos gpr_time_from_nanos_import -typedef gpr_timespec(*gpr_time_from_millis_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_millis_type gpr_time_from_millis_import; -#define gpr_time_from_millis gpr_time_from_millis_import -typedef gpr_timespec(*gpr_time_from_seconds_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_seconds_type gpr_time_from_seconds_import; -#define gpr_time_from_seconds gpr_time_from_seconds_import -typedef gpr_timespec(*gpr_time_from_minutes_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_minutes_type gpr_time_from_minutes_import; -#define gpr_time_from_minutes gpr_time_from_minutes_import -typedef gpr_timespec(*gpr_time_from_hours_type)(int64_t x, gpr_clock_type clock_type); -extern gpr_time_from_hours_type gpr_time_from_hours_import; -#define gpr_time_from_hours gpr_time_from_hours_import -typedef int32_t(*gpr_time_to_millis_type)(gpr_timespec timespec); -extern gpr_time_to_millis_type gpr_time_to_millis_import; -#define gpr_time_to_millis gpr_time_to_millis_import -typedef int(*gpr_time_similar_type)(gpr_timespec a, gpr_timespec b, gpr_timespec threshold); -extern gpr_time_similar_type gpr_time_similar_import; -#define gpr_time_similar gpr_time_similar_import -typedef void(*gpr_sleep_until_type)(gpr_timespec until); -extern gpr_sleep_until_type gpr_sleep_until_import; -#define gpr_sleep_until gpr_sleep_until_import -typedef double(*gpr_timespec_to_micros_type)(gpr_timespec t); -extern gpr_timespec_to_micros_type gpr_timespec_to_micros_import; -#define gpr_timespec_to_micros gpr_timespec_to_micros_import -typedef gpr_avl(*gpr_avl_create_type)(const gpr_avl_vtable *vtable); -extern gpr_avl_create_type gpr_avl_create_import; -#define gpr_avl_create gpr_avl_create_import -typedef gpr_avl(*gpr_avl_ref_type)(gpr_avl avl); -extern gpr_avl_ref_type gpr_avl_ref_import; -#define gpr_avl_ref gpr_avl_ref_import -typedef void(*gpr_avl_unref_type)(gpr_avl avl); -extern gpr_avl_unref_type gpr_avl_unref_import; -#define gpr_avl_unref gpr_avl_unref_import -typedef gpr_avl(*gpr_avl_add_type)(gpr_avl avl, void *key, void *value); -extern gpr_avl_add_type gpr_avl_add_import; -#define gpr_avl_add gpr_avl_add_import -typedef gpr_avl(*gpr_avl_remove_type)(gpr_avl avl, void *key); -extern gpr_avl_remove_type gpr_avl_remove_import; -#define gpr_avl_remove gpr_avl_remove_import -typedef void *(*gpr_avl_get_type)(gpr_avl avl, void *key); -extern gpr_avl_get_type gpr_avl_get_import; -#define gpr_avl_get gpr_avl_get_import -typedef gpr_cmdline *(*gpr_cmdline_create_type)(const char *description); -extern gpr_cmdline_create_type gpr_cmdline_create_import; -#define gpr_cmdline_create gpr_cmdline_create_import -typedef void(*gpr_cmdline_add_int_type)(gpr_cmdline *cl, const char *name, const char *help, int *value); -extern gpr_cmdline_add_int_type gpr_cmdline_add_int_import; -#define gpr_cmdline_add_int gpr_cmdline_add_int_import -typedef void(*gpr_cmdline_add_flag_type)(gpr_cmdline *cl, const char *name, const char *help, int *value); -extern gpr_cmdline_add_flag_type gpr_cmdline_add_flag_import; -#define gpr_cmdline_add_flag gpr_cmdline_add_flag_import -typedef void(*gpr_cmdline_add_string_type)(gpr_cmdline *cl, const char *name, const char *help, char **value); 
-extern gpr_cmdline_add_string_type gpr_cmdline_add_string_import; -#define gpr_cmdline_add_string gpr_cmdline_add_string_import -typedef void(*gpr_cmdline_on_extra_arg_type)(gpr_cmdline *cl, const char *name, const char *help, void (*on_extra_arg)(void *user_data, const char *arg), void *user_data); -extern gpr_cmdline_on_extra_arg_type gpr_cmdline_on_extra_arg_import; -#define gpr_cmdline_on_extra_arg gpr_cmdline_on_extra_arg_import -typedef void(*gpr_cmdline_set_survive_failure_type)(gpr_cmdline *cl); -extern gpr_cmdline_set_survive_failure_type gpr_cmdline_set_survive_failure_import; -#define gpr_cmdline_set_survive_failure gpr_cmdline_set_survive_failure_import -typedef int(*gpr_cmdline_parse_type)(gpr_cmdline *cl, int argc, char **argv); -extern gpr_cmdline_parse_type gpr_cmdline_parse_import; -#define gpr_cmdline_parse gpr_cmdline_parse_import -typedef void(*gpr_cmdline_destroy_type)(gpr_cmdline *cl); -extern gpr_cmdline_destroy_type gpr_cmdline_destroy_import; -#define gpr_cmdline_destroy gpr_cmdline_destroy_import -typedef char *(*gpr_cmdline_usage_string_type)(gpr_cmdline *cl, const char *argv0); -extern gpr_cmdline_usage_string_type gpr_cmdline_usage_string_import; -#define gpr_cmdline_usage_string gpr_cmdline_usage_string_import -typedef unsigned(*gpr_cpu_num_cores_type)(void); -extern gpr_cpu_num_cores_type gpr_cpu_num_cores_import; -#define gpr_cpu_num_cores gpr_cpu_num_cores_import -typedef unsigned(*gpr_cpu_current_cpu_type)(void); -extern gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import; -#define gpr_cpu_current_cpu gpr_cpu_current_cpu_import -typedef gpr_histogram *(*gpr_histogram_create_type)(double resolution, double max_bucket_start); -extern gpr_histogram_create_type gpr_histogram_create_import; -#define gpr_histogram_create gpr_histogram_create_import -typedef void(*gpr_histogram_destroy_type)(gpr_histogram *h); -extern gpr_histogram_destroy_type gpr_histogram_destroy_import; -#define gpr_histogram_destroy gpr_histogram_destroy_import -typedef void(*gpr_histogram_add_type)(gpr_histogram *h, double x); -extern gpr_histogram_add_type gpr_histogram_add_import; -#define gpr_histogram_add gpr_histogram_add_import -typedef int(*gpr_histogram_merge_type)(gpr_histogram *dst, const gpr_histogram *src); -extern gpr_histogram_merge_type gpr_histogram_merge_import; -#define gpr_histogram_merge gpr_histogram_merge_import -typedef double(*gpr_histogram_percentile_type)(gpr_histogram *histogram, double percentile); -extern gpr_histogram_percentile_type gpr_histogram_percentile_import; -#define gpr_histogram_percentile gpr_histogram_percentile_import -typedef double(*gpr_histogram_mean_type)(gpr_histogram *histogram); -extern gpr_histogram_mean_type gpr_histogram_mean_import; -#define gpr_histogram_mean gpr_histogram_mean_import -typedef double(*gpr_histogram_stddev_type)(gpr_histogram *histogram); -extern gpr_histogram_stddev_type gpr_histogram_stddev_import; -#define gpr_histogram_stddev gpr_histogram_stddev_import -typedef double(*gpr_histogram_variance_type)(gpr_histogram *histogram); -extern gpr_histogram_variance_type gpr_histogram_variance_import; -#define gpr_histogram_variance gpr_histogram_variance_import -typedef double(*gpr_histogram_maximum_type)(gpr_histogram *histogram); -extern gpr_histogram_maximum_type gpr_histogram_maximum_import; -#define gpr_histogram_maximum gpr_histogram_maximum_import -typedef double(*gpr_histogram_minimum_type)(gpr_histogram *histogram); -extern gpr_histogram_minimum_type gpr_histogram_minimum_import; -#define gpr_histogram_minimum 
gpr_histogram_minimum_import -typedef double(*gpr_histogram_count_type)(gpr_histogram *histogram); -extern gpr_histogram_count_type gpr_histogram_count_import; -#define gpr_histogram_count gpr_histogram_count_import -typedef double(*gpr_histogram_sum_type)(gpr_histogram *histogram); -extern gpr_histogram_sum_type gpr_histogram_sum_import; -#define gpr_histogram_sum gpr_histogram_sum_import -typedef double(*gpr_histogram_sum_of_squares_type)(gpr_histogram *histogram); -extern gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import; -#define gpr_histogram_sum_of_squares gpr_histogram_sum_of_squares_import -typedef const uint32_t *(*gpr_histogram_get_contents_type)(gpr_histogram *histogram, size_t *count); -extern gpr_histogram_get_contents_type gpr_histogram_get_contents_import; -#define gpr_histogram_get_contents gpr_histogram_get_contents_import -typedef void(*gpr_histogram_merge_contents_type)(gpr_histogram *histogram, const uint32_t *data, size_t data_count, double min_seen, double max_seen, double sum, double sum_of_squares, double count); -extern gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import; -#define gpr_histogram_merge_contents gpr_histogram_merge_contents_import -typedef int(*gpr_join_host_port_type)(char **out, const char *host, int port); -extern gpr_join_host_port_type gpr_join_host_port_import; -#define gpr_join_host_port gpr_join_host_port_import -typedef int(*gpr_split_host_port_type)(const char *name, char **host, char **port); -extern gpr_split_host_port_type gpr_split_host_port_import; -#define gpr_split_host_port gpr_split_host_port_import -typedef char *(*gpr_format_message_type)(int messageid); -extern gpr_format_message_type gpr_format_message_import; -#define gpr_format_message gpr_format_message_import -typedef char *(*gpr_strdup_type)(const char *src); -extern gpr_strdup_type gpr_strdup_import; -#define gpr_strdup gpr_strdup_import -typedef int(*gpr_asprintf_type)(char **strp, const char *format, ...); -extern gpr_asprintf_type gpr_asprintf_import; -#define gpr_asprintf gpr_asprintf_import -typedef const char *(*gpr_subprocess_binary_extension_type)(); -extern gpr_subprocess_binary_extension_type gpr_subprocess_binary_extension_import; -#define gpr_subprocess_binary_extension gpr_subprocess_binary_extension_import -typedef gpr_subprocess *(*gpr_subprocess_create_type)(int argc, const char **argv); -extern gpr_subprocess_create_type gpr_subprocess_create_import; -#define gpr_subprocess_create gpr_subprocess_create_import -typedef void(*gpr_subprocess_destroy_type)(gpr_subprocess *p); -extern gpr_subprocess_destroy_type gpr_subprocess_destroy_import; -#define gpr_subprocess_destroy gpr_subprocess_destroy_import -typedef int(*gpr_subprocess_join_type)(gpr_subprocess *p); -extern gpr_subprocess_join_type gpr_subprocess_join_import; -#define gpr_subprocess_join gpr_subprocess_join_import -typedef void(*gpr_subprocess_interrupt_type)(gpr_subprocess *p); -extern gpr_subprocess_interrupt_type gpr_subprocess_interrupt_import; -#define gpr_subprocess_interrupt gpr_subprocess_interrupt_import -typedef int(*gpr_thd_new_type)(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg, const gpr_thd_options *options); -extern gpr_thd_new_type gpr_thd_new_import; -#define gpr_thd_new gpr_thd_new_import -typedef gpr_thd_options(*gpr_thd_options_default_type)(void); -extern gpr_thd_options_default_type gpr_thd_options_default_import; -#define gpr_thd_options_default gpr_thd_options_default_import -typedef 
void(*gpr_thd_options_set_detached_type)(gpr_thd_options *options);
-extern gpr_thd_options_set_detached_type gpr_thd_options_set_detached_import;
-#define gpr_thd_options_set_detached gpr_thd_options_set_detached_import
-typedef void(*gpr_thd_options_set_joinable_type)(gpr_thd_options *options);
-extern gpr_thd_options_set_joinable_type gpr_thd_options_set_joinable_import;
-#define gpr_thd_options_set_joinable gpr_thd_options_set_joinable_import
-typedef int(*gpr_thd_options_is_detached_type)(const gpr_thd_options *options);
-extern gpr_thd_options_is_detached_type gpr_thd_options_is_detached_import;
-#define gpr_thd_options_is_detached gpr_thd_options_is_detached_import
-typedef int(*gpr_thd_options_is_joinable_type)(const gpr_thd_options *options);
-extern gpr_thd_options_is_joinable_type gpr_thd_options_is_joinable_import;
-#define gpr_thd_options_is_joinable gpr_thd_options_is_joinable_import
-typedef gpr_thd_id(*gpr_thd_currentid_type)(void);
-extern gpr_thd_currentid_type gpr_thd_currentid_import;
-#define gpr_thd_currentid gpr_thd_currentid_import
-typedef void(*gpr_thd_join_type)(gpr_thd_id t);
-extern gpr_thd_join_type gpr_thd_join_import;
-#define gpr_thd_join gpr_thd_join_import
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cpluslus */
-
-void pygrpc_load_imports(HMODULE library);
-
-#ifdef __cplusplus
-}
-#endif /* __cpluslus */
-
-#else /* !GPR_WINDOWS */
-
 #include <grpc/byte_buffer.h>
 #include <grpc/byte_buffer_reader.h>
 #include <grpc/compression.h>
@@ -864,6 +47,4 @@ void pygrpc_load_imports(HMODULE library);
 #include <grpc/support/time.h>
 #include <grpc/status.h>
 
-#endif /* !GPR_WINDOWS */
-
 #endif
diff --git a/src/python/grpcio/grpc/_cython/loader.c b/src/python/grpcio/grpc/_cython/loader.c
index b909ad594e..34bd897549 100644
--- a/src/python/grpcio/grpc/_cython/loader.c
+++ b/src/python/grpcio/grpc/_cython/loader.c
@@ -31,36 +31,20 @@
  *
  */
 
+#include <Python.h>
 #include "loader.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cpluslus */
 
-#if GPR_WINDOWS
-
-int pygrpc_load_core(char *path) {
-  HMODULE grpc_c;
-#ifdef GPR_ARCH_32
-  /* Close your eyes for a moment, it'll all be over soon. */
-  char *six = strrchr(path, '6');
-  *six++ = '3';
-  *six = '2';
-#endif
-  grpc_c = LoadLibraryA(path);
-  if (grpc_c) {
-    pygrpc_load_imports(grpc_c);
-    return 1;
-  }
-
-  return 0;
-}
-
-#else
-
 int pygrpc_load_core(char *path) { return 1; }
 
-#endif /* !GPR_WINDOWS */
+// Cython doesn't have Py_AtExit bindings, so we call the C_API directly
+int pygrpc_initialize_core(void) {
+  grpc_init();
+  return Py_AtExit(grpc_shutdown) < 0 ? 0 : 1;
+}
 
 #ifdef __cplusplus
 }
diff --git a/src/python/grpcio/grpc/_cython/loader.h b/src/python/grpcio/grpc/_cython/loader.h
index 3b8796d39f..62fd225204 100644
--- a/src/python/grpcio/grpc/_cython/loader.h
+++ b/src/python/grpcio/grpc/_cython/loader.h
@@ -39,6 +39,8 @@
 /* Additional inclusions not covered by "imports.generated.h" */
 #include <grpc/byte_buffer_reader.h>
 
+/* TODO(atash) remove cruft */
+
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cpluslus */
@@ -46,6 +48,11 @@ extern "C" {
 /* Attempts to load the core if necessary, and return non-zero upon succes. */
 int pygrpc_load_core(char *path);
 
+/* Initializes grpc and registers grpc_shutdown() to be called right before
+ * interpreter exit. Returns non-zero upon success.
+ */
+int pygrpc_initialize_core(void);
+
 #ifdef __cplusplus
 }
 #endif /* __cpluslus */
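The loader.c and loader.h changes above drop the Windows-only dynamic-loading shim and instead expose pygrpc_initialize_core(), which calls grpc_init() once and uses Py_AtExit to register grpc_shutdown() so the core is torn down right before the interpreter exits. A minimal Python-level sketch of that init-at-import, shutdown-at-exit pattern, using the atexit module as a stand-in for the C-level Py_AtExit call (illustrative only; these names are not part of this commit):

    # Sketch of the pattern implemented in C by pygrpc_initialize_core().
    import atexit

    _initialized = False

    def initialize_core(init, shutdown):
        """Run `init` once and register `shutdown` to run at interpreter exit."""
        global _initialized
        if not _initialized:
            init()                     # analogous to grpc_init()
            atexit.register(shutdown)  # analogous to Py_AtExit(grpc_shutdown)
            _initialized = True
        return 1                       # non-zero on success, mirroring the C API

Registering the shutdown hook at initialization time lets the Cython layer call a single function at import and never pair it with an explicit teardown call.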
diff --git a/src/python/grpcio/grpc/_links/invocation.py b/src/python/grpcio/grpc/_links/invocation.py
deleted file mode 100644
index 003653e1c8..0000000000
--- a/src/python/grpcio/grpc/_links/invocation.py
+++ /dev/null
@@ -1,453 +0,0 @@
-# Copyright 2015, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
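The deleted grpc/_links/invocation.py, whose body continues below, implemented the invocation-side half of the old "links" architecture: RPC progress flowed as a stream of links.Ticket values, with a _Kernel draining the C core's completion queue and a relay thread handing tickets to the application-facing layer. A simplified toy of that ticket-relay pattern (hypothetical names, not the real framework classes):

    # Toy model of the ticket-relay hand-off used by the deleted _links modules.
    import collections
    import queue
    import threading

    Ticket = collections.namedtuple(
        'Ticket', ('operation_id', 'sequence_number', 'payload'))

    class Relay(object):
        """Delivers tickets to a consumer callback on a background thread."""

        def __init__(self, behavior):
            self._behavior = behavior
            self._queue = queue.Queue()
            self._thread = threading.Thread(target=self._spin)
            self._thread.daemon = True
            self._thread.start()

        def add_value(self, ticket):
            self._queue.put(ticket)

        def _spin(self):
            while True:
                ticket = self._queue.get()
                if ticket is None:
                    return
                self._behavior(ticket)

        def stop(self):
            self._queue.put(None)
            self._thread.join()

The real kernel additionally tracked per-RPC state (reads, writes, allowances, due events) keyed by operation id and produced a ticket for every completion-queue event; the toy above only captures the hand-off between the event-driving thread and the consumer.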
- -"""The RPC-invocation-side bridge between RPC Framework and GRPC-on-the-wire.""" - -import abc -import enum -import logging -import threading -import time - -import six - -from grpc._adapter import _intermediary_low -from grpc._links import _constants -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.foundation import activated -from grpc.framework.foundation import logging_pool -from grpc.framework.foundation import relay -from grpc.framework.interfaces.links import links - -_IDENTITY = lambda x: x - -_STOP = _intermediary_low.Event.Kind.STOP -_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED -_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED -_READ = _intermediary_low.Event.Kind.READ_ACCEPTED -_METADATA = _intermediary_low.Event.Kind.METADATA_ACCEPTED -_FINISH = _intermediary_low.Event.Kind.FINISH - - -@enum.unique -class _Read(enum.Enum): - AWAITING_METADATA = 'awaiting metadata' - READING = 'reading' - AWAITING_ALLOWANCE = 'awaiting allowance' - CLOSED = 'closed' - - -@enum.unique -class _HighWrite(enum.Enum): - OPEN = 'open' - CLOSED = 'closed' - - -@enum.unique -class _LowWrite(enum.Enum): - OPEN = 'OPEN' - ACTIVE = 'ACTIVE' - CLOSED = 'CLOSED' - - -class _Context(beta_interfaces.GRPCInvocationContext): - - def __init__(self): - self._lock = threading.Lock() - self._disable_next_compression = False - - def disable_next_request_compression(self): - with self._lock: - self._disable_next_compression = True - - def next_compression_disabled(self): - with self._lock: - disabled = self._disable_next_compression - self._disable_next_compression = False - return disabled - - -class _RPCState(object): - - def __init__( - self, call, request_serializer, response_deserializer, sequence_number, - read, allowance, high_write, low_write, due, context): - self.call = call - self.request_serializer = request_serializer - self.response_deserializer = response_deserializer - self.sequence_number = sequence_number - self.read = read - self.allowance = allowance - self.high_write = high_write - self.low_write = low_write - self.due = due - self.context = context - - -def _no_longer_due(kind, rpc_state, key, rpc_states): - rpc_state.due.remove(kind) - if not rpc_state.due: - del rpc_states[key] - - -class _Kernel(object): - - def __init__( - self, channel, host, metadata_transformer, request_serializers, - response_deserializers, ticket_relay): - self._lock = threading.Lock() - self._channel = channel - self._host = host - self._metadata_transformer = metadata_transformer - self._request_serializers = request_serializers - self._response_deserializers = response_deserializers - self._relay = ticket_relay - - self._completion_queue = None - self._rpc_states = {} - self._pool = None - - def _on_write_event(self, operation_id, unused_event, rpc_state): - if rpc_state.high_write is _HighWrite.CLOSED: - rpc_state.call.complete(operation_id) - rpc_state.due.add(_COMPLETE) - rpc_state.due.remove(_WRITE) - rpc_state.low_write = _LowWrite.CLOSED - else: - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, None, None, 1, - None, None, None, None, None, None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - rpc_state.low_write = _LowWrite.OPEN - _no_longer_due(_WRITE, rpc_state, operation_id, self._rpc_states) - - def _on_read_event(self, operation_id, event, rpc_state): - if event.bytes is None or _FINISH not in rpc_state.due: - rpc_state.read = _Read.CLOSED - _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states) - 
else: - if 0 < rpc_state.allowance: - rpc_state.allowance -= 1 - rpc_state.call.read(operation_id) - else: - rpc_state.read = _Read.AWAITING_ALLOWANCE - _no_longer_due(_READ, rpc_state, operation_id, self._rpc_states) - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, None, None, None, - None, rpc_state.response_deserializer(event.bytes), None, None, None, - None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _on_metadata_event(self, operation_id, event, rpc_state): - if _FINISH in rpc_state.due: - rpc_state.allowance -= 1 - rpc_state.call.read(operation_id) - rpc_state.read = _Read.READING - rpc_state.due.add(_READ) - rpc_state.due.remove(_METADATA) - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, - links.Ticket.Subscription.FULL, None, None, event.metadata, None, - None, None, None, None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - else: - _no_longer_due(_METADATA, rpc_state, operation_id, self._rpc_states) - - def _on_finish_event(self, operation_id, event, rpc_state): - _no_longer_due(_FINISH, rpc_state, operation_id, self._rpc_states) - if event.status.code == _intermediary_low.Code.OK: - termination = links.Ticket.Termination.COMPLETION - elif event.status.code == _intermediary_low.Code.CANCELLED: - termination = links.Ticket.Termination.CANCELLATION - elif event.status.code == _intermediary_low.Code.DEADLINE_EXCEEDED: - termination = links.Ticket.Termination.EXPIRATION - elif event.status.code == _intermediary_low.Code.UNIMPLEMENTED: - termination = links.Ticket.Termination.REMOTE_FAILURE - elif event.status.code == _intermediary_low.Code.UNKNOWN: - termination = links.Ticket.Termination.LOCAL_FAILURE - else: - termination = links.Ticket.Termination.TRANSMISSION_FAILURE - code = _constants.LOW_STATUS_CODE_TO_HIGH_STATUS_CODE[event.status.code] - ticket = links.Ticket( - operation_id, rpc_state.sequence_number, None, None, None, None, None, - None, None, event.metadata, code, event.status.details, termination, - None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _spin(self, completion_queue): - while True: - event = completion_queue.get(None) - with self._lock: - rpc_state = self._rpc_states.get(event.tag, None) - if event.kind is _STOP: - pass - elif event.kind is _WRITE: - self._on_write_event(event.tag, event, rpc_state) - elif event.kind is _METADATA: - self._on_metadata_event(event.tag, event, rpc_state) - elif event.kind is _READ: - self._on_read_event(event.tag, event, rpc_state) - elif event.kind is _FINISH: - self._on_finish_event(event.tag, event, rpc_state) - elif event.kind is _COMPLETE: - _no_longer_due(_COMPLETE, rpc_state, event.tag, self._rpc_states) - else: - logging.error('Illegal RPC event! %s', (event,)) - - if self._completion_queue is None and not self._rpc_states: - completion_queue.stop() - return - - def _invoke( - self, operation_id, group, method, initial_metadata, payload, termination, - timeout, allowance, options): - """Invoke an RPC. - - Args: - operation_id: Any object to be used as an operation ID for the RPC. - group: The group to which the RPC method belongs. - method: The RPC method name. - initial_metadata: The initial metadata object for the RPC. - payload: A payload object for the RPC or None if no payload was given at - invocation-time. - termination: A links.Ticket.Termination value or None indicated whether or - not more writes will follow from this side of the RPC. 
- timeout: A duration of time in seconds to allow for the RPC. - allowance: The number of payloads (beyond the free first one) that the - local ticket exchange mate has granted permission to be read. - options: A beta_interfaces.GRPCCallOptions value or None. - """ - if termination is links.Ticket.Termination.COMPLETION: - high_write = _HighWrite.CLOSED - elif termination is None: - high_write = _HighWrite.OPEN - else: - return - - transformed_initial_metadata = self._metadata_transformer(initial_metadata) - request_serializer = self._request_serializers.get( - (group, method), _IDENTITY) - response_deserializer = self._response_deserializers.get( - (group, method), _IDENTITY) - - call = _intermediary_low.Call( - self._channel, self._completion_queue, '/%s/%s' % (group, method), - self._host, time.time() + timeout) - if options is not None and options.credentials is not None: - call.set_credentials(options.credentials._low_credentials) - if transformed_initial_metadata is not None: - for metadata_key, metadata_value in transformed_initial_metadata: - call.add_metadata(metadata_key, metadata_value) - call.invoke(self._completion_queue, operation_id, operation_id) - if payload is None: - if high_write is _HighWrite.CLOSED: - call.complete(operation_id) - low_write = _LowWrite.CLOSED - due = set((_METADATA, _COMPLETE, _FINISH,)) - else: - low_write = _LowWrite.OPEN - due = set((_METADATA, _FINISH,)) - else: - if options is not None and options.disable_compression: - flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS - else: - flags = 0 - call.write(request_serializer(payload), operation_id, flags) - low_write = _LowWrite.ACTIVE - due = set((_WRITE, _METADATA, _FINISH,)) - context = _Context() - self._rpc_states[operation_id] = _RPCState( - call, request_serializer, response_deserializer, 1, - _Read.AWAITING_METADATA, 1 if allowance is None else (1 + allowance), - high_write, low_write, due, context) - protocol = links.Protocol(links.Protocol.Kind.INVOCATION_CONTEXT, context) - ticket = links.Ticket( - operation_id, 0, None, None, None, None, None, None, None, None, None, - None, None, protocol) - self._relay.add_value(ticket) - - def _advance(self, operation_id, rpc_state, payload, termination, allowance): - if payload is not None: - disable_compression = rpc_state.context.next_compression_disabled() - if disable_compression: - flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS - else: - flags = 0 - rpc_state.call.write( - rpc_state.request_serializer(payload), operation_id, flags) - rpc_state.low_write = _LowWrite.ACTIVE - rpc_state.due.add(_WRITE) - - if allowance is not None: - if rpc_state.read is _Read.AWAITING_ALLOWANCE: - rpc_state.allowance += allowance - 1 - rpc_state.call.read(operation_id) - rpc_state.read = _Read.READING - rpc_state.due.add(_READ) - else: - rpc_state.allowance += allowance - - if termination is links.Ticket.Termination.COMPLETION: - rpc_state.high_write = _HighWrite.CLOSED - if rpc_state.low_write is _LowWrite.OPEN: - rpc_state.call.complete(operation_id) - rpc_state.due.add(_COMPLETE) - rpc_state.low_write = _LowWrite.CLOSED - elif termination is not None: - rpc_state.call.cancel() - - def add_ticket(self, ticket): - with self._lock: - if ticket.sequence_number == 0: - if self._completion_queue is None: - logging.error('Received invocation ticket %s after stop!', ticket) - else: - if (ticket.protocol is not None and - ticket.protocol.kind is links.Protocol.Kind.CALL_OPTION): - grpc_call_options = ticket.protocol.value - else: - grpc_call_options = None - 
self._invoke( - ticket.operation_id, ticket.group, ticket.method, - ticket.initial_metadata, ticket.payload, ticket.termination, - ticket.timeout, ticket.allowance, grpc_call_options) - else: - rpc_state = self._rpc_states.get(ticket.operation_id) - if rpc_state is not None: - self._advance( - ticket.operation_id, rpc_state, ticket.payload, - ticket.termination, ticket.allowance) - - def start(self): - """Starts this object. - - This method must be called before attempting to exchange tickets with this - object. - """ - with self._lock: - self._completion_queue = _intermediary_low.CompletionQueue() - self._pool = logging_pool.pool(1) - self._pool.submit(self._spin, self._completion_queue) - - def stop(self): - """Stops this object. - - This method must be called for proper termination of this object, and no - attempts to exchange tickets with this object may be made after this method - has been called. - """ - with self._lock: - if not self._rpc_states: - self._completion_queue.stop() - self._completion_queue = None - pool = self._pool - pool.shutdown(wait=True) - - -class InvocationLink(six.with_metaclass(abc.ABCMeta, links.Link, activated.Activated)): - """A links.Link for use on the invocation-side of a gRPC connection. - - Implementations of this interface are only valid for use when activated. - """ - - -class _InvocationLink(InvocationLink): - - def __init__( - self, channel, host, metadata_transformer, request_serializers, - response_deserializers): - self._relay = relay.relay(None) - self._kernel = _Kernel( - channel, host, - _IDENTITY if metadata_transformer is None else metadata_transformer, - {} if request_serializers is None else request_serializers, - {} if response_deserializers is None else response_deserializers, - self._relay) - - def _start(self): - self._relay.start() - self._kernel.start() - return self - - def _stop(self): - self._kernel.stop() - self._relay.stop() - - def accept_ticket(self, ticket): - """See links.Link.accept_ticket for specification.""" - self._kernel.add_ticket(ticket) - - def join_link(self, link): - """See links.Link.join_link for specification.""" - self._relay.set_behavior(link.accept_ticket) - - def __enter__(self): - """See activated.Activated.__enter__ for specification.""" - return self._start() - - def __exit__(self, exc_type, exc_val, exc_tb): - """See activated.Activated.__exit__ for specification.""" - self._stop() - return False - - def start(self): - """See activated.Activated.start for specification.""" - return self._start() - - def stop(self): - """See activated.Activated.stop for specification.""" - self._stop() - - -def invocation_link( - channel, host, metadata_transformer, request_serializers, - response_deserializers): - """Creates an InvocationLink. - - Args: - channel: An _intermediary_low.Channel for use by the link. - host: The host to specify when invoking RPCs. - metadata_transformer: A callable that takes an invocation-side initial - metadata value and returns another metadata value to send in its place. - May be None. - request_serializers: A dict from group-method pair to request object - serialization behavior. - response_deserializers: A dict from group-method pair to response object - deserialization behavior. - - Returns: - An InvocationLink. 
- """ - return _InvocationLink( - channel, host, metadata_transformer, request_serializers, - response_deserializers) diff --git a/src/python/grpcio/grpc/_links/service.py b/src/python/grpcio/grpc/_links/service.py deleted file mode 100644 index 11310e2240..0000000000 --- a/src/python/grpcio/grpc/_links/service.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""The RPC-service-side bridge between RPC Framework and GRPC-on-the-wire.""" - -import abc -import enum -import logging -import threading -import time - -from grpc._adapter import _intermediary_low -from grpc._links import _constants -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.foundation import logging_pool -from grpc.framework.foundation import relay -from grpc.framework.interfaces.links import links - -_IDENTITY = lambda x: x - -_TERMINATION_KIND_TO_CODE = { - links.Ticket.Termination.COMPLETION: _intermediary_low.Code.OK, - links.Ticket.Termination.CANCELLATION: _intermediary_low.Code.CANCELLED, - links.Ticket.Termination.EXPIRATION: - _intermediary_low.Code.DEADLINE_EXCEEDED, - links.Ticket.Termination.SHUTDOWN: _intermediary_low.Code.UNAVAILABLE, - links.Ticket.Termination.RECEPTION_FAILURE: _intermediary_low.Code.INTERNAL, - links.Ticket.Termination.TRANSMISSION_FAILURE: - _intermediary_low.Code.INTERNAL, - links.Ticket.Termination.LOCAL_FAILURE: _intermediary_low.Code.UNKNOWN, - links.Ticket.Termination.REMOTE_FAILURE: _intermediary_low.Code.UNKNOWN, -} - -_STOP = _intermediary_low.Event.Kind.STOP -_WRITE = _intermediary_low.Event.Kind.WRITE_ACCEPTED -_COMPLETE = _intermediary_low.Event.Kind.COMPLETE_ACCEPTED -_SERVICE = _intermediary_low.Event.Kind.SERVICE_ACCEPTED -_READ = _intermediary_low.Event.Kind.READ_ACCEPTED -_FINISH = _intermediary_low.Event.Kind.FINISH - - -@enum.unique -class _Read(enum.Enum): - READING = 'reading' - # TODO(issue 2916): This state will again be necessary after eliminating the - # "early_read" field of _RPCState and going back to only reading when granted - # allowance to read. - # AWAITING_ALLOWANCE = 'awaiting allowance' - CLOSED = 'closed' - - -@enum.unique -class _HighWrite(enum.Enum): - OPEN = 'open' - CLOSED = 'closed' - - -@enum.unique -class _LowWrite(enum.Enum): - """The possible categories of low-level write state.""" - - OPEN = 'OPEN' - ACTIVE = 'ACTIVE' - CLOSED = 'CLOSED' - - -class _Context(beta_interfaces.GRPCServicerContext): - - def __init__(self, call): - self._lock = threading.Lock() - self._call = call - self._disable_next_compression = False - - def peer(self): - with self._lock: - return self._call.peer() - - def disable_next_response_compression(self): - with self._lock: - self._disable_next_compression = True - - def next_compression_disabled(self): - with self._lock: - disabled = self._disable_next_compression - self._disable_next_compression = False - return disabled - - -class _RPCState(object): - - def __init__( - self, request_deserializer, response_serializer, sequence_number, read, - early_read, allowance, high_write, low_write, premetadataed, - terminal_metadata, code, message, due, context): - self.request_deserializer = request_deserializer - self.response_serializer = response_serializer - self.sequence_number = sequence_number - self.read = read - # TODO(issue 2916): Eliminate this by eliminating the necessity of calling - # call.read just to advance the RPC. - self.early_read = early_read # A raw (not deserialized) read. 
- self.allowance = allowance - self.high_write = high_write - self.low_write = low_write - self.premetadataed = premetadataed - self.terminal_metadata = terminal_metadata - self.code = code - self.message = message - self.due = due - self.context = context - - -def _no_longer_due(kind, rpc_state, key, rpc_states): - rpc_state.due.remove(kind) - if not rpc_state.due: - del rpc_states[key] - - -def _metadatafy(call, metadata): - for metadata_key, metadata_value in metadata: - call.add_metadata(metadata_key, metadata_value) - - -def _status(termination_kind, high_code, details): - low_details = b'' if details is None else details - if high_code is None: - low_code = _TERMINATION_KIND_TO_CODE[termination_kind] - else: - low_code = _constants.HIGH_STATUS_CODE_TO_LOW_STATUS_CODE[high_code] - return _intermediary_low.Status(low_code, low_details) - - -class _Kernel(object): - - def __init__(self, request_deserializers, response_serializers, ticket_relay): - self._lock = threading.Lock() - self._request_deserializers = request_deserializers - self._response_serializers = response_serializers - self._relay = ticket_relay - - self._completion_queue = None - self._due = set() - self._server = None - self._rpc_states = {} - self._pool = None - - def _on_service_acceptance_event(self, event, server): - server.service(None) - - service_acceptance = event.service_acceptance - call = service_acceptance.call - call.accept(self._completion_queue, call) - try: - group, method = service_acceptance.method.split(b'/')[1:3] - except ValueError: - logging.info('Illegal path "%s"!', service_acceptance.method) - return - request_deserializer = self._request_deserializers.get( - (group, method), _IDENTITY) - response_serializer = self._response_serializers.get( - (group, method), _IDENTITY) - - call.read(call) - context = _Context(call) - self._rpc_states[call] = _RPCState( - request_deserializer, response_serializer, 1, _Read.READING, None, 1, - _HighWrite.OPEN, _LowWrite.OPEN, False, None, None, None, - set((_READ, _FINISH,)), context) - protocol = links.Protocol(links.Protocol.Kind.SERVICER_CONTEXT, context) - ticket = links.Ticket( - call, 0, group, method, links.Ticket.Subscription.FULL, - service_acceptance.deadline - time.time(), None, event.metadata, None, - None, None, None, None, protocol) - self._relay.add_value(ticket) - - def _on_read_event(self, event): - call = event.tag - rpc_state = self._rpc_states[call] - - if event.bytes is None: - rpc_state.read = _Read.CLOSED - payload = None - termination = links.Ticket.Termination.COMPLETION - _no_longer_due(_READ, rpc_state, call, self._rpc_states) - else: - if 0 < rpc_state.allowance: - payload = rpc_state.request_deserializer(event.bytes) - termination = None - rpc_state.allowance -= 1 - call.read(call) - else: - rpc_state.early_read = event.bytes - _no_longer_due(_READ, rpc_state, call, self._rpc_states) - return - # TODO(issue 2916): Instead of returning: - # rpc_state.read = _Read.AWAITING_ALLOWANCE - ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, None, None, - payload, None, None, None, termination, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _on_write_event(self, event): - call = event.tag - rpc_state = self._rpc_states[call] - - if rpc_state.high_write is _HighWrite.CLOSED: - if rpc_state.terminal_metadata is not None: - _metadatafy(call, rpc_state.terminal_metadata) - status = _status( - links.Ticket.Termination.COMPLETION, rpc_state.code, - rpc_state.message) - 
call.status(status, call) - rpc_state.low_write = _LowWrite.CLOSED - rpc_state.due.add(_COMPLETE) - rpc_state.due.remove(_WRITE) - else: - ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, 1, None, - None, None, None, None, None, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - rpc_state.low_write = _LowWrite.OPEN - _no_longer_due(_WRITE, rpc_state, call, self._rpc_states) - - def _on_finish_event(self, event): - call = event.tag - rpc_state = self._rpc_states[call] - _no_longer_due(_FINISH, rpc_state, call, self._rpc_states) - code = event.status.code - if code == _intermediary_low.Code.OK: - return - - if code == _intermediary_low.Code.CANCELLED: - termination = links.Ticket.Termination.CANCELLATION - elif code == _intermediary_low.Code.DEADLINE_EXCEEDED: - termination = links.Ticket.Termination.EXPIRATION - else: - termination = links.Ticket.Termination.TRANSMISSION_FAILURE - ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, None, None, - None, None, None, None, termination, None) - rpc_state.sequence_number += 1 - self._relay.add_value(ticket) - - def _spin(self, completion_queue, server): - while True: - event = completion_queue.get(None) - with self._lock: - if event.kind is _STOP: - self._due.remove(_STOP) - elif event.kind is _READ: - self._on_read_event(event) - elif event.kind is _WRITE: - self._on_write_event(event) - elif event.kind is _COMPLETE: - _no_longer_due( - _COMPLETE, self._rpc_states.get(event.tag), event.tag, - self._rpc_states) - elif event.kind is _intermediary_low.Event.Kind.FINISH: - self._on_finish_event(event) - elif event.kind is _SERVICE: - if self._server is None: - self._due.remove(_SERVICE) - else: - self._on_service_acceptance_event(event, server) - else: - logging.error('Illegal event! 
%s', (event,)) - - if not self._due and not self._rpc_states: - completion_queue.stop() - return - - def add_ticket(self, ticket): - with self._lock: - call = ticket.operation_id - rpc_state = self._rpc_states.get(call) - if rpc_state is None: - return - - if ticket.initial_metadata is not None: - _metadatafy(call, ticket.initial_metadata) - call.premetadata() - rpc_state.premetadataed = True - elif not rpc_state.premetadataed: - if (ticket.terminal_metadata is not None or - ticket.payload is not None or - ticket.termination is not None or - ticket.code is not None or - ticket.message is not None): - call.premetadata() - rpc_state.premetadataed = True - - if ticket.allowance is not None: - if rpc_state.early_read is None: - rpc_state.allowance += ticket.allowance - else: - payload = rpc_state.request_deserializer(rpc_state.early_read) - rpc_state.allowance += ticket.allowance - 1 - rpc_state.early_read = None - if rpc_state.read is _Read.READING: - call.read(call) - rpc_state.due.add(_READ) - termination = None - else: - termination = links.Ticket.Termination.COMPLETION - early_read_ticket = links.Ticket( - call, rpc_state.sequence_number, None, None, None, None, None, - None, payload, None, None, None, termination, None) - rpc_state.sequence_number += 1 - self._relay.add_value(early_read_ticket) - - if ticket.payload is not None: - disable_compression = rpc_state.context.next_compression_disabled() - if disable_compression: - flags = _intermediary_low.WriteFlags.WRITE_NO_COMPRESS - else: - flags = 0 - call.write(rpc_state.response_serializer(ticket.payload), call, flags) - rpc_state.due.add(_WRITE) - rpc_state.low_write = _LowWrite.ACTIVE - - if ticket.terminal_metadata is not None: - rpc_state.terminal_metadata = ticket.terminal_metadata - if ticket.code is not None: - rpc_state.code = ticket.code - if ticket.message is not None: - rpc_state.message = ticket.message - - if ticket.termination is links.Ticket.Termination.COMPLETION: - rpc_state.high_write = _HighWrite.CLOSED - if rpc_state.low_write is _LowWrite.OPEN: - if rpc_state.terminal_metadata is not None: - _metadatafy(call, rpc_state.terminal_metadata) - status = _status( - links.Ticket.Termination.COMPLETION, rpc_state.code, - rpc_state.message) - call.status(status, call) - rpc_state.due.add(_COMPLETE) - rpc_state.low_write = _LowWrite.CLOSED - elif ticket.termination is not None: - if rpc_state.terminal_metadata is not None: - _metadatafy(call, rpc_state.terminal_metadata) - status = _status( - ticket.termination, rpc_state.code, rpc_state.message) - call.status(status, call) - rpc_state.due.add(_COMPLETE) - - def add_port(self, address, server_credentials): - with self._lock: - if self._server is None: - self._completion_queue = _intermediary_low.CompletionQueue() - self._server = _intermediary_low.Server(self._completion_queue) - if server_credentials is None: - return self._server.add_http2_addr(address) - else: - return self._server.add_secure_http2_addr(address, server_credentials) - - def start(self): - with self._lock: - if self._server is None: - self._completion_queue = _intermediary_low.CompletionQueue() - self._server = _intermediary_low.Server(self._completion_queue) - self._pool = logging_pool.pool(1) - self._pool.submit(self._spin, self._completion_queue, self._server) - self._server.start() - self._server.service(None) - self._due.add(_SERVICE) - - def begin_stop(self): - with self._lock: - self._server.stop() - self._due.add(_STOP) - self._server = None - - def end_stop(self): - with self._lock: - pool = 
self._pool - pool.shutdown(wait=True) - - -class ServiceLink(links.Link): - """A links.Link for use on the service-side of a gRPC connection. - - Implementations of this interface are only valid for use between calls to - their start method and one of their stop methods. - """ - - @abc.abstractmethod - def add_port(self, address, server_credentials): - """Adds a port on which to service RPCs after this link has been started. - - Args: - address: The address on which to service RPCs with a port number of zero - requesting that a port number be automatically selected and used. - server_credentials: An _intermediary_low.ServerCredentials object, or - None for insecure service. - - Returns: - An integer port on which RPCs will be serviced after this link has been - started. This is typically the same number as the port number contained - in the passed address, but will likely be different if the port number - contained in the passed address was zero. - """ - raise NotImplementedError() - - @abc.abstractmethod - def start(self): - """Starts this object. - - This method must be called before attempting to use this Link in ticket - exchange. - """ - raise NotImplementedError() - - @abc.abstractmethod - def begin_stop(self): - """Indicate imminent link stop and immediate rejection of new RPCs. - - New RPCs will be rejected as soon as this method is called, but ongoing RPCs - will be allowed to continue until they terminate. This method does not - block. - """ - raise NotImplementedError() - - @abc.abstractmethod - def end_stop(self): - """Finishes stopping this link. - - begin_stop must have been called exactly once before calling this method. - - All in-progress RPCs will be terminated immediately. - """ - raise NotImplementedError() - - -class _ServiceLink(ServiceLink): - - def __init__(self, request_deserializers, response_serializers): - self._relay = relay.relay(None) - self._kernel = _Kernel( - {} if request_deserializers is None else request_deserializers, - {} if response_serializers is None else response_serializers, - self._relay) - - def accept_ticket(self, ticket): - self._kernel.add_ticket(ticket) - - def join_link(self, link): - self._relay.set_behavior(link.accept_ticket) - - def add_port(self, address, server_credentials): - return self._kernel.add_port(address, server_credentials) - - def start(self): - self._relay.start() - return self._kernel.start() - - def begin_stop(self): - self._kernel.begin_stop() - - def end_stop(self): - self._kernel.end_stop() - self._relay.stop() - - -def service_link(request_deserializers, response_serializers): - """Creates a ServiceLink. - - Args: - request_deserializers: A dict from group-method pair to request object - deserialization behavior. - response_serializers: A dict from group-method pair to response ojbect - serialization behavior. - - Returns: - A ServiceLink. - """ - return _ServiceLink(request_deserializers, response_serializers) diff --git a/src/python/grpcio/grpc/_plugin_wrapping.py b/src/python/grpcio/grpc/_plugin_wrapping.py index 4e9cfe710c..7cb5218c22 100644 --- a/src/python/grpcio/grpc/_plugin_wrapping.py +++ b/src/python/grpcio/grpc/_plugin_wrapping.py @@ -31,6 +31,7 @@ import collections import threading import grpc +from grpc import _common from grpc._cython import cygrpc @@ -62,17 +63,16 @@ class _WrappedCygrpcCallback(object): # TODO(atash) translate different Exception superclasses into different # status codes. 
self.cygrpc_callback( - cygrpc.Metadata([]), cygrpc.StatusCode.internal, error.message) + _common.EMPTY_METADATA, cygrpc.StatusCode.internal, + _common.encode(str(error))) def _invoke_success(self, metadata): try: - cygrpc_metadata = cygrpc.Metadata( - cygrpc.Metadatum(key, value) - for key, value in metadata) + cygrpc_metadata = _common.cygrpc_metadata(metadata) except Exception as error: self._invoke_failure(error) return - self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, '') + self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, b'') def __call__(self, metadata, error): with self.is_called_lock: @@ -101,7 +101,7 @@ class _WrappedPlugin(object): def __call__(self, context, cygrpc_callback): wrapped_cygrpc_callback = _WrappedCygrpcCallback(cygrpc_callback) wrapped_context = AuthMetadataContext( - context.service_url, context.method_name) + _common.decode(context.service_url), _common.decode(context.method_name)) try: self.plugin( wrapped_context, AuthMetadataPluginCallback(wrapped_cygrpc_callback)) @@ -120,4 +120,4 @@ def call_credentials_metadata_plugin(plugin, name): plugin's invocation must be non-blocking. """ return cygrpc.call_credentials_metadata_plugin( - cygrpc.CredentialsMetadataPlugin(_WrappedPlugin(plugin), name)) + cygrpc.CredentialsMetadataPlugin(_WrappedPlugin(plugin), _common.encode(name))) diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py index aae9f48ae6..94a13bfb2f 100644 --- a/src/python/grpcio/grpc/_server.py +++ b/src/python/grpcio/grpc/_server.py @@ -60,6 +60,8 @@ _CANCELLED = 'cancelled' _EMPTY_FLAGS = 0 _EMPTY_METADATA = cygrpc.Metadata(()) +_UNEXPECTED_EXIT_SERVER_GRACE = 1.0 + def _serialized_request(request_event): return request_event.batch_operations[0].received_message.bytes() @@ -144,18 +146,18 @@ def _abort(state, call, code, details): cygrpc.operation_send_initial_metadata( _EMPTY_METADATA, _EMPTY_FLAGS), cygrpc.operation_send_status_from_server( - _common.metadata(state.trailing_metadata), effective_code, + _common.cygrpc_metadata(state.trailing_metadata), effective_code, effective_details, _EMPTY_FLAGS), ) token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN else: operations = ( cygrpc.operation_send_status_from_server( - _common.metadata(state.trailing_metadata), effective_code, + _common.cygrpc_metadata(state.trailing_metadata), effective_code, effective_details, _EMPTY_FLAGS), ) token = _SEND_STATUS_FROM_SERVER_TOKEN - call.start_batch( + call.start_server_batch( cygrpc.Operations(operations), _send_status_from_server(state, token)) state.statused = True @@ -242,10 +244,10 @@ class _Context(grpc.ServicerContext): self._state.disable_next_compression = True def invocation_metadata(self): - return self._rpc_event.request_metadata + return _common.application_metadata(self._rpc_event.request_metadata) def peer(self): - return self._rpc_event.operation_call.peer() + return _common.decode(self._rpc_event.operation_call.peer()) def send_initial_metadata(self, initial_metadata): with self._state.condition: @@ -254,8 +256,8 @@ class _Context(grpc.ServicerContext): else: if self._state.initial_metadata_allowed: operation = cygrpc.operation_send_initial_metadata( - cygrpc.Metadata(initial_metadata), _EMPTY_FLAGS) - self._rpc_event.operation_call.start_batch( + _common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS) + self._rpc_event.operation_call.start_server_batch( cygrpc.Operations((operation,)), _send_initial_metadata(self._state)) self._state.initial_metadata_allowed = False @@ -265,7 +267,8 
@@ class _Context(grpc.ServicerContext): def set_trailing_metadata(self, trailing_metadata): with self._state.condition: - self._state.trailing_metadata = trailing_metadata + self._state.trailing_metadata = _common.cygrpc_metadata( + trailing_metadata) def set_code(self, code): with self._state.condition: @@ -273,7 +276,7 @@ class _Context(grpc.ServicerContext): def set_details(self, details): with self._state.condition: - self._state.details = details + self._state.details = _common.encode(details) class _RequestIterator(object): @@ -289,7 +292,7 @@ class _RequestIterator(object): elif self._state.client is _CLOSED or self._state.statused: raise StopIteration() else: - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)), _receive_message(self._state, self._call, self._request_deserializer)) self._state.due.add(_RECEIVE_MESSAGE_TOKEN) @@ -330,7 +333,7 @@ def _unary_request(rpc_event, state, request_deserializer): if state.client is _CANCELLED or state.statused: return None else: - start_batch_result = rpc_event.operation_call.start_batch( + start_server_batch_result = rpc_event.operation_call.start_server_batch( cygrpc.Operations( (cygrpc.operation_receive_message(_EMPTY_FLAGS),)), _receive_message( @@ -340,12 +343,11 @@ def _unary_request(rpc_event, state, request_deserializer): state.condition.wait() if state.request is None: if state.client is _CLOSED: - details = b'"{}" requires exactly one request message.'.format( + details = '"{}" requires exactly one request message.'.format( rpc_event.request_call_details.method) - # TODO(5992#issuecomment-220761992): really, what status code? _abort( state, rpc_event.operation_call, - cygrpc.StatusCode.unavailable, details) + cygrpc.StatusCode.unimplemented, _common.encode(details)) return None elif state.client is _CANCELLED: return None @@ -363,10 +365,10 @@ def _call_behavior(rpc_event, state, behavior, argument, request_deserializer): except Exception as e: # pylint: disable=broad-except with state.condition: if e not in state.rpc_errors: - details = b'Exception calling application: {}'.format(e) + details = 'Exception calling application: {}'.format(e) logging.exception(details) - _abort( - state, rpc_event.operation_call, cygrpc.StatusCode.unknown, details) + _abort(state, rpc_event.operation_call, + cygrpc.StatusCode.unknown, _common.encode(details)) return None, False @@ -378,10 +380,10 @@ def _take_response_from_response_iterator(rpc_event, state, response_iterator): except Exception as e: # pylint: disable=broad-except with state.condition: if e not in state.rpc_errors: - details = b'Exception iterating responses: {}'.format(e) + details = 'Exception iterating responses: {}'.format(e) logging.exception(details) - _abort( - state, rpc_event.operation_call, cygrpc.StatusCode.unknown, details) + _abort(state, rpc_event.operation_call, + cygrpc.StatusCode.unknown, _common.encode(details)) return None, False @@ -415,7 +417,7 @@ def _send_response(rpc_event, state, serialized_response): cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS), ) token = _SEND_MESSAGE_TOKEN - rpc_event.operation_call.start_batch( + rpc_event.operation_call.start_server_batch( cygrpc.Operations(operations), _send_message(state, token)) state.due.add(token) while True: @@ -427,7 +429,7 @@ def _send_response(rpc_event, state, serialized_response): def _status(rpc_event, state, serialized_response): with state.condition: if state.client is not _CANCELLED: - trailing_metadata = 
_common.metadata(state.trailing_metadata) + trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata) code = _completion_code(state) details = _details(state) operations = [ @@ -441,7 +443,7 @@ def _status(rpc_event, state, serialized_response): if serialized_response is not None: operations.append(cygrpc.operation_send_message( serialized_response, _EMPTY_FLAGS)) - rpc_event.operation_call.start_batch( + rpc_event.operation_call.start_server_batch( cygrpc.Operations(operations), _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN)) state.statused = True @@ -531,7 +533,8 @@ def _find_method_handler(rpc_event, generic_handlers): for generic_handler in generic_handlers: method_handler = generic_handler.service( _HandlerCallDetails( - rpc_event.request_call_details.method, rpc_event.request_metadata)) + _common.decode(rpc_event.request_call_details.method), + rpc_event.request_metadata)) if method_handler is not None: return method_handler else: @@ -547,7 +550,7 @@ def _handle_unrecognized_method(rpc_event): b'Method not found!', _EMPTY_FLAGS), ) rpc_state = _RPCState() - rpc_event.operation_call.start_batch( + rpc_event.operation_call.start_server_batch( operations, lambda ignored_event: (rpc_state, (),)) return rpc_state @@ -555,7 +558,7 @@ def _handle_unrecognized_method(rpc_event): def _handle_with_method_handler(rpc_event, method_handler, thread_pool): state = _RPCState() with state.condition: - rpc_event.operation_call.start_batch( + rpc_event.operation_call.start_server_batch( cygrpc.Operations( (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)), _receive_close_on_server(state)) @@ -670,17 +673,6 @@ def _serve(state): return -def _start(state): - with state.lock: - if state.stage is not _ServerStage.STOPPED: - raise ValueError('Cannot start already-started server!') - state.server.start() - state.stage = _ServerStage.STARTED - _request_call(state) - thread = threading.Thread(target=_serve, args=(state,)) - thread.start() - - def _stop(state, grace): with state.lock: if state.stage is _ServerStage.STOPPED: @@ -719,9 +711,27 @@ def _stop(state, grace): return shutdown_event +def _start(state): + with state.lock: + if state.stage is not _ServerStage.STOPPED: + raise ValueError('Cannot start already-started server!') + state.server.start() + state.stage = _ServerStage.STARTED + _request_call(state) + def cleanup_server(timeout): + if timeout is None: + _stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait() + else: + _stop(state, timeout).wait() + + thread = _common.CleanupThread( + cleanup_server, target=_serve, args=(state,)) + thread.start() + + class Server(grpc.Server): - def __init__(self, generic_handlers, thread_pool): + def __init__(self, thread_pool, generic_handlers): completion_queue = cygrpc.CompletionQueue() server = cygrpc.Server() server.register_completion_queue(completion_queue) @@ -732,10 +742,10 @@ class Server(grpc.Server): _add_generic_handlers(self._state, generic_rpc_handlers) def add_insecure_port(self, address): - return _add_insecure_port(self._state, address) + return _add_insecure_port(self._state, _common.encode(address)) def add_secure_port(self, address, server_credentials): - return _add_secure_port(self._state, address, server_credentials) + return _add_secure_port(self._state, _common.encode(address), server_credentials) def start(self): _start(self._state) diff --git a/src/python/grpcio/grpc/_utilities.py b/src/python/grpcio/grpc/_utilities.py index a4ca9b7282..4850967fbc 100644 --- a/src/python/grpcio/grpc/_utilities.py +++ 
b/src/python/grpcio/grpc/_utilities.py @@ -29,16 +29,41 @@ """Internal utilities for gRPC Python.""" +import collections import threading import time +import six + import grpc +from grpc import _common from grpc.framework.foundation import callable_util _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = ( 'Exception calling connectivity future "done" callback!') +class RpcMethodHandler( + collections.namedtuple( + '_RpcMethodHandler', + ('request_streaming', 'response_streaming', 'request_deserializer', + 'response_serializer', 'unary_unary', 'unary_stream', 'stream_unary', + 'stream_stream',)), + grpc.RpcMethodHandler): + pass + + +class DictionaryGenericHandler(grpc.GenericRpcHandler): + + def __init__(self, service, method_handlers): + self._method_handlers = { + _common.fully_qualified_method(service, method): method_handler + for method, method_handler in six.iteritems(method_handlers)} + + def service(self, handler_call_details): + return self._method_handlers.get(handler_call_details.method) + + class _ChannelReadyFuture(grpc.Future): def __init__(self, channel): @@ -144,4 +169,3 @@ def channel_ready_future(channel): ready_future = _ChannelReadyFuture(channel) ready_future.start() return ready_future - diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py index 621fcf2174..73415e0be7 100644 --- a/src/python/grpcio/grpc/beta/_client_adaptations.py +++ b/src/python/grpcio/grpc/beta/_client_adaptations.py @@ -30,6 +30,7 @@ """Translates gRPC's client-side API into gRPC's client-side Beta API.""" import grpc +from grpc import _common from grpc._cython import cygrpc from grpc.beta import interfaces from grpc.framework.common import cardinality @@ -48,10 +49,6 @@ _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = { } -def _fully_qualified_method(group, method): - return b'/{}/{}'.format(group, method) - - def _effective_metadata(metadata, metadata_transformer): non_none_metadata = () if metadata is None else metadata if metadata_transformer is None: @@ -120,7 +117,10 @@ class _Rendezvous(future.Future, face.Call): def exception(self, timeout=None): try: rpc_error_call = self._future.exception(timeout=timeout) - return _abortion_error(rpc_error_call) + if rpc_error_call is None: + return None + else: + return _abortion_error(rpc_error_call) except grpc.FutureTimeoutError: raise future.TimeoutError() except grpc.FutureCancelledError: @@ -184,14 +184,14 @@ def _blocking_unary_unary( metadata_transformer, request, request_serializer, response_deserializer): try: multi_callable = channel.unary_unary( - _fully_qualified_method(group, method), + _common.fully_qualified_method(group, method), request_serializer=request_serializer, response_deserializer=response_deserializer) effective_metadata = _effective_metadata(metadata, metadata_transformer) if with_call: - response, call = multi_callable( + response, call = multi_callable.with_call( request, timeout=timeout, metadata=effective_metadata, - credentials=_credentials(protocol_options), with_call=True) + credentials=_credentials(protocol_options)) return response, _Rendezvous(None, None, call) else: return multi_callable( @@ -205,7 +205,7 @@ def _future_unary_unary( channel, group, method, timeout, protocol_options, metadata, metadata_transformer, request, request_serializer, response_deserializer): multi_callable = channel.unary_unary( - _fully_qualified_method(group, method), + _common.fully_qualified_method(group, method), request_serializer=request_serializer, 
response_deserializer=response_deserializer) effective_metadata = _effective_metadata(metadata, metadata_transformer) @@ -219,7 +219,7 @@ def _unary_stream( channel, group, method, timeout, protocol_options, metadata, metadata_transformer, request, request_serializer, response_deserializer): multi_callable = channel.unary_stream( - _fully_qualified_method(group, method), + _common.fully_qualified_method(group, method), request_serializer=request_serializer, response_deserializer=response_deserializer) effective_metadata = _effective_metadata(metadata, metadata_transformer) @@ -235,14 +235,14 @@ def _blocking_stream_unary( response_deserializer): try: multi_callable = channel.stream_unary( - _fully_qualified_method(group, method), + _common.fully_qualified_method(group, method), request_serializer=request_serializer, response_deserializer=response_deserializer) effective_metadata = _effective_metadata(metadata, metadata_transformer) if with_call: - response, call = multi_callable( + response, call = multi_callable.with_call( request_iterator, timeout=timeout, metadata=effective_metadata, - credentials=_credentials(protocol_options), with_call=True) + credentials=_credentials(protocol_options)) return response, _Rendezvous(None, None, call) else: return multi_callable( @@ -257,7 +257,7 @@ def _future_stream_unary( metadata_transformer, request_iterator, request_serializer, response_deserializer): multi_callable = channel.stream_unary( - _fully_qualified_method(group, method), + _common.fully_qualified_method(group, method), request_serializer=request_serializer, response_deserializer=response_deserializer) effective_metadata = _effective_metadata(metadata, metadata_transformer) @@ -272,7 +272,7 @@ def _stream_stream( metadata_transformer, request_iterator, request_serializer, response_deserializer): multi_callable = channel.stream_stream( - _fully_qualified_method(group, method), + _common.fully_qualified_method(group, method), request_serializer=request_serializer, response_deserializer=response_deserializer) effective_metadata = _effective_metadata(metadata, metadata_transformer) diff --git a/src/python/grpcio/grpc/beta/_server.py b/src/python/grpcio/grpc/beta/_server.py deleted file mode 100644 index eb0aadb42f..0000000000 --- a/src/python/grpcio/grpc/beta/_server.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Beta API server implementation.""" - -import threading - -from grpc._links import service -from grpc.beta import interfaces -from grpc.framework.core import implementations as _core_implementations -from grpc.framework.crust import implementations as _crust_implementations -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import utilities - -_DEFAULT_POOL_SIZE = 8 -_DEFAULT_TIMEOUT = 300 -_MAXIMUM_TIMEOUT = 24 * 60 * 60 - - -def _set_event(): - event = threading.Event() - event.set() - return event - - -class _GRPCServicer(base.Servicer): - - def __init__(self, delegate): - self._delegate = delegate - - def service(self, group, method, context, output_operator): - try: - return self._delegate.service(group, method, context, output_operator) - except base.NoSuchMethodError as e: - if e.code is None and e.details is None: - raise base.NoSuchMethodError( - interfaces.StatusCode.UNIMPLEMENTED, - 'Method "%s" of service "%s" not implemented!' % (method, group)) - else: - raise - - -class _Server(interfaces.Server): - - def __init__( - self, implementations, multi_implementation, pool, pool_size, - default_timeout, maximum_timeout, grpc_link): - self._lock = threading.Lock() - self._implementations = implementations - self._multi_implementation = multi_implementation - self._customer_pool = pool - self._pool_size = pool_size - self._default_timeout = default_timeout - self._maximum_timeout = maximum_timeout - self._grpc_link = grpc_link - - self._end_link = None - self._stop_events = None - self._pool = None - - def _start(self): - with self._lock: - if self._end_link is not None: - raise ValueError('Cannot start already-started server!') - - if self._customer_pool is None: - self._pool = logging_pool.pool(self._pool_size) - assembly_pool = self._pool - else: - assembly_pool = self._customer_pool - - servicer = _GRPCServicer( - _crust_implementations.servicer( - self._implementations, self._multi_implementation, assembly_pool)) - - self._end_link = _core_implementations.service_end_link( - servicer, self._default_timeout, self._maximum_timeout) - - self._grpc_link.join_link(self._end_link) - self._end_link.join_link(self._grpc_link) - self._grpc_link.start() - self._end_link.start() - - def _dissociate_links_and_shut_down_pool(self): - self._grpc_link.end_stop() - self._grpc_link.join_link(utilities.NULL_LINK) - self._end_link.join_link(utilities.NULL_LINK) - self._end_link = None - if self._pool is not None: - self._pool.shutdown(wait=True) - self._pool = None - - def _stop_stopping(self): - self._dissociate_links_and_shut_down_pool() - for stop_event in self._stop_events: - stop_event.set() - self._stop_events = None - - def _stop_started(self): - self._grpc_link.begin_stop() - self._end_link.stop(0).wait() - self._dissociate_links_and_shut_down_pool() - - def _foreign_thread_stop(self, end_stop_event, stop_events): - end_stop_event.wait() - with self._lock: - if 
self._stop_events is stop_events: - self._stop_stopping() - - def _schedule_stop(self, grace): - with self._lock: - if self._end_link is None: - return _set_event() - server_stop_event = threading.Event() - if self._stop_events is None: - self._stop_events = [server_stop_event] - self._grpc_link.begin_stop() - else: - self._stop_events.append(server_stop_event) - end_stop_event = self._end_link.stop(grace) - end_stop_thread = threading.Thread( - target=self._foreign_thread_stop, - args=(end_stop_event, self._stop_events)) - end_stop_thread.start() - return server_stop_event - - def _stop_now(self): - with self._lock: - if self._end_link is not None: - if self._stop_events is None: - self._stop_started() - else: - self._stop_stopping() - - def add_insecure_port(self, address): - with self._lock: - if self._end_link is None: - return self._grpc_link.add_port(address, None) - else: - raise ValueError('Can\'t add port to serving server!') - - def add_secure_port(self, address, server_credentials): - with self._lock: - if self._end_link is None: - return self._grpc_link.add_port( - address, server_credentials._low_credentials) # pylint: disable=protected-access - else: - raise ValueError('Can\'t add port to serving server!') - - def start(self): - self._start() - - def stop(self, grace): - if 0 < grace: - return self._schedule_stop(grace) - else: - self._stop_now() - return _set_event() - - def __enter__(self): - self._start() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self._stop_now() - return False - - def __del__(self): - self._stop_now() - - -def server( - implementations, multi_implementation, request_deserializers, - response_serializers, thread_pool, thread_pool_size, default_timeout, - maximum_timeout): - grpc_link = service.service_link(request_deserializers, response_serializers) - return _Server( - implementations, multi_implementation, thread_pool, - _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size, - _DEFAULT_TIMEOUT if default_timeout is None else default_timeout, - _MAXIMUM_TIMEOUT if maximum_timeout is None else maximum_timeout, - grpc_link) diff --git a/src/python/grpcio/grpc/beta/_server_adaptations.py b/src/python/grpcio/grpc/beta/_server_adaptations.py index 52eadf2315..cca4a1797a 100644 --- a/src/python/grpcio/grpc/beta/_server_adaptations.py +++ b/src/python/grpcio/grpc/beta/_server_adaptations.py @@ -33,6 +33,7 @@ import collections import threading import grpc +from grpc import _common from grpc.beta import interfaces from grpc.framework.common import cardinality from grpc.framework.common import style @@ -78,7 +79,8 @@ class _FaceServicerContext(face.ServicerContext): return _ServerProtocolContext(self._servicer_context) def invocation_metadata(self): - return self._servicer_context.invocation_metadata() + return _common.cygrpc_metadata( + self._servicer_context.invocation_metadata()) def initial_metadata(self, initial_metadata): self._servicer_context.send_initial_metadata(initial_metadata) @@ -160,14 +162,24 @@ class _Callback(stream.Consumer): self._condition.wait() -def _pipe_requests(request_iterator, request_consumer, servicer_context): - for request in request_iterator: - if not servicer_context.is_active(): - return - request_consumer.consume(request) - if not servicer_context.is_active(): - return - request_consumer.terminate() +def _run_request_pipe_thread(request_iterator, request_consumer, + servicer_context): + thread_joined = threading.Event() + def pipe_requests(): + for request in request_iterator: + if not 
servicer_context.is_active() or thread_joined.is_set(): + return + request_consumer.consume(request) + if not servicer_context.is_active() or thread_joined.is_set(): + return + request_consumer.terminate() + + def stop_request_pipe(timeout): + thread_joined.set() + + request_pipe_thread = _common.CleanupThread( + stop_request_pipe, target=pipe_requests) + request_pipe_thread.start() def _adapt_unary_unary_event(unary_unary_event): @@ -205,10 +217,8 @@ def _adapt_stream_unary_event(stream_unary_event): raise abandonment.Abandoned() request_consumer = stream_unary_event( callback.consume_and_terminate, _FaceServicerContext(servicer_context)) - request_pipe_thread = threading.Thread( - target=_pipe_requests, - args=(request_iterator, request_consumer, servicer_context,)) - request_pipe_thread.start() + _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context) return callback.draw_all_values()[0] return adaptation @@ -220,10 +230,8 @@ def _adapt_stream_stream_event(stream_stream_event): raise abandonment.Abandoned() request_consumer = stream_stream_event( callback, _FaceServicerContext(servicer_context)) - request_pipe_thread = threading.Thread( - target=_pipe_requests, - args=(request_iterator, request_consumer, servicer_context,)) - request_pipe_thread.start() + _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context) while True: response = callback.draw_one_value() if response is None: @@ -287,36 +295,43 @@ def _simple_method_handler( None, _adapt_stream_stream_event(implementation.stream_stream_event)) +def _flatten_method_pair_map(method_pair_map): + method_pair_map = method_pair_map or {} + flat_map = {} + for method_pair in method_pair_map: + method = _common.fully_qualified_method(method_pair[0], method_pair[1]) + flat_map[method] = method_pair_map[method_pair] + return flat_map + + class _GenericRpcHandler(grpc.GenericRpcHandler): def __init__( self, method_implementations, multi_method_implementation, request_deserializers, response_serializers): - self._method_implementations = method_implementations + self._method_implementations = _flatten_method_pair_map( + method_implementations) + self._request_deserializers = _flatten_method_pair_map( + request_deserializers) + self._response_serializers = _flatten_method_pair_map( + response_serializers) self._multi_method_implementation = multi_method_implementation - self._request_deserializers = request_deserializers or {} - self._response_serializers = response_serializers or {} def service(self, handler_call_details): - try: - group_name, method_name = handler_call_details.method.split(b'/')[1:3] - except ValueError: + method_implementation = self._method_implementations.get( + handler_call_details.method) + if method_implementation is not None: + return _simple_method_handler( + method_implementation, + self._request_deserializers.get(handler_call_details.method), + self._response_serializers.get(handler_call_details.method)) + elif self._multi_method_implementation is None: return None else: - method_implementation = self._method_implementations.get( - (group_name, method_name,)) - if method_implementation is not None: - return _simple_method_handler( - method_implementation, - self._request_deserializers.get((group_name, method_name,)), - self._response_serializers.get((group_name, method_name,))) - elif self._multi_method_implementation is None: + try: + return None #TODO(nathaniel): call the multimethod. 
+ except face.NoSuchMethodError: return None - else: - try: - return None #TODO(nathaniel): call the multimethod. - except face.NoSuchMethodError: - return None class _Server(interfaces.Server): @@ -356,4 +371,5 @@ def server( _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size) else: effective_thread_pool = thread_pool - return _Server(grpc.server((generic_rpc_handler,), effective_thread_pool)) + return _Server( + grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,))) diff --git a/src/python/grpcio/grpc/beta/_stub.py b/src/python/grpcio/grpc/beta/_stub.py deleted file mode 100644 index 2af019309a..0000000000 --- a/src/python/grpcio/grpc/beta/_stub.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
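
The _server_adaptations hunk just above tracks the reordered grpc.server signature (thread pool first, handlers passed as a keyword), matching the Server.__init__ change earlier in this diff. A minimal sketch of server construction against the new ordering follows; NoOpHandler and the port choice are illustrative assumptions, not code from this change.

from concurrent import futures

import grpc

class NoOpHandler(grpc.GenericRpcHandler):
    """Hypothetical handler that recognizes no methods."""
    def service(self, handler_call_details):
        return None

server = grpc.server(
    futures.ThreadPoolExecutor(max_workers=10),  # thread pool is now first
    handlers=(NoOpHandler(),))                   # handlers moved to a keyword
port = server.add_insecure_port('[::]:0')        # 0 requests any free port
server.start()
server.stop(None)

Handlers can also be registered after construction via server.add_generic_rpc_handlers((NoOpHandler(),)), which the Server class above exposes.
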
- -"""Beta API stub implementation.""" - -import threading - -from grpc._links import invocation -from grpc.framework.core import implementations as _core_implementations -from grpc.framework.crust import implementations as _crust_implementations -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.links import utilities - -_DEFAULT_POOL_SIZE = 6 - - -class _AutoIntermediary(object): - - def __init__(self, up, down, delegate): - self._lock = threading.Lock() - self._up = up - self._down = down - self._in_context = False - self._delegate = delegate - - def __getattr__(self, attr): - with self._lock: - if self._delegate is None: - raise AttributeError('No useful attributes out of context!') - else: - return getattr(self._delegate, attr) - - def __enter__(self): - with self._lock: - if self._in_context: - raise ValueError('Already in context!') - elif self._delegate is None: - self._delegate = self._up() - self._in_context = True - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - with self._lock: - if not self._in_context: - raise ValueError('Not in context!') - self._down() - self._in_context = False - self._delegate = None - return False - - def __del__(self): - with self._lock: - if self._delegate is not None: - self._down() - self._delegate = None - - -class _StubAssemblyManager(object): - - def __init__( - self, thread_pool, thread_pool_size, end_link, grpc_link, stub_creator): - self._thread_pool = thread_pool - self._pool_size = thread_pool_size - self._end_link = end_link - self._grpc_link = grpc_link - self._stub_creator = stub_creator - self._own_pool = None - - def up(self): - if self._thread_pool is None: - self._own_pool = logging_pool.pool( - _DEFAULT_POOL_SIZE if self._pool_size is None else self._pool_size) - assembly_pool = self._own_pool - else: - assembly_pool = self._thread_pool - self._end_link.join_link(self._grpc_link) - self._grpc_link.join_link(self._end_link) - self._end_link.start() - self._grpc_link.start() - return self._stub_creator(self._end_link, assembly_pool) - - def down(self): - self._end_link.stop(0).wait() - self._grpc_link.stop() - self._end_link.join_link(utilities.NULL_LINK) - self._grpc_link.join_link(utilities.NULL_LINK) - if self._own_pool is not None: - self._own_pool.shutdown(wait=True) - self._own_pool = None - - -def _assemble( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size, stub_creator): - end_link = _core_implementations.invocation_end_link() - grpc_link = invocation.invocation_link( - channel, host, metadata_transformer, request_serializers, - response_deserializers) - stub_assembly_manager = _StubAssemblyManager( - thread_pool, thread_pool_size, end_link, grpc_link, stub_creator) - stub = stub_assembly_manager.up() - return _AutoIntermediary( - stub_assembly_manager.up, stub_assembly_manager.down, stub) - - -def _dynamic_stub_creator(service, cardinalities): - def create_dynamic_stub(end_link, invocation_pool): - return _crust_implementations.dynamic_stub( - end_link, service, cardinalities, invocation_pool) - return create_dynamic_stub - - -def generic_stub( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size): - return _assemble( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size, - _crust_implementations.generic_stub) - - -def dynamic_stub( - channel, host, service, cardinalities, 
metadata_transformer, - request_serializers, response_deserializers, thread_pool, - thread_pool_size): - return _assemble( - channel, host, metadata_transformer, request_serializers, - response_deserializers, thread_pool, thread_pool_size, - _dynamic_stub_creator(service, cardinalities)) diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py index 4ae6e7d675..ab25fd5eec 100644 --- a/src/python/grpcio/grpc/beta/implementations.py +++ b/src/python/grpcio/grpc/beta/implementations.py @@ -37,7 +37,6 @@ import threading # pylint: disable=unused-import # cardinality and face are referenced from specification in this module. import grpc from grpc import _auth -from grpc._adapter import _types from grpc.beta import _client_adaptations from grpc.beta import _server_adaptations from grpc.beta import interfaces diff --git a/src/python/grpcio/grpc/beta/interfaces.py b/src/python/grpcio/grpc/beta/interfaces.py index 4343b6c4b5..90f6bbbfcc 100644 --- a/src/python/grpcio/grpc/beta/interfaces.py +++ b/src/python/grpcio/grpc/beta/interfaces.py @@ -36,6 +36,9 @@ import six import grpc ChannelConnectivity = grpc.ChannelConnectivity +# FATAL_FAILURE was a Beta-API name for SHUTDOWN +ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN + StatusCode = grpc.StatusCode diff --git a/src/python/grpcio/grpc/framework/core/_constants.py b/src/python/grpcio/grpc/framework/core/_constants.py deleted file mode 100644 index 0f47cb48e0..0000000000 --- a/src/python/grpcio/grpc/framework/core/_constants.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
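
The beta/interfaces.py hunk above keeps Beta-era callers working by aliasing the old FATAL_FAILURE name onto the GA SHUTDOWN member. A small sketch of what the alias means for connectivity callbacks; the callback body is illustrative only.

from grpc.beta import interfaces

# Both names now refer to the same grpc.ChannelConnectivity member.
assert (interfaces.ChannelConnectivity.FATAL_FAILURE is
        interfaces.ChannelConnectivity.SHUTDOWN)

def on_connectivity_change(connectivity):
    # Beta-era comparisons against FATAL_FAILURE still match SHUTDOWN.
    if connectivity is interfaces.ChannelConnectivity.FATAL_FAILURE:
        print('channel has shut down')

A Beta channel's subscribe method would typically drive such a callback.
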
- -"""Private constants for the package.""" - -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import links - -TICKET_SUBSCRIPTION_FOR_BASE_SUBSCRIPTION_KIND = { - base.Subscription.Kind.NONE: links.Ticket.Subscription.NONE, - base.Subscription.Kind.TERMINATION_ONLY: - links.Ticket.Subscription.TERMINATION, - base.Subscription.Kind.FULL: links.Ticket.Subscription.FULL, - } - -# Mapping from abortive operation outcome to ticket termination to be -# sent to the other side of the operation, or None to indicate that no -# ticket should be sent to the other side in the event of such an -# outcome. -ABORTION_OUTCOME_TO_TICKET_TERMINATION = { - base.Outcome.Kind.CANCELLED: links.Ticket.Termination.CANCELLATION, - base.Outcome.Kind.EXPIRED: links.Ticket.Termination.EXPIRATION, - base.Outcome.Kind.LOCAL_SHUTDOWN: links.Ticket.Termination.SHUTDOWN, - base.Outcome.Kind.REMOTE_SHUTDOWN: None, - base.Outcome.Kind.RECEPTION_FAILURE: - links.Ticket.Termination.RECEPTION_FAILURE, - base.Outcome.Kind.TRANSMISSION_FAILURE: None, - base.Outcome.Kind.LOCAL_FAILURE: links.Ticket.Termination.LOCAL_FAILURE, - base.Outcome.Kind.REMOTE_FAILURE: links.Ticket.Termination.REMOTE_FAILURE, -} - -INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Core) internal error! )-:' -TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE = ( - 'Exception calling termination callback!') diff --git a/src/python/grpcio/grpc/framework/core/_context.py b/src/python/grpcio/grpc/framework/core/_context.py deleted file mode 100644 index a346e9d478..0000000000 --- a/src/python/grpcio/grpc/framework/core/_context.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for operation context.""" - -import time - -# _interfaces is referenced from specification in this module. 
-from grpc.framework.core import _interfaces # pylint: disable=unused-import -from grpc.framework.core import _utilities -from grpc.framework.interfaces.base import base - - -class OperationContext(base.OperationContext): - """An implementation of interfaces.OperationContext.""" - - def __init__( - self, lock, termination_manager, transmission_manager, - expiration_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - - def _abort(self, outcome_kind): - with self._lock: - if self._termination_manager.outcome is None: - outcome = _utilities.Outcome(outcome_kind, None, None) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - def outcome(self): - """See base.OperationContext.outcome for specification.""" - with self._lock: - return self._termination_manager.outcome - - def add_termination_callback(self, callback): - """See base.OperationContext.add_termination_callback.""" - with self._lock: - if self._termination_manager.outcome is None: - self._termination_manager.add_callback(callback) - return None - else: - return self._termination_manager.outcome - - def time_remaining(self): - """See base.OperationContext.time_remaining for specification.""" - with self._lock: - deadline = self._expiration_manager.deadline() - return max(0.0, deadline - time.time()) - - def cancel(self): - """See base.OperationContext.cancel for specification.""" - self._abort(base.Outcome.Kind.CANCELLED) - - def fail(self, exception): - """See base.OperationContext.fail for specification.""" - self._abort(base.Outcome.Kind.LOCAL_FAILURE) diff --git a/src/python/grpcio/grpc/framework/core/_emission.py b/src/python/grpcio/grpc/framework/core/_emission.py deleted file mode 100644 index 8ab59dc3e5..0000000000 --- a/src/python/grpcio/grpc/framework/core/_emission.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for handling emitted values.""" - -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.interfaces.base import base - - -class EmissionManager(_interfaces.EmissionManager): - """An EmissionManager implementation.""" - - def __init__( - self, lock, termination_manager, transmission_manager, - expiration_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._ingestion_manager = None - - self._initial_metadata_seen = False - self._payload_seen = False - self._completion_seen = False - - def set_ingestion_manager(self, ingestion_manager): - """Sets the ingestion manager with which this manager will cooperate. - - Args: - ingestion_manager: The _interfaces.IngestionManager for the operation. - """ - self._ingestion_manager = ingestion_manager - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - initial_metadata_present = initial_metadata is not None - payload_present = payload is not None - completion_present = completion is not None - allowance_present = allowance is not None - with self._lock: - if self._termination_manager.outcome is None: - if (initial_metadata_present and ( - self._initial_metadata_seen or self._payload_seen or - self._completion_seen) or - payload_present and self._completion_seen or - completion_present and self._completion_seen or - allowance_present and allowance <= 0): - outcome = _utilities.Outcome( - base.Outcome.Kind.LOCAL_FAILURE, None, None) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - else: - self._initial_metadata_seen |= initial_metadata_present - self._payload_seen |= payload_present - self._completion_seen |= completion_present - if completion_present: - self._termination_manager.emission_complete() - self._ingestion_manager.local_emissions_done() - self._transmission_manager.advance( - initial_metadata, payload, completion, allowance) - if allowance_present: - self._ingestion_manager.add_local_allowance(allowance) diff --git a/src/python/grpcio/grpc/framework/core/_end.py b/src/python/grpcio/grpc/framework/core/_end.py deleted file mode 100644 index 009d27c915..0000000000 --- a/src/python/grpcio/grpc/framework/core/_end.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Implementation of base.End.""" - -import abc -import threading -import uuid - -import six - -from grpc.framework.core import _operation -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.foundation import later -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import links -from grpc.framework.interfaces.links import utilities - -_IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!' - - -class End(six.with_metaclass(abc.ABCMeta, base.End, links.Link)): - """A bridge between base.End and links.Link. - - Implementations of this interface translate arriving tickets into - calls on application objects implementing base interfaces and - translate calls from application objects implementing base interfaces - into tickets sent to a joined link. - """ - - -class _Cycle(object): - """State for a single start-stop End lifecycle.""" - - def __init__(self, pool): - self.pool = pool - self.grace = False - self.futures = [] - self.operations = {} - self.idle_actions = [] - - -def _abort(operations): - for operation in operations: - operation.abort(base.Outcome.Kind.LOCAL_SHUTDOWN) - - -def _cancel_futures(futures): - for future in futures: - future.cancel() - - -def _future_shutdown(lock, cycle, event): - def in_future(): - with lock: - _abort(cycle.operations.values()) - _cancel_futures(cycle.futures) - return in_future - - -class _End(End): - """An End implementation.""" - - def __init__(self, servicer_package): - """Constructor. - - Args: - servicer_package: A _ServicerPackage for servicing operations or None if - this end will not be used to service operations. 
- """ - self._lock = threading.Condition() - self._servicer_package = servicer_package - - self._stats = {outcome_kind: 0 for outcome_kind in base.Outcome.Kind} - - self._mate = None - - self._cycle = None - - def _termination_action(self, operation_id): - """Constructs the termination action for a single operation. - - Args: - operation_id: The operation ID for the termination action. - - Returns: - A callable that takes an operation outcome kind as its sole parameter and - that should be used as the termination action for the operation - associated with the given operation ID. - """ - def termination_action(outcome_kind): - with self._lock: - self._stats[outcome_kind] += 1 - self._cycle.operations.pop(operation_id, None) - if not self._cycle.operations: - for action in self._cycle.idle_actions: - self._cycle.pool.submit(action) - self._cycle.idle_actions = [] - if self._cycle.grace: - _cancel_futures(self._cycle.futures) - self._cycle.pool.shutdown(wait=False) - self._cycle = None - return termination_action - - def start(self): - """See base.End.start for specification.""" - with self._lock: - if self._cycle is not None: - raise ValueError('Tried to start a not-stopped End!') - else: - self._cycle = _Cycle(logging_pool.pool(1)) - - def stop(self, grace): - """See base.End.stop for specification.""" - with self._lock: - if self._cycle is None: - event = threading.Event() - event.set() - return event - elif not self._cycle.operations: - event = threading.Event() - self._cycle.pool.submit(event.set) - self._cycle.pool.shutdown(wait=False) - self._cycle = None - return event - else: - self._cycle.grace = True - event = threading.Event() - self._cycle.idle_actions.append(event.set) - if 0 < grace: - future = later.later( - grace, _future_shutdown(self._lock, self._cycle, event)) - self._cycle.futures.append(future) - else: - _abort(self._cycle.operations.values()) - return event - - def operate( - self, group, method, subscription, timeout, initial_metadata=None, - payload=None, completion=None, protocol_options=None): - """See base.End.operate for specification.""" - operation_id = uuid.uuid4() - with self._lock: - if self._cycle is None or self._cycle.grace: - raise ValueError('Can\'t operate on stopped or stopping End!') - termination_action = self._termination_action(operation_id) - operation = _operation.invocation_operate( - operation_id, group, method, subscription, timeout, protocol_options, - initial_metadata, payload, completion, self._mate.accept_ticket, - termination_action, self._cycle.pool) - self._cycle.operations[operation_id] = operation - return operation.context, operation.operator - - def operation_stats(self): - """See base.End.operation_stats for specification.""" - with self._lock: - return dict(self._stats) - - def add_idle_action(self, action): - """See base.End.add_idle_action for specification.""" - with self._lock: - if self._cycle is None: - raise ValueError('Can\'t add idle action to stopped End!') - action_with_exceptions_logged = callable_util.with_exceptions_logged( - action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE) - if self._cycle.operations: - self._cycle.idle_actions.append(action_with_exceptions_logged) - else: - self._cycle.pool.submit(action_with_exceptions_logged) - - def accept_ticket(self, ticket): - """See links.Link.accept_ticket for specification.""" - with self._lock: - if self._cycle is not None: - operation = self._cycle.operations.get(ticket.operation_id) - if operation is not None: - operation.handle_ticket(ticket) - elif self._servicer_package 
is not None and not self._cycle.grace: - termination_action = self._termination_action(ticket.operation_id) - operation = _operation.service_operate( - self._servicer_package, ticket, self._mate.accept_ticket, - termination_action, self._cycle.pool) - if operation is not None: - self._cycle.operations[ticket.operation_id] = operation - - def join_link(self, link): - """See links.Link.join_link for specification.""" - with self._lock: - self._mate = utilities.NULL_LINK if link is None else link - - -def serviceless_end_link(): - """Constructs an End usable only for invoking operations. - - Returns: - An End usable for translating operations into ticket exchange. - """ - return _End(None) - - -def serviceful_end_link(servicer, default_timeout, maximum_timeout): - """Constructs an End capable of servicing operations. - - Args: - servicer: An interfaces.Servicer for servicing operations. - default_timeout: A length of time in seconds to be used as the default - time alloted for a single operation. - maximum_timeout: A length of time in seconds to be used as the maximum - time alloted for a single operation. - - Returns: - An End capable of servicing the operations requested of it through ticket - exchange. - """ - return _End( - _utilities.ServicerPackage(servicer, default_timeout, maximum_timeout)) diff --git a/src/python/grpcio/grpc/framework/core/_expiration.py b/src/python/grpcio/grpc/framework/core/_expiration.py deleted file mode 100644 index ded0ab6bce..0000000000 --- a/src/python/grpcio/grpc/framework/core/_expiration.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
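The _end module deleted above drives each start/stop lifecycle through a _Cycle that holds the thread pool, in-flight operations, and idle actions, with stop() returning a threading.Event that fires once the cycle drains. A hedged usage sketch of that removed API (it only runs against a tree that still contains grpc.framework.core._end; no operations are started here):

    from grpc.framework.core import _end

    end = _end.serviceless_end_link()
    end.start()
    # Operations would be invoked via end.operate(...) after a mate link is
    # attached with end.join_link(...); none are started in this sketch.
    stopped = end.stop(grace=5.0)  # returns a threading.Event
    stopped.wait()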
- -"""State and behavior for operation expiration.""" - -import time - -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import later -from grpc.framework.interfaces.base import base - - -class _ExpirationManager(_interfaces.ExpirationManager): - """An implementation of _interfaces.ExpirationManager.""" - - def __init__( - self, commencement, timeout, maximum_timeout, lock, termination_manager, - transmission_manager): - """Constructor. - - Args: - commencement: The time in seconds since the epoch at which the operation - began. - timeout: A length of time in seconds to allow for the operation to run. - maximum_timeout: The maximum length of time in seconds to allow for the - operation to run despite what is requested via this object's - change_timout method. - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._commencement = commencement - self._maximum_timeout = maximum_timeout - - self._timeout = timeout - self._deadline = commencement + timeout - self._index = None - self._future = None - - def _expire(self, index): - def expire(): - with self._lock: - if self._future is not None and index == self._index: - self._future = None - self._termination_manager.expire() - self._transmission_manager.abort( - _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None)) - return expire - - def start(self): - self._index = 0 - self._future = later.later(self._timeout, self._expire(0)) - - def change_timeout(self, timeout): - if self._future is not None and timeout != self._timeout: - self._future.cancel() - new_timeout = min(timeout, self._maximum_timeout) - new_index = self._index + 1 - self._timeout = new_timeout - self._deadline = self._commencement + new_timeout - self._index = new_index - delay = self._deadline - time.time() - self._future = later.later(delay, self._expire(new_index)) - if new_timeout != timeout: - self._transmission_manager.timeout(new_timeout) - - def deadline(self): - return self._deadline - - def terminate(self): - if self._future: - self._future.cancel() - self._future = None - self._deadline_index = None - - -def invocation_expiration_manager( - timeout, lock, termination_manager, transmission_manager): - """Creates an _interfaces.ExpirationManager appropriate for front-side use. - - Args: - timeout: A length of time in seconds to allow for the operation to run. - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - - Returns: - An _interfaces.ExpirationManager appropriate for invocation-side use. - """ - expiration_manager = _ExpirationManager( - time.time(), timeout, timeout, lock, termination_manager, - transmission_manager) - expiration_manager.start() - return expiration_manager - - -def service_expiration_manager( - timeout, default_timeout, maximum_timeout, lock, termination_manager, - transmission_manager): - """Creates an _interfaces.ExpirationManager appropriate for back-side use. - - Args: - timeout: A length of time in seconds to allow for the operation to run. May - be None in which case default_timeout will be used. 
- default_timeout: The default length of time in seconds to allow for the - operation to run if the front-side customer has not specified such a value - (or if the value they specified is not yet known). - maximum_timeout: The maximum length of time in seconds to allow for the - operation to run. - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - - Returns: - An _interfaces.ExpirationManager appropriate for service-side use. - """ - expiration_manager = _ExpirationManager( - time.time(), default_timeout if timeout is None else timeout, - maximum_timeout, lock, termination_manager, transmission_manager) - expiration_manager.start() - return expiration_manager diff --git a/src/python/grpcio/grpc/framework/core/_ingestion.py b/src/python/grpcio/grpc/framework/core/_ingestion.py deleted file mode 100644 index f2767c981b..0000000000 --- a/src/python/grpcio/grpc/framework/core/_ingestion.py +++ /dev/null @@ -1,439 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for ingestion during an operation.""" - -import abc -import collections -import enum - -import six - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import abandonment -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base - -_CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE = 'Exception initializing ingestion!' -_INGESTION_EXCEPTION_LOG_MESSAGE = 'Exception during ingestion!' - - -class _SubscriptionCreation( - collections.namedtuple( - '_SubscriptionCreation', - ('kind', 'subscription', 'code', 'details',))): - """A sum type for the outcome of ingestion initialization. - - Attributes: - kind: A Kind value coarsely indicating how subscription creation completed. - subscription: The created subscription. 
Only present if kind is - Kind.SUBSCRIPTION. - code: A code value to be sent to the other side of the operation along with - an indication that the operation is being aborted due to an error on the - remote side of the operation. Only present if kind is Kind.REMOTE_ERROR. - details: A details value to be sent to the other side of the operation - along with an indication that the operation is being aborted due to an - error on the remote side of the operation. Only present if kind is - Kind.REMOTE_ERROR. - """ - - @enum.unique - class Kind(enum.Enum): - SUBSCRIPTION = 'subscription' - REMOTE_ERROR = 'remote error' - ABANDONED = 'abandoned' - - -class _SubscriptionCreator(six.with_metaclass(abc.ABCMeta)): - """Common specification of subscription-creating behavior.""" - - @abc.abstractmethod - def create(self, group, method): - """Creates the base.Subscription of the local customer. - - Any exceptions raised by this method should be attributed to and treated as - defects in the customer code called by this method. - - Args: - group: The group identifier of the operation. - method: The method identifier of the operation. - - Returns: - A _SubscriptionCreation describing the result of subscription creation. - """ - raise NotImplementedError() - - -class _ServiceSubscriptionCreator(_SubscriptionCreator): - """A _SubscriptionCreator appropriate for service-side use.""" - - def __init__(self, servicer, operation_context, output_operator): - """Constructor. - - Args: - servicer: The base.Servicer that will service the operation. - operation_context: A base.OperationContext for the operation to be passed - to the customer. - output_operator: A base.Operator for the operation to be passed to the - customer and to be called by the customer to accept operation data - emitted by the customer. - """ - self._servicer = servicer - self._operation_context = operation_context - self._output_operator = output_operator - - def create(self, group, method): - try: - subscription = self._servicer.service( - group, method, self._operation_context, self._output_operator) - except base.NoSuchMethodError as e: - return _SubscriptionCreation( - _SubscriptionCreation.Kind.REMOTE_ERROR, None, e.code, e.details) - except abandonment.Abandoned: - return _SubscriptionCreation( - _SubscriptionCreation.Kind.ABANDONED, None, None, None) - else: - return _SubscriptionCreation( - _SubscriptionCreation.Kind.SUBSCRIPTION, subscription, None, None) - - -def _wrap(behavior): - def wrapped(*args, **kwargs): - try: - behavior(*args, **kwargs) - except abandonment.Abandoned: - return False - else: - return True - return wrapped - - -class _IngestionManager(_interfaces.IngestionManager): - """An implementation of _interfaces.IngestionManager.""" - - def __init__( - self, lock, pool, subscription, subscription_creator, termination_manager, - transmission_manager, expiration_manager, protocol_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - pool: A thread pool in which to execute customer code. - subscription: A base.Subscription describing the customer's interest in - operation values from the other side. May be None if - subscription_creator is not None. - subscription_creator: A _SubscriptionCreator wrapping the portion of - customer code that when called returns the base.Subscription describing - the customer's interest in operation values from the other side. May be - None if subscription is not None. - termination_manager: The _interfaces.TerminationManager for the operation. 
- transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - protocol_manager: The _interfaces.ProtocolManager for the operation. - """ - self._lock = lock - self._pool = pool - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._protocol_manager = protocol_manager - - if subscription is None: - self._subscription_creator = subscription_creator - self._wrapped_operator = None - elif subscription.kind is base.Subscription.Kind.FULL: - self._subscription_creator = None - self._wrapped_operator = _wrap(subscription.operator.advance) - else: - # TODO(nathaniel): Support other subscriptions. - raise ValueError('Unsupported subscription "%s"!' % subscription.kind) - self._pending_initial_metadata = None - self._pending_payloads = [] - self._pending_completion = None - self._local_allowance = 1 - # A nonnegative integer or None, with None indicating that the local - # customer is done emitting anyway so there's no need to bother it by - # informing it that the remote customer has granted it further permission to - # emit. - self._remote_allowance = 0 - self._processing = False - - def _abort_internal_only(self): - self._subscription_creator = None - self._wrapped_operator = None - self._pending_initial_metadata = None - self._pending_payloads = None - self._pending_completion = None - - def _abort_and_notify(self, outcome_kind, code, details): - self._abort_internal_only() - if self._termination_manager.outcome is None: - outcome = _utilities.Outcome(outcome_kind, code, details) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - def _operator_next(self): - """Computes the next step for full-subscription ingestion. - - Returns: - An initial_metadata, payload, completion, allowance, continue quintet - indicating what operation values (if any) are available to pass into - customer code and whether or not there is anything immediately - actionable to call customer code to do. 
- """ - if self._wrapped_operator is None: - return None, None, None, None, False - else: - initial_metadata, payload, completion, allowance, action = [None] * 5 - if self._pending_initial_metadata is not None: - initial_metadata = self._pending_initial_metadata - self._pending_initial_metadata = None - action = True - if self._pending_payloads and 0 < self._local_allowance: - payload = self._pending_payloads.pop(0) - self._local_allowance -= 1 - action = True - if not self._pending_payloads and self._pending_completion is not None: - completion = self._pending_completion - self._pending_completion = None - action = True - if self._remote_allowance is not None and 0 < self._remote_allowance: - allowance = self._remote_allowance - self._remote_allowance = 0 - action = True - return initial_metadata, payload, completion, allowance, bool(action) - - def _operator_process( - self, wrapped_operator, initial_metadata, payload, - completion, allowance): - while True: - advance_outcome = callable_util.call_logging_exceptions( - wrapped_operator, _INGESTION_EXCEPTION_LOG_MESSAGE, - initial_metadata=initial_metadata, payload=payload, - completion=completion, allowance=allowance) - if advance_outcome.exception is None: - if advance_outcome.return_value: - with self._lock: - if self._termination_manager.outcome is not None: - return - if completion is not None: - self._termination_manager.ingestion_complete() - initial_metadata, payload, completion, allowance, moar = ( - self._operator_next()) - if not moar: - self._processing = False - return - else: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify( - base.Outcome.Kind.LOCAL_FAILURE, None, None) - return - else: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None) - return - - def _operator_post_create(self, subscription): - wrapped_operator = _wrap(subscription.operator.advance) - with self._lock: - if self._termination_manager.outcome is not None: - return - self._wrapped_operator = wrapped_operator - self._subscription_creator = None - metadata, payload, completion, allowance, moar = self._operator_next() - if not moar: - self._processing = False - return - self._operator_process( - wrapped_operator, metadata, payload, completion, allowance) - - def _create(self, subscription_creator, group, name): - outcome = callable_util.call_logging_exceptions( - subscription_creator.create, - _CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE, group, name) - if outcome.return_value is None: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None) - elif outcome.return_value.kind is _SubscriptionCreation.Kind.ABANDONED: - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.Kind.LOCAL_FAILURE, None, None) - elif outcome.return_value.kind is _SubscriptionCreation.Kind.REMOTE_ERROR: - code = outcome.return_value.code - details = outcome.return_value.details - with self._lock: - if self._termination_manager.outcome is None: - self._abort_and_notify( - base.Outcome.Kind.REMOTE_FAILURE, code, details) - elif outcome.return_value.subscription.kind is base.Subscription.Kind.FULL: - self._protocol_manager.set_protocol_receiver( - outcome.return_value.subscription.protocol_receiver) - self._operator_post_create(outcome.return_value.subscription) - else: - # TODO(nathaniel): Support other subscriptions. 
- raise ValueError( - 'Unsupported "%s"!' % outcome.return_value.subscription.kind) - - def _store_advance(self, initial_metadata, payload, completion, allowance): - if initial_metadata is not None: - self._pending_initial_metadata = initial_metadata - if payload is not None: - self._pending_payloads.append(payload) - if completion is not None: - self._pending_completion = completion - if allowance is not None and self._remote_allowance is not None: - self._remote_allowance += allowance - - def _operator_advance(self, initial_metadata, payload, completion, allowance): - if self._processing: - self._store_advance(initial_metadata, payload, completion, allowance) - else: - action = False - if initial_metadata is not None: - action = True - if payload is not None: - if 0 < self._local_allowance: - self._local_allowance -= 1 - action = True - else: - self._pending_payloads.append(payload) - payload = False - if completion is not None: - if self._pending_payloads: - self._pending_completion = completion - else: - action = True - if allowance is not None and self._remote_allowance is not None: - allowance += self._remote_allowance - self._remote_allowance = 0 - action = True - if action: - self._pool.submit( - callable_util.with_exceptions_logged( - self._operator_process, _constants.INTERNAL_ERROR_LOG_MESSAGE), - self._wrapped_operator, initial_metadata, payload, completion, - allowance) - - def set_group_and_method(self, group, method): - """See _interfaces.IngestionManager.set_group_and_method for spec.""" - if self._subscription_creator is not None and not self._processing: - self._pool.submit( - callable_util.with_exceptions_logged( - self._create, _constants.INTERNAL_ERROR_LOG_MESSAGE), - self._subscription_creator, group, method) - self._processing = True - - def add_local_allowance(self, allowance): - """See _interfaces.IngestionManager.add_local_allowance for spec.""" - if any((self._subscription_creator, self._wrapped_operator,)): - self._local_allowance += allowance - if not self._processing: - initial_metadata, payload, completion, allowance, moar = ( - self._operator_next()) - if moar: - self._pool.submit( - callable_util.with_exceptions_logged( - self._operator_process, - _constants.INTERNAL_ERROR_LOG_MESSAGE), - initial_metadata, payload, completion, allowance) - - def local_emissions_done(self): - self._remote_allowance = None - - def advance(self, initial_metadata, payload, completion, allowance): - """See _interfaces.IngestionManager.advance for specification.""" - if self._subscription_creator is not None: - self._store_advance(initial_metadata, payload, completion, allowance) - elif self._wrapped_operator is not None: - self._operator_advance(initial_metadata, payload, completion, allowance) - - -def invocation_ingestion_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager, protocol_manager): - """Creates an IngestionManager appropriate for invocation-side use. - - Args: - subscription: A base.Subscription indicating the customer's interest in the - data and results from the service-side of the operation. - lock: The operation-wide lock. - pool: A thread pool in which to execute customer code. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - protocol_manager: The _interfaces.ProtocolManager for the operation. 
- - Returns: - An IngestionManager appropriate for invocation-side use. - """ - return _IngestionManager( - lock, pool, subscription, None, termination_manager, transmission_manager, - expiration_manager, protocol_manager) - - -def service_ingestion_manager( - servicer, operation_context, output_operator, lock, pool, - termination_manager, transmission_manager, expiration_manager, - protocol_manager): - """Creates an IngestionManager appropriate for service-side use. - - The returned IngestionManager will require its set_group_and_name method to be - called before its advance method may be called. - - Args: - servicer: A base.Servicer for servicing the operation. - operation_context: A base.OperationContext for the operation to be passed to - the customer. - output_operator: A base.Operator for the operation to be passed to the - customer and to be called by the customer to accept operation data output - by the customer. - lock: The operation-wide lock. - pool: A thread pool in which to execute customer code. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - protocol_manager: The _interfaces.ProtocolManager for the operation. - - Returns: - An IngestionManager appropriate for service-side use. - """ - subscription_creator = _ServiceSubscriptionCreator( - servicer, operation_context, output_operator) - return _IngestionManager( - lock, pool, None, subscription_creator, termination_manager, - transmission_manager, expiration_manager, protocol_manager) diff --git a/src/python/grpcio/grpc/framework/core/_interfaces.py b/src/python/grpcio/grpc/framework/core/_interfaces.py deleted file mode 100644 index 63ac82f80e..0000000000 --- a/src/python/grpcio/grpc/framework/core/_interfaces.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
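The _SubscriptionCreation class removed above pairs a namedtuple with a nested Kind enum so that a single return value can carry either a subscription, a remote error, or an abandonment marker. A generic sketch of that sum-type idiom, using illustrative names that do not appear in the diff:

    import collections
    import enum

    class _Creation(
        collections.namedtuple('_Creation', ('kind', 'value', 'code', 'details'))):
      """Outcome of a creation attempt, discriminated by its kind field."""

      @enum.unique
      class Kind(enum.Enum):
        VALUE = 'value'
        REMOTE_ERROR = 'remote error'
        ABANDONED = 'abandoned'

    creation = _Creation(_Creation.Kind.VALUE, 'a subscription', None, None)
    if creation.kind is _Creation.Kind.VALUE:
      print(creation.value)
    elif creation.kind is _Creation.Kind.REMOTE_ERROR:
      print(creation.code, creation.details)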
- -"""Package-internal interfaces.""" - -import abc - -import six - -from grpc.framework.interfaces.base import base - - -class TerminationManager(six.with_metaclass(abc.ABCMeta)): - """An object responsible for handling the termination of an operation. - - Attributes: - outcome: None if the operation is active or a base.Outcome value if it has - terminated. - """ - - @abc.abstractmethod - def add_callback(self, callback): - """Registers a callback to be called on operation termination. - - If the operation has already terminated the callback will not be called. - - Args: - callback: A callable that will be passed a base.Outcome value. - - Returns: - None if the operation has not yet terminated and the passed callback will - be called when it does, or a base.Outcome value describing the - operation termination if the operation has terminated and the callback - will not be called as a result of this method call. - """ - raise NotImplementedError() - - @abc.abstractmethod - def emission_complete(self): - """Indicates that emissions from customer code have completed.""" - raise NotImplementedError() - - @abc.abstractmethod - def transmission_complete(self): - """Indicates that transmissions to the remote end are complete. - - Returns: - True if the operation has terminated or False if the operation remains - ongoing. - """ - raise NotImplementedError() - - @abc.abstractmethod - def reception_complete(self, code, details): - """Indicates that reception from the other side is complete. - - Args: - code: An application-specific code value. - details: An application-specific details value. - """ - raise NotImplementedError() - - @abc.abstractmethod - def ingestion_complete(self): - """Indicates that customer code ingestion of received values is complete.""" - raise NotImplementedError() - - @abc.abstractmethod - def expire(self): - """Indicates that the operation must abort because it has taken too long.""" - raise NotImplementedError() - - @abc.abstractmethod - def abort(self, outcome): - """Indicates that the operation must abort for the indicated reason. - - Args: - outcome: A base.Outcome indicating operation abortion. - """ - raise NotImplementedError() - - -class TransmissionManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for transmitting to the other end of an operation.""" - - @abc.abstractmethod - def kick_off( - self, group, method, timeout, protocol_options, initial_metadata, - payload, completion, allowance): - """Transmits the values associated with operation invocation.""" - raise NotImplementedError() - - @abc.abstractmethod - def advance(self, initial_metadata, payload, completion, allowance): - """Accepts values for transmission to the other end of the operation. - - Args: - initial_metadata: An initial metadata value to be transmitted to the other - side of the operation. May only ever be non-None once. - payload: A payload value. - completion: A base.Completion value. May only ever be non-None in the last - transmission to be made to the other side. - allowance: A positive integer communicating the number of additional - payloads allowed to be transmitted from the other side to this side of - the operation, or None if no additional allowance is being granted in - this call. - """ - raise NotImplementedError() - - @abc.abstractmethod - def timeout(self, timeout): - """Accepts for transmission to the other side a new timeout value. - - Args: - timeout: A positive float used as the new timeout value for the operation - to be transmitted to the other side. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def allowance(self, allowance): - """Indicates to this manager that the remote customer is allowing payloads. - - Args: - allowance: A positive integer indicating the number of additional payloads - the remote customer is allowing to be transmitted from this side of the - operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def remote_complete(self): - """Indicates to this manager that data from the remote side is complete.""" - raise NotImplementedError() - - @abc.abstractmethod - def abort(self, outcome): - """Indicates that the operation has aborted. - - Args: - outcome: A base.Outcome for the operation. If None, indicates that the - operation abortion should not be communicated to the other side of the - operation. - """ - raise NotImplementedError() - - -class ExpirationManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for aborting the operation if it runs out of time.""" - - @abc.abstractmethod - def change_timeout(self, timeout): - """Changes the timeout allotted for the operation. - - Operation duration is always measure from the beginning of the operation; - calling this method changes the operation's allotted time to timeout total - seconds, not timeout seconds from the time of this method call. - - Args: - timeout: A length of time in seconds to allow for the operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deadline(self): - """Returns the time until which the operation is allowed to run. - - Returns: - The time (seconds since the epoch) at which the operation will expire. - """ - raise NotImplementedError() - - @abc.abstractmethod - def terminate(self): - """Indicates to this manager that the operation has terminated.""" - raise NotImplementedError() - - -class ProtocolManager(six.with_metaclass(abc.ABCMeta)): - """A manager of protocol-specific values passing through an operation.""" - - @abc.abstractmethod - def set_protocol_receiver(self, protocol_receiver): - """Registers the customer object that will receive protocol objects. - - Args: - protocol_receiver: A base.ProtocolReceiver to which protocol objects for - the operation should be passed. - """ - raise NotImplementedError() - - @abc.abstractmethod - def accept_protocol_context(self, protocol_context): - """Accepts the protocol context object for the operation. - - Args: - protocol_context: An object designated for use as the protocol context - of the operation, with further semantics implementation-determined. - """ - raise NotImplementedError() - - -class EmissionManager(six.with_metaclass(abc.ABCMeta, base.Operator)): - """A manager of values emitted by customer code.""" - - @abc.abstractmethod - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - """Accepts a value emitted by customer code. - - This method should only be called by customer code. - - Args: - initial_metadata: An initial metadata value emitted by the local customer - to be sent to the other side of the operation. - payload: A payload value emitted by the local customer to be sent to the - other side of the operation. - completion: A Completion value emitted by the local customer to be sent to - the other side of the operation. - allowance: A positive integer indicating an additional number of payloads - that the local customer is willing to accept from the other side of the - operation. 
- """ - raise NotImplementedError() - - -class IngestionManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for executing customer code. - - This name of this manager comes from its responsibility to pass successive - values from the other side of the operation into the code of the local - customer. - """ - - @abc.abstractmethod - def set_group_and_method(self, group, method): - """Communicates to this IngestionManager the operation group and method. - - Args: - group: The group identifier of the operation. - method: The method identifier of the operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def add_local_allowance(self, allowance): - """Communicates to this IngestionManager that more payloads may be ingested. - - Args: - allowance: A positive integer indicating an additional number of payloads - that the local customer is willing to ingest. - """ - raise NotImplementedError() - - @abc.abstractmethod - def local_emissions_done(self): - """Indicates to this manager that local emissions are done.""" - raise NotImplementedError() - - @abc.abstractmethod - def advance(self, initial_metadata, payload, completion, allowance): - """Advances the operation by passing values to the local customer.""" - raise NotImplementedError() - - -class ReceptionManager(six.with_metaclass(abc.ABCMeta)): - """A manager responsible for receiving tickets from the other end.""" - - @abc.abstractmethod - def receive_ticket(self, ticket): - """Handle a ticket from the other side of the operation. - - Args: - ticket: A links.Ticket for the operation. - """ - raise NotImplementedError() - - -class Operation(six.with_metaclass(abc.ABCMeta)): - """An ongoing operation. - - Attributes: - context: A base.OperationContext object for the operation. - operator: A base.Operator object for the operation for use by the customer - of the operation. - """ - - @abc.abstractmethod - def handle_ticket(self, ticket): - """Handle a ticket from the other side of the operation. - - Args: - ticket: A links.Ticket from the other side of the operation. - """ - raise NotImplementedError() - - @abc.abstractmethod - def abort(self, outcome_kind): - """Aborts the operation. - - Args: - outcome_kind: A base.Outcome.Kind value indicating operation abortion. - """ - raise NotImplementedError() diff --git a/src/python/grpcio/grpc/framework/core/_operation.py b/src/python/grpcio/grpc/framework/core/_operation.py deleted file mode 100644 index 020c0c9ed9..0000000000 --- a/src/python/grpcio/grpc/framework/core/_operation.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Implementation of operations.""" - -import threading - -from grpc.framework.core import _context -from grpc.framework.core import _emission -from grpc.framework.core import _expiration -from grpc.framework.core import _ingestion -from grpc.framework.core import _interfaces -from grpc.framework.core import _protocol -from grpc.framework.core import _reception -from grpc.framework.core import _termination -from grpc.framework.core import _transmission -from grpc.framework.core import _utilities - - -class _EasyOperation(_interfaces.Operation): - """A trivial implementation of interfaces.Operation.""" - - def __init__( - self, lock, termination_manager, transmission_manager, expiration_manager, - context, operator, reception_manager): - """Constructor. - - Args: - lock: The operation-wide lock. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - context: A base.OperationContext for use by the customer during the - operation. - operator: A base.Operator for use by the customer during the operation. - reception_manager: The _interfaces.ReceptionManager for the operation. - """ - self._lock = lock - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._reception_manager = reception_manager - - self.context = context - self.operator = operator - - def handle_ticket(self, ticket): - with self._lock: - self._reception_manager.receive_ticket(ticket) - - def abort(self, outcome_kind): - with self._lock: - if self._termination_manager.outcome is None: - outcome = _utilities.Outcome(outcome_kind, None, None) - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - -def invocation_operate( - operation_id, group, method, subscription, timeout, protocol_options, - initial_metadata, payload, completion, ticket_sink, termination_action, - pool): - """Constructs objects necessary for front-side operation management. - - Args: - operation_id: An object identifying the operation. - group: The group identifier of the operation. - method: The method identifier of the operation. - subscription: A base.Subscription describing the customer's interest in the - results of the operation. - timeout: A length of time in seconds to allow for the operation. - protocol_options: A transport-specific, application-specific, and/or - protocol-specific value relating to the invocation. May be None. - initial_metadata: An initial metadata value to be sent to the other side of - the operation. 
May be None if the initial metadata will be passed later or - if there will be no initial metadata passed at all. - payload: The first payload value to be transmitted to the other side. May be - None if there is no such value or if the customer chose not to pass it at - operation invocation. - completion: A base.Completion value indicating the end of values passed to - the other side of the operation. - ticket_sink: A callable that accepts links.Tickets and delivers them to the - other side of the operation. - termination_action: A callable that accepts the outcome of the operation as - a base.Outcome value to be called on operation completion. - pool: A thread pool with which to do the work of the operation. - - Returns: - An _interfaces.Operation for the operation. - """ - lock = threading.Lock() - with lock: - termination_manager = _termination.invocation_termination_manager( - termination_action, pool) - transmission_manager = _transmission.TransmissionManager( - operation_id, ticket_sink, lock, pool, termination_manager) - expiration_manager = _expiration.invocation_expiration_manager( - timeout, lock, termination_manager, transmission_manager) - protocol_manager = _protocol.invocation_protocol_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager) - operation_context = _context.OperationContext( - lock, termination_manager, transmission_manager, expiration_manager) - emission_manager = _emission.EmissionManager( - lock, termination_manager, transmission_manager, expiration_manager) - ingestion_manager = _ingestion.invocation_ingestion_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager, protocol_manager) - reception_manager = _reception.ReceptionManager( - termination_manager, transmission_manager, expiration_manager, - protocol_manager, ingestion_manager) - - termination_manager.set_expiration_manager(expiration_manager) - transmission_manager.set_expiration_manager(expiration_manager) - emission_manager.set_ingestion_manager(ingestion_manager) - - transmission_manager.kick_off( - group, method, timeout, protocol_options, initial_metadata, payload, - completion, None) - - return _EasyOperation( - lock, termination_manager, transmission_manager, expiration_manager, - operation_context, emission_manager, reception_manager) - - -def service_operate( - servicer_package, ticket, ticket_sink, termination_action, pool): - """Constructs an Operation for service of an operation. - - Args: - servicer_package: A _utilities.ServicerPackage to be used servicing the - operation. - ticket: The first links.Ticket received for the operation. - ticket_sink: A callable that accepts links.Tickets and delivers them to the - other side of the operation. - termination_action: A callable that accepts the outcome of the operation as - a base.Outcome value to be called on operation completion. - pool: A thread pool with which to do the work of the operation. - - Returns: - An _interfaces.Operation for the operation. 
- """ - lock = threading.Lock() - with lock: - termination_manager = _termination.service_termination_manager( - termination_action, pool) - transmission_manager = _transmission.TransmissionManager( - ticket.operation_id, ticket_sink, lock, pool, termination_manager) - expiration_manager = _expiration.service_expiration_manager( - ticket.timeout, servicer_package.default_timeout, - servicer_package.maximum_timeout, lock, termination_manager, - transmission_manager) - protocol_manager = _protocol.service_protocol_manager( - lock, pool, termination_manager, transmission_manager, - expiration_manager) - operation_context = _context.OperationContext( - lock, termination_manager, transmission_manager, expiration_manager) - emission_manager = _emission.EmissionManager( - lock, termination_manager, transmission_manager, expiration_manager) - ingestion_manager = _ingestion.service_ingestion_manager( - servicer_package.servicer, operation_context, emission_manager, lock, - pool, termination_manager, transmission_manager, expiration_manager, - protocol_manager) - reception_manager = _reception.ReceptionManager( - termination_manager, transmission_manager, expiration_manager, - protocol_manager, ingestion_manager) - - termination_manager.set_expiration_manager(expiration_manager) - transmission_manager.set_expiration_manager(expiration_manager) - emission_manager.set_ingestion_manager(ingestion_manager) - - reception_manager.receive_ticket(ticket) - - return _EasyOperation( - lock, termination_manager, transmission_manager, expiration_manager, - operation_context, emission_manager, reception_manager) diff --git a/src/python/grpcio/grpc/framework/core/_protocol.py b/src/python/grpcio/grpc/framework/core/_protocol.py deleted file mode 100644 index 3177b5e302..0000000000 --- a/src/python/grpcio/grpc/framework/core/_protocol.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""State and behavior for passing protocol objects in an operation.""" - -import collections -import enum - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base - -_EXCEPTION_LOG_MESSAGE = 'Exception delivering protocol object!' - -_LOCAL_FAILURE_OUTCOME = _utilities.Outcome( - base.Outcome.Kind.LOCAL_FAILURE, None, None) - - -class _Awaited( - collections.namedtuple('_Awaited', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_ARRIVED = 'not yet arrived' - ARRIVED = 'arrived' - -_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None) -_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None) - - -class _Transitory( - collections.namedtuple('_Transitory', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_SEEN = 'not yet seen' - PRESENT = 'present' - GONE = 'gone' - -_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None) -_GONE = _Transitory(_Transitory.Kind.GONE, None) - - -class _ProtocolManager(_interfaces.ProtocolManager): - """An implementation of _interfaces.ExpirationManager.""" - - def __init__( - self, protocol_receiver, lock, pool, termination_manager, - transmission_manager, expiration_manager): - """Constructor. - - Args: - protocol_receiver: An _Awaited wrapping of the base.ProtocolReceiver to - which protocol objects should be passed during the operation. May be - of kind _Awaited.Kind.NOT_YET_ARRIVED if the customer's subscription is - not yet known and may be of kind _Awaited.Kind.ARRIVED but with a value - of None if the customer's subscription did not include a - ProtocolReceiver. - lock: The operation-wide lock. - pool: A thread pool. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. 
- """ - self._lock = lock - self._pool = pool - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - - self._protocol_receiver = protocol_receiver - self._context = _NOT_YET_SEEN - - def _abort_and_notify(self, outcome): - if self._termination_manager.outcome is None: - self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) - self._expiration_manager.terminate() - - def _deliver(self, behavior, value): - def deliver(): - delivery_outcome = callable_util.call_logging_exceptions( - behavior, _EXCEPTION_LOG_MESSAGE, value) - if delivery_outcome.kind is callable_util.Outcome.Kind.RAISED: - with self._lock: - self._abort_and_notify(_LOCAL_FAILURE_OUTCOME) - self._pool.submit( - callable_util.with_exceptions_logged( - deliver, _constants.INTERNAL_ERROR_LOG_MESSAGE)) - - def set_protocol_receiver(self, protocol_receiver): - """See _interfaces.ProtocolManager.set_protocol_receiver for spec.""" - self._protocol_receiver = _Awaited(_Awaited.Kind.ARRIVED, protocol_receiver) - if (self._context.kind is _Transitory.Kind.PRESENT and - protocol_receiver is not None): - self._deliver(protocol_receiver.context, self._context.value) - self._context = _GONE - - def accept_protocol_context(self, protocol_context): - """See _interfaces.ProtocolManager.accept_protocol_context for spec.""" - if self._protocol_receiver.kind is _Awaited.Kind.ARRIVED: - if self._protocol_receiver.value is not None: - self._deliver(self._protocol_receiver.value.context, protocol_context) - self._context = _GONE - else: - self._context = _Transitory(_Transitory.Kind.PRESENT, protocol_context) - - -def invocation_protocol_manager( - subscription, lock, pool, termination_manager, transmission_manager, - expiration_manager): - """Creates an _interfaces.ProtocolManager for invocation-side use. - - Args: - subscription: The local customer's subscription to the operation. - lock: The operation-wide lock. - pool: A thread pool. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - """ - if subscription.kind is base.Subscription.Kind.FULL: - awaited_protocol_receiver = _Awaited( - _Awaited.Kind.ARRIVED, subscription.protocol_receiver) - else: - awaited_protocol_receiver = _ARRIVED_AND_NONE - return _ProtocolManager( - awaited_protocol_receiver, lock, pool, termination_manager, - transmission_manager, expiration_manager) - - -def service_protocol_manager( - lock, pool, termination_manager, transmission_manager, expiration_manager): - """Creates an _interfaces.ProtocolManager for service-side use. - - Args: - lock: The operation-wide lock. - pool: A thread pool. - termination_manager: The _interfaces.TerminationManager for the operation. - transmission_manager: The _interfaces.TransmissionManager for the - operation. - expiration_manager: The _interfaces.ExpirationManager for the operation. - """ - return _ProtocolManager( - _NOT_YET_ARRIVED, lock, pool, termination_manager, transmission_manager, - expiration_manager) diff --git a/src/python/grpcio/grpc/framework/core/_reception.py b/src/python/grpcio/grpc/framework/core/_reception.py deleted file mode 100644 index ff81450dee..0000000000 --- a/src/python/grpcio/grpc/framework/core/_reception.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for ticket reception.""" - -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.links import links - -_REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND = { - links.Ticket.Termination.CANCELLATION: base.Outcome.Kind.CANCELLED, - links.Ticket.Termination.EXPIRATION: base.Outcome.Kind.EXPIRED, - links.Ticket.Termination.SHUTDOWN: base.Outcome.Kind.REMOTE_SHUTDOWN, - links.Ticket.Termination.RECEPTION_FAILURE: - base.Outcome.Kind.RECEPTION_FAILURE, - links.Ticket.Termination.TRANSMISSION_FAILURE: - base.Outcome.Kind.TRANSMISSION_FAILURE, - links.Ticket.Termination.LOCAL_FAILURE: base.Outcome.Kind.REMOTE_FAILURE, - links.Ticket.Termination.REMOTE_FAILURE: base.Outcome.Kind.LOCAL_FAILURE, -} - -_RECEPTION_FAILURE_OUTCOME = _utilities.Outcome( - base.Outcome.Kind.RECEPTION_FAILURE, None, None) - - -def _carrying_protocol_context(ticket): - return ticket.protocol is not None and ticket.protocol.kind in ( - links.Protocol.Kind.INVOCATION_CONTEXT, - links.Protocol.Kind.SERVICER_CONTEXT,) - - -class ReceptionManager(_interfaces.ReceptionManager): - """A ReceptionManager based around a _Receiver passed to it.""" - - def __init__( - self, termination_manager, transmission_manager, expiration_manager, - protocol_manager, ingestion_manager): - """Constructor. - - Args: - termination_manager: The operation's _interfaces.TerminationManager. - transmission_manager: The operation's _interfaces.TransmissionManager. - expiration_manager: The operation's _interfaces.ExpirationManager. - protocol_manager: The operation's _interfaces.ProtocolManager. - ingestion_manager: The operation's _interfaces.IngestionManager. 
- """ - self._termination_manager = termination_manager - self._transmission_manager = transmission_manager - self._expiration_manager = expiration_manager - self._protocol_manager = protocol_manager - self._ingestion_manager = ingestion_manager - - self._lowest_unseen_sequence_number = 0 - self._out_of_sequence_tickets = {} - self._aborted = False - - def _abort(self, outcome): - self._aborted = True - if self._termination_manager.outcome is None: - self._termination_manager.abort(outcome) - self._transmission_manager.abort(None) - self._expiration_manager.terminate() - - def _sequence_failure(self, ticket): - """Determines a just-arrived ticket's sequential legitimacy. - - Args: - ticket: A just-arrived ticket. - - Returns: - True if the ticket is sequentially legitimate; False otherwise. - """ - if ticket.sequence_number < self._lowest_unseen_sequence_number: - return True - elif ticket.sequence_number in self._out_of_sequence_tickets: - return True - else: - return False - - def _process_one(self, ticket): - if ticket.sequence_number == 0: - self._ingestion_manager.set_group_and_method(ticket.group, ticket.method) - if _carrying_protocol_context(ticket): - self._protocol_manager.accept_protocol_context(ticket.protocol.value) - else: - self._protocol_manager.accept_protocol_context(None) - if ticket.timeout is not None: - self._expiration_manager.change_timeout(ticket.timeout) - if ticket.termination is None: - completion = None - else: - completion = utilities.completion( - ticket.terminal_metadata, ticket.code, ticket.message) - self._termination_manager.reception_complete(ticket.code, ticket.message) - self._ingestion_manager.advance( - ticket.initial_metadata, ticket.payload, completion, ticket.allowance) - if ticket.allowance is not None: - self._transmission_manager.allowance(ticket.allowance) - - def _process(self, ticket): - """Process those tickets ready to be processed. - - Args: - ticket: A just-arrived ticket the sequence number of which matches this - _ReceptionManager's _lowest_unseen_sequence_number field. - """ - while True: - self._process_one(ticket) - next_ticket = self._out_of_sequence_tickets.pop( - ticket.sequence_number + 1, None) - if next_ticket is None: - self._lowest_unseen_sequence_number = ticket.sequence_number + 1 - return - else: - ticket = next_ticket - - def receive_ticket(self, ticket): - """See _interfaces.ReceptionManager.receive_ticket for specification.""" - if self._aborted: - return - elif self._sequence_failure(ticket): - self._abort(_RECEPTION_FAILURE_OUTCOME) - elif ticket.termination not in (None, links.Ticket.Termination.COMPLETION): - outcome_kind = _REMOTE_TICKET_TERMINATION_TO_LOCAL_OUTCOME_KIND[ - ticket.termination] - self._abort( - _utilities.Outcome(outcome_kind, ticket.code, ticket.message)) - elif ticket.sequence_number == self._lowest_unseen_sequence_number: - self._process(ticket) - else: - self._out_of_sequence_tickets[ticket.sequence_number] = ticket diff --git a/src/python/grpcio/grpc/framework/core/_termination.py b/src/python/grpcio/grpc/framework/core/_termination.py deleted file mode 100644 index fff3a3fc14..0000000000 --- a/src/python/grpcio/grpc/framework/core/_termination.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for operation termination.""" - -import abc - -import six - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base - - -def _invocation_completion_predicate( - unused_emission_complete, unused_transmission_complete, - unused_reception_complete, ingestion_complete): - return ingestion_complete - - -def _service_completion_predicate( - unused_emission_complete, transmission_complete, unused_reception_complete, - ingestion_complete): - return transmission_complete and ingestion_complete - - -class TerminationManager(six.with_metaclass(abc.ABCMeta, _interfaces.TerminationManager)): - """A _interfaces.TransmissionManager on which another manager may be set.""" - - @abc.abstractmethod - def set_expiration_manager(self, expiration_manager): - """Sets the expiration manager with which this manager will interact. - - Args: - expiration_manager: The _interfaces.ExpirationManager associated with the - current operation. - """ - raise NotImplementedError() - - -class _TerminationManager(TerminationManager): - """An implementation of TerminationManager.""" - - def __init__(self, predicate, action, pool): - """Constructor. - - Args: - predicate: One of _invocation_completion_predicate or - _service_completion_predicate to be used to determine when the operation - has completed. - action: A behavior to pass the operation outcome's kind on operation - termination. - pool: A thread pool. - """ - self._predicate = predicate - self._action = action - self._pool = pool - self._expiration_manager = None - - self._callbacks = [] - - self._code = None - self._details = None - self._emission_complete = False - self._transmission_complete = False - self._reception_complete = False - self._ingestion_complete = False - - # The None-ness of outcome is the operation-wide record of whether and how - # the operation has terminated. 
- self.outcome = None - - def set_expiration_manager(self, expiration_manager): - self._expiration_manager = expiration_manager - - def _terminate_internal_only(self, outcome): - """Terminates the operation. - - Args: - outcome: A base.Outcome describing the outcome of the operation. - """ - self.outcome = outcome - callbacks = list(self._callbacks) - self._callbacks = None - - act = callable_util.with_exceptions_logged( - self._action, _constants.INTERNAL_ERROR_LOG_MESSAGE) - - # TODO(issue 3202): Don't call the local application's callbacks if it has - # previously shown a programming defect. - if False and outcome.kind is base.Outcome.Kind.LOCAL_FAILURE: - self._pool.submit(act, base.Outcome.Kind.LOCAL_FAILURE) - else: - def call_callbacks_and_act(callbacks, outcome): - for callback in callbacks: - callback_outcome = callable_util.call_logging_exceptions( - callback, _constants.TERMINATION_CALLBACK_EXCEPTION_LOG_MESSAGE, - outcome) - if callback_outcome.exception is not None: - act_outcome_kind = base.Outcome.Kind.LOCAL_FAILURE - break - else: - act_outcome_kind = outcome.kind - act(act_outcome_kind) - - self._pool.submit( - callable_util.with_exceptions_logged( - call_callbacks_and_act, _constants.INTERNAL_ERROR_LOG_MESSAGE), - callbacks, outcome) - - def _terminate_and_notify(self, outcome): - self._terminate_internal_only(outcome) - self._expiration_manager.terminate() - - def _perhaps_complete(self): - if self._predicate( - self._emission_complete, self._transmission_complete, - self._reception_complete, self._ingestion_complete): - self._terminate_and_notify( - _utilities.Outcome( - base.Outcome.Kind.COMPLETED, self._code, self._details)) - return True - else: - return False - - def is_active(self): - """See _interfaces.TerminationManager.is_active for specification.""" - return self.outcome is None - - def add_callback(self, callback): - """See _interfaces.TerminationManager.add_callback for specification.""" - if self.outcome is None: - self._callbacks.append(callback) - return None - else: - return self.outcome - - def emission_complete(self): - """See superclass method for specification.""" - if self.outcome is None: - self._emission_complete = True - self._perhaps_complete() - - def transmission_complete(self): - """See superclass method for specification.""" - if self.outcome is None: - self._transmission_complete = True - return self._perhaps_complete() - else: - return False - - def reception_complete(self, code, details): - """See superclass method for specification.""" - if self.outcome is None: - self._reception_complete = True - self._code = code - self._details = details - self._perhaps_complete() - - def ingestion_complete(self): - """See superclass method for specification.""" - if self.outcome is None: - self._ingestion_complete = True - self._perhaps_complete() - - def expire(self): - """See _interfaces.TerminationManager.expire for specification.""" - self._terminate_internal_only( - _utilities.Outcome(base.Outcome.Kind.EXPIRED, None, None)) - - def abort(self, outcome): - """See _interfaces.TerminationManager.abort for specification.""" - self._terminate_and_notify(outcome) - - -def invocation_termination_manager(action, pool): - """Creates a TerminationManager appropriate for invocation-side use. - - Args: - action: An action to call on operation termination. - pool: A thread pool in which to execute the passed action and any - termination callbacks that are registered during the operation. - - Returns: - A TerminationManager appropriate for invocation-side use. 
- """ - return _TerminationManager(_invocation_completion_predicate, action, pool) - - -def service_termination_manager(action, pool): - """Creates a TerminationManager appropriate for service-side use. - - Args: - action: An action to call on operation termination. - pool: A thread pool in which to execute the passed action and any - termination callbacks that are registered during the operation. - - Returns: - A TerminationManager appropriate for service-side use. - """ - return _TerminationManager(_service_completion_predicate, action, pool) diff --git a/src/python/grpcio/grpc/framework/core/_transmission.py b/src/python/grpcio/grpc/framework/core/_transmission.py deleted file mode 100644 index 65b12c4160..0000000000 --- a/src/python/grpcio/grpc/framework/core/_transmission.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for ticket transmission during an operation.""" - -import collections -import enum - -from grpc.framework.core import _constants -from grpc.framework.core import _interfaces -from grpc.framework.core import _utilities -from grpc.framework.foundation import callable_util -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.links import links - -_TRANSMISSION_EXCEPTION_LOG_MESSAGE = 'Exception during transmission!' - -_TRANSMISSION_FAILURE_OUTCOME = _utilities.Outcome( - base.Outcome.Kind.TRANSMISSION_FAILURE, None, None) - - -def _explode_completion(completion): - if completion is None: - return None, None, None, None - else: - return ( - completion.terminal_metadata, completion.code, completion.message, - links.Ticket.Termination.COMPLETION) - - -class _Abort( - collections.namedtuple( - '_Abort', ('kind', 'termination', 'code', 'details',))): - """Tracks whether the operation aborted and what is to be done about it. - - Attributes: - kind: A Kind value describing the overall kind of the _Abort. - termination: A links.Ticket.Termination value to be sent to the other side - of the operation. 
Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED. - code: A code value to be sent to the other side of the operation. Only - valid if kind is Kind.ABORTED_NOTIFY_NEEDED. - details: A details value to be sent to the other side of the operation. - Only valid if kind is Kind.ABORTED_NOTIFY_NEEDED. - """ - - @enum.unique - class Kind(enum.Enum): - NOT_ABORTED = 'not aborted' - ABORTED_NOTIFY_NEEDED = 'aborted notify needed' - ABORTED_NO_NOTIFY = 'aborted no notify' - -_NOT_ABORTED = _Abort(_Abort.Kind.NOT_ABORTED, None, None, None) -_ABORTED_NO_NOTIFY = _Abort(_Abort.Kind.ABORTED_NO_NOTIFY, None, None, None) - - -class TransmissionManager(_interfaces.TransmissionManager): - """An _interfaces.TransmissionManager that sends links.Tickets.""" - - def __init__( - self, operation_id, ticket_sink, lock, pool, termination_manager): - """Constructor. - - Args: - operation_id: The operation's ID. - ticket_sink: A callable that accepts tickets and sends them to the other - side of the operation. - lock: The operation-servicing-wide lock object. - pool: A thread pool in which the work of transmitting tickets will be - performed. - termination_manager: The _interfaces.TerminationManager associated with - this operation. - """ - self._lock = lock - self._pool = pool - self._ticket_sink = ticket_sink - self._operation_id = operation_id - self._termination_manager = termination_manager - self._expiration_manager = None - - self._lowest_unused_sequence_number = 0 - self._remote_allowance = 1 - self._remote_complete = False - self._timeout = None - self._local_allowance = 0 - self._initial_metadata = None - self._payloads = [] - self._completion = None - self._abort = _NOT_ABORTED - self._transmitting = False - - def set_expiration_manager(self, expiration_manager): - """Sets the ExpirationManager with which this manager will cooperate.""" - self._expiration_manager = expiration_manager - - def _next_ticket(self): - """Creates the next ticket to be transmitted. - - Returns: - A links.Ticket to be sent to the other side of the operation or None if - there is nothing to be sent at this time. - """ - if self._abort.kind is _Abort.Kind.ABORTED_NO_NOTIFY: - return None - elif self._abort.kind is _Abort.Kind.ABORTED_NOTIFY_NEEDED: - termination = self._abort.termination - code, details = self._abort.code, self._abort.details - self._abort = _ABORTED_NO_NOTIFY - return links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, None, None, None, None, None, code, details, termination, None) - - action = False - # TODO(nathaniel): Support other subscriptions. 
- local_subscription = links.Ticket.Subscription.FULL - timeout = self._timeout - if timeout is not None: - self._timeout = None - action = True - if self._local_allowance <= 0: - allowance = None - else: - allowance = self._local_allowance - self._local_allowance = 0 - action = True - initial_metadata = self._initial_metadata - if initial_metadata is not None: - self._initial_metadata = None - action = True - if not self._payloads or self._remote_allowance <= 0: - payload = None - else: - payload = self._payloads.pop(0) - self._remote_allowance -= 1 - action = True - if self._completion is None or self._payloads: - terminal_metadata, code, message, termination = None, None, None, None - else: - terminal_metadata, code, message, termination = _explode_completion( - self._completion) - self._completion = None - action = True - - if action: - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - local_subscription, timeout, allowance, initial_metadata, payload, - terminal_metadata, code, message, termination, None) - self._lowest_unused_sequence_number += 1 - return ticket - else: - return None - - def _transmit(self, ticket): - """Commences the transmission loop sending tickets. - - Args: - ticket: A links.Ticket to be sent to the other side of the operation. - """ - def transmit(ticket): - while True: - transmission_outcome = callable_util.call_logging_exceptions( - self._ticket_sink, _TRANSMISSION_EXCEPTION_LOG_MESSAGE, ticket) - if transmission_outcome.exception is None: - with self._lock: - if ticket.termination is links.Ticket.Termination.COMPLETION: - self._termination_manager.transmission_complete() - ticket = self._next_ticket() - if ticket is None: - self._transmitting = False - return - else: - with self._lock: - self._abort = _ABORTED_NO_NOTIFY - if self._termination_manager.outcome is None: - self._termination_manager.abort(_TRANSMISSION_FAILURE_OUTCOME) - self._expiration_manager.terminate() - return - - self._pool.submit(callable_util.with_exceptions_logged( - transmit, _constants.INTERNAL_ERROR_LOG_MESSAGE), ticket) - self._transmitting = True - - def kick_off( - self, group, method, timeout, protocol_options, initial_metadata, - payload, completion, allowance): - """See _interfaces.TransmissionManager.kickoff for specification.""" - # TODO(nathaniel): Support other subscriptions. 
- subscription = links.Ticket.Subscription.FULL - terminal_metadata, code, message, termination = _explode_completion( - completion) - self._remote_allowance = 1 if payload is None else 0 - protocol = links.Protocol(links.Protocol.Kind.CALL_OPTION, protocol_options) - ticket = links.Ticket( - self._operation_id, 0, group, method, subscription, timeout, allowance, - initial_metadata, payload, terminal_metadata, code, message, - termination, protocol) - self._lowest_unused_sequence_number = 1 - self._transmit(ticket) - - def advance(self, initial_metadata, payload, completion, allowance): - """See _interfaces.TransmissionManager.advance for specification.""" - if self._abort.kind is not _Abort.Kind.NOT_ABORTED: - return - - effective_initial_metadata = initial_metadata - effective_payload = payload - effective_completion = completion - if allowance is not None and not self._remote_complete: - effective_allowance = allowance - else: - effective_allowance = None - if self._transmitting: - if effective_initial_metadata is not None: - self._initial_metadata = effective_initial_metadata - if effective_payload is not None: - self._payloads.append(effective_payload) - if effective_completion is not None: - self._completion = effective_completion - if effective_allowance is not None: - self._local_allowance += effective_allowance - else: - if effective_payload is not None: - if 0 < self._remote_allowance: - ticket_payload = effective_payload - self._remote_allowance -= 1 - else: - self._payloads.append(effective_payload) - ticket_payload = None - else: - ticket_payload = None - if effective_completion is not None and not self._payloads: - ticket_completion = effective_completion - else: - self._completion = effective_completion - ticket_completion = None - if any( - (effective_initial_metadata, ticket_payload, ticket_completion, - effective_allowance)): - terminal_metadata, code, message, termination = _explode_completion( - completion) - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, None, allowance, effective_initial_metadata, ticket_payload, - terminal_metadata, code, message, termination, None) - self._lowest_unused_sequence_number += 1 - self._transmit(ticket) - - def timeout(self, timeout): - """See _interfaces.TransmissionManager.timeout for specification.""" - if self._abort.kind is not _Abort.Kind.NOT_ABORTED: - return - elif self._transmitting: - self._timeout = timeout - else: - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, timeout, None, None, None, None, None, None, None, None) - self._lowest_unused_sequence_number += 1 - self._transmit(ticket) - - def allowance(self, allowance): - """See _interfaces.TransmissionManager.allowance for specification.""" - if self._abort.kind is not _Abort.Kind.NOT_ABORTED: - return - elif self._transmitting or not self._payloads: - self._remote_allowance += allowance - else: - self._remote_allowance += allowance - 1 - payload = self._payloads.pop(0) - if self._payloads: - completion = None - else: - completion = self._completion - self._completion = None - terminal_metadata, code, message, termination = _explode_completion( - completion) - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, None, - None, None, None, None, payload, terminal_metadata, code, message, - termination, None) - self._lowest_unused_sequence_number += 1 - self._transmit(ticket) - - def remote_complete(self): - """See 
_interfaces.TransmissionManager.remote_complete for specification.""" - self._remote_complete = True - self._local_allowance = 0 - - def abort(self, outcome): - """See _interfaces.TransmissionManager.abort for specification.""" - if self._abort.kind is _Abort.Kind.NOT_ABORTED: - if outcome is None: - self._abort = _ABORTED_NO_NOTIFY - else: - termination = _constants.ABORTION_OUTCOME_TO_TICKET_TERMINATION.get( - outcome.kind) - if termination is None: - self._abort = _ABORTED_NO_NOTIFY - elif self._transmitting: - self._abort = _Abort( - _Abort.Kind.ABORTED_NOTIFY_NEEDED, termination, outcome.code, - outcome.details) - else: - ticket = links.Ticket( - self._operation_id, self._lowest_unused_sequence_number, None, - None, None, None, None, None, None, None, outcome.code, - outcome.details, termination, None) - self._transmit(ticket) - self._abort = _ABORTED_NO_NOTIFY diff --git a/src/python/grpcio/grpc/framework/core/_utilities.py b/src/python/grpcio/grpc/framework/core/_utilities.py deleted file mode 100644 index abedc727e4..0000000000 --- a/src/python/grpcio/grpc/framework/core/_utilities.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Package-internal utilities.""" - -import collections - -from grpc.framework.interfaces.base import base - - -class ServicerPackage( - collections.namedtuple( - 'ServicerPackage', ('servicer', 'default_timeout', 'maximum_timeout'))): - """A trivial bundle class. - - Attributes: - servicer: A base.Servicer. - default_timeout: A float indicating the length of time in seconds to allow - for an operation invoked without a timeout. - maximum_timeout: A float indicating the maximum length of time in seconds to - allow for an operation. 
- """ - - -class Outcome( - base.Outcome, - collections.namedtuple('Outcome', ('kind', 'code', 'details',))): - """A trivial implementation of base.Outcome.""" diff --git a/src/python/grpcio/grpc/framework/core/implementations.py b/src/python/grpcio/grpc/framework/core/implementations.py deleted file mode 100644 index 364a7faed4..0000000000 --- a/src/python/grpcio/grpc/framework/core/implementations.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Entry points into the ticket-exchange-based base layer implementation.""" - -# base and links are referenced from specification in this module. -from grpc.framework.core import _end -from grpc.framework.interfaces.base import base # pylint: disable=unused-import -from grpc.framework.interfaces.links import links # pylint: disable=unused-import - - -def invocation_end_link(): - """Creates a base.End-links.Link suitable for operation invocation. - - Returns: - An object that is both a base.End and a links.Link, that supports operation - invocation, and that translates operation invocation into ticket exchange. - """ - return _end.serviceless_end_link() - - -def service_end_link(servicer, default_timeout, maximum_timeout): - """Creates a base.End-links.Link suitable for operation service. - - Args: - servicer: A base.Servicer for servicing operations. - default_timeout: A length of time in seconds to be used as the default - time alloted for a single operation. - maximum_timeout: A length of time in seconds to be used as the maximum - time alloted for a single operation. - - Returns: - An object that is both a base.End and a links.Link and that services - operations that arrive at it through ticket exchange. 
- """ - return _end.serviceful_end_link(servicer, default_timeout, maximum_timeout) diff --git a/src/python/grpcio/grpc/framework/crust/_calls.py b/src/python/grpcio/grpc/framework/crust/_calls.py deleted file mode 100644 index bff940d747..0000000000 --- a/src/python/grpcio/grpc/framework/crust/_calls.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Utility functions for invoking RPCs.""" - -from grpc.framework.crust import _control -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.face import face - -_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!' 
- -_EMPTY_COMPLETION = utilities.completion(None, None, None) - - -def _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - complete): - rendezvous = _control.Rendezvous(None, None) - subscription = utilities.full_subscription( - rendezvous, _control.protocol_receiver(rendezvous)) - operation_context, operator = end.operate( - group, method, subscription, timeout, protocol_options=protocol_options, - initial_metadata=initial_metadata, payload=payload, - completion=_EMPTY_COMPLETION if complete else None) - rendezvous.set_operator_and_context(operator, operation_context) - outcome = operation_context.add_termination_callback(rendezvous.set_outcome) - if outcome is not None: - rendezvous.set_outcome(outcome) - return rendezvous, operation_context, outcome - - -def _event_return_unary( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool): - if outcome is None: - def in_pool(): - abortion = rendezvous.add_abortion_callback(abortion_callback) - if abortion is None: - try: - receiver.initial_metadata(rendezvous.initial_metadata()) - receiver.response(next(rendezvous)) - receiver.complete( - rendezvous.terminal_metadata(), rendezvous.code(), - rendezvous.details()) - except face.AbortionError: - pass - else: - abortion_callback(abortion) - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def _event_return_stream( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool): - if outcome is None: - def in_pool(): - abortion = rendezvous.add_abortion_callback(abortion_callback) - if abortion is None: - try: - receiver.initial_metadata(rendezvous.initial_metadata()) - for response in rendezvous: - receiver.response(response) - receiver.complete( - rendezvous.terminal_metadata(), rendezvous.code(), - rendezvous.details()) - except face.AbortionError: - pass - else: - abortion_callback(abortion) - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def blocking_unary_unary( - end, group, method, timeout, with_call, protocol_options, initial_metadata, - payload): - """Services in a blocking fashion a unary-unary servicer method.""" - rendezvous, unused_operation_context, unused_outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - if with_call: - return next(rendezvous), rendezvous - else: - return next(rendezvous) - - -def future_unary_unary( - end, group, method, timeout, protocol_options, initial_metadata, payload): - """Services a value-in value-out servicer method by returning a Future.""" - rendezvous, unused_operation_context, unused_outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return rendezvous - - -def inline_unary_stream( - end, group, method, timeout, protocol_options, initial_metadata, payload): - """Services a value-in stream-out servicer method.""" - rendezvous, unused_operation_context, unused_outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return rendezvous - - -def blocking_stream_unary( - end, group, method, timeout, with_call, protocol_options, initial_metadata, - payload_iterator, pool): - """Services in a blocking fashion a stream-in value-out servicer method.""" - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - if outcome is None: - def in_pool(): - for payload in payload_iterator: - 
rendezvous.consume(payload) - rendezvous.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context)) - if with_call: - return next(rendezvous), rendezvous - else: - return next(rendezvous) - else: - if with_call: - return next(rendezvous), rendezvous - else: - return next(rendezvous) - - -def future_stream_unary( - end, group, method, timeout, protocol_options, initial_metadata, - payload_iterator, pool): - """Services a stream-in value-out servicer method by returning a Future.""" - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - if outcome is None: - def in_pool(): - for payload in payload_iterator: - rendezvous.consume(payload) - rendezvous.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def inline_stream_stream( - end, group, method, timeout, protocol_options, initial_metadata, - payload_iterator, pool): - """Services a stream-in stream-out servicer method.""" - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - if outcome is None: - def in_pool(): - for payload in payload_iterator: - rendezvous.consume(payload) - rendezvous.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context)) - return rendezvous - - -def event_unary_unary( - end, group, method, timeout, protocol_options, initial_metadata, payload, - receiver, abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return _event_return_unary( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) - - -def event_unary_stream( - end, group, method, timeout, protocol_options, initial_metadata, payload, - receiver, abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, payload, - True) - return _event_return_stream( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) - - -def event_stream_unary( - end, group, method, timeout, protocol_options, initial_metadata, receiver, - abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - return _event_return_unary( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) - - -def event_stream_stream( - end, group, method, timeout, protocol_options, initial_metadata, receiver, - abortion_callback, pool): - rendezvous, operation_context, outcome = _invoke( - end, group, method, timeout, protocol_options, initial_metadata, None, - False) - return _event_return_stream( - receiver, abortion_callback, rendezvous, operation_context, outcome, pool) diff --git a/src/python/grpcio/grpc/framework/crust/_control.py b/src/python/grpcio/grpc/framework/crust/_control.py deleted file mode 100644 index 9b4167bda0..0000000000 --- a/src/python/grpcio/grpc/framework/crust/_control.py +++ /dev/null @@ -1,584 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior for translating between sync and async control flow.""" - -import collections -import enum -import sys -import threading -import time - -from grpc.framework.foundation import abandonment -from grpc.framework.foundation import callable_util -from grpc.framework.foundation import future -from grpc.framework.foundation import stream -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.face import face - -_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!' -_INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Crust) Internal Error! )-:' - -_CANNOT_SET_INITIAL_METADATA = ( - 'Could not set initial metadata - has it already been set, or has a ' + - 'payload already been sent?') -_CANNOT_SET_TERMINAL_METADATA = ( - 'Could not set terminal metadata - has it already been set, or has RPC ' + - 'completion already been indicated?') -_CANNOT_SET_CODE = ( - 'Could not set code - has it already been set, or has RPC completion ' + - 'already been indicated?') -_CANNOT_SET_DETAILS = ( - 'Could not set details - has it already been set, or has RPC completion ' + - 'already been indicated?') - - -class _DummyOperator(base.Operator): - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - pass - -_DUMMY_OPERATOR = _DummyOperator() - - -class _Awaited( - collections.namedtuple('_Awaited', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_ARRIVED = 'not yet arrived' - ARRIVED = 'arrived' - -_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None) -_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None) - - -class _Transitory( - collections.namedtuple('_Transitory', ('kind', 'value',))): - - @enum.unique - class Kind(enum.Enum): - NOT_YET_SEEN = 'not yet seen' - PRESENT = 'present' - GONE = 'gone' - -_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None) -_GONE = _Transitory(_Transitory.Kind.GONE, None) - - -class _Termination( - collections.namedtuple( - '_Termination', ('terminated', 'abortion', 'abortion_error',))): - """Values indicating whether and how an RPC has terminated. - - Attributes: - terminated: A boolean indicating whether or not the RPC has terminated. 
- abortion: A face.Abortion value describing the RPC's abortion or None if the - RPC did not abort. - abortion_error: A face.AbortionError describing the RPC's abortion or None - if the RPC did not abort. - """ - -_NOT_TERMINATED = _Termination(False, None, None) - -_OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR = { - base.Outcome.Kind.COMPLETED: lambda *unused_args: _Termination( - True, None, None), - base.Outcome.Kind.CANCELLED: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.CANCELLED, *args), - face.CancellationError(*args)), - base.Outcome.Kind.EXPIRED: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.EXPIRED, *args), - face.ExpirationError(*args)), - base.Outcome.Kind.LOCAL_SHUTDOWN: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.LOCAL_SHUTDOWN, *args), - face.LocalShutdownError(*args)), - base.Outcome.Kind.REMOTE_SHUTDOWN: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.REMOTE_SHUTDOWN, *args), - face.RemoteShutdownError(*args)), - base.Outcome.Kind.RECEPTION_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args), - face.NetworkError(*args)), - base.Outcome.Kind.TRANSMISSION_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args), - face.NetworkError(*args)), - base.Outcome.Kind.LOCAL_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.LOCAL_FAILURE, *args), - face.LocalError(*args)), - base.Outcome.Kind.REMOTE_FAILURE: lambda *args: _Termination( - True, face.Abortion(face.Abortion.Kind.REMOTE_FAILURE, *args), - face.RemoteError(*args)), -} - - -def _wait_once_until(condition, until): - if until is None: - condition.wait() - else: - remaining = until - time.time() - if remaining < 0: - raise future.TimeoutError() - else: - condition.wait(timeout=remaining) - - -def _done_callback_as_operation_termination_callback( - done_callback, rendezvous): - def operation_termination_callback(operation_outcome): - rendezvous.set_outcome(operation_outcome) - done_callback(rendezvous) - return operation_termination_callback - - -def _abortion_callback_as_operation_termination_callback( - rpc_abortion_callback, rendezvous_set_outcome): - def operation_termination_callback(operation_outcome): - termination = rendezvous_set_outcome(operation_outcome) - if termination.abortion is not None: - rpc_abortion_callback(termination.abortion) - return operation_termination_callback - - -class Rendezvous(base.Operator, future.Future, stream.Consumer, face.Call): - """A rendez-vous for the threads of an operation. - - Instances of this object present iterator and stream.Consumer interfaces for - interacting with application code and present a base.Operator interface and - maintain a base.Operator internally for interacting with base interface code. 
- """ - - def __init__(self, operator, operation_context): - self._condition = threading.Condition() - - self._operator = operator - self._operation_context = operation_context - - self._protocol_context = _NOT_YET_ARRIVED - - self._up_initial_metadata = _NOT_YET_ARRIVED - self._up_payload = None - self._up_allowance = 1 - self._up_completion = _NOT_YET_ARRIVED - self._down_initial_metadata = _NOT_YET_SEEN - self._down_payload = None - self._down_allowance = 1 - self._down_terminal_metadata = _NOT_YET_SEEN - self._down_code = _NOT_YET_SEEN - self._down_details = _NOT_YET_SEEN - - self._termination = _NOT_TERMINATED - - # The semantics of future.Future.cancel and future.Future.cancelled are - # slightly wonky, so they have to be tracked separately from the rest of the - # result of the RPC. This field tracks whether cancellation was requested - # prior to termination of the RPC - self._cancelled = False - - def set_operator_and_context(self, operator, operation_context): - with self._condition: - self._operator = operator - self._operation_context = operation_context - - def _down_completion(self): - if self._down_terminal_metadata.kind is _Transitory.Kind.NOT_YET_SEEN: - terminal_metadata = None - self._down_terminal_metadata = _GONE - elif self._down_terminal_metadata.kind is _Transitory.Kind.PRESENT: - terminal_metadata = self._down_terminal_metadata.value - self._down_terminal_metadata = _GONE - else: - terminal_metadata = None - if self._down_code.kind is _Transitory.Kind.NOT_YET_SEEN: - code = None - self._down_code = _GONE - elif self._down_code.kind is _Transitory.Kind.PRESENT: - code = self._down_code.value - self._down_code = _GONE - else: - code = None - if self._down_details.kind is _Transitory.Kind.NOT_YET_SEEN: - details = None - self._down_details = _GONE - elif self._down_details.kind is _Transitory.Kind.PRESENT: - details = self._down_details.value - self._down_details = _GONE - else: - details = None - return utilities.completion(terminal_metadata, code, details) - - def _set_outcome(self, outcome): - if not self._termination.terminated: - self._operator = _DUMMY_OPERATOR - self._operation_context = None - self._down_initial_metadata = _GONE - self._down_payload = None - self._down_terminal_metadata = _GONE - self._down_code = _GONE - self._down_details = _GONE - - if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED: - initial_metadata = None - else: - initial_metadata = self._up_initial_metadata.value - if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED: - terminal_metadata = None - else: - terminal_metadata = self._up_completion.value.terminal_metadata - if outcome.kind is base.Outcome.Kind.COMPLETED: - code = self._up_completion.value.code - details = self._up_completion.value.message - else: - code = outcome.code - details = outcome.details - self._termination = _OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR[ - outcome.kind](initial_metadata, terminal_metadata, code, details) - - self._condition.notify_all() - - return self._termination - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - with self._condition: - if initial_metadata is not None: - self._up_initial_metadata = _Awaited( - _Awaited.Kind.ARRIVED, initial_metadata) - if payload is not None: - if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED: - self._up_initial_metadata = _ARRIVED_AND_NONE - self._up_payload = payload - self._up_allowance -= 1 - if completion is not None: - if self._up_initial_metadata.kind is 
_Awaited.Kind.NOT_YET_ARRIVED: - self._up_initial_metadata = _ARRIVED_AND_NONE - self._up_completion = _Awaited( - _Awaited.Kind.ARRIVED, completion) - if allowance is not None: - if self._down_payload is not None: - self._operator.advance(payload=self._down_payload) - self._down_payload = None - self._down_allowance += allowance - 1 - else: - self._down_allowance += allowance - self._condition.notify_all() - - def cancel(self): - with self._condition: - if self._operation_context is not None: - self._operation_context.cancel() - self._cancelled = True - return False - - def cancelled(self): - with self._condition: - return self._cancelled - - def running(self): - with self._condition: - return not self._termination.terminated - - def done(self): - with self._condition: - return self._termination.terminated - - def result(self, timeout=None): - until = None if timeout is None else time.time() + timeout - with self._condition: - while True: - if self._termination.terminated: - if self._termination.abortion is None: - return self._up_payload - elif self._termination.abortion.kind is face.Abortion.Kind.CANCELLED: - raise future.CancelledError() - else: - raise self._termination.abortion_error # pylint: disable=raising-bad-type - else: - _wait_once_until(self._condition, until) - - def exception(self, timeout=None): - until = None if timeout is None else time.time() + timeout - with self._condition: - while True: - if self._termination.terminated: - if self._termination.abortion is None: - return None - else: - return self._termination.abortion_error - else: - _wait_once_until(self._condition, until) - - def traceback(self, timeout=None): - until = None if timeout is None else time.time() + timeout - with self._condition: - while True: - if self._termination.terminated: - if self._termination.abortion_error is None: - return None - else: - abortion_error = self._termination.abortion_error - break - else: - _wait_once_until(self._condition, until) - - try: - raise abortion_error - except face.AbortionError: - return sys.exc_info()[2] - - def add_done_callback(self, fn): - with self._condition: - if self._operation_context is not None: - outcome = self._operation_context.add_termination_callback( - _done_callback_as_operation_termination_callback(fn, self)) - if outcome is None: - return - else: - self._set_outcome(outcome) - - fn(self) - - def consume(self, value): - with self._condition: - while True: - if self._termination.terminated: - return - elif 0 < self._down_allowance: - self._operator.advance(payload=value) - self._down_allowance -= 1 - return - else: - self._condition.wait() - - def terminate(self): - with self._condition: - if self._termination.terminated: - return - elif self._down_code.kind is _Transitory.Kind.GONE: - # Conform to specified idempotence of terminate by ignoring extra calls. 
- return - else: - completion = self._down_completion() - self._operator.advance(completion=completion) - - def consume_and_terminate(self, value): - with self._condition: - while True: - if self._termination.terminated: - return - elif 0 < self._down_allowance: - completion = self._down_completion() - self._operator.advance(payload=value, completion=completion) - return - else: - self._condition.wait() - - def __iter__(self): - return self - - def __next__(self): - return self.next() - - def next(self): - with self._condition: - while True: - if self._termination.abortion_error is not None: - raise self._termination.abortion_error - elif self._up_payload is not None: - payload = self._up_payload - self._up_payload = None - if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED: - self._operator.advance(allowance=1) - return payload - elif self._up_completion.kind is _Awaited.Kind.ARRIVED: - raise StopIteration() - else: - self._condition.wait() - - def is_active(self): - with self._condition: - return not self._termination.terminated - - def time_remaining(self): - if self._operation_context is None: - return 0 - else: - return self._operation_context.time_remaining() - - def add_abortion_callback(self, abortion_callback): - with self._condition: - if self._operation_context is None: - return self._termination.abortion - else: - outcome = self._operation_context.add_termination_callback( - _abortion_callback_as_operation_termination_callback( - abortion_callback, self.set_outcome)) - if outcome is not None: - return self._set_outcome(outcome).abortion - else: - return self._termination.abortion - - def protocol_context(self): - with self._condition: - while True: - if self._protocol_context.kind is _Awaited.Kind.ARRIVED: - return self._protocol_context.value - elif self._termination.abortion_error is not None: - raise self._termination.abortion_error - else: - self._condition.wait() - - def initial_metadata(self): - with self._condition: - while True: - if self._up_initial_metadata.kind is _Awaited.Kind.ARRIVED: - return self._up_initial_metadata.value - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def terminal_metadata(self): - with self._condition: - while True: - if self._up_completion.kind is _Awaited.Kind.ARRIVED: - return self._up_completion.value.terminal_metadata - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def code(self): - with self._condition: - while True: - if self._up_completion.kind is _Awaited.Kind.ARRIVED: - return self._up_completion.value.code - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def details(self): - with self._condition: - while True: - if self._up_completion.kind is _Awaited.Kind.ARRIVED: - return self._up_completion.value.message - elif self._termination.terminated: - return None - else: - self._condition.wait() - - def set_initial_metadata(self, initial_metadata): - with self._condition: - if (self._down_initial_metadata.kind is not - _Transitory.Kind.NOT_YET_SEEN): - raise ValueError(_CANNOT_SET_INITIAL_METADATA) - else: - self._down_initial_metadata = _GONE - self._operator.advance(initial_metadata=initial_metadata) - - def set_terminal_metadata(self, terminal_metadata): - with self._condition: - if (self._down_terminal_metadata.kind is not - _Transitory.Kind.NOT_YET_SEEN): - raise ValueError(_CANNOT_SET_TERMINAL_METADATA) - else: - self._down_terminal_metadata = _Transitory( - _Transitory.Kind.PRESENT, terminal_metadata) - - 
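The result, exception, and traceback methods of the Rendezvous shown above all share one blocking pattern: a caller-supplied relative timeout is converted once into an absolute deadline, and the thread then repeatedly waits on the condition variable for only the time remaining, raising a timeout error once the deadline has passed. The following standalone sketch illustrates that pattern outside of the framework; the names Expired, wait_once_until, and BlockingCell are illustrative stand-ins, not gRPC APIs:

    import threading
    import time


    class Expired(Exception):
        """Illustrative stand-in for future.TimeoutError."""


    def wait_once_until(condition, until):
        # Wait on the condition either indefinitely (no deadline) or only for
        # the time remaining before the absolute deadline, raising on expiry.
        if until is None:
            condition.wait()
        else:
            remaining = until - time.time()
            if remaining < 0:
                raise Expired()
            condition.wait(timeout=remaining)


    class BlockingCell(object):
        """Holds one value that a producer thread sets and consumers wait for."""

        def __init__(self):
            self._condition = threading.Condition()
            self._set = False
            self._value = None

        def set(self, value):
            with self._condition:
                self._value = value
                self._set = True
                self._condition.notify_all()

        def get(self, timeout=None):
            # Same shape as Rendezvous.result: turn the relative timeout into
            # an absolute deadline once, then loop until the value arrives or
            # the deadline passes (the loop also absorbs spurious wakeups).
            until = None if timeout is None else time.time() + timeout
            with self._condition:
                while not self._set:
                    wait_once_until(self._condition, until)
                return self._value


    if __name__ == '__main__':
        cell = BlockingCell()
        threading.Timer(0.1, cell.set, args=('done',)).start()
        print(cell.get(timeout=1.0))  # prints 'done'

Using one absolute deadline means that however many times the loop re-checks state or is woken spuriously, the overall wait still honors the single timeout the caller asked for.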
def set_code(self, code): - with self._condition: - if self._down_code.kind is not _Transitory.Kind.NOT_YET_SEEN: - raise ValueError(_CANNOT_SET_CODE) - else: - self._down_code = _Transitory(_Transitory.Kind.PRESENT, code) - - def set_details(self, details): - with self._condition: - if self._down_details.kind is not _Transitory.Kind.NOT_YET_SEEN: - raise ValueError(_CANNOT_SET_DETAILS) - else: - self._down_details = _Transitory(_Transitory.Kind.PRESENT, details) - - def set_protocol_context(self, protocol_context): - with self._condition: - self._protocol_context = _Awaited( - _Awaited.Kind.ARRIVED, protocol_context) - self._condition.notify_all() - - def set_outcome(self, outcome): - with self._condition: - return self._set_outcome(outcome) - - -class _ProtocolReceiver(base.ProtocolReceiver): - - def __init__(self, rendezvous): - self._rendezvous = rendezvous - - def context(self, protocol_context): - self._rendezvous.set_protocol_context(protocol_context) - - -def protocol_receiver(rendezvous): - return _ProtocolReceiver(rendezvous) - - -def pool_wrap(behavior, operation_context): - """Wraps an operation-related behavior so that it may be called in a pool. - - Args: - behavior: A callable related to carrying out an operation. - operation_context: A base_interfaces.OperationContext for the operation. - - Returns: - A callable that when called carries out the behavior of the given callable - and handles whatever exceptions it raises appropriately. - """ - def translation(*args): - try: - behavior(*args) - except ( - abandonment.Abandoned, - face.CancellationError, - face.ExpirationError, - face.LocalShutdownError, - face.RemoteShutdownError, - face.NetworkError, - face.RemoteError, - ) as e: - if operation_context.outcome() is None: - operation_context.fail(e) - except Exception as e: - operation_context.fail(e) - return callable_util.with_exceptions_logged( - translation, _INTERNAL_ERROR_LOG_MESSAGE) diff --git a/src/python/grpcio/grpc/framework/crust/_service.py b/src/python/grpcio/grpc/framework/crust/_service.py deleted file mode 100644 index 9903415c09..0000000000 --- a/src/python/grpcio/grpc/framework/crust/_service.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Behaviors for servicing RPCs.""" - -from grpc.framework.crust import _control -from grpc.framework.foundation import abandonment -from grpc.framework.interfaces.base import utilities -from grpc.framework.interfaces.face import face - - -class _ServicerContext(face.ServicerContext): - - def __init__(self, rendezvous): - self._rendezvous = rendezvous - - def is_active(self): - return self._rendezvous.is_active() - - def time_remaining(self): - return self._rendezvous.time_remaining() - - def add_abortion_callback(self, abortion_callback): - return self._rendezvous.add_abortion_callback(abortion_callback) - - def cancel(self): - self._rendezvous.cancel() - - def protocol_context(self): - return self._rendezvous.protocol_context() - - def invocation_metadata(self): - return self._rendezvous.initial_metadata() - - def initial_metadata(self, initial_metadata): - self._rendezvous.set_initial_metadata(initial_metadata) - - def terminal_metadata(self, terminal_metadata): - self._rendezvous.set_terminal_metadata(terminal_metadata) - - def code(self, code): - self._rendezvous.set_code(code) - - def details(self, details): - self._rendezvous.set_details(details) - - -def _adaptation(pool, in_pool): - def adaptation(operator, operation_context): - rendezvous = _control.Rendezvous(operator, operation_context) - subscription = utilities.full_subscription( - rendezvous, _control.protocol_receiver(rendezvous)) - outcome = operation_context.add_termination_callback(rendezvous.set_outcome) - if outcome is None: - pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous) - return subscription - else: - raise abandonment.Abandoned() - return adaptation - - -def adapt_inline_unary_unary(method, pool): - def in_pool(rendezvous): - request = next(rendezvous) - response = method(request, _ServicerContext(rendezvous)) - rendezvous.consume_and_terminate(response) - return _adaptation(pool, in_pool) - - -def adapt_inline_unary_stream(method, pool): - def in_pool(rendezvous): - request = next(rendezvous) - response_iterator = method(request, _ServicerContext(rendezvous)) - for response in response_iterator: - rendezvous.consume(response) - rendezvous.terminate() - return _adaptation(pool, in_pool) - - -def adapt_inline_stream_unary(method, pool): - def in_pool(rendezvous): - response = method(rendezvous, _ServicerContext(rendezvous)) - rendezvous.consume_and_terminate(response) - return _adaptation(pool, in_pool) - - -def adapt_inline_stream_stream(method, pool): - def in_pool(rendezvous): - response_iterator = method(rendezvous, _ServicerContext(rendezvous)) - for response in response_iterator: - rendezvous.consume(response) - rendezvous.terminate() - return _adaptation(pool, in_pool) - - -def adapt_event_unary_unary(method, pool): - def in_pool(rendezvous): - request = next(rendezvous) - method( - request, rendezvous.consume_and_terminate, _ServicerContext(rendezvous)) - return _adaptation(pool, in_pool) - - -def adapt_event_unary_stream(method, pool): - def 
in_pool(rendezvous): - request = next(rendezvous) - method(request, rendezvous, _ServicerContext(rendezvous)) - return _adaptation(pool, in_pool) - - -def adapt_event_stream_unary(method, pool): - def in_pool(rendezvous): - request_consumer = method( - rendezvous.consume_and_terminate, _ServicerContext(rendezvous)) - for request in rendezvous: - request_consumer.consume(request) - request_consumer.terminate() - return _adaptation(pool, in_pool) - - -def adapt_event_stream_stream(method, pool): - def in_pool(rendezvous): - request_consumer = method(rendezvous, _ServicerContext(rendezvous)) - for request in rendezvous: - request_consumer.consume(request) - request_consumer.terminate() - return _adaptation(pool, in_pool) - - -def adapt_multi_method(multi_method, pool): - def adaptation(group, method, operator, operation_context): - rendezvous = _control.Rendezvous(operator, operation_context) - subscription = utilities.full_subscription( - rendezvous, _control.protocol_receiver(rendezvous)) - outcome = operation_context.add_termination_callback(rendezvous.set_outcome) - if outcome is None: - def in_pool(): - request_consumer = multi_method.service( - group, method, rendezvous, _ServicerContext(rendezvous)) - for request in rendezvous: - request_consumer.consume(request) - request_consumer.terminate() - pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous) - return subscription - else: - raise abandonment.Abandoned() - return adaptation diff --git a/src/python/grpcio/grpc/framework/crust/implementations.py b/src/python/grpcio/grpc/framework/crust/implementations.py deleted file mode 100644 index 2d3ab733b6..0000000000 --- a/src/python/grpcio/grpc/framework/crust/implementations.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Entry points into the Crust layer of RPC Framework.""" - -import six - -from grpc.framework.common import cardinality -from grpc.framework.common import style -from grpc.framework.crust import _calls -from grpc.framework.crust import _service -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.face import face - - -class _BaseServicer(base.Servicer): - - def __init__(self, adapted_methods, adapted_multi_method): - self._adapted_methods = adapted_methods - self._adapted_multi_method = adapted_multi_method - - def service(self, group, method, context, output_operator): - adapted_method = self._adapted_methods.get((group, method), None) - if adapted_method is not None: - return adapted_method(output_operator, context) - elif self._adapted_multi_method is not None: - try: - return self._adapted_multi_method( - group, method, output_operator, context) - except face.NoSuchMethodError: - raise base.NoSuchMethodError(None, None) - else: - raise base.NoSuchMethodError(None, None) - - -class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__( - self, request, timeout, metadata=None, with_call=False, - protocol_options=None): - return _calls.blocking_unary_unary( - self._end, self._group, self._method, timeout, with_call, - protocol_options, metadata, request) - - def future(self, request, timeout, metadata=None, protocol_options=None): - return _calls.future_unary_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request) - - def event( - self, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request, receiver, abortion_callback, self._pool) - - -class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__(self, request, timeout, metadata=None, protocol_options=None): - return _calls.inline_unary_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request) - - def event( - self, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request, receiver, abortion_callback, self._pool) - - -class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__( - self, request_iterator, timeout, metadata=None, - with_call=False, protocol_options=None): - return _calls.blocking_stream_unary( - self._end, self._group, self._method, timeout, with_call, - protocol_options, metadata, request_iterator, self._pool) - - def future( - self, request_iterator, timeout, metadata=None, protocol_options=None): - return _calls.future_stream_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request_iterator, self._pool) - - def event( - self, receiver, abortion_callback, timeout, metadata=None, - protocol_options=None): - return _calls.event_stream_unary( - self._end, self._group, self._method, timeout, protocol_options, - metadata, 
receiver, abortion_callback, self._pool) - - -class _StreamStreamMultiCallable(face.StreamStreamMultiCallable): - - def __init__(self, end, group, method, pool): - self._end = end - self._group = group - self._method = method - self._pool = pool - - def __call__( - self, request_iterator, timeout, metadata=None, protocol_options=None): - return _calls.inline_stream_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, request_iterator, self._pool) - - def event( - self, receiver, abortion_callback, timeout, metadata=None, - protocol_options=None): - return _calls.event_stream_stream( - self._end, self._group, self._method, timeout, protocol_options, - metadata, receiver, abortion_callback, self._pool) - - -class _GenericStub(face.GenericStub): - """An face.GenericStub implementation.""" - - def __init__(self, end, pool): - self._end = end - self._pool = pool - - def blocking_unary_unary( - self, group, method, request, timeout, metadata=None, - with_call=None, protocol_options=None): - return _calls.blocking_unary_unary( - self._end, group, method, timeout, with_call, protocol_options, - metadata, request) - - def future_unary_unary( - self, group, method, request, timeout, metadata=None, - protocol_options=None): - return _calls.future_unary_unary( - self._end, group, method, timeout, protocol_options, metadata, request) - - def inline_unary_stream( - self, group, method, request, timeout, metadata=None, - protocol_options=None): - return _calls.inline_unary_stream( - self._end, group, method, timeout, protocol_options, metadata, request) - - def blocking_stream_unary( - self, group, method, request_iterator, timeout, metadata=None, - with_call=None, protocol_options=None): - return _calls.blocking_stream_unary( - self._end, group, method, timeout, with_call, protocol_options, - metadata, request_iterator, self._pool) - - def future_stream_unary( - self, group, method, request_iterator, timeout, metadata=None, - protocol_options=None): - return _calls.future_stream_unary( - self._end, group, method, timeout, protocol_options, metadata, - request_iterator, self._pool) - - def inline_stream_stream( - self, group, method, request_iterator, timeout, metadata=None, - protocol_options=None): - return _calls.inline_stream_stream( - self._end, group, method, timeout, protocol_options, metadata, - request_iterator, self._pool) - - def event_unary_unary( - self, group, method, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_unary( - self._end, group, method, timeout, protocol_options, metadata, request, - receiver, abortion_callback, self._pool) - - def event_unary_stream( - self, group, method, request, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_unary_stream( - self._end, group, method, timeout, protocol_options, metadata, request, - receiver, abortion_callback, self._pool) - - def event_stream_unary( - self, group, method, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_stream_unary( - self._end, group, method, timeout, protocol_options, metadata, receiver, - abortion_callback, self._pool) - - def event_stream_stream( - self, group, method, receiver, abortion_callback, timeout, - metadata=None, protocol_options=None): - return _calls.event_stream_stream( - self._end, group, method, timeout, protocol_options, metadata, receiver, - abortion_callback, self._pool) - - def unary_unary(self, 
group, method): - return _UnaryUnaryMultiCallable(self._end, group, method, self._pool) - - def unary_stream(self, group, method): - return _UnaryStreamMultiCallable(self._end, group, method, self._pool) - - def stream_unary(self, group, method): - return _StreamUnaryMultiCallable(self._end, group, method, self._pool) - - def stream_stream(self, group, method): - return _StreamStreamMultiCallable(self._end, group, method, self._pool) - - -class _DynamicStub(face.DynamicStub): - """An face.DynamicStub implementation.""" - - def __init__(self, end, group, cardinalities, pool): - self._end = end - self._group = group - self._cardinalities = cardinalities - self._pool = pool - - def __getattr__(self, attr): - method_cardinality = self._cardinalities.get(attr) - if method_cardinality is cardinality.Cardinality.UNARY_UNARY: - return _UnaryUnaryMultiCallable(self._end, self._group, attr, self._pool) - elif method_cardinality is cardinality.Cardinality.UNARY_STREAM: - return _UnaryStreamMultiCallable(self._end, self._group, attr, self._pool) - elif method_cardinality is cardinality.Cardinality.STREAM_UNARY: - return _StreamUnaryMultiCallable(self._end, self._group, attr, self._pool) - elif method_cardinality is cardinality.Cardinality.STREAM_STREAM: - return _StreamStreamMultiCallable( - self._end, self._group, attr, self._pool) - else: - raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr) - - -def _adapt_method_implementations(method_implementations, pool): - adapted_implementations = {} - for name, method_implementation in six.iteritems(method_implementations): - if method_implementation.style is style.Service.INLINE: - if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: - adapted_implementations[name] = _service.adapt_inline_unary_unary( - method_implementation.unary_unary_inline, pool) - elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: - adapted_implementations[name] = _service.adapt_inline_unary_stream( - method_implementation.unary_stream_inline, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: - adapted_implementations[name] = _service.adapt_inline_stream_unary( - method_implementation.stream_unary_inline, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM: - adapted_implementations[name] = _service.adapt_inline_stream_stream( - method_implementation.stream_stream_inline, pool) - elif method_implementation.style is style.Service.EVENT: - if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: - adapted_implementations[name] = _service.adapt_event_unary_unary( - method_implementation.unary_unary_event, pool) - elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: - adapted_implementations[name] = _service.adapt_event_unary_stream( - method_implementation.unary_stream_event, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: - adapted_implementations[name] = _service.adapt_event_stream_unary( - method_implementation.stream_unary_event, pool) - elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM: - adapted_implementations[name] = _service.adapt_event_stream_stream( - method_implementation.stream_stream_event, pool) - return adapted_implementations - - -def servicer(method_implementations, multi_method_implementation, pool): - """Creates a base.Servicer. 
- - It is guaranteed that any passed face.MultiMethodImplementation will - only be called to service an RPC if there is no - face.MethodImplementation for the RPC method in the passed - method_implementations dictionary. - - Args: - method_implementations: A dictionary from RPC method name to - face.MethodImplementation object to be used to service the named - RPC method. - multi_method_implementation: An face.MultiMethodImplementation to be - used to service any RPCs not serviced by the - face.MethodImplementations given in the method_implementations - dictionary, or None. - pool: A thread pool. - - Returns: - A base.Servicer that services RPCs via the given implementations. - """ - adapted_implementations = _adapt_method_implementations( - method_implementations, pool) - if multi_method_implementation is None: - adapted_multi_method_implementation = None - else: - adapted_multi_method_implementation = _service.adapt_multi_method( - multi_method_implementation, pool) - return _BaseServicer( - adapted_implementations, adapted_multi_method_implementation) - - -def generic_stub(end, pool): - """Creates an face.GenericStub. - - Args: - end: A base.End. - pool: A futures.ThreadPoolExecutor. - - Returns: - A face.GenericStub that performs RPCs via the given base.End. - """ - return _GenericStub(end, pool) - - -def dynamic_stub(end, group, cardinalities, pool): - """Creates an face.DynamicStub. - - Args: - end: A base.End. - group: The group identifier for all RPCs to be made with the created - face.DynamicStub. - cardinalities: A dict from method identifier to cardinality.Cardinality - value identifying the cardinality of every RPC method to be supported by - the created face.DynamicStub. - pool: A futures.ThreadPoolExecutor. - - Returns: - A face.DynamicStub that performs RPCs via the given base.End. - """ - return _DynamicStub(end, group, cardinalities, pool) diff --git a/src/python/grpcio/grpc/framework/foundation/_timer_future.py b/src/python/grpcio/grpc/framework/foundation/_timer_future.py deleted file mode 100644 index 2c9996aa9d..0000000000 --- a/src/python/grpcio/grpc/framework/foundation/_timer_future.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Affords a Future implementation based on Python's threading.Timer.""" - -import sys -import threading -import time - -from grpc.framework.foundation import future - - -class TimerFuture(future.Future): - """A Future implementation based around Timer objects.""" - - def __init__(self, compute_time, computation): - """Constructor. - - Args: - compute_time: The time after which to begin this future's computation. - computation: The computation to be performed within this Future. - """ - self._lock = threading.Lock() - self._compute_time = compute_time - self._computation = computation - self._timer = None - self._computing = False - self._computed = False - self._cancelled = False - self._result = None - self._exception = None - self._traceback = None - self._waiting = [] - - def _compute(self): - """Performs the computation embedded in this Future. - - Or doesn't, if the time to perform it has not yet arrived. - """ - with self._lock: - time_remaining = self._compute_time - time.time() - if 0 < time_remaining: - self._timer = threading.Timer(time_remaining, self._compute) - self._timer.start() - return - else: - self._computing = True - - try: - return_value = self._computation() - exception = None - traceback = None - except Exception as e: # pylint: disable=broad-except - return_value = None - exception = e - traceback = sys.exc_info()[2] - - with self._lock: - self._computing = False - self._computed = True - self._return_value = return_value - self._exception = exception - self._traceback = traceback - waiting = self._waiting - - for callback in waiting: - callback(self) - - def start(self): - """Starts this Future. - - This must be called exactly once, immediately after construction. 
- """ - with self._lock: - self._timer = threading.Timer( - self._compute_time - time.time(), self._compute) - self._timer.start() - - def cancel(self): - """See future.Future.cancel for specification.""" - with self._lock: - if self._computing or self._computed: - return False - elif self._cancelled: - return True - else: - self._timer.cancel() - self._cancelled = True - waiting = self._waiting - - for callback in waiting: - try: - callback(self) - except Exception: # pylint: disable=broad-except - pass - - return True - - def cancelled(self): - """See future.Future.cancelled for specification.""" - with self._lock: - return self._cancelled - - def running(self): - """See future.Future.running for specification.""" - with self._lock: - return not self._computed and not self._cancelled - - def done(self): - """See future.Future.done for specification.""" - with self._lock: - return self._computed or self._cancelled - - def result(self, timeout=None): - """See future.Future.result for specification.""" - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - if self._exception is None: - return self._return_value - else: - raise self._exception # pylint: disable=raising-bad-type - - condition = threading.Condition() - def notify_condition(unused_future): - with condition: - condition.notify() - self._waiting.append(notify_condition) - - with condition: - condition.wait(timeout=timeout) - - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - if self._exception is None: - return self._return_value - else: - raise self._exception # pylint: disable=raising-bad-type - else: - raise future.TimeoutError() - - def exception(self, timeout=None): - """See future.Future.exception for specification.""" - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._exception - - condition = threading.Condition() - def notify_condition(unused_future): - with condition: - condition.notify() - self._waiting.append(notify_condition) - - with condition: - condition.wait(timeout=timeout) - - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._exception - else: - raise future.TimeoutError() - - def traceback(self, timeout=None): - """See future.Future.traceback for specification.""" - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._traceback - - condition = threading.Condition() - def notify_condition(unused_future): - with condition: - condition.notify() - self._waiting.append(notify_condition) - - with condition: - condition.wait(timeout=timeout) - - with self._lock: - if self._cancelled: - raise future.CancelledError() - elif self._computed: - return self._traceback - else: - raise future.TimeoutError() - - def add_done_callback(self, fn): - """See future.Future.add_done_callback for specification.""" - with self._lock: - if not self._computed and not self._cancelled: - self._waiting.append(fn) - return - - fn(self) diff --git a/src/python/grpcio/grpc/framework/foundation/later.py b/src/python/grpcio/grpc/framework/foundation/later.py deleted file mode 100644 index 1d1e065041..0000000000 --- a/src/python/grpcio/grpc/framework/foundation/later.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Enables scheduling execution at a later time.""" - -import time - -from grpc.framework.foundation import _timer_future - - -def later(delay, computation): - """Schedules later execution of a callable. - - Args: - delay: Any numeric value. Represents the minimum length of time in seconds - to allow to pass before beginning the computation. No guarantees are made - about the maximum length of time that will pass. - computation: A callable that accepts no arguments. - - Returns: - A Future representing the scheduled computation. - """ - timer_future = _timer_future.TimerFuture(time.time() + delay, computation) - timer_future.start() - return timer_future diff --git a/src/python/grpcio/grpc/framework/foundation/relay.py b/src/python/grpcio/grpc/framework/foundation/relay.py deleted file mode 100644 index 20f41b2738..0000000000 --- a/src/python/grpcio/grpc/framework/foundation/relay.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Implementations of in-order work deference.""" - -import abc -import enum -import threading - -from grpc.framework.foundation import activated -from grpc.framework.foundation import logging_pool - -_NULL_BEHAVIOR = lambda unused_value: None - - -class Relay(object): - """Performs work submitted to it in another thread. - - Performs work in the order in which work was submitted to it; otherwise there - would be no reason to use an implementation of this interface instead of a - thread pool. - """ - - @abc.abstractmethod - def add_value(self, value): - """Adds a value to be passed to the behavior registered with this Relay. - - Args: - value: A value that will be passed to a call made in another thread to the - behavior registered with this Relay. - """ - raise NotImplementedError() - - @abc.abstractmethod - def set_behavior(self, behavior): - """Sets the behavior that this Relay should call when passed values. - - Args: - behavior: The behavior that this Relay should call in another thread when - passed a value, or None to have passed values ignored. - """ - raise NotImplementedError() - - -class _PoolRelay(activated.Activated, Relay): - - @enum.unique - class _State(enum.Enum): - INACTIVE = 'inactive' - IDLE = 'idle' - SPINNING = 'spinning' - - def __init__(self, pool, behavior): - self._condition = threading.Condition() - self._pool = pool - self._own_pool = pool is None - self._state = _PoolRelay._State.INACTIVE - self._activated = False - self._spinning = False - self._values = [] - self._behavior = _NULL_BEHAVIOR if behavior is None else behavior - - def _spin(self, behavior, value): - while True: - behavior(value) - with self._condition: - if self._values: - value = self._values.pop(0) - behavior = self._behavior - else: - self._state = _PoolRelay._State.IDLE - self._condition.notify_all() - break - - def add_value(self, value): - with self._condition: - if self._state is _PoolRelay._State.INACTIVE: - raise ValueError('add_value not valid on inactive Relay!') - elif self._state is _PoolRelay._State.IDLE: - self._pool.submit(self._spin, self._behavior, value) - self._state = _PoolRelay._State.SPINNING - else: - self._values.append(value) - - def set_behavior(self, behavior): - with self._condition: - self._behavior = _NULL_BEHAVIOR if behavior is None else behavior - - def _start(self): - with self._condition: - self._state = _PoolRelay._State.IDLE - if self._own_pool: - self._pool = logging_pool.pool(1) - return self - - def _stop(self): - with self._condition: - while self._state is _PoolRelay._State.SPINNING: - self._condition.wait() - if self._own_pool: - self._pool.shutdown(wait=True) - self._state = _PoolRelay._State.INACTIVE - - def __enter__(self): - return self._start() - - def __exit__(self, exc_type, exc_val, exc_tb): - self._stop() - return False - - def start(self): - return self._start() - - def stop(self): - self._stop() - - -def relay(behavior): - """Creates a Relay. 
- - Args: - behavior: The behavior to be called by the created Relay, or None to have - passed values dropped until a different behavior is given to the returned - Relay later. - - Returns: - An object that is both an activated.Activated and a Relay. The object is - only valid for use as a Relay when activated. - """ - return _PoolRelay(None, behavior) - - -def pool_relay(pool, behavior): - """Creates a Relay that uses a given thread pool. - - This object will make use of at most one thread in the given pool. - - Args: - pool: A futures.ThreadPoolExecutor for use by the created Relay. - behavior: The behavior to be called by the created Relay, or None to have - passed values dropped until a different behavior is given to the returned - Relay later. - - Returns: - An object that is both an activated.Activated and a Relay. The object is - only valid for use as a Relay when activated. - """ - return _PoolRelay(pool, behavior) diff --git a/src/python/grpcio/grpc/framework/interfaces/links/links.py b/src/python/grpcio/grpc/framework/interfaces/links/links.py deleted file mode 100644 index 9631b19078..0000000000 --- a/src/python/grpcio/grpc/framework/interfaces/links/links.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""The low-level ticket-exchanging-links interface of RPC Framework.""" - -import abc -import collections -import enum - -import six - - -class Protocol(collections.namedtuple('Protocol', ('kind', 'value',))): - """A sum type for handles to a system that transmits tickets. - - Attributes: - kind: A Kind value identifying the kind of value being passed. - value: The value being passed between the high-level application and the - system affording ticket transport. 
- """ - - @enum.unique - class Kind(enum.Enum): - CALL_OPTION = 'call option' - SERVICER_CONTEXT = 'servicer context' - INVOCATION_CONTEXT = 'invocation context' - - -class Ticket( - collections.namedtuple( - 'Ticket', - ('operation_id', 'sequence_number', 'group', 'method', 'subscription', - 'timeout', 'allowance', 'initial_metadata', 'payload', - 'terminal_metadata', 'code', 'message', 'termination', 'protocol',))): - """A sum type for all values sent from a front to a back. - - Attributes: - operation_id: A unique-with-respect-to-equality hashable object identifying - a particular operation. - sequence_number: A zero-indexed integer sequence number identifying the - ticket's place in the stream of tickets sent in one direction for the - particular operation. - group: The group to which the method of the operation belongs. Must be - present in the first ticket from invocation side to service side. Ignored - for all other tickets exchanged during the operation. - method: The name of an operation. Must be present in the first ticket from - invocation side to service side. Ignored for all other tickets exchanged - during the operation. - subscription: A Subscription value describing the interest one side has in - receiving information from the other side. Must be present in the first - ticket from either side. Ignored for all other tickets exchanged during - the operation. - timeout: A nonzero length of time (measured from the beginning of the - operation) to allow for the entire operation. Must be present in the first - ticket from invocation side to service side. Optional for all other - tickets exchanged during the operation. Receipt of a value from the other - side of the operation indicates the value in use by that side. Setting a - value on a later ticket allows either side to request time extensions (or - even time reductions!) on in-progress operations. - allowance: A positive integer granting permission for a number of payloads - to be transmitted to the communicating side of the operation, or None if - no additional allowance is being granted with this ticket. - initial_metadata: An optional metadata value communicated from one side to - the other at the beginning of the operation. May be non-None in at most - one ticket from each side. Any non-None value must appear no later than - the first payload value. - payload: A customer payload object. May be None. - terminal_metadata: A metadata value comminicated from one side to the other - at the end of the operation. May be non-None in the same ticket as - the code and message, but must be None for all earlier tickets. - code: A value communicated at operation completion. May be None. - message: A value communicated at operation completion. May be None. - termination: A Termination value describing the end of the operation, or - None if the operation has not yet terminated. If set, no further tickets - may be sent in the same direction. - protocol: A Protocol value or None, with further semantics being a matter - between high-level application and underlying ticket transport. 
- """ - - @enum.unique - class Subscription(enum.Enum): - """Identifies the level of subscription of a side of an operation.""" - - NONE = 'none' - TERMINATION = 'termination' - FULL = 'full' - - @enum.unique - class Termination(enum.Enum): - """Identifies the termination of an operation.""" - - COMPLETION = 'completion' - CANCELLATION = 'cancellation' - EXPIRATION = 'expiration' - SHUTDOWN = 'shutdown' - RECEPTION_FAILURE = 'reception failure' - TRANSMISSION_FAILURE = 'transmission failure' - LOCAL_FAILURE = 'local failure' - REMOTE_FAILURE = 'remote failure' - - -class Link(six.with_metaclass(abc.ABCMeta)): - """Accepts and emits tickets.""" - - @abc.abstractmethod - def accept_ticket(self, ticket): - """Accept a Ticket. - - Args: - ticket: Any Ticket. - """ - raise NotImplementedError() - - @abc.abstractmethod - def join_link(self, link): - """Mates this object with a peer with which it will exchange tickets.""" - raise NotImplementedError() diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index b668455b72..83722ba69b 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -45,7 +45,6 @@ CORE_SOURCE_FILES = [ 'src/core/lib/support/env_windows.c', 'src/core/lib/support/histogram.c', 'src/core/lib/support/host_port.c', - 'src/core/lib/support/load_file.c', 'src/core/lib/support/log.c', 'src/core/lib/support/log_android.c', 'src/core/lib/support/log_linux.c', @@ -84,7 +83,7 @@ CORE_SOURCE_FILES = [ 'src/core/lib/channel/connected_channel.c', 'src/core/lib/channel/http_client_filter.c', 'src/core/lib/channel/http_server_filter.c', - 'src/core/lib/compression/compression_algorithm.c', + 'src/core/lib/compression/compression.c', 'src/core/lib/compression/message_compress.c', 'src/core/lib/debug/trace.c', 'src/core/lib/http/format_request.c', @@ -94,6 +93,8 @@ CORE_SOURCE_FILES = [ 'src/core/lib/iomgr/endpoint.c', 'src/core/lib/iomgr/endpoint_pair_posix.c', 'src/core/lib/iomgr/endpoint_pair_windows.c', + 'src/core/lib/iomgr/error.c', + 'src/core/lib/iomgr/ev_epoll_linux.c', 'src/core/lib/iomgr/ev_poll_and_epoll_posix.c', 'src/core/lib/iomgr/ev_poll_posix.c', 'src/core/lib/iomgr/ev_posix.c', @@ -103,6 +104,9 @@ CORE_SOURCE_FILES = [ 'src/core/lib/iomgr/iomgr.c', 'src/core/lib/iomgr/iomgr_posix.c', 'src/core/lib/iomgr/iomgr_windows.c', + 'src/core/lib/iomgr/load_file.c', + 'src/core/lib/iomgr/network_status_tracker.c', + 'src/core/lib/iomgr/polling_entity.c', 'src/core/lib/iomgr/pollset_set_windows.c', 'src/core/lib/iomgr/pollset_windows.c', 'src/core/lib/iomgr/resolve_address_posix.c', @@ -160,6 +164,7 @@ CORE_SOURCE_FILES = [ 'src/core/lib/transport/transport.c', 'src/core/lib/transport/transport_op_string.c', 'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c', + 'src/core/ext/transport/chttp2/transport/bin_decoder.c', 'src/core/ext/transport/chttp2/transport/bin_encoder.c', 'src/core/ext/transport/chttp2/transport/chttp2_plugin.c', 'src/core/ext/transport/chttp2/transport/chttp2_transport.c', @@ -203,6 +208,7 @@ CORE_SOURCE_FILES = [ 'src/core/lib/security/transport/secure_endpoint.c', 'src/core/lib/security/transport/security_connector.c', 'src/core/lib/security/transport/server_auth_filter.c', + 'src/core/lib/security/transport/tsi_error.c', 'src/core/lib/security/util/b64.c', 'src/core/lib/security/util/json_util.c', 'src/core/lib/surface/init_secure.c', @@ -230,7 +236,9 @@ CORE_SOURCE_FILES = [ 'src/core/ext/client_config/subchannel_index.c', 
'src/core/ext/client_config/uri_parser.c', 'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c', + 'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c', 'src/core/ext/transport/chttp2/client/insecure/channel_create.c', + 'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c', 'src/core/ext/lb_policy/grpclb/load_balancer_api.c', 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c', 'third_party/nanopb/pb_common.c', diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py index 0c13104d9d..ea38526a28 100644 --- a/src/python/grpcio/grpc_version.py +++ b/src/python/grpcio/grpc_version.py @@ -29,4 +29,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!! -VERSION='0.15.0.dev0' +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py index 33244eb388..7730374df0 100644 --- a/src/python/grpcio/support.py +++ b/src/python/grpcio/support.py @@ -50,7 +50,6 @@ Could not find <Python.h>. This could mean the following: (check your environment variables or try re-installing?) * You're on Windows and your Python installation was somehow corrupted (check your environment variables or try re-installing?) - * Note: Windows users should look into installing `vcpython27`. """ C_CHECKS = { diff --git a/src/python/grpcio/tests/tests.json b/src/python/grpcio/tests/tests.json deleted file mode 100644 index 8dc47bf69d..0000000000 --- a/src/python/grpcio/tests/tests.json +++ /dev/null @@ -1,62 +0,0 @@ -[ - "_auth_test.AccessTokenCallCredentialsTest", - "_auth_test.GoogleCallCredentialsTest", - "_base_interface_test.AsyncEasyTest", - "_base_interface_test.AsyncPeasyTest", - "_base_interface_test.SyncEasyTest", - "_base_interface_test.SyncPeasyTest", - "_beta_features_test.BetaFeaturesTest", - "_beta_features_test.ContextManagementAndLifecycleTest", - "_cancel_many_calls_test.CancelManyCallsTest", - "_channel_connectivity_test.ChannelConnectivityTest", - "_channel_ready_future_test.ChannelReadyFutureTest", - "_channel_test.ChannelTest", - "_connectivity_channel_test.ChannelConnectivityTest", - "_core_over_links_base_interface_test.AsyncEasyTest", - "_core_over_links_base_interface_test.AsyncPeasyTest", - "_core_over_links_base_interface_test.SyncEasyTest", - "_core_over_links_base_interface_test.SyncPeasyTest", - "_crust_over_core_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", - "_crust_over_core_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest", - "_crust_over_core_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest", - "_crust_over_core_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest", - "_crust_over_core_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest", - "_crust_over_core_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest", - "_crust_over_core_over_links_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", - "_crust_over_core_over_links_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest", - "_crust_over_core_over_links_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest", - "_crust_over_core_over_links_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest", - "_crust_over_core_over_links_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest", - 
"_crust_over_core_over_links_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest", - "_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", - "_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest", - "_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest", - "_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest", - "_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest", - "_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest", - "_health_servicer_test.HealthServicerTest", - "_implementations_test.CallCredentialsTest", - "_implementations_test.ChannelCredentialsTest", - "_insecure_interop_test.InsecureInteropTest", - "_intermediary_low_test.CancellationTest", - "_intermediary_low_test.EchoTest", - "_intermediary_low_test.ExpirationTest", - "_intermediary_low_test.LonelyClientTest", - "_later_test.LaterTest", - "_logging_pool_test.LoggingPoolTest", - "_lonely_invocation_link_test.LonelyInvocationLinkTest", - "_low_test.HangingServerShutdown", - "_low_test.InsecureServerInsecureClient", - "_not_found_test.NotFoundTest", - "_read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest", - "_rpc_test.RPCTest", - "_sanity_test.Sanity", - "_secure_interop_test.SecureInteropTest", - "_transmission_test.RoundTripTest", - "_transmission_test.TransmissionTest", - "_utilities_test.ChannelConnectivityTest", - "beta_python_plugin_test.PythonPluginTest", - "cygrpc_test.InsecureServerInsecureClient", - "cygrpc_test.SecureServerSecureClient", - "cygrpc_test.TypeSmokeTest" -] diff --git a/src/python/grpcio/tests/unit/_adapter/.gitignore b/src/python/grpcio/tests/unit/_adapter/.gitignore deleted file mode 100644 index a6f96cd6db..0000000000 --- a/src/python/grpcio/tests/unit/_adapter/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.a -*.so -*.dll -*.pyc -*.pyd diff --git a/src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py b/src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py deleted file mode 100644 index 22d4b019c7..0000000000 --- a/src/python/grpcio/tests/unit/_adapter/_intermediary_low_test.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Tests for the old '_low'.""" - -import threading -import time -import unittest - -import six -from six.moves import queue - -from grpc._adapter import _intermediary_low as _low - -_STREAM_LENGTH = 300 -_TIMEOUT = 5 -_AFTER_DELAY = 2 -_FUTURE = time.time() + 60 * 60 * 24 -_BYTE_SEQUENCE = b'\abcdefghijklmnopqrstuvwxyz0123456789' * 200 -_BYTE_SEQUENCE_SEQUENCE = tuple( - bytes(bytearray((row + column) % 256 for column in range(row))) - for row in range(_STREAM_LENGTH)) - - -class LonelyClientTest(unittest.TestCase): - - def testLonelyClient(self): - host = 'nosuchhostexists' - port = 54321 - method = 'test method' - deadline = time.time() + _TIMEOUT - after_deadline = deadline + _AFTER_DELAY - metadata_tag = object() - finish_tag = object() - - completion_queue = _low.CompletionQueue() - channel = _low.Channel('%s:%d' % (host, port), None) - client_call = _low.Call(channel, completion_queue, method, host, deadline) - - client_call.invoke(completion_queue, metadata_tag, finish_tag) - first_event = completion_queue.get(after_deadline) - self.assertIsNotNone(first_event) - second_event = completion_queue.get(after_deadline) - self.assertIsNotNone(second_event) - kinds = [event.kind for event in (first_event, second_event)] - six.assertCountEqual(self, - (_low.Event.Kind.METADATA_ACCEPTED, _low.Event.Kind.FINISH), - kinds) - - self.assertIsNone(completion_queue.get(after_deadline)) - - completion_queue.stop() - stop_event = completion_queue.get(_FUTURE) - self.assertEqual(_low.Event.Kind.STOP, stop_event.kind) - - del client_call - del channel - del completion_queue - - -def _drive_completion_queue(completion_queue, event_queue): - while True: - event = completion_queue.get(_FUTURE) - if event.kind is _low.Event.Kind.STOP: - break - event_queue.put(event) - - -class EchoTest(unittest.TestCase): - - def setUp(self): - self.host = 'localhost' - - self.server_completion_queue = _low.CompletionQueue() - self.server = _low.Server(self.server_completion_queue) - port = self.server.add_http2_addr('[::]:0') - self.server.start() - self.server_events = queue.Queue() - self.server_completion_queue_thread = threading.Thread( - target=_drive_completion_queue, - args=(self.server_completion_queue, self.server_events)) - self.server_completion_queue_thread.start() - - self.client_completion_queue = _low.CompletionQueue() - self.channel = _low.Channel('%s:%d' % (self.host, port), None) - self.client_events = queue.Queue() - self.client_completion_queue_thread = threading.Thread( - target=_drive_completion_queue, - args=(self.client_completion_queue, self.client_events)) - self.client_completion_queue_thread.start() - - def tearDown(self): - self.server.stop() - self.server.cancel_all_calls() - self.server_completion_queue.stop() - self.client_completion_queue.stop() - self.server_completion_queue_thread.join() - self.client_completion_queue_thread.join() - del self.server - - def _perform_echo_test(self, test_data): - method = 'test method' - details = 'test details' - 
server_leading_metadata_key = 'my_server_leading_key' - server_leading_metadata_value = 'my_server_leading_value' - server_trailing_metadata_key = 'my_server_trailing_key' - server_trailing_metadata_value = 'my_server_trailing_value' - client_metadata_key = 'my_client_key' - client_metadata_value = 'my_client_value' - server_leading_binary_metadata_key = 'my_server_leading_key-bin' - server_leading_binary_metadata_value = b'\0'*2047 - server_trailing_binary_metadata_key = 'my_server_trailing_key-bin' - server_trailing_binary_metadata_value = b'\0'*2047 - client_binary_metadata_key = 'my_client_key-bin' - client_binary_metadata_value = b'\0'*2047 - deadline = _FUTURE - metadata_tag = object() - finish_tag = object() - write_tag = object() - complete_tag = object() - service_tag = object() - read_tag = object() - status_tag = object() - - server_data = [] - client_data = [] - - client_call = _low.Call(self.channel, self.client_completion_queue, - method, self.host, deadline) - client_call.add_metadata(client_metadata_key, client_metadata_value) - client_call.add_metadata(client_binary_metadata_key, - client_binary_metadata_value) - - client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag) - - self.server.service(service_tag) - service_accepted = self.server_events.get() - self.assertIsNotNone(service_accepted) - self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED) - self.assertIs(service_accepted.tag, service_tag) - self.assertEqual(method, service_accepted.service_acceptance.method) - self.assertEqual(self.host, service_accepted.service_acceptance.host) - self.assertIsNotNone(service_accepted.service_acceptance.call) - metadata = dict(service_accepted.metadata) - self.assertIn(client_metadata_key, metadata) - self.assertEqual(client_metadata_value, metadata[client_metadata_key]) - self.assertIn(client_binary_metadata_key, metadata) - self.assertEqual(client_binary_metadata_value, - metadata[client_binary_metadata_key]) - server_call = service_accepted.service_acceptance.call - server_call.accept(self.server_completion_queue, finish_tag) - server_call.add_metadata(server_leading_metadata_key, - server_leading_metadata_value) - server_call.add_metadata(server_leading_binary_metadata_key, - server_leading_binary_metadata_value) - server_call.premetadata() - - metadata_accepted = self.client_events.get() - self.assertIsNotNone(metadata_accepted) - self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind) - self.assertEqual(metadata_tag, metadata_accepted.tag) - metadata = dict(metadata_accepted.metadata) - self.assertIn(server_leading_metadata_key, metadata) - self.assertEqual(server_leading_metadata_value, - metadata[server_leading_metadata_key]) - self.assertIn(server_leading_binary_metadata_key, metadata) - self.assertEqual(server_leading_binary_metadata_value, - metadata[server_leading_binary_metadata_key]) - - for datum in test_data: - client_call.write(datum, write_tag, _low.WriteFlags.WRITE_NO_COMPRESS) - write_accepted = self.client_events.get() - self.assertIsNotNone(write_accepted) - self.assertIs(write_accepted.kind, _low.Event.Kind.WRITE_ACCEPTED) - self.assertIs(write_accepted.tag, write_tag) - self.assertIs(write_accepted.write_accepted, True) - - server_call.read(read_tag) - read_accepted = self.server_events.get() - self.assertIsNotNone(read_accepted) - self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) - self.assertEqual(read_tag, read_accepted.tag) - self.assertIsNotNone(read_accepted.bytes) - 
server_data.append(read_accepted.bytes) - - server_call.write(read_accepted.bytes, write_tag, 0) - write_accepted = self.server_events.get() - self.assertIsNotNone(write_accepted) - self.assertEqual(_low.Event.Kind.WRITE_ACCEPTED, write_accepted.kind) - self.assertEqual(write_tag, write_accepted.tag) - self.assertTrue(write_accepted.write_accepted) - - client_call.read(read_tag) - read_accepted = self.client_events.get() - self.assertIsNotNone(read_accepted) - self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) - self.assertEqual(read_tag, read_accepted.tag) - self.assertIsNotNone(read_accepted.bytes) - client_data.append(read_accepted.bytes) - - client_call.complete(complete_tag) - complete_accepted = self.client_events.get() - self.assertIsNotNone(complete_accepted) - self.assertIs(complete_accepted.kind, _low.Event.Kind.COMPLETE_ACCEPTED) - self.assertIs(complete_accepted.tag, complete_tag) - self.assertIs(complete_accepted.complete_accepted, True) - - server_call.read(read_tag) - read_accepted = self.server_events.get() - self.assertIsNotNone(read_accepted) - self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) - self.assertEqual(read_tag, read_accepted.tag) - self.assertIsNone(read_accepted.bytes) - - server_call.add_metadata(server_trailing_metadata_key, - server_trailing_metadata_value) - server_call.add_metadata(server_trailing_binary_metadata_key, - server_trailing_binary_metadata_value) - - server_call.status(_low.Status(_low.Code.OK, details), status_tag) - server_terminal_event_one = self.server_events.get() - server_terminal_event_two = self.server_events.get() - if server_terminal_event_one.kind == _low.Event.Kind.COMPLETE_ACCEPTED: - status_accepted = server_terminal_event_one - rpc_accepted = server_terminal_event_two - else: - status_accepted = server_terminal_event_two - rpc_accepted = server_terminal_event_one - self.assertIsNotNone(status_accepted) - self.assertIsNotNone(rpc_accepted) - self.assertEqual(_low.Event.Kind.COMPLETE_ACCEPTED, status_accepted.kind) - self.assertEqual(status_tag, status_accepted.tag) - self.assertTrue(status_accepted.complete_accepted) - self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind) - self.assertEqual(finish_tag, rpc_accepted.tag) - self.assertEqual(_low.Status(_low.Code.OK, ''), rpc_accepted.status) - - client_call.read(read_tag) - client_terminal_event_one = self.client_events.get() - client_terminal_event_two = self.client_events.get() - if client_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED: - read_accepted = client_terminal_event_one - finish_accepted = client_terminal_event_two - else: - read_accepted = client_terminal_event_two - finish_accepted = client_terminal_event_one - self.assertIsNotNone(read_accepted) - self.assertIsNotNone(finish_accepted) - self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) - self.assertEqual(read_tag, read_accepted.tag) - self.assertIsNone(read_accepted.bytes) - self.assertEqual(_low.Event.Kind.FINISH, finish_accepted.kind) - self.assertEqual(finish_tag, finish_accepted.tag) - self.assertEqual(_low.Status(_low.Code.OK, details), finish_accepted.status) - metadata = dict(finish_accepted.metadata) - self.assertIn(server_trailing_metadata_key, metadata) - self.assertEqual(server_trailing_metadata_value, - metadata[server_trailing_metadata_key]) - self.assertIn(server_trailing_binary_metadata_key, metadata) - self.assertEqual(server_trailing_binary_metadata_value, - metadata[server_trailing_binary_metadata_key]) - 
self.assertSetEqual(set(key for key, _ in finish_accepted.metadata), - set((server_trailing_metadata_key, - server_trailing_binary_metadata_key,))) - - self.assertSequenceEqual(test_data, server_data) - self.assertSequenceEqual(test_data, client_data) - - def testNoEcho(self): - self._perform_echo_test(()) - - def testOneByteEcho(self): - self._perform_echo_test([b'\x07']) - - def testOneManyByteEcho(self): - self._perform_echo_test([_BYTE_SEQUENCE]) - - def testManyOneByteEchoes(self): - self._perform_echo_test(_BYTE_SEQUENCE) - - def testManyManyByteEchoes(self): - self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE) - - -class CancellationTest(unittest.TestCase): - - def setUp(self): - self.host = 'localhost' - - self.server_completion_queue = _low.CompletionQueue() - self.server = _low.Server(self.server_completion_queue) - port = self.server.add_http2_addr('[::]:0') - self.server.start() - self.server_events = queue.Queue() - self.server_completion_queue_thread = threading.Thread( - target=_drive_completion_queue, - args=(self.server_completion_queue, self.server_events)) - self.server_completion_queue_thread.start() - - self.client_completion_queue = _low.CompletionQueue() - self.channel = _low.Channel('%s:%d' % (self.host, port), None) - self.client_events = queue.Queue() - self.client_completion_queue_thread = threading.Thread( - target=_drive_completion_queue, - args=(self.client_completion_queue, self.client_events)) - self.client_completion_queue_thread.start() - - def tearDown(self): - self.server.stop() - self.server.cancel_all_calls() - self.server_completion_queue.stop() - self.client_completion_queue.stop() - self.server_completion_queue_thread.join() - self.client_completion_queue_thread.join() - del self.server - - def testCancellation(self): - method = 'test method' - deadline = _FUTURE - metadata_tag = object() - finish_tag = object() - write_tag = object() - service_tag = object() - read_tag = object() - test_data = _BYTE_SEQUENCE_SEQUENCE - - server_data = [] - client_data = [] - - client_call = _low.Call(self.channel, self.client_completion_queue, - method, self.host, deadline) - - client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag) - - self.server.service(service_tag) - service_accepted = self.server_events.get() - server_call = service_accepted.service_acceptance.call - - server_call.accept(self.server_completion_queue, finish_tag) - server_call.premetadata() - - metadata_accepted = self.client_events.get() - self.assertIsNotNone(metadata_accepted) - - for datum in test_data: - client_call.write(datum, write_tag, 0) - write_accepted = self.client_events.get() - - server_call.read(read_tag) - read_accepted = self.server_events.get() - server_data.append(read_accepted.bytes) - - server_call.write(read_accepted.bytes, write_tag, 0) - write_accepted = self.server_events.get() - self.assertIsNotNone(write_accepted) - - client_call.read(read_tag) - read_accepted = self.client_events.get() - client_data.append(read_accepted.bytes) - - client_call.cancel() - # cancel() is idempotent. 
- client_call.cancel() - client_call.cancel() - client_call.cancel() - - server_call.read(read_tag) - - server_terminal_event_one = self.server_events.get() - server_terminal_event_two = self.server_events.get() - if server_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED: - read_accepted = server_terminal_event_one - rpc_accepted = server_terminal_event_two - else: - read_accepted = server_terminal_event_two - rpc_accepted = server_terminal_event_one - self.assertIsNotNone(read_accepted) - self.assertIsNotNone(rpc_accepted) - self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind) - self.assertIsNone(read_accepted.bytes) - self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind) - self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), rpc_accepted.status) - - finish_event = self.client_events.get() - self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind) - self.assertEqual(_low.Status(_low.Code.CANCELLED, 'Cancelled'), - finish_event.status) - - self.assertSequenceEqual(test_data, server_data) - self.assertSequenceEqual(test_data, client_data) - - -class ExpirationTest(unittest.TestCase): - - @unittest.skip('TODO(nathaniel): Expiration test!') - def testExpiration(self): - pass - - -if __name__ == '__main__': - unittest.main(verbosity=2) - diff --git a/src/python/grpcio/tests/unit/_adapter/_low_test.py b/src/python/grpcio/tests/unit/_adapter/_low_test.py deleted file mode 100644 index ec46617996..0000000000 --- a/src/python/grpcio/tests/unit/_adapter/_low_test.py +++ /dev/null @@ -1,319 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import threading -import time -import unittest - -from grpc import _grpcio_metadata -from grpc._adapter import _types -from grpc._adapter import _low -from tests.unit import test_common - - -def wait_for_events(completion_queues, deadline): - """ - Args: - completion_queues: list of completion queues to wait for events on - deadline: absolute deadline to wait until - - Returns: - a sequence of events of length len(completion_queues). 
- """ - - results = [None] * len(completion_queues) - lock = threading.Lock() - threads = [] - def set_ith_result(i, completion_queue): - result = completion_queue.next(deadline) - with lock: - results[i] = result - for i, completion_queue in enumerate(completion_queues): - thread = threading.Thread(target=set_ith_result, - args=[i, completion_queue]) - thread.start() - threads.append(thread) - for thread in threads: - thread.join() - return results - - -class InsecureServerInsecureClient(unittest.TestCase): - - def setUp(self): - self.server_completion_queue = _low.CompletionQueue() - self.server = _low.Server(self.server_completion_queue, []) - self.port = self.server.add_http2_port('[::]:0') - self.client_completion_queue = _low.CompletionQueue() - self.client_channel = _low.Channel('localhost:%d'%self.port, []) - - self.server.start() - - def tearDown(self): - self.server.shutdown() - del self.client_channel - - self.client_completion_queue.shutdown() - while (self.client_completion_queue.next(float('+inf')).type != - _types.EventType.QUEUE_SHUTDOWN): - pass - self.server_completion_queue.shutdown() - while (self.server_completion_queue.next(float('+inf')).type != - _types.EventType.QUEUE_SHUTDOWN): - pass - - del self.client_completion_queue - del self.server_completion_queue - del self.server - - def testEcho(self): - deadline = time.time() + 5 - event_time_tolerance = 2 - deadline_tolerance = 0.25 - client_metadata_ascii_key = 'key' - client_metadata_ascii_value = 'val' - client_metadata_bin_key = 'key-bin' - client_metadata_bin_value = b'\0'*1000 - server_initial_metadata_key = 'init_me_me_me' - server_initial_metadata_value = 'whodawha?' - server_trailing_metadata_key = 'california_is_in_a_drought' - server_trailing_metadata_value = 'zomg it is' - server_status_code = _types.StatusCode.OK - server_status_details = 'our work is never over' - request = 'blarghaflargh' - response = 'his name is robert paulson' - method = 'twinkies' - host = 'hostess' - server_request_tag = object() - request_call_result = self.server.request_call(self.server_completion_queue, - server_request_tag) - - self.assertEqual(_types.CallError.OK, request_call_result) - - client_call_tag = object() - client_call = self.client_channel.create_call( - self.client_completion_queue, method, host, deadline) - client_initial_metadata = [ - (client_metadata_ascii_key, client_metadata_ascii_value), - (client_metadata_bin_key, client_metadata_bin_value) - ] - client_start_batch_result = client_call.start_batch([ - _types.OpArgs.send_initial_metadata(client_initial_metadata), - _types.OpArgs.send_message(request, 0), - _types.OpArgs.send_close_from_client(), - _types.OpArgs.recv_initial_metadata(), - _types.OpArgs.recv_message(), - _types.OpArgs.recv_status_on_client() - ], client_call_tag) - self.assertEqual(_types.CallError.OK, client_start_batch_result) - - client_no_event, request_event, = wait_for_events( - [self.client_completion_queue, self.server_completion_queue], - time.time() + event_time_tolerance) - self.assertEqual(client_no_event, None) - self.assertEqual(_types.EventType.OP_COMPLETE, request_event.type) - self.assertIsInstance(request_event.call, _low.Call) - self.assertIs(server_request_tag, request_event.tag) - self.assertEqual(1, len(request_event.results)) - received_initial_metadata = request_event.results[0].initial_metadata - # Check that our metadata were transmitted - self.assertTrue(test_common.metadata_transmitted(client_initial_metadata, - received_initial_metadata)) - # Check that Python's 
user agent string is a part of the full user agent - # string - received_initial_metadata_dict = dict(received_initial_metadata) - self.assertIn('user-agent', received_initial_metadata_dict) - self.assertIn('Python-gRPC-{}'.format(_grpcio_metadata.__version__), - received_initial_metadata_dict['user-agent']) - self.assertEqual(method, request_event.call_details.method) - self.assertEqual(host, request_event.call_details.host) - self.assertLess(abs(deadline - request_event.call_details.deadline), - deadline_tolerance) - - # Check that the channel is connected, and that both it and the call have - # the proper target and peer; do this after the first flurry of messages to - # avoid the possibility that connection was delayed by the core until the - # first message was sent. - self.assertEqual(_types.ConnectivityState.READY, - self.client_channel.check_connectivity_state(False)) - self.assertIsNotNone(self.client_channel.target()) - self.assertIsNotNone(client_call.peer()) - - server_call_tag = object() - server_call = request_event.call - server_initial_metadata = [ - (server_initial_metadata_key, server_initial_metadata_value) - ] - server_trailing_metadata = [ - (server_trailing_metadata_key, server_trailing_metadata_value) - ] - server_start_batch_result = server_call.start_batch([ - _types.OpArgs.send_initial_metadata(server_initial_metadata), - _types.OpArgs.recv_message(), - _types.OpArgs.send_message(response, 0), - _types.OpArgs.recv_close_on_server(), - _types.OpArgs.send_status_from_server( - server_trailing_metadata, server_status_code, server_status_details) - ], server_call_tag) - self.assertEqual(_types.CallError.OK, server_start_batch_result) - - client_event, server_event, = wait_for_events( - [self.client_completion_queue, self.server_completion_queue], - time.time() + event_time_tolerance) - - self.assertEqual(6, len(client_event.results)) - found_client_op_types = set() - for client_result in client_event.results: - # we expect each op type to be unique - self.assertNotIn(client_result.type, found_client_op_types) - found_client_op_types.add(client_result.type) - if client_result.type == _types.OpType.RECV_INITIAL_METADATA: - self.assertTrue( - test_common.metadata_transmitted(server_initial_metadata, - client_result.initial_metadata)) - elif client_result.type == _types.OpType.RECV_MESSAGE: - self.assertEqual(response, client_result.message) - elif client_result.type == _types.OpType.RECV_STATUS_ON_CLIENT: - self.assertTrue( - test_common.metadata_transmitted(server_trailing_metadata, - client_result.trailing_metadata)) - self.assertEqual(server_status_details, client_result.status.details) - self.assertEqual(server_status_code, client_result.status.code) - self.assertEqual(set([ - _types.OpType.SEND_INITIAL_METADATA, - _types.OpType.SEND_MESSAGE, - _types.OpType.SEND_CLOSE_FROM_CLIENT, - _types.OpType.RECV_INITIAL_METADATA, - _types.OpType.RECV_MESSAGE, - _types.OpType.RECV_STATUS_ON_CLIENT - ]), found_client_op_types) - - self.assertEqual(5, len(server_event.results)) - found_server_op_types = set() - for server_result in server_event.results: - self.assertNotIn(client_result.type, found_server_op_types) - found_server_op_types.add(server_result.type) - if server_result.type == _types.OpType.RECV_MESSAGE: - self.assertEqual(request, server_result.message) - elif server_result.type == _types.OpType.RECV_CLOSE_ON_SERVER: - self.assertFalse(server_result.cancelled) - self.assertEqual(set([ - _types.OpType.SEND_INITIAL_METADATA, - _types.OpType.RECV_MESSAGE, - 
_types.OpType.SEND_MESSAGE, - _types.OpType.RECV_CLOSE_ON_SERVER, - _types.OpType.SEND_STATUS_FROM_SERVER - ]), found_server_op_types) - - del client_call - del server_call - - -class HangingServerShutdown(unittest.TestCase): - - def setUp(self): - self.server_completion_queue = _low.CompletionQueue() - self.server = _low.Server(self.server_completion_queue, []) - self.port = self.server.add_http2_port('[::]:0') - self.client_completion_queue = _low.CompletionQueue() - self.client_channel = _low.Channel('localhost:%d'%self.port, []) - - self.server.start() - - def tearDown(self): - self.server.shutdown() - del self.client_channel - - self.client_completion_queue.shutdown() - self.server_completion_queue.shutdown() - while True: - client_event, server_event = wait_for_events( - [self.client_completion_queue, self.server_completion_queue], - float("+inf")) - if (client_event.type == _types.EventType.QUEUE_SHUTDOWN and - server_event.type == _types.EventType.QUEUE_SHUTDOWN): - break - - del self.client_completion_queue - del self.server_completion_queue - del self.server - - def testHangingServerCall(self): - deadline = time.time() + 5 - deadline_tolerance = 0.25 - event_time_tolerance = 2 - cancel_all_calls_time_tolerance = 0.5 - request = 'blarghaflargh' - method = 'twinkies' - host = 'hostess' - server_request_tag = object() - request_call_result = self.server.request_call(self.server_completion_queue, - server_request_tag) - - client_call_tag = object() - client_call = self.client_channel.create_call(self.client_completion_queue, - method, host, deadline) - client_start_batch_result = client_call.start_batch([ - _types.OpArgs.send_initial_metadata([]), - _types.OpArgs.send_message(request, 0), - _types.OpArgs.send_close_from_client(), - _types.OpArgs.recv_initial_metadata(), - _types.OpArgs.recv_message(), - _types.OpArgs.recv_status_on_client() - ], client_call_tag) - - client_no_event, request_event, = wait_for_events( - [self.client_completion_queue, self.server_completion_queue], - time.time() + event_time_tolerance) - - # Now try to shutdown the server and expect that we see server shutdown - # almost immediately after calling cancel_all_calls. - - # First attempt to cancel all calls before shutting down, and expect - # our state machine to catch the erroneous API use. - with self.assertRaises(RuntimeError): - self.server.cancel_all_calls() - - shutdown_tag = object() - self.server.shutdown(shutdown_tag) - pre_cancel_timestamp = time.time() - self.server.cancel_all_calls() - finish_shutdown_timestamp = None - client_call_event, server_shutdown_event = wait_for_events( - [self.client_completion_queue, self.server_completion_queue], - time.time() + event_time_tolerance) - self.assertIs(shutdown_tag, server_shutdown_event.tag) - self.assertGreater(pre_cancel_timestamp + cancel_all_calls_time_tolerance, - time.time()) - - del client_call - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/_adapter/_proto_scenarios.py b/src/python/grpcio/tests/unit/_adapter/_proto_scenarios.py deleted file mode 100644 index 7a90eacf77..0000000000 --- a/src/python/grpcio/tests/unit/_adapter/_proto_scenarios.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Test scenarios using protocol buffers.""" - -import abc -import threading - -import six - -from tests.unit._junkdrawer import math_pb2 - - -class ProtoScenario(six.with_metaclass(abc.ABCMeta)): - """An RPC test scenario using protocol buffers.""" - - @abc.abstractmethod - def method(self): - """Access the test method name. - - Returns: - The test method name. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_request(self, request): - """Serialize a request protocol buffer. - - Args: - request: A request protocol buffer. - - Returns: - The bytestring serialization of the given request protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_request(self, request_bytestring): - """Deserialize a request protocol buffer. - - Args: - request_bytestring: The bytestring serialization of a request protocol - buffer. - - Returns: - The request protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_response(self, response): - """Serialize a response protocol buffer. - - Args: - response: A response protocol buffer. - - Returns: - The bytestring serialization of the given response protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_response(self, response_bytestring): - """Deserialize a response protocol buffer. - - Args: - response_bytestring: The bytestring serialization of a response protocol - buffer. - - Returns: - The response protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def requests(self): - """Access the sequence of requests for this scenario. - - Returns: - A sequence of request protocol buffers. - """ - raise NotImplementedError() - - @abc.abstractmethod - def response_for_request(self, request): - """Access the response for a particular request. - - Args: - request: A request protocol buffer. - - Returns: - The response protocol buffer appropriate for the given request. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_requests(self, experimental_requests): - """Verify the requests transmitted through the system under test. - - Args: - experimental_requests: The request protocol buffers transmitted through - the system under test. - - Returns: - True if the requests satisfy this test scenario; False otherwise. - """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_responses(self, experimental_responses): - """Verify the responses transmitted through the system under test. - - Args: - experimental_responses: The response protocol buffers transmitted through - the system under test. - - Returns: - True if the responses satisfy this test scenario; False otherwise. - """ - raise NotImplementedError() - - -class EmptyScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - def method(self): - return 'DivMany' - - def serialize_request(self, request): - raise ValueError('This should not be necessary to call!') - - def deserialize_request(self, request_bytestring): - raise ValueError('This should not be necessary to call!') - - def serialize_response(self, response): - raise ValueError('This should not be necessary to call!') - - def deserialize_response(self, response_bytestring): - raise ValueError('This should not be necessary to call!') - - def requests(self): - return () - - def response_for_request(self, request): - raise ValueError('This should not be necessary to call!') - - def verify_requests(self, experimental_requests): - return not experimental_requests - - def verify_responses(self, experimental_responses): - return not experimental_responses - - -class BidirectionallyUnaryScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - _DIVIDEND = 59 - _DIVISOR = 7 - _QUOTIENT = 8 - _REMAINDER = 3 - - _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR) - _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER) - - def method(self): - return 'Div' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return [self._REQUEST] - - def response_for_request(self, request): - return self._RESPONSE - - def verify_requests(self, experimental_requests): - return tuple(experimental_requests) == (self._REQUEST,) - - def verify_responses(self, experimental_responses): - return tuple(experimental_responses) == (self._RESPONSE,) - - -class BidirectionallyStreamingScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - _STREAM_LENGTH = 200 - _REQUESTS = tuple( - math_pb2.DivArgs(dividend=59 + index, divisor=7 + index) - for index in range(_STREAM_LENGTH)) - - def __init__(self): - self._lock = threading.Lock() - self._responses = [] - - def method(self): - return 'DivMany' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return 
math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return self._REQUESTS - - def response_for_request(self, request): - quotient, remainder = divmod(request.dividend, request.divisor) - response = math_pb2.DivReply(quotient=quotient, remainder=remainder) - with self._lock: - self._responses.append(response) - return response - - def verify_requests(self, experimental_requests): - return tuple(experimental_requests) == self._REQUESTS - - def verify_responses(self, experimental_responses): - with self._lock: - return tuple(experimental_responses) == tuple(self._responses) diff --git a/src/python/grpcio/tests/unit/_core_over_links_base_interface_test.py b/src/python/grpcio/tests/unit/_core_over_links_base_interface_test.py deleted file mode 100644 index 2b8981c752..0000000000 --- a/src/python/grpcio/tests/unit/_core_over_links_base_interface_test.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
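The _adapter/_proto_scenarios.py module removed just above pairs per-scenario request/response (de)serialization with verify_requests/verify_responses checks, and its streaming scenario answers each DivArgs with divmod under a lock. Purely as an illustration (not part of this diff), here is a self-contained sketch of that round-trip shape, using struct-packed integer pairs as a hypothetical stand-in for the math.proto messages:

import struct
import threading

# Hypothetical stand-in for math_pb2.DivArgs / DivReply: a pair of signed 64-bit ints.
def serialize_pair(a, b):
    return struct.pack('>qq', a, b)

def deserialize_pair(data):
    return struct.unpack('>qq', data)

class DivScenario(object):
    """Illustrative sketch echoing the shape of the deleted streaming scenario."""

    def __init__(self, stream_length=200):
        self._requests = tuple((59 + i, 7 + i) for i in range(stream_length))
        self._lock = threading.Lock()
        self._responses = []

    def requests(self):
        return self._requests

    def response_for_request(self, request):
        dividend, divisor = request
        quotient, remainder = divmod(dividend, divisor)
        response = (quotient, remainder)
        with self._lock:  # the deleted scenario also records responses under a lock
            self._responses.append(response)
        return response

    def verify_requests(self, observed):
        return tuple(observed) == self._requests

    def verify_responses(self, observed):
        with self._lock:
            return tuple(observed) == tuple(self._responses)

if __name__ == '__main__':
    scenario = DivScenario(stream_length=3)
    wire = [serialize_pair(*request) for request in scenario.requests()]
    replies = [scenario.response_for_request(deserialize_pair(raw)) for raw in wire]
    assert scenario.verify_requests(deserialize_pair(raw) for raw in wire)
    assert scenario.verify_responses(replies)

The real scenarios do the same thing with protocol buffers; the point of the pattern is that each scenario owns both its wire format and the predicate that decides whether the system under test reproduced the stream faithfully.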
- -"""Tests Base interface compliance of the core-over-gRPC-links stack.""" - -import collections -import logging -import random -import time -import unittest - -import six - -from grpc._adapter import _intermediary_low -from grpc._links import invocation -from grpc._links import service -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.core import implementations -from grpc.framework.interfaces.base import utilities -from tests.unit import test_common as grpc_test_common -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.base import test_cases -from tests.unit.framework.interfaces.base import test_interfaces - - -class _SerializationBehaviors( - collections.namedtuple( - '_SerializationBehaviors', - ('request_serializers', 'request_deserializers', 'response_serializers', - 'response_deserializers',))): - pass - - -class _Links( - collections.namedtuple( - '_Links', - ('invocation_end_link', 'invocation_grpc_link', 'service_grpc_link', - 'service_end_link'))): - pass - - -def _serialization_behaviors_from_serializations(serializations): - request_serializers = {} - request_deserializers = {} - response_serializers = {} - response_deserializers = {} - for (group, method), serialization in six.iteritems(serializations): - request_serializers[group, method] = serialization.serialize_request - request_deserializers[group, method] = serialization.deserialize_request - response_serializers[group, method] = serialization.serialize_response - response_deserializers[group, method] = serialization.deserialize_response - return _SerializationBehaviors( - request_serializers, request_deserializers, response_serializers, - response_deserializers) - - -class _Implementation(test_interfaces.Implementation): - - def instantiate(self, serializations, servicer): - serialization_behaviors = _serialization_behaviors_from_serializations( - serializations) - invocation_end_link = implementations.invocation_end_link() - service_end_link = implementations.service_end_link( - servicer, test_constants.DEFAULT_TIMEOUT, - test_constants.MAXIMUM_TIMEOUT) - service_grpc_link = service.service_link( - serialization_behaviors.request_deserializers, - serialization_behaviors.response_serializers) - port = service_grpc_link.add_port('[::]:0', None) - channel = _intermediary_low.Channel('localhost:%d' % port, None) - invocation_grpc_link = invocation.invocation_link( - channel, b'localhost', None, - serialization_behaviors.request_serializers, - serialization_behaviors.response_deserializers) - - invocation_end_link.join_link(invocation_grpc_link) - invocation_grpc_link.join_link(invocation_end_link) - service_end_link.join_link(service_grpc_link) - service_grpc_link.join_link(service_end_link) - invocation_grpc_link.start() - service_grpc_link.start() - return invocation_end_link, service_end_link, ( - invocation_grpc_link, service_grpc_link) - - def destantiate(self, memo): - invocation_grpc_link, service_grpc_link = memo - invocation_grpc_link.stop() - service_grpc_link.begin_stop() - service_grpc_link.end_stop() - - def invocation_initial_metadata(self): - return grpc_test_common.INVOCATION_INITIAL_METADATA - - def service_initial_metadata(self): - return grpc_test_common.SERVICE_INITIAL_METADATA - - def invocation_completion(self): - return utilities.completion(None, None, None) - - def service_completion(self): - return utilities.completion( - grpc_test_common.SERVICE_TERMINAL_METADATA, - beta_interfaces.StatusCode.OK, grpc_test_common.DETAILS) - - def 
metadata_transmitted(self, original_metadata, transmitted_metadata): - return original_metadata is None or grpc_test_common.metadata_transmitted( - original_metadata, transmitted_metadata) - - def completion_transmitted(self, original_completion, transmitted_completion): - if (original_completion.terminal_metadata is not None and - not grpc_test_common.metadata_transmitted( - original_completion.terminal_metadata, - transmitted_completion.terminal_metadata)): - return False - elif original_completion.code is not transmitted_completion.code: - return False - elif original_completion.message != transmitted_completion.message: - return False - else: - return True - - -def load_tests(loader, tests, pattern): - return unittest.TestSuite( - tests=tuple( - loader.loadTestsFromTestCase(test_case_class) - for test_case_class in test_cases.test_cases(_Implementation()))) - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/_crust_over_core_over_links_face_interface_test.py b/src/python/grpcio/tests/unit/_crust_over_core_over_links_face_interface_test.py deleted file mode 100644 index 50b9a5a824..0000000000 --- a/src/python/grpcio/tests/unit/_crust_over_core_over_links_face_interface_test.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
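Both base-interface test harnesses deleted in this change build the same four per-method serializer maps from a single {(group, method): serialization} mapping before wiring the invocation and service links together. A standalone sketch of that map-building pattern follows (illustrative only; the Serialization stand-in and the identity lambdas are hypothetical, not the tests' real fixtures):

import collections

_SerializationBehaviors = collections.namedtuple(
    '_SerializationBehaviors',
    ('request_serializers', 'request_deserializers',
     'response_serializers', 'response_deserializers'))

# Hypothetical stand-in for the per-method serialization objects used by the tests.
Serialization = collections.namedtuple(
    'Serialization',
    ('serialize_request', 'deserialize_request',
     'serialize_response', 'deserialize_response'))

def behaviors_from_serializations(serializations):
    """Split a {(group, method): Serialization} mapping into four per-method maps."""
    request_serializers = {}
    request_deserializers = {}
    response_serializers = {}
    response_deserializers = {}
    for (group, method), serialization in serializations.items():
        request_serializers[group, method] = serialization.serialize_request
        request_deserializers[group, method] = serialization.deserialize_request
        response_serializers[group, method] = serialization.serialize_response
        response_deserializers[group, method] = serialization.deserialize_response
    return _SerializationBehaviors(
        request_serializers, request_deserializers,
        response_serializers, response_deserializers)

if __name__ == '__main__':
    identity = Serialization(lambda x: x, lambda x: x, lambda x: x, lambda x: x)
    behaviors = behaviors_from_serializations({('math.Math', 'Div'): identity})
    assert ('math.Math', 'Div') in behaviors.request_serializers

Splitting the mapping this way lets the invocation side receive only serializers/deserializers keyed by method while the service side receives the mirror-image maps, which is exactly how the deleted tests hand them to invocation.invocation_link and service.service_link.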
- -"""Tests Face compliance of the crust-over-core-over-gRPC-links stack.""" - -import collections -import unittest - -import six - -from grpc._adapter import _intermediary_low -from grpc._links import invocation -from grpc._links import service -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.core import implementations as core_implementations -from grpc.framework.crust import implementations as crust_implementations -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.links import utilities -from tests.unit import test_common as grpc_test_common -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.face import test_cases -from tests.unit.framework.interfaces.face import test_interfaces - - -class _SerializationBehaviors( - collections.namedtuple( - '_SerializationBehaviors', - ('request_serializers', 'request_deserializers', 'response_serializers', - 'response_deserializers',))): - pass - - -def _serialization_behaviors_from_test_methods(test_methods): - request_serializers = {} - request_deserializers = {} - response_serializers = {} - response_deserializers = {} - for (group, method), test_method in six.iteritems(test_methods): - request_serializers[group, method] = test_method.serialize_request - request_deserializers[group, method] = test_method.deserialize_request - response_serializers[group, method] = test_method.serialize_response - response_deserializers[group, method] = test_method.deserialize_response - return _SerializationBehaviors( - request_serializers, request_deserializers, response_serializers, - response_deserializers) - - -class _Implementation(test_interfaces.Implementation): - - def instantiate( - self, methods, method_implementations, multi_method_implementation): - pool = logging_pool.pool(test_constants.POOL_SIZE) - servicer = crust_implementations.servicer( - method_implementations, multi_method_implementation, pool) - serialization_behaviors = _serialization_behaviors_from_test_methods( - methods) - invocation_end_link = core_implementations.invocation_end_link() - service_end_link = core_implementations.service_end_link( - servicer, test_constants.DEFAULT_TIMEOUT, - test_constants.MAXIMUM_TIMEOUT) - service_grpc_link = service.service_link( - serialization_behaviors.request_deserializers, - serialization_behaviors.response_serializers) - port = service_grpc_link.add_port('[::]:0', None) - channel = _intermediary_low.Channel('localhost:%d' % port, None) - invocation_grpc_link = invocation.invocation_link( - channel, b'localhost', None, - serialization_behaviors.request_serializers, - serialization_behaviors.response_deserializers) - - invocation_end_link.join_link(invocation_grpc_link) - invocation_grpc_link.join_link(invocation_end_link) - service_grpc_link.join_link(service_end_link) - service_end_link.join_link(service_grpc_link) - service_end_link.start() - invocation_end_link.start() - invocation_grpc_link.start() - service_grpc_link.start() - - generic_stub = crust_implementations.generic_stub(invocation_end_link, pool) - # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest. - group = next(iter(methods))[0] - # TODO(nathaniel): Add a "cardinalities_by_group" attribute to - # _digest.TestServiceDigest. 
- cardinalities = { - method: method_object.cardinality() - for (group, method), method_object in six.iteritems(methods)} - dynamic_stub = crust_implementations.dynamic_stub( - invocation_end_link, group, cardinalities, pool) - - return generic_stub, {group: dynamic_stub}, ( - invocation_end_link, invocation_grpc_link, service_grpc_link, - service_end_link, pool) - - def destantiate(self, memo): - (invocation_end_link, invocation_grpc_link, service_grpc_link, - service_end_link, pool) = memo - invocation_end_link.stop(0).wait() - invocation_grpc_link.stop() - service_grpc_link.begin_stop() - service_end_link.stop(0).wait() - service_grpc_link.end_stop() - invocation_end_link.join_link(utilities.NULL_LINK) - invocation_grpc_link.join_link(utilities.NULL_LINK) - service_grpc_link.join_link(utilities.NULL_LINK) - service_end_link.join_link(utilities.NULL_LINK) - pool.shutdown(wait=True) - - def invocation_metadata(self): - return grpc_test_common.INVOCATION_INITIAL_METADATA - - def initial_metadata(self): - return grpc_test_common.SERVICE_INITIAL_METADATA - - def terminal_metadata(self): - return grpc_test_common.SERVICE_TERMINAL_METADATA - - def code(self): - return beta_interfaces.StatusCode.OK - - def details(self): - return grpc_test_common.DETAILS - - def metadata_transmitted(self, original_metadata, transmitted_metadata): - return original_metadata is None or grpc_test_common.metadata_transmitted( - original_metadata, transmitted_metadata) - - -def load_tests(loader, tests, pattern): - return unittest.TestSuite( - tests=tuple( - loader.loadTestsFromTestCase(test_case_class) - for test_case_class in test_cases.test_cases(_Implementation()))) - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/_junkdrawer/math_pb2.py b/src/python/grpcio/tests/unit/_junkdrawer/math_pb2.py deleted file mode 100644 index 20165955b4..0000000000 --- a/src/python/grpcio/tests/unit/_junkdrawer/math_pb2.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
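The generated _junkdrawer/math_pb2.py deleted below carries a TODO to produce it from math.proto during the build rather than keeping generated code in source control. As a hedged sketch only, such generation could be scripted with grpcio-tools, assuming its grpc_tools.protoc entry point (not part of this diff; the module path available at the time of this change may differ):

# Sketch under assumptions: requires the grpcio-tools package; paths are illustrative.
from grpc_tools import protoc

if __name__ == '__main__':
    # Regenerate math_pb2.py (and gRPC stubs) from math.proto at build/test time.
    exit_code = protoc.main([
        'protoc',           # placeholder for argv[0]
        '-I.',              # assumes math.proto sits in the current directory
        '--python_out=.',
        '--grpc_python_out=.',
        'math.proto',
    ])
    assert exit_code == 0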
- -# TODO(nathaniel): Remove this from source control after having made -# generation from the math.proto source part of GRPC's build-and-test -# process. - -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: math.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='math.proto', - package='math', - serialized_pb=_b('\n\nmath.proto\x12\x04math\",\n\x07\x44ivArgs\x12\x10\n\x08\x64ividend\x18\x01 \x02(\x03\x12\x0f\n\x07\x64ivisor\x18\x02 \x02(\x03\"/\n\x08\x44ivReply\x12\x10\n\x08quotient\x18\x01 \x02(\x03\x12\x11\n\tremainder\x18\x02 \x02(\x03\"\x18\n\x07\x46ibArgs\x12\r\n\x05limit\x18\x01 \x01(\x03\"\x12\n\x03Num\x12\x0b\n\x03num\x18\x01 \x02(\x03\"\x19\n\x08\x46ibReply\x12\r\n\x05\x63ount\x18\x01 \x02(\x03\x32\xa4\x01\n\x04Math\x12&\n\x03\x44iv\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00\x12.\n\x07\x44ivMany\x12\r.math.DivArgs\x1a\x0e.math.DivReply\"\x00(\x01\x30\x01\x12#\n\x03\x46ib\x12\r.math.FibArgs\x1a\t.math.Num\"\x00\x30\x01\x12\x1f\n\x03Sum\x12\t.math.Num\x1a\t.math.Num\"\x00(\x01') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_DIVARGS = _descriptor.Descriptor( - name='DivArgs', - full_name='math.DivArgs', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='dividend', full_name='math.DivArgs.dividend', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='divisor', full_name='math.DivArgs.divisor', index=1, - number=2, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=20, - serialized_end=64, -) - - -_DIVREPLY = _descriptor.Descriptor( - name='DivReply', - full_name='math.DivReply', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='quotient', full_name='math.DivReply.quotient', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='remainder', full_name='math.DivReply.remainder', index=1, - number=2, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=66, - serialized_end=113, -) - - -_FIBARGS = _descriptor.Descriptor( - name='FibArgs', - full_name='math.FibArgs', - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='limit', full_name='math.FibArgs.limit', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=115, - serialized_end=139, -) - - -_NUM = _descriptor.Descriptor( - name='Num', - full_name='math.Num', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num', full_name='math.Num.num', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=141, - serialized_end=159, -) - - -_FIBREPLY = _descriptor.Descriptor( - name='FibReply', - full_name='math.FibReply', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='count', full_name='math.FibReply.count', index=0, - number=1, type=3, cpp_type=2, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=161, - serialized_end=186, -) - -DESCRIPTOR.message_types_by_name['DivArgs'] = _DIVARGS -DESCRIPTOR.message_types_by_name['DivReply'] = _DIVREPLY -DESCRIPTOR.message_types_by_name['FibArgs'] = _FIBARGS -DESCRIPTOR.message_types_by_name['Num'] = _NUM -DESCRIPTOR.message_types_by_name['FibReply'] = _FIBREPLY - -DivArgs = _reflection.GeneratedProtocolMessageType('DivArgs', (_message.Message,), dict( - DESCRIPTOR = _DIVARGS, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.DivArgs) - )) -_sym_db.RegisterMessage(DivArgs) - -DivReply = _reflection.GeneratedProtocolMessageType('DivReply', (_message.Message,), dict( - DESCRIPTOR = _DIVREPLY, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.DivReply) - )) -_sym_db.RegisterMessage(DivReply) - -FibArgs = _reflection.GeneratedProtocolMessageType('FibArgs', (_message.Message,), dict( - DESCRIPTOR = _FIBARGS, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.FibArgs) - )) -_sym_db.RegisterMessage(FibArgs) - -Num = _reflection.GeneratedProtocolMessageType('Num', (_message.Message,), dict( - DESCRIPTOR = _NUM, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.Num) - )) -_sym_db.RegisterMessage(Num) - -FibReply = _reflection.GeneratedProtocolMessageType('FibReply', (_message.Message,), dict( - DESCRIPTOR = _FIBREPLY, - __module__ = 'math_pb2' - # @@protoc_insertion_point(class_scope:math.FibReply) - )) -_sym_db.RegisterMessage(FibReply) - - -# @@protoc_insertion_point(module_scope) diff --git a/src/python/grpcio/tests/unit/_links/_lonely_invocation_link_test.py b/src/python/grpcio/tests/unit/_links/_lonely_invocation_link_test.py deleted file mode 100644 index 890755f81c..0000000000 --- 
a/src/python/grpcio/tests/unit/_links/_lonely_invocation_link_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""A test of invocation-side code unconnected to an RPC server.""" - -import unittest - -from grpc._adapter import _intermediary_low -from grpc._links import invocation -from grpc.framework.interfaces.links import links -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.links import test_cases -from tests.unit.framework.interfaces.links import test_utilities - -_NULL_BEHAVIOR = lambda unused_argument: None - - -class LonelyInvocationLinkTest(unittest.TestCase): - - def testUpAndDown(self): - channel = _intermediary_low.Channel('nonexistent:54321', None) - invocation_link = invocation.invocation_link( - channel, 'nonexistent', None, {}, {}) - - invocation_link.start() - invocation_link.stop() - - def _test_lonely_invocation_with_termination(self, termination): - test_operation_id = object() - test_group = 'test package.Test Service' - test_method = 'test method' - invocation_link_mate = test_utilities.RecordingLink() - - channel = _intermediary_low.Channel('nonexistent:54321', None) - invocation_link = invocation.invocation_link( - channel, 'nonexistent', None, {}, {}) - invocation_link.join_link(invocation_link_mate) - invocation_link.start() - - ticket = links.Ticket( - test_operation_id, 0, test_group, test_method, - links.Ticket.Subscription.FULL, test_constants.SHORT_TIMEOUT, 1, None, - None, None, None, None, termination, None) - invocation_link.accept_ticket(ticket) - invocation_link_mate.block_until_tickets_satisfy(test_cases.terminated) - - invocation_link.stop() - - self.assertIsNot( - invocation_link_mate.tickets()[-1].termination, - links.Ticket.Termination.COMPLETION) - - def testLonelyInvocationLinkWithCommencementTicket(self): - self._test_lonely_invocation_with_termination(None) - - def testLonelyInvocationLinkWithEntireTicket(self): - self._test_lonely_invocation_with_termination( - 
links.Ticket.Termination.COMPLETION) - - -if __name__ == '__main__': - unittest.main() diff --git a/src/python/grpcio/tests/unit/_links/_proto_scenarios.py b/src/python/grpcio/tests/unit/_links/_proto_scenarios.py deleted file mode 100644 index 50661085f9..0000000000 --- a/src/python/grpcio/tests/unit/_links/_proto_scenarios.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Test scenarios using protocol buffers.""" - -import abc -import threading - -import six - -from tests.unit._junkdrawer import math_pb2 -from tests.unit.framework.common import test_constants - - -class ProtoScenario(six.with_metaclass(abc.ABCMeta)): - """An RPC test scenario using protocol buffers.""" - - @abc.abstractmethod - def group_and_method(self): - """Access the test group and method. - - Returns: - The test group and method as a pair. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_request(self, request): - """Serialize a request protocol buffer. - - Args: - request: A request protocol buffer. - - Returns: - The bytestring serialization of the given request protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_request(self, request_bytestring): - """Deserialize a request protocol buffer. - - Args: - request_bytestring: The bytestring serialization of a request protocol - buffer. - - Returns: - The request protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def serialize_response(self, response): - """Serialize a response protocol buffer. - - Args: - response: A response protocol buffer. - - Returns: - The bytestring serialization of the given response protocol buffer. - """ - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_response(self, response_bytestring): - """Deserialize a response protocol buffer. - - Args: - response_bytestring: The bytestring serialization of a response protocol - buffer. 
- - Returns: - The response protocol buffer deserialized from the given byte string. - """ - raise NotImplementedError() - - @abc.abstractmethod - def requests(self): - """Access the sequence of requests for this scenario. - - Returns: - A sequence of request protocol buffers. - """ - raise NotImplementedError() - - @abc.abstractmethod - def response_for_request(self, request): - """Access the response for a particular request. - - Args: - request: A request protocol buffer. - - Returns: - The response protocol buffer appropriate for the given request. - """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_requests(self, experimental_requests): - """Verify the requests transmitted through the system under test. - - Args: - experimental_requests: The request protocol buffers transmitted through - the system under test. - - Returns: - True if the requests satisfy this test scenario; False otherwise. - """ - raise NotImplementedError() - - @abc.abstractmethod - def verify_responses(self, experimental_responses): - """Verify the responses transmitted through the system under test. - - Args: - experimental_responses: The response protocol buffers transmitted through - the system under test. - - Returns: - True if the responses satisfy this test scenario; False otherwise. - """ - raise NotImplementedError() - - -class EmptyScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - def group_and_method(self): - return 'math.Math', 'DivMany' - - def serialize_request(self, request): - raise ValueError('This should not be necessary to call!') - - def deserialize_request(self, request_bytestring): - raise ValueError('This should not be necessary to call!') - - def serialize_response(self, response): - raise ValueError('This should not be necessary to call!') - - def deserialize_response(self, response_bytestring): - raise ValueError('This should not be necessary to call!') - - def requests(self): - return () - - def response_for_request(self, request): - raise ValueError('This should not be necessary to call!') - - def verify_requests(self, experimental_requests): - return not experimental_requests - - def verify_responses(self, experimental_responses): - return not experimental_responses - - -class BidirectionallyUnaryScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - - _DIVIDEND = 59 - _DIVISOR = 7 - _QUOTIENT = 8 - _REMAINDER = 3 - - _REQUEST = math_pb2.DivArgs(dividend=_DIVIDEND, divisor=_DIVISOR) - _RESPONSE = math_pb2.DivReply(quotient=_QUOTIENT, remainder=_REMAINDER) - - def group_and_method(self): - return 'math.Math', 'Div' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return [self._REQUEST] - - def response_for_request(self, request): - return self._RESPONSE - - def verify_requests(self, experimental_requests): - return tuple(experimental_requests) == (self._REQUEST,) - - def verify_responses(self, experimental_responses): - return tuple(experimental_responses) == (self._RESPONSE,) - - -class BidirectionallyStreamingScenario(ProtoScenario): - """A scenario that transmits no protocol buffers in either direction.""" - 
- _REQUESTS = tuple( - math_pb2.DivArgs(dividend=59 + index, divisor=7 + index) - for index in range(test_constants.STREAM_LENGTH)) - - def __init__(self): - self._lock = threading.Lock() - self._responses = [] - - def group_and_method(self): - return 'math.Math', 'DivMany' - - def serialize_request(self, request): - return request.SerializeToString() - - def deserialize_request(self, request_bytestring): - return math_pb2.DivArgs.FromString(request_bytestring) - - def serialize_response(self, response): - return response.SerializeToString() - - def deserialize_response(self, response_bytestring): - return math_pb2.DivReply.FromString(response_bytestring) - - def requests(self): - return self._REQUESTS - - def response_for_request(self, request): - quotient, remainder = divmod(request.dividend, request.divisor) - response = math_pb2.DivReply(quotient=quotient, remainder=remainder) - with self._lock: - self._responses.append(response) - return response - - def verify_requests(self, experimental_requests): - return tuple(experimental_requests) == self._REQUESTS - - def verify_responses(self, experimental_responses): - with self._lock: - return tuple(experimental_responses) == tuple(self._responses) diff --git a/src/python/grpcio/tests/unit/_links/_transmission_test.py b/src/python/grpcio/tests/unit/_links/_transmission_test.py deleted file mode 100644 index 888684d197..0000000000 --- a/src/python/grpcio/tests/unit/_links/_transmission_test.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
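The _proto_scenarios.py module removed above centers on the ProtoScenario contract: a scenario names its group and method, serializes and deserializes messages in both directions, supplies the request sequence, and verifies what the system under test actually transmitted. A minimal scenario in that shape, shown only as an illustration using plain byte strings instead of the deleted math_pb2 messages:

class EchoBytesScenario(object):
    """Illustrative scenario over raw bytes; not part of the deleted tests."""

    _REQUESTS = (b'one', b'two', b'three')

    def group_and_method(self):
        return 'example.Example', 'Echo'

    def serialize_request(self, request):
        # Requests are already bytes, so serialization is the identity.
        return request

    def deserialize_request(self, request_bytestring):
        return request_bytestring

    def serialize_response(self, response):
        return response

    def deserialize_response(self, response_bytestring):
        return response_bytestring

    def requests(self):
        return self._REQUESTS

    def response_for_request(self, request):
        # The "service" simply upper-cases each request.
        return request.upper()

    def verify_requests(self, experimental_requests):
        return tuple(experimental_requests) == self._REQUESTS

    def verify_responses(self, experimental_responses):
        return tuple(experimental_responses) == tuple(
            request.upper() for request in self._REQUESTS)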
- -"""Tests transmission of tickets across gRPC-on-the-wire.""" - -import unittest - -from grpc._adapter import _intermediary_low -from grpc._links import invocation -from grpc._links import service -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.interfaces.links import links -from tests.unit import test_common -from tests.unit._links import _proto_scenarios -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.links import test_cases -from tests.unit.framework.interfaces.links import test_utilities - -_IDENTITY = lambda x: x - - -class TransmissionTest(test_cases.TransmissionTest, unittest.TestCase): - - def create_transmitting_links(self): - service_link = service.service_link( - {self.group_and_method(): self.deserialize_request}, - {self.group_and_method(): self.serialize_response}) - port = service_link.add_port('[::]:0', None) - service_link.start() - channel = _intermediary_low.Channel('localhost:%d' % port, None) - invocation_link = invocation.invocation_link( - channel, 'localhost', None, - {self.group_and_method(): self.serialize_request}, - {self.group_and_method(): self.deserialize_response}) - invocation_link.start() - return invocation_link, service_link - - def destroy_transmitting_links(self, invocation_side_link, service_side_link): - invocation_side_link.stop() - service_side_link.begin_stop() - service_side_link.end_stop() - - def create_invocation_initial_metadata(self): - return ( - ('first_invocation_initial_metadata_key', 'just a string value'), - ('second_invocation_initial_metadata_key', '0123456789'), - ('third_invocation_initial_metadata_key-bin', '\x00\x57' * 100), - ) - - def create_invocation_terminal_metadata(self): - return None - - def create_service_initial_metadata(self): - return ( - ('first_service_initial_metadata_key', 'just another string value'), - ('second_service_initial_metadata_key', '9876543210'), - ('third_service_initial_metadata_key-bin', '\x00\x59\x02' * 100), - ) - - def create_service_terminal_metadata(self): - return ( - ('first_service_terminal_metadata_key', 'yet another string value'), - ('second_service_terminal_metadata_key', 'abcdefghij'), - ('third_service_terminal_metadata_key-bin', '\x00\x37' * 100), - ) - - def create_invocation_completion(self): - return None, None - - def create_service_completion(self): - return ( - beta_interfaces.StatusCode.OK, b'An exuberant test "details" message!') - - def assertMetadataTransmitted(self, original_metadata, transmitted_metadata): - self.assertTrue( - test_common.metadata_transmitted( - original_metadata, transmitted_metadata), - '%s erroneously transmitted as %s' % ( - original_metadata, transmitted_metadata)) - - -class RoundTripTest(unittest.TestCase): - - def testZeroMessageRoundTrip(self): - test_operation_id = object() - test_group = 'test package.Test Group' - test_method = 'test method' - identity_transformation = {(test_group, test_method): _IDENTITY} - test_code = beta_interfaces.StatusCode.OK - test_message = 'a test message' - - service_link = service.service_link( - identity_transformation, identity_transformation) - service_mate = test_utilities.RecordingLink() - service_link.join_link(service_mate) - port = service_link.add_port('[::]:0', None) - service_link.start() - channel = _intermediary_low.Channel('localhost:%d' % port, None) - invocation_link = invocation.invocation_link( - channel, None, None, identity_transformation, identity_transformation) - invocation_mate = test_utilities.RecordingLink() - 
invocation_link.join_link(invocation_mate) - invocation_link.start() - - invocation_ticket = links.Ticket( - test_operation_id, 0, test_group, test_method, - links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None, - None, None, None, None, links.Ticket.Termination.COMPLETION, None) - invocation_link.accept_ticket(invocation_ticket) - service_mate.block_until_tickets_satisfy(test_cases.terminated) - - service_ticket = links.Ticket( - service_mate.tickets()[-1].operation_id, 0, None, None, None, None, - None, None, None, None, test_code, test_message, - links.Ticket.Termination.COMPLETION, None) - service_link.accept_ticket(service_ticket) - invocation_mate.block_until_tickets_satisfy(test_cases.terminated) - - invocation_link.stop() - service_link.begin_stop() - service_link.end_stop() - - self.assertIs( - service_mate.tickets()[-1].termination, - links.Ticket.Termination.COMPLETION) - self.assertIs( - invocation_mate.tickets()[-1].termination, - links.Ticket.Termination.COMPLETION) - self.assertIs(invocation_mate.tickets()[-1].code, test_code) - self.assertEqual(invocation_mate.tickets()[-1].message, test_message) - - def _perform_scenario_test(self, scenario): - test_operation_id = object() - test_group, test_method = scenario.group_and_method() - test_code = beta_interfaces.StatusCode.OK - test_message = 'a scenario test message' - - service_link = service.service_link( - {(test_group, test_method): scenario.deserialize_request}, - {(test_group, test_method): scenario.serialize_response}) - service_mate = test_utilities.RecordingLink() - service_link.join_link(service_mate) - port = service_link.add_port('[::]:0', None) - service_link.start() - channel = _intermediary_low.Channel('localhost:%d' % port, None) - invocation_link = invocation.invocation_link( - channel, 'localhost', None, - {(test_group, test_method): scenario.serialize_request}, - {(test_group, test_method): scenario.deserialize_response}) - invocation_mate = test_utilities.RecordingLink() - invocation_link.join_link(invocation_mate) - invocation_link.start() - - invocation_ticket = links.Ticket( - test_operation_id, 0, test_group, test_method, - links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None, - None, None, None, None, None, None) - invocation_link.accept_ticket(invocation_ticket) - requests = scenario.requests() - for request_index, request in enumerate(requests): - request_ticket = links.Ticket( - test_operation_id, 1 + request_index, None, None, None, None, 1, None, - request, None, None, None, None, None) - invocation_link.accept_ticket(request_ticket) - service_mate.block_until_tickets_satisfy( - test_cases.at_least_n_payloads_received_predicate(1 + request_index)) - response_ticket = links.Ticket( - service_mate.tickets()[0].operation_id, request_index, None, None, - None, None, 1, None, scenario.response_for_request(request), None, - None, None, None, None) - service_link.accept_ticket(response_ticket) - invocation_mate.block_until_tickets_satisfy( - test_cases.at_least_n_payloads_received_predicate(1 + request_index)) - request_count = len(requests) - invocation_completion_ticket = links.Ticket( - test_operation_id, request_count + 1, None, None, None, None, None, - None, None, None, None, None, links.Ticket.Termination.COMPLETION, - None) - invocation_link.accept_ticket(invocation_completion_ticket) - service_mate.block_until_tickets_satisfy(test_cases.terminated) - service_completion_ticket = links.Ticket( - service_mate.tickets()[0].operation_id, request_count, None, 
None, None, - None, None, None, None, None, test_code, test_message, - links.Ticket.Termination.COMPLETION, None) - service_link.accept_ticket(service_completion_ticket) - invocation_mate.block_until_tickets_satisfy(test_cases.terminated) - - invocation_link.stop() - service_link.begin_stop() - service_link.end_stop() - - observed_requests = tuple( - ticket.payload for ticket in service_mate.tickets() - if ticket.payload is not None) - observed_responses = tuple( - ticket.payload for ticket in invocation_mate.tickets() - if ticket.payload is not None) - self.assertTrue(scenario.verify_requests(observed_requests)) - self.assertTrue(scenario.verify_responses(observed_responses)) - - def testEmptyScenario(self): - self._perform_scenario_test(_proto_scenarios.EmptyScenario()) - - def testBidirectionallyUnaryScenario(self): - self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario()) - - def testBidirectionallyStreamingScenario(self): - self._perform_scenario_test( - _proto_scenarios.BidirectionallyStreamingScenario()) - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py b/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py deleted file mode 100644 index 5dc8720639..0000000000 --- a/src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Tests of grpc.beta._connectivity_channel.""" - -import threading -import time -import unittest - -from grpc._adapter import _low -from grpc._adapter import _types -from grpc.beta import _connectivity_channel -from grpc.beta import interfaces -from tests.unit.framework.common import test_constants - - -def _drive_completion_queue(completion_queue): - while True: - event = completion_queue.next(time.time() + 24 * 60 * 60) - if event.type == _types.EventType.QUEUE_SHUTDOWN: - break - - -class _Callback(object): - - def __init__(self): - self._condition = threading.Condition() - self._connectivities = [] - - def update(self, connectivity): - with self._condition: - self._connectivities.append(connectivity) - self._condition.notify() - - def connectivities(self): - with self._condition: - return tuple(self._connectivities) - - def block_until_connectivities_satisfy(self, predicate): - with self._condition: - while True: - connectivities = tuple(self._connectivities) - if predicate(connectivities): - return connectivities - else: - self._condition.wait() - - -class ChannelConnectivityTest(unittest.TestCase): - - def test_lonely_channel_connectivity(self): - low_channel = _low.Channel('localhost:12345', ()) - callback = _Callback() - - connectivity_channel = _connectivity_channel.ConnectivityChannel( - low_channel) - connectivity_channel.subscribe(callback.update, try_to_connect=False) - first_connectivities = callback.block_until_connectivities_satisfy(bool) - connectivity_channel.subscribe(callback.update, try_to_connect=True) - second_connectivities = callback.block_until_connectivities_satisfy( - lambda connectivities: 2 <= len(connectivities)) - # Wait for a connection that will never happen. - time.sleep(test_constants.SHORT_TIMEOUT) - third_connectivities = callback.connectivities() - connectivity_channel.unsubscribe(callback.update) - fourth_connectivities = callback.connectivities() - connectivity_channel.unsubscribe(callback.update) - fifth_connectivities = callback.connectivities() - - self.assertSequenceEqual( - (interfaces.ChannelConnectivity.IDLE,), first_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.READY, second_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.READY, third_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.READY, fourth_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.READY, fifth_connectivities) - - def test_immediately_connectable_channel_connectivity(self): - server_completion_queue = _low.CompletionQueue() - server = _low.Server(server_completion_queue, []) - port = server.add_http2_port('[::]:0') - server.start() - server_completion_queue_thread = threading.Thread( - target=_drive_completion_queue, args=(server_completion_queue,)) - server_completion_queue_thread.start() - low_channel = _low.Channel('localhost:%d' % port, ()) - first_callback = _Callback() - second_callback = _Callback() - - connectivity_channel = _connectivity_channel.ConnectivityChannel( - low_channel) - connectivity_channel.subscribe(first_callback.update, try_to_connect=False) - first_connectivities = first_callback.block_until_connectivities_satisfy( - bool) - # Wait for a connection that will never happen because try_to_connect=True - # has not yet been passed. 
- time.sleep(test_constants.SHORT_TIMEOUT) - second_connectivities = first_callback.connectivities() - connectivity_channel.subscribe(second_callback.update, try_to_connect=True) - third_connectivities = first_callback.block_until_connectivities_satisfy( - lambda connectivities: 2 <= len(connectivities)) - fourth_connectivities = second_callback.block_until_connectivities_satisfy( - bool) - # Wait for a connection that will happen (or may already have happened). - first_callback.block_until_connectivities_satisfy( - lambda connectivities: - interfaces.ChannelConnectivity.READY in connectivities) - second_callback.block_until_connectivities_satisfy( - lambda connectivities: - interfaces.ChannelConnectivity.READY in connectivities) - connectivity_channel.unsubscribe(first_callback.update) - connectivity_channel.unsubscribe(second_callback.update) - - server.shutdown() - server_completion_queue.shutdown() - server_completion_queue_thread.join() - - self.assertSequenceEqual( - (interfaces.ChannelConnectivity.IDLE,), first_connectivities) - self.assertSequenceEqual( - (interfaces.ChannelConnectivity.IDLE,), second_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.TRANSIENT_FAILURE, third_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.FATAL_FAILURE, third_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.TRANSIENT_FAILURE, - fourth_connectivities) - self.assertNotIn( - interfaces.ChannelConnectivity.FATAL_FAILURE, fourth_connectivities) - - def test_reachable_then_unreachable_channel_connectivity(self): - server_completion_queue = _low.CompletionQueue() - server = _low.Server(server_completion_queue, []) - port = server.add_http2_port('[::]:0') - server.start() - server_completion_queue_thread = threading.Thread( - target=_drive_completion_queue, args=(server_completion_queue,)) - server_completion_queue_thread.start() - low_channel = _low.Channel('localhost:%d' % port, ()) - callback = _Callback() - - connectivity_channel = _connectivity_channel.ConnectivityChannel( - low_channel) - connectivity_channel.subscribe(callback.update, try_to_connect=True) - callback.block_until_connectivities_satisfy( - lambda connectivities: - interfaces.ChannelConnectivity.READY in connectivities) - # Now take down the server and confirm that channel readiness is repudiated. - server.shutdown() - callback.block_until_connectivities_satisfy( - lambda connectivities: - connectivities[-1] is not interfaces.ChannelConnectivity.READY) - connectivity_channel.unsubscribe(callback.update) - - server.shutdown() - server_completion_queue.shutdown() - server_completion_queue_thread.join() - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/framework/_crust_over_core_face_interface_test.py b/src/python/grpcio/tests/unit/framework/_crust_over_core_face_interface_test.py deleted file mode 100644 index 43457be362..0000000000 --- a/src/python/grpcio/tests/unit/framework/_crust_over_core_face_interface_test.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
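The connectivity tests deleted above rely on a small callback object that records every connectivity update under a lock and blocks the test thread until the recorded values satisfy a predicate. A condensed sketch of that wait-for-predicate pattern, with illustrative names rather than the deleted _Callback class:

import threading

class PredicateCallback(object):
    """Records values and lets a caller block until they satisfy a predicate."""

    def __init__(self):
        self._condition = threading.Condition()
        self._values = []

    def update(self, value):
        # Called from the channel's threads; wakes any waiting test thread.
        with self._condition:
            self._values.append(value)
            self._condition.notify_all()

    def block_until_values_satisfy(self, predicate):
        # Re-evaluates the predicate on every update; blocks otherwise.
        with self._condition:
            while not predicate(tuple(self._values)):
                self._condition.wait()
            return tuple(self._values)

A test thread would call block_until_values_satisfy(lambda values: READY in values) while the channel under test invokes update from its own threads.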
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Tests Face interface compliance of the crust-over-core stack.""" - -import collections -import unittest - -import six - -from grpc.framework.core import implementations as core_implementations -from grpc.framework.crust import implementations as crust_implementations -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.links import utilities -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.face import test_cases -from tests.unit.framework.interfaces.face import test_interfaces -from tests.unit.framework.interfaces.links import test_utilities - - -class _Implementation(test_interfaces.Implementation): - - def instantiate( - self, methods, method_implementations, multi_method_implementation): - pool = logging_pool.pool(test_constants.POOL_SIZE) - servicer = crust_implementations.servicer( - method_implementations, multi_method_implementation, pool) - - service_end_link = core_implementations.service_end_link( - servicer, test_constants.DEFAULT_TIMEOUT, - test_constants.MAXIMUM_TIMEOUT) - invocation_end_link = core_implementations.invocation_end_link() - invocation_end_link.join_link(service_end_link) - service_end_link.join_link(invocation_end_link) - service_end_link.start() - invocation_end_link.start() - - generic_stub = crust_implementations.generic_stub(invocation_end_link, pool) - # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest. - group = next(iter(methods))[0] - # TODO(nathaniel): Add a "cardinalities_by_group" attribute to - # _digest.TestServiceDigest. 
- cardinalities = { - method: method_object.cardinality() - for (group, method), method_object in six.iteritems(methods)} - dynamic_stub = crust_implementations.dynamic_stub( - invocation_end_link, group, cardinalities, pool) - - return generic_stub, {group: dynamic_stub}, ( - invocation_end_link, service_end_link, pool) - - def destantiate(self, memo): - invocation_end_link, service_end_link, pool = memo - invocation_end_link.stop(0).wait() - service_end_link.stop(0).wait() - invocation_end_link.join_link(utilities.NULL_LINK) - service_end_link.join_link(utilities.NULL_LINK) - pool.shutdown(wait=True) - - def invocation_metadata(self): - return object() - - def initial_metadata(self): - return object() - - def terminal_metadata(self): - return object() - - def code(self): - return object() - - def details(self): - return object() - - def metadata_transmitted(self, original_metadata, transmitted_metadata): - return original_metadata is transmitted_metadata - - -def load_tests(loader, tests, pattern): - return unittest.TestSuite( - tests=tuple( - loader.loadTestsFromTestCase(test_case_class) - for test_case_class in test_cases.test_cases(_Implementation()))) - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/framework/core/_base_interface_test.py b/src/python/grpcio/tests/unit/framework/core/_base_interface_test.py deleted file mode 100644 index 1310292306..0000000000 --- a/src/python/grpcio/tests/unit/framework/core/_base_interface_test.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Tests the RPC Framework Core's implementation of the Base interface.""" - -import logging -import random -import time -import unittest - -from grpc.framework.core import implementations -from grpc.framework.interfaces.base import utilities -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.base import test_cases -from tests.unit.framework.interfaces.base import test_interfaces - - -class _Implementation(test_interfaces.Implementation): - - def __init__(self): - self._invocation_initial_metadata = object() - self._service_initial_metadata = object() - self._invocation_terminal_metadata = object() - self._service_terminal_metadata = object() - - def instantiate(self, serializations, servicer): - invocation = implementations.invocation_end_link() - service = implementations.service_end_link( - servicer, test_constants.DEFAULT_TIMEOUT, - test_constants.MAXIMUM_TIMEOUT) - invocation.join_link(service) - service.join_link(invocation) - return invocation, service, None - - def destantiate(self, memo): - pass - - def invocation_initial_metadata(self): - return self._invocation_initial_metadata - - def service_initial_metadata(self): - return self._service_initial_metadata - - def invocation_completion(self): - return utilities.completion(self._invocation_terminal_metadata, None, None) - - def service_completion(self): - return utilities.completion(self._service_terminal_metadata, None, None) - - def metadata_transmitted(self, original_metadata, transmitted_metadata): - return transmitted_metadata is original_metadata - - def completion_transmitted(self, original_completion, transmitted_completion): - return ( - (original_completion.terminal_metadata is - transmitted_completion.terminal_metadata) and - original_completion.code is transmitted_completion.code and - original_completion.message is transmitted_completion.message - ) - - -def load_tests(loader, tests, pattern): - return unittest.TestSuite( - tests=tuple( - loader.loadTestsFromTestCase(test_case_class) - for test_case_class in test_cases.test_cases(_Implementation()))) - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/framework/foundation/_later_test.py b/src/python/grpcio/tests/unit/framework/foundation/_later_test.py deleted file mode 100644 index 6c2459e185..0000000000 --- a/src/python/grpcio/tests/unit/framework/foundation/_later_test.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Tests of the later module.""" - -import threading -import time -import unittest - -from grpc.framework.foundation import later - -TICK = 0.1 - - -class LaterTest(unittest.TestCase): - - def test_simple_delay(self): - lock = threading.Lock() - cell = [0] - return_value = object() - - def computation(): - with lock: - cell[0] += 1 - return return_value - computation_future = later.later(TICK * 2, computation) - - self.assertFalse(computation_future.done()) - self.assertFalse(computation_future.cancelled()) - time.sleep(TICK) - self.assertFalse(computation_future.done()) - self.assertFalse(computation_future.cancelled()) - with lock: - self.assertEqual(0, cell[0]) - time.sleep(TICK * 2) - self.assertTrue(computation_future.done()) - self.assertFalse(computation_future.cancelled()) - with lock: - self.assertEqual(1, cell[0]) - self.assertEqual(return_value, computation_future.result()) - - def test_callback(self): - lock = threading.Lock() - cell = [0] - callback_called = [False] - future_passed_to_callback = [None] - def computation(): - with lock: - cell[0] += 1 - computation_future = later.later(TICK * 2, computation) - def callback(outcome): - with lock: - callback_called[0] = True - future_passed_to_callback[0] = outcome - computation_future.add_done_callback(callback) - time.sleep(TICK) - with lock: - self.assertFalse(callback_called[0]) - time.sleep(TICK * 2) - with lock: - self.assertTrue(callback_called[0]) - self.assertTrue(future_passed_to_callback[0].done()) - - callback_called[0] = False - future_passed_to_callback[0] = None - - computation_future.add_done_callback(callback) - with lock: - self.assertTrue(callback_called[0]) - self.assertTrue(future_passed_to_callback[0].done()) - - def test_cancel(self): - lock = threading.Lock() - cell = [0] - callback_called = [False] - future_passed_to_callback = [None] - def computation(): - with lock: - cell[0] += 1 - computation_future = later.later(TICK * 2, computation) - def callback(outcome): - with lock: - callback_called[0] = True - future_passed_to_callback[0] = outcome - computation_future.add_done_callback(callback) - time.sleep(TICK) - with lock: - self.assertFalse(callback_called[0]) - computation_future.cancel() - self.assertTrue(computation_future.cancelled()) - self.assertFalse(computation_future.running()) - self.assertTrue(computation_future.done()) - with lock: - self.assertTrue(callback_called[0]) - self.assertTrue(future_passed_to_callback[0].cancelled()) - - def test_result(self): - lock = threading.Lock() - cell = [0] - callback_called = [False] - future_passed_to_callback_cell = [None] - return_value = object() - - def computation(): - with lock: - cell[0] += 1 - return return_value - computation_future = later.later(TICK * 2, computation) - - def callback(future_passed_to_callback): - with lock: - callback_called[0] = True - future_passed_to_callback_cell[0] = future_passed_to_callback - computation_future.add_done_callback(callback) - returned_value = computation_future.result() - 
self.assertEqual(return_value, returned_value) - - # The callback may not yet have been called! Sleep a tick. - time.sleep(TICK) - with lock: - self.assertTrue(callback_called[0]) - self.assertEqual(return_value, future_passed_to_callback_cell[0].result()) - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/framework/interfaces/__init__.py b/src/python/grpcio/tests/unit/framework/interfaces/__init__.py deleted file mode 100644 index 7086519106..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/__init__.py b/src/python/grpcio/tests/unit/framework/interfaces/base/__init__.py deleted file mode 100644 index 7086519106..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/base/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
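The deleted _later_test.py exercises later.later(delay, computation): it returns a future that is not done before the delay, completes with the computation's return value afterwards, runs done-callbacks (immediately, if one is added after completion), and can be cancelled while still pending. A minimal sketch of that contract built on concurrent.futures and threading.Timer; this is an assumption for illustration, not the grpc.framework.foundation.later implementation:

import threading
from concurrent import futures

def later(delay, computation):
    """Return a future that runs `computation` after `delay` seconds."""
    future = futures.Future()

    def run():
        # If the future was cancelled before the timer fired, do nothing.
        if not future.set_running_or_notify_cancel():
            return
        try:
            future.set_result(computation())
        except Exception as error:  # pylint: disable=broad-except
            future.set_exception(error)

    timer = threading.Timer(delay, run)
    timer.start()
    return future

With this sketch, result() blocks until the timer fires, add_done_callback fires on completion or cancellation, and cancel() succeeds only while the computation has not yet started, which matches the behaviors the deleted tests assert.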
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/_control.py b/src/python/grpcio/tests/unit/framework/interfaces/base/_control.py deleted file mode 100644 index 0eb38abf22..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/base/_control.py +++ /dev/null @@ -1,570 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Part of the tests of the base interface of RPC Framework.""" - -from __future__ import division - -import abc -import collections -import enum -import random # pylint: disable=unused-import -import threading -import time - -import six - -from grpc.framework.interfaces.base import base -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.base import _sequence -from tests.unit.framework.interfaces.base import _state -from tests.unit.framework.interfaces.base import test_interfaces # pylint: disable=unused-import - -_GROUP = 'base test cases test group' -_METHOD = 'base test cases test method' - -_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE = test_constants.PAYLOAD_SIZE // 20 -_MINIMUM_PAYLOAD_SIZE = test_constants.PAYLOAD_SIZE // 600 - - -def _create_payload(randomness): - length = randomness.randint( - _MINIMUM_PAYLOAD_SIZE, test_constants.PAYLOAD_SIZE) - random_section_length = randomness.randint( - 0, min(_PAYLOAD_RANDOM_SECTION_MAXIMUM_SIZE, length)) - random_section = bytes( - bytearray( - randomness.getrandbits(8) for _ in range(random_section_length))) - sevens_section = b'\x07' * (length - random_section_length) - return b''.join(randomness.sample((random_section, sevens_section), 2)) - - -def _anything_in_flight(state): - return ( - state.invocation_initial_metadata_in_flight is not None or - state.invocation_payloads_in_flight or - state.invocation_completion_in_flight is not None or - state.service_initial_metadata_in_flight is not None or - state.service_payloads_in_flight or - state.service_completion_in_flight is not None or - 0 < state.invocation_allowance_in_flight or - 0 < state.service_allowance_in_flight - ) - - -def _verify_service_advance_and_update_state( - initial_metadata, payload, completion, allowance, state, implementation): - if initial_metadata is not None: - if state.invocation_initial_metadata_received: - return 'Later invocation initial metadata received: %s' % ( - initial_metadata,) - if state.invocation_payloads_received: - return 'Invocation initial metadata received after payloads: %s' % ( - state.invocation_payloads_received) - if state.invocation_completion_received: - return 'Invocation initial metadata received after invocation completion!' - if not implementation.metadata_transmitted( - state.invocation_initial_metadata_in_flight, initial_metadata): - return 'Invocation initial metadata maltransmitted: %s, %s' % ( - state.invocation_initial_metadata_in_flight, initial_metadata) - else: - state.invocation_initial_metadata_in_flight = None - state.invocation_initial_metadata_received = True - - if payload is not None: - if state.invocation_completion_received: - return 'Invocation payload received after invocation completion!' - elif not state.invocation_payloads_in_flight: - return 'Invocation payload "%s" received but not in flight!' % (payload,) - elif state.invocation_payloads_in_flight[0] != payload: - return 'Invocation payload mismatch: %s, %s' % ( - state.invocation_payloads_in_flight[0], payload) - elif state.service_side_invocation_allowance < 1: - return 'Disallowed invocation payload!' 
- else: - state.invocation_payloads_in_flight.pop(0) - state.invocation_payloads_received += 1 - state.service_side_invocation_allowance -= 1 - - if completion is not None: - if state.invocation_completion_received: - return 'Later invocation completion received: %s' % (completion,) - elif not implementation.completion_transmitted( - state.invocation_completion_in_flight, completion): - return 'Invocation completion maltransmitted: %s, %s' % ( - state.invocation_completion_in_flight, completion) - else: - state.invocation_completion_in_flight = None - state.invocation_completion_received = True - - if allowance is not None: - if allowance <= 0: - return 'Illegal allowance value: %s' % (allowance,) - else: - state.service_allowance_in_flight -= allowance - state.service_side_service_allowance += allowance - - -def _verify_invocation_advance_and_update_state( - initial_metadata, payload, completion, allowance, state, implementation): - if initial_metadata is not None: - if state.service_initial_metadata_received: - return 'Later service initial metadata received: %s' % (initial_metadata,) - if state.service_payloads_received: - return 'Service initial metadata received after service payloads: %s' % ( - state.service_payloads_received) - if state.service_completion_received: - return 'Service initial metadata received after service completion!' - if not implementation.metadata_transmitted( - state.service_initial_metadata_in_flight, initial_metadata): - return 'Service initial metadata maltransmitted: %s, %s' % ( - state.service_initial_metadata_in_flight, initial_metadata) - else: - state.service_initial_metadata_in_flight = None - state.service_initial_metadata_received = True - - if payload is not None: - if state.service_completion_received: - return 'Service payload received after service completion!' - elif not state.service_payloads_in_flight: - return 'Service payload "%s" received but not in flight!' % (payload,) - elif state.service_payloads_in_flight[0] != payload: - return 'Service payload mismatch: %s, %s' % ( - state.invocation_payloads_in_flight[0], payload) - elif state.invocation_side_service_allowance < 1: - return 'Disallowed service payload!' - else: - state.service_payloads_in_flight.pop(0) - state.service_payloads_received += 1 - state.invocation_side_service_allowance -= 1 - - if completion is not None: - if state.service_completion_received: - return 'Later service completion received: %s' % (completion,) - elif not implementation.completion_transmitted( - state.service_completion_in_flight, completion): - return 'Service completion maltransmitted: %s, %s' % ( - state.service_completion_in_flight, completion) - else: - state.service_completion_in_flight = None - state.service_completion_received = True - - if allowance is not None: - if allowance <= 0: - return 'Illegal allowance value: %s' % (allowance,) - else: - state.invocation_allowance_in_flight -= allowance - state.invocation_side_service_allowance += allowance - - -class Invocation( - collections.namedtuple( - 'Invocation', - ('group', 'method', 'subscription_kind', 'timeout', 'initial_metadata', - 'payload', 'completion',))): - """A description of operation invocation. - - Attributes: - group: The group identifier for the operation. - method: The method identifier for the operation. - subscription_kind: A base.Subscription.Kind value describing the kind of - subscription to use for the operation. - timeout: A duration in seconds to pass as the timeout value for the - operation. 
- initial_metadata: An object to pass as the initial metadata for the - operation or None. - payload: An object to pass as a payload value for the operation or None. - completion: An object to pass as a completion value for the operation or - None. - """ - - -class OnAdvance( - collections.namedtuple( - 'OnAdvance', - ('kind', 'initial_metadata', 'payload', 'completion', 'allowance'))): - """Describes action to be taken in a test in response to an advance call. - - Attributes: - kind: A Kind value describing the overall kind of response. - initial_metadata: An initial metadata value to pass to a call of the advance - method of the operator under test. Only valid if kind is Kind.ADVANCE and - may be None. - payload: A payload value to pass to a call of the advance method of the - operator under test. Only valid if kind is Kind.ADVANCE and may be None. - completion: A base.Completion value to pass to a call of the advance method - of the operator under test. Only valid if kind is Kind.ADVANCE and may be - None. - allowance: An allowance value to pass to a call of the advance method of the - operator under test. Only valid if kind is Kind.ADVANCE and may be None. - """ - - @enum.unique - class Kind(enum.Enum): - ADVANCE = 'advance' - DEFECT = 'defect' - IDLE = 'idle' - - -_DEFECT_ON_ADVANCE = OnAdvance(OnAdvance.Kind.DEFECT, None, None, None, None) -_IDLE_ON_ADVANCE = OnAdvance(OnAdvance.Kind.IDLE, None, None, None, None) - - -class Instruction( - collections.namedtuple( - 'Instruction', - ('kind', 'advance_args', 'advance_kwargs', 'conclude_success', - 'conclude_message', 'conclude_invocation_outcome_kind', - 'conclude_service_outcome_kind',))): - """""" - - @enum.unique - class Kind(enum.Enum): - ADVANCE = 'ADVANCE' - CANCEL = 'CANCEL' - CONCLUDE = 'CONCLUDE' - - -class Controller(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def failed(self, message): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def serialize_request(self, request): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_request(self, serialized_request): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def serialize_response(self, response): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def deserialize_response(self, serialized_response): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def invocation(self): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def poll(self): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def on_service_advance( - self, initial_metadata, payload, completion, allowance): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def on_invocation_advance( - self, initial_metadata, payload, completion, allowance): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def service_on_termination(self, outcome): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def invocation_on_termination(self, outcome): - """""" - raise NotImplementedError() - - -class ControllerCreator(six.with_metaclass(abc.ABCMeta)): - - @abc.abstractmethod - def name(self): - """""" - raise NotImplementedError() - - @abc.abstractmethod - def controller(self, implementation, randomness): - """""" - raise NotImplementedError() - - -class _Remainder( - collections.namedtuple( - '_Remainder', - ('invocation_payloads', 'service_payloads', 'invocation_completion', - 'service_completion',))): - """Describes work remaining to be done in a portion of a test. 
- - Attributes: - invocation_payloads: The number of payloads to be sent from the invocation - side of the operation to the service side of the operation. - service_payloads: The number of payloads to be sent from the service side of - the operation to the invocation side of the operation. - invocation_completion: Whether or not completion from the invocation side of - the operation should be indicated and has yet to be indicated. - service_completion: Whether or not completion from the service side of the - operation should be indicated and has yet to be indicated. - """ - - -class _SequenceController(Controller): - - def __init__(self, sequence, implementation, randomness): - """Constructor. - - Args: - sequence: A _sequence.Sequence describing the steps to be taken in the - test at a relatively high level. - implementation: A test_interfaces.Implementation encapsulating the - base interface implementation that is the system under test. - randomness: A random.Random instance for use in the test. - """ - self._condition = threading.Condition() - self._sequence = sequence - self._implementation = implementation - self._randomness = randomness - - self._until = None - self._remaining_elements = None - self._poll_next = None - self._message = None - - self._state = _state.OperationState() - self._todo = None - - # called with self._condition - def _failed(self, message): - self._message = message - self._condition.notify_all() - - def _passed(self, invocation_outcome, service_outcome): - self._poll_next = Instruction( - Instruction.Kind.CONCLUDE, None, None, True, None, invocation_outcome, - service_outcome) - self._condition.notify_all() - - def failed(self, message): - with self._condition: - self._failed(message) - - def serialize_request(self, request): - return request + request - - def deserialize_request(self, serialized_request): - return serialized_request[:len(serialized_request) // 2] - - def serialize_response(self, response): - return response * 3 - - def deserialize_response(self, serialized_response): - return serialized_response[2 * len(serialized_response) // 3:] - - def invocation(self): - with self._condition: - self._until = time.time() + self._sequence.maximum_duration - self._remaining_elements = list(self._sequence.elements) - if self._sequence.invocation.initial_metadata: - initial_metadata = self._implementation.invocation_initial_metadata() - self._state.invocation_initial_metadata_in_flight = initial_metadata - else: - initial_metadata = None - if self._sequence.invocation.payload: - payload = _create_payload(self._randomness) - self._state.invocation_payloads_in_flight.append(payload) - else: - payload = None - if self._sequence.invocation.complete: - completion = self._implementation.invocation_completion() - self._state.invocation_completion_in_flight = completion - else: - completion = None - return Invocation( - _GROUP, _METHOD, base.Subscription.Kind.FULL, - self._sequence.invocation.timeout, initial_metadata, payload, - completion) - - def poll(self): - with self._condition: - while True: - if self._message is not None: - return Instruction( - Instruction.Kind.CONCLUDE, None, None, False, self._message, None, - None) - elif self._poll_next: - poll_next = self._poll_next - self._poll_next = None - return poll_next - elif self._until < time.time(): - return Instruction( - Instruction.Kind.CONCLUDE, None, None, False, - 'overran allotted time!', None, None) - else: - self._condition.wait(timeout=self._until-time.time()) - - def on_service_advance( - self, 
initial_metadata, payload, completion, allowance): - with self._condition: - message = _verify_service_advance_and_update_state( - initial_metadata, payload, completion, allowance, self._state, - self._implementation) - if message is not None: - self._failed(message) - if self._todo is not None: - raise ValueError('TODO!!!') - elif _anything_in_flight(self._state): - return _IDLE_ON_ADVANCE - elif self._remaining_elements: - element = self._remaining_elements.pop(0) - if element.kind is _sequence.Element.Kind.SERVICE_TRANSMISSION: - if element.transmission.initial_metadata: - initial_metadata = self._implementation.service_initial_metadata() - self._state.service_initial_metadata_in_flight = initial_metadata - else: - initial_metadata = None - if element.transmission.payload: - payload = _create_payload(self._randomness) - self._state.service_payloads_in_flight.append(payload) - self._state.service_side_service_allowance -= 1 - else: - payload = None - if element.transmission.complete: - completion = self._implementation.service_completion() - self._state.service_completion_in_flight = completion - else: - completion = None - if (not self._state.invocation_completion_received and - 0 <= self._state.service_side_invocation_allowance): - allowance = 1 - self._state.service_side_invocation_allowance += 1 - self._state.invocation_allowance_in_flight += 1 - else: - allowance = None - return OnAdvance( - OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion, - allowance) - else: - raise ValueError('TODO!!!') - else: - return _IDLE_ON_ADVANCE - - def on_invocation_advance( - self, initial_metadata, payload, completion, allowance): - with self._condition: - message = _verify_invocation_advance_and_update_state( - initial_metadata, payload, completion, allowance, self._state, - self._implementation) - if message is not None: - self._failed(message) - if self._todo is not None: - raise ValueError('TODO!!!') - elif _anything_in_flight(self._state): - return _IDLE_ON_ADVANCE - elif self._remaining_elements: - element = self._remaining_elements.pop(0) - if element.kind is _sequence.Element.Kind.INVOCATION_TRANSMISSION: - if element.transmission.initial_metadata: - initial_metadata = self._implementation.invocation_initial_metadata() - self._state.invocation_initial_metadata_in_fight = initial_metadata - else: - initial_metadata = None - if element.transmission.payload: - payload = _create_payload(self._randomness) - self._state.invocation_payloads_in_flight.append(payload) - self._state.invocation_side_invocation_allowance -= 1 - else: - payload = None - if element.transmission.complete: - completion = self._implementation.invocation_completion() - self._state.invocation_completion_in_flight = completion - else: - completion = None - if (not self._state.service_completion_received and - 0 <= self._state.invocation_side_service_allowance): - allowance = 1 - self._state.invocation_side_service_allowance += 1 - self._state.service_allowance_in_flight += 1 - else: - allowance = None - return OnAdvance( - OnAdvance.Kind.ADVANCE, initial_metadata, payload, completion, - allowance) - else: - raise ValueError('TODO!!!') - else: - return _IDLE_ON_ADVANCE - - def service_on_termination(self, outcome): - with self._condition: - self._state.service_side_outcome = outcome - if self._todo is not None or self._remaining_elements: - self._failed('Premature service-side outcome %s!' 
% (outcome,)) - elif outcome.kind is not self._sequence.outcome_kinds.service: - self._failed( - 'Incorrect service-side outcome kind: %s should have been %s' % ( - outcome.kind, self._sequence.outcome_kinds.service)) - elif self._state.invocation_side_outcome is not None: - self._passed(self._state.invocation_side_outcome.kind, outcome.kind) - - def invocation_on_termination(self, outcome): - with self._condition: - self._state.invocation_side_outcome = outcome - if self._todo is not None or self._remaining_elements: - self._failed('Premature invocation-side outcome %s!' % (outcome,)) - elif outcome.kind is not self._sequence.outcome_kinds.invocation: - self._failed( - 'Incorrect invocation-side outcome kind: %s should have been %s' % ( - outcome.kind, self._sequence.outcome_kinds.invocation)) - elif self._state.service_side_outcome is not None: - self._passed(outcome.kind, self._state.service_side_outcome.kind) - - -class _SequenceControllerCreator(ControllerCreator): - - def __init__(self, sequence): - self._sequence = sequence - - def name(self): - return self._sequence.name - - def controller(self, implementation, randomness): - return _SequenceController(self._sequence, implementation, randomness) - - -CONTROLLER_CREATORS = tuple( - _SequenceControllerCreator(sequence) for sequence in _sequence.SEQUENCES) diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/_sequence.py b/src/python/grpcio/tests/unit/framework/interfaces/base/_sequence.py deleted file mode 100644 index 571d0e1e63..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/base/_sequence.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
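The _SequenceController deleted above pairs deliberately asymmetric serializers with slicing deserializers: a request is doubled on the wire and a response is tripled, so an implementation that accidentally skips (de)serialization is caught. A standalone sketch of that round-trip property (illustration only, not part of the deleted test suite):

def serialize_request(request):
    # Doubling makes an accidental identity "serialization" detectable.
    return request + request

def deserialize_request(serialized_request):
    # Recover the original request from its doubled form.
    return serialized_request[:len(serialized_request) // 2]

def serialize_response(response):
    # Tripling plays the same role on the response path.
    return response * 3

def deserialize_response(serialized_response):
    # Recover the original response from its tripled form.
    return serialized_response[2 * len(serialized_response) // 3:]

assert deserialize_request(serialize_request(b'abc')) == b'abc'
assert deserialize_response(serialize_response(b'xyz')) == b'xyz'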
- -"""Part of the tests of the base interface of RPC Framework.""" - -import collections -import enum - -from grpc.framework.interfaces.base import base -from tests.unit.framework.common import test_constants - - -class Invocation( - collections.namedtuple( - 'Invocation', ('timeout', 'initial_metadata', 'payload', 'complete',))): - """A recipe for operation invocation. - - Attributes: - timeout: A duration in seconds to pass to the system under test as the - operation's timeout value. - initial_metadata: A boolean indicating whether or not to pass initial - metadata when invoking the operation. - payload: A boolean indicating whether or not to pass a payload when - invoking the operation. - complete: A boolean indicating whether or not to indicate completion of - transmissions from the invoking side of the operation when invoking the - operation. - """ - - -class Transmission( - collections.namedtuple( - 'Transmission', ('initial_metadata', 'payload', 'complete',))): - """A recipe for a single transmission in an operation. - - Attributes: - initial_metadata: A boolean indicating whether or not to pass initial - metadata as part of the transmission. - payload: A boolean indicating whether or not to pass a payload as part of - the transmission. - complete: A boolean indicating whether or not to indicate completion of - transmission from the transmitting side of the operation as part of the - transmission. - """ - - -class Intertransmission( - collections.namedtuple('Intertransmission', ('invocation', 'service',))): - """A recipe for multiple transmissions in an operation. - - Attributes: - invocation: An integer describing the number of payloads to send from the - invocation side of the operation to the service side. - service: An integer describing the number of payloads to send from the - service side of the operation to the invocation side. - """ - - -class Element(collections.namedtuple('Element', ('kind', 'transmission',))): - """A sum type for steps to perform when testing an operation. - - Attributes: - kind: A Kind value describing the kind of step to perform in the test. - transmission: Only valid for kinds Kind.INVOCATION_TRANSMISSION and - Kind.SERVICE_TRANSMISSION, a Transmission value describing the details of - the transmission to be made. - """ - - @enum.unique - class Kind(enum.Enum): - INVOCATION_TRANSMISSION = 'invocation transmission' - SERVICE_TRANSMISSION = 'service transmission' - INTERTRANSMISSION = 'intertransmission' - INVOCATION_CANCEL = 'invocation cancel' - SERVICE_CANCEL = 'service cancel' - INVOCATION_FAILURE = 'invocation failure' - SERVICE_FAILURE = 'service failure' - - -class OutcomeKinds( - collections.namedtuple('Outcome', ('invocation', 'service',))): - """A description of the expected outcome of an operation test. - - Attributes: - invocation: The base.Outcome.Kind value expected on the invocation side of - the operation. - service: The base.Outcome.Kind value expected on the service side of the - operation. - """ - - -class Sequence( - collections.namedtuple( - 'Sequence', - ('name', 'maximum_duration', 'invocation', 'elements', - 'outcome_kinds',))): - """Describes at a high level steps to perform in a test. - - Attributes: - name: The string name of the sequence. - maximum_duration: A length of time in seconds to allow for the test before - declaring it to have failed. - invocation: An Invocation value describing how to invoke the operation - under test. 
- elements: A sequence of Element values describing at coarse granularity - actions to take during the operation under test. - outcome_kinds: An OutcomeKinds value describing the expected outcome kinds - of the test. - """ - -_EASY = Sequence( - 'Easy', - test_constants.TIME_ALLOWANCE, - Invocation(test_constants.LONG_TIMEOUT, True, True, True), - ( - Element( - Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, True)), - ), - OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED)) - -_PEASY = Sequence( - 'Peasy', - test_constants.TIME_ALLOWANCE, - Invocation(test_constants.LONG_TIMEOUT, True, True, False), - ( - Element( - Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, False)), - Element( - Element.Kind.INVOCATION_TRANSMISSION, - Transmission(False, True, True)), - Element( - Element.Kind.SERVICE_TRANSMISSION, Transmission(False, True, True)), - ), - OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED)) - - -# TODO(issue 2959): Finish this test suite. This tuple of sequences should -# contain at least the values in the Cartesian product of (half-duplex, -# full-duplex) * (zero payloads, one payload, test_constants.STREAM_LENGTH -# payloads) * (completion, cancellation, expiration, programming defect in -# servicer code). -SEQUENCES = ( - _EASY, - _PEASY, -) diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/_state.py b/src/python/grpcio/tests/unit/framework/interfaces/base/_state.py deleted file mode 100644 index 21cf33aeb6..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/base/_state.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
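The TODO above asks for more recipes in SEQUENCES. A hypothetical additional recipe (the name is invented for illustration and it is not part of the suite) that keeps the alternation the current _SequenceController supports, since its on_service_advance only handles SERVICE_TRANSMISSION elements and its on_invocation_advance only handles INVOCATION_TRANSMISSION elements:

_EXTENDED_PEASY_EXAMPLE = Sequence(
    'ExtendedPeasyExample',  # hypothetical name, not in SEQUENCES
    test_constants.TIME_ALLOWANCE,
    Invocation(test_constants.LONG_TIMEOUT, True, True, False),
    (
        Element(
            Element.Kind.SERVICE_TRANSMISSION, Transmission(True, True, False)),
        Element(
            Element.Kind.INVOCATION_TRANSMISSION,
            Transmission(False, True, False)),
        Element(
            Element.Kind.SERVICE_TRANSMISSION, Transmission(False, True, False)),
        Element(
            Element.Kind.INVOCATION_TRANSMISSION,
            Transmission(False, True, True)),
        Element(
            Element.Kind.SERVICE_TRANSMISSION, Transmission(False, True, True)),
    ),
    OutcomeKinds(base.Outcome.Kind.COMPLETED, base.Outcome.Kind.COMPLETED))

Appending such a recipe to SEQUENCES would automatically yield a new controller creator through CONTROLLER_CREATORS in _control.py and, through test_cases(), new synchronous and asynchronous test classes.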
- -"""Part of the tests of the base interface of RPC Framework.""" - - -class OperationState(object): - - def __init__(self): - self.invocation_initial_metadata_in_flight = None - self.invocation_initial_metadata_received = False - self.invocation_payloads_in_flight = [] - self.invocation_payloads_received = 0 - self.invocation_completion_in_flight = None - self.invocation_completion_received = False - self.service_initial_metadata_in_flight = None - self.service_initial_metadata_received = False - self.service_payloads_in_flight = [] - self.service_payloads_received = 0 - self.service_completion_in_flight = None - self.service_completion_received = False - self.invocation_side_invocation_allowance = 1 - self.invocation_side_service_allowance = 1 - self.service_side_invocation_allowance = 1 - self.service_side_service_allowance = 1 - self.invocation_allowance_in_flight = 0 - self.service_allowance_in_flight = 0 - self.invocation_side_outcome = None - self.service_side_outcome = None diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py b/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py deleted file mode 100644 index 4f8e26c9a2..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/base/test_cases.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Tests of the base interface of RPC Framework.""" - -import logging -import random -import threading -import time -import unittest - -from grpc.framework.foundation import logging_pool -from grpc.framework.interfaces.base import base -from grpc.framework.interfaces.base import utilities -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.base import _control -from tests.unit.framework.interfaces.base import test_interfaces - -_SYNCHRONICITY_VARIATION = (('Sync', False), ('Async', True)) - -_EMPTY_OUTCOME_KIND_DICT = { - outcome_kind: 0 for outcome_kind in base.Outcome.Kind} - - -class _Serialization(test_interfaces.Serialization): - - def serialize_request(self, request): - return request + request - - def deserialize_request(self, serialized_request): - return serialized_request[:len(serialized_request) / 2] - - def serialize_response(self, response): - return response * 3 - - def deserialize_response(self, serialized_response): - return serialized_response[2 * len(serialized_response) / 3:] - - -def _advance(quadruples, operator, controller): - try: - for quadruple in quadruples: - operator.advance( - initial_metadata=quadruple[0], payload=quadruple[1], - completion=quadruple[2], allowance=quadruple[3]) - except Exception as e: # pylint: disable=broad-except - controller.failed('Exception on advance: %e' % e) - - -class _Operator(base.Operator): - - def __init__(self, controller, on_advance, pool, operator_under_test): - self._condition = threading.Condition() - self._controller = controller - self._on_advance = on_advance - self._pool = pool - self._operator_under_test = operator_under_test - self._pending_advances = [] - - def set_operator_under_test(self, operator_under_test): - with self._condition: - self._operator_under_test = operator_under_test - pent_advances = self._pending_advances - self._pending_advances = [] - pool = self._pool - controller = self._controller - - if pool is None: - _advance(pent_advances, operator_under_test, controller) - else: - pool.submit(_advance, pent_advances, operator_under_test, controller) - - def advance( - self, initial_metadata=None, payload=None, completion=None, - allowance=None): - on_advance = self._on_advance( - initial_metadata, payload, completion, allowance) - if on_advance.kind is _control.OnAdvance.Kind.ADVANCE: - with self._condition: - pool = self._pool - operator_under_test = self._operator_under_test - controller = self._controller - - quadruple = ( - on_advance.initial_metadata, on_advance.payload, - on_advance.completion, on_advance.allowance) - if pool is None: - _advance((quadruple,), operator_under_test, controller) - else: - pool.submit(_advance, (quadruple,), operator_under_test, controller) - elif on_advance.kind is _control.OnAdvance.Kind.DEFECT: - raise ValueError( - 'Deliberately raised exception from Operator.advance (in a test)!') - - -class _ProtocolReceiver(base.ProtocolReceiver): - - def __init__(self): - self._condition = threading.Condition() - self._contexts = [] - - def context(self, protocol_context): - with self._condition: - self._contexts.append(protocol_context) - - -class _Servicer(base.Servicer): - """A base.Servicer with instrumented for testing.""" - - def __init__(self, group, method, controllers, pool): - self._condition = threading.Condition() - self._group = group - self._method = method - self._pool = pool - self._controllers = list(controllers) - - def service(self, group, method, context, output_operator): - with self._condition: - controller = 
self._controllers.pop(0) - if group != self._group or method != self._method: - controller.fail( - '%s != %s or %s != %s' % (group, self._group, method, self._method)) - raise base.NoSuchMethodError(None, None) - else: - operator = _Operator( - controller, controller.on_service_advance, self._pool, - output_operator) - outcome = context.add_termination_callback( - controller.service_on_termination) - if outcome is not None: - controller.service_on_termination(outcome) - return utilities.full_subscription(operator, _ProtocolReceiver()) - - -class _OperationTest(unittest.TestCase): - - def setUp(self): - if self._synchronicity_variation: - self._pool = logging_pool.pool(test_constants.POOL_SIZE) - else: - self._pool = None - self._controller = self._controller_creator.controller( - self._implementation, self._randomness) - - def tearDown(self): - if self._synchronicity_variation: - self._pool.shutdown(wait=True) - else: - self._pool = None - - def test_operation(self): - invocation = self._controller.invocation() - if invocation.subscription_kind is base.Subscription.Kind.FULL: - test_operator = _Operator( - self._controller, self._controller.on_invocation_advance, - self._pool, None) - subscription = utilities.full_subscription( - test_operator, _ProtocolReceiver()) - else: - # TODO(nathaniel): support and test other subscription kinds. - self.fail('Non-full subscriptions not yet supported!') - - servicer = _Servicer( - invocation.group, invocation.method, (self._controller,), self._pool) - - invocation_end, service_end, memo = self._implementation.instantiate( - {(invocation.group, invocation.method): _Serialization()}, servicer) - - try: - invocation_end.start() - service_end.start() - operation_context, operator_under_test = invocation_end.operate( - invocation.group, invocation.method, subscription, invocation.timeout, - initial_metadata=invocation.initial_metadata, payload=invocation.payload, - completion=invocation.completion) - test_operator.set_operator_under_test(operator_under_test) - outcome = operation_context.add_termination_callback( - self._controller.invocation_on_termination) - if outcome is not None: - self._controller.invocation_on_termination(outcome) - except Exception as e: # pylint: disable=broad-except - self._controller.failed('Exception on invocation: %s' % e) - self.fail(e) - - while True: - instruction = self._controller.poll() - if instruction.kind is _control.Instruction.Kind.ADVANCE: - try: - test_operator.advance( - *instruction.advance_args, **instruction.advance_kwargs) - except Exception as e: # pylint: disable=broad-except - self._controller.failed('Exception on instructed advance: %s' % e) - elif instruction.kind is _control.Instruction.Kind.CANCEL: - try: - operation_context.cancel() - except Exception as e: # pylint: disable=broad-except - self._controller.failed('Exception on cancel: %s' % e) - elif instruction.kind is _control.Instruction.Kind.CONCLUDE: - break - - invocation_stop_event = invocation_end.stop(0) - service_stop_event = service_end.stop(0) - invocation_stop_event.wait() - service_stop_event.wait() - invocation_stats = invocation_end.operation_stats() - service_stats = service_end.operation_stats() - - self._implementation.destantiate(memo) - - self.assertTrue( - instruction.conclude_success, msg=instruction.conclude_message) - - expected_invocation_stats = dict(_EMPTY_OUTCOME_KIND_DICT) - expected_invocation_stats[ - instruction.conclude_invocation_outcome_kind] += 1 - self.assertDictEqual(expected_invocation_stats, invocation_stats) - 
expected_service_stats = dict(_EMPTY_OUTCOME_KIND_DICT) - expected_service_stats[instruction.conclude_service_outcome_kind] += 1 - self.assertDictEqual(expected_service_stats, service_stats) - - -def test_cases(implementation): - """Creates unittest.TestCase classes for a given Base implementation. - - Args: - implementation: A test_interfaces.Implementation specifying creation and - destruction of the Base implementation under test. - - Returns: - A sequence of subclasses of unittest.TestCase defining tests of the - specified Base layer implementation. - """ - random_seed = hash(time.time()) - logging.warning('Random seed for this execution: %s', random_seed) - randomness = random.Random(x=random_seed) - - test_case_classes = [] - for synchronicity_variation in _SYNCHRONICITY_VARIATION: - for controller_creator in _control.CONTROLLER_CREATORS: - name = ''.join( - (synchronicity_variation[0], controller_creator.name(), 'Test',)) - test_case_classes.append( - type(name, (_OperationTest,), - {'_implementation': implementation, - '_randomness': randomness, - '_synchronicity_variation': synchronicity_variation[1], - '_controller_creator': controller_creator, - '__module__': implementation.__module__, - })) - - return test_case_classes diff --git a/src/python/grpcio/tests/unit/framework/interfaces/base/test_interfaces.py b/src/python/grpcio/tests/unit/framework/interfaces/base/test_interfaces.py deleted file mode 100644 index 5eba475ba8..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/base/test_interfaces.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Interfaces used in tests of implementations of the Base layer.""" - -import abc - -import six - -from grpc.framework.interfaces.base import base # pylint: disable=unused-import - - -class Serialization(six.with_metaclass(abc.ABCMeta)): - """Specifies serialization and deserialization of test payloads.""" - - def serialize_request(self, request): - """Serializes a request value used in a test. 
- - Args: - request: A request value created by a test. - - Returns: - A bytestring that is the serialization of the given request. - """ - raise NotImplementedError() - - def deserialize_request(self, serialized_request): - """Deserializes a request value used in a test. - - Args: - serialized_request: A bytestring that is the serialization of some request - used in a test. - - Returns: - The request value encoded by the given bytestring. - """ - raise NotImplementedError() - - def serialize_response(self, response): - """Serializes a response value used in a test. - - Args: - response: A response value created by a test. - - Returns: - A bytestring that is the serialization of the given response. - """ - raise NotImplementedError() - - def deserialize_response(self, serialized_response): - """Deserializes a response value used in a test. - - Args: - serialized_response: A bytestring that is the serialization of some - response used in a test. - - Returns: - The response value encoded by the given bytestring. - """ - raise NotImplementedError() - - -class Implementation(six.with_metaclass(abc.ABCMeta)): - """Specifies an implementation of the Base layer.""" - - @abc.abstractmethod - def instantiate(self, serializations, servicer): - """Instantiates the Base layer implementation to be used in a test. - - Args: - serializations: A dict from group-method pair to Serialization object - specifying how to serialize and deserialize payload values used in the - test. - servicer: A base.Servicer object to be called to service RPCs made during - the test. - - Returns: - A sequence of length three the first element of which is a - base.End to be used to invoke RPCs, the second element of which is a - base.End to be used to service invoked RPCs, and the third element of - which is an arbitrary memo object to be kept and passed to destantiate - at the conclusion of the test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def destantiate(self, memo): - """Destroys the Base layer implementation under test. - - Args: - memo: The object from the third position of the return value of a call to - instantiate. - """ - raise NotImplementedError() - - @abc.abstractmethod - def invocation_initial_metadata(self): - """Provides an operation's invocation-side initial metadata. - - Returns: - A value to use for an operation's invocation-side initial metadata, or - None. - """ - raise NotImplementedError() - - @abc.abstractmethod - def service_initial_metadata(self): - """Provides an operation's service-side initial metadata. - - Returns: - A value to use for an operation's service-side initial metadata, or - None. - """ - raise NotImplementedError() - - @abc.abstractmethod - def invocation_completion(self): - """Provides an operation's invocation-side completion. - - Returns: - A base.Completion to use for an operation's invocation-side completion. - """ - raise NotImplementedError() - - @abc.abstractmethod - def service_completion(self): - """Provides an operation's service-side completion. - - Returns: - A base.Completion to use for an operation's service-side completion. - """ - raise NotImplementedError() - - @abc.abstractmethod - def metadata_transmitted(self, original_metadata, transmitted_metadata): - """Identifies whether or not metadata was properly transmitted. - - Args: - original_metadata: A metadata value passed to the system under test. - transmitted_metadata: The same metadata value after having been - transmitted through the system under test. 
- - Returns: - Whether or not the metadata was properly transmitted. - """ - raise NotImplementedError() - - @abc.abstractmethod - def completion_transmitted(self, original_completion, transmitted_completion): - """Identifies whether or not a base.Completion was properly transmitted. - - Args: - original_completion: A base.Completion passed to the system under test. - transmitted_completion: The same completion value after having been - transmitted through the system under test. - - Returns: - Whether or not the completion was properly transmitted. - """ - raise NotImplementedError() diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_receiver.py b/src/python/grpcio/tests/unit/framework/interfaces/face/_receiver.py deleted file mode 100644 index 48f31fc677..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_receiver.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
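The metadata_transmitted hook above leaves the notion of "properly transmitted" to each Implementation; the links-layer assertMetadataTransmitted later in this diff spells it out as a containment check. One plausible concrete check, assuming the metadata values are mappings or sequences of key/value pairs (an assumption; this is not the check any implementation in the tree actually uses):

def metadata_transmitted(original_metadata, transmitted_metadata):
    """Returns True if every original key/value pair survived transmission."""
    if original_metadata is None:
        return True
    transmitted = dict(transmitted_metadata or ())
    return all(
        key in transmitted and transmitted[key] == value
        for key, value in dict(original_metadata).items())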
- -"""A utility useful in tests of asynchronous, event-driven interfaces.""" - -import threading - -from grpc.framework.interfaces.face import face - - -class Receiver(face.ResponseReceiver): - """A utility object useful in tests of asynchronous code.""" - - def __init__(self): - self._condition = threading.Condition() - self._initial_metadata = None - self._responses = [] - self._terminal_metadata = None - self._code = None - self._details = None - self._completed = False - self._abortion = None - - def abort(self, abortion): - with self._condition: - self._abortion = abortion - self._condition.notify_all() - - def initial_metadata(self, initial_metadata): - with self._condition: - self._initial_metadata = initial_metadata - - def response(self, response): - with self._condition: - self._responses.append(response) - - def complete(self, terminal_metadata, code, details): - with self._condition: - self._terminal_metadata = terminal_metadata - self._code = code - self._details = details - self._completed = True - self._condition.notify_all() - - def block_until_terminated(self): - with self._condition: - while self._abortion is None and not self._completed: - self._condition.wait() - - def unary_response(self): - with self._condition: - if self._abortion is not None: - raise AssertionError('Aborted: "{}"!'.format(self._abortion)) - elif len(self._responses) != 1: - raise AssertionError( - '%d responses received, not exactly one!', len(self._responses)) - else: - return self._responses[0] - - def stream_responses(self): - with self._condition: - if self._abortion is None: - return list(self._responses) - else: - raise AssertionError('Aborted: "{}"!'.format(self._abortion)) - - def abortion(self): - with self._condition: - return self._abortion diff --git a/src/python/grpcio/tests/unit/framework/interfaces/links/test_cases.py b/src/python/grpcio/tests/unit/framework/interfaces/links/test_cases.py deleted file mode 100644 index 608e64119e..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/links/test_cases.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Tests of the links interface of RPC Framework.""" - -# unittest is referenced from specification in this module. -import abc -import unittest # pylint: disable=unused-import - -import six - -from grpc.framework.interfaces.links import links -from tests.unit.framework.common import test_constants -from tests.unit.framework.interfaces.links import test_utilities - - -def at_least_n_payloads_received_predicate(n): - def predicate(ticket_sequence): - payload_count = 0 - for ticket in ticket_sequence: - if ticket.payload is not None: - payload_count += 1 - if n <= payload_count: - return True - else: - return False - return predicate - - -def terminated(ticket_sequence): - return ticket_sequence and ticket_sequence[-1].termination is not None - -_TRANSMISSION_GROUP = 'test.Group' -_TRANSMISSION_METHOD = 'TestMethod' - - -class TransmissionTest(six.with_metaclass(abc.ABCMeta)): - """Tests ticket transmission between two connected links. - - This class must be mixed into a unittest.TestCase that implements the abstract - methods it provides. - """ - - # This is a unittest.TestCase mix-in. - # pylint: disable=invalid-name - - @abc.abstractmethod - def create_transmitting_links(self): - """Creates two connected links for use in this test. - - Returns: - Two links.Links, the first of which will be used on the invocation side - of RPCs and the second of which will be used on the service side of - RPCs. - """ - raise NotImplementedError() - - @abc.abstractmethod - def destroy_transmitting_links(self, invocation_side_link, service_side_link): - """Destroys the two connected links created for this test. - - - Args: - invocation_side_link: The link used on the invocation side of RPCs in - this test. - service_side_link: The link used on the service side of RPCs in this - test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_invocation_initial_metadata(self): - """Creates a value for use as invocation-side initial metadata. - - Returns: - A metadata value appropriate for use as invocation-side initial metadata - or None if invocation-side initial metadata transmission is not - supported by the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_invocation_terminal_metadata(self): - """Creates a value for use as invocation-side terminal metadata. - - Returns: - A metadata value appropriate for use as invocation-side terminal - metadata or None if invocation-side terminal metadata transmission is - not supported by the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_service_initial_metadata(self): - """Creates a value for use as service-side initial metadata. - - Returns: - A metadata value appropriate for use as service-side initial metadata or - None if service-side initial metadata transmission is not supported by - the links under test. 
- """ - raise NotImplementedError() - - @abc.abstractmethod - def create_service_terminal_metadata(self): - """Creates a value for use as service-side terminal metadata. - - Returns: - A metadata value appropriate for use as service-side terminal metadata or - None if service-side terminal metadata transmission is not supported by - the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_invocation_completion(self): - """Creates values for use as invocation-side code and message. - - Returns: - An invocation-side code value and an invocation-side message value. - Either or both may be None if invocation-side code and/or - invocation-side message transmission is not supported by the links - under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def create_service_completion(self): - """Creates values for use as service-side code and message. - - Returns: - A service-side code value and a service-side message value. Either or - both may be None if service-side code and/or service-side message - transmission is not supported by the links under test. - """ - raise NotImplementedError() - - @abc.abstractmethod - def assertMetadataTransmitted(self, original_metadata, transmitted_metadata): - """Asserts that transmitted_metadata contains original_metadata. - - Args: - original_metadata: A metadata object used in this test. - transmitted_metadata: A metadata object obtained after transmission - through the system under test. - - Raises: - AssertionError: if the transmitted_metadata object does not contain - original_metadata. - """ - raise NotImplementedError() - - def group_and_method(self): - """Returns the group and method used in this test case. - - Returns: - A pair of the group and method used in this test case. - """ - return _TRANSMISSION_GROUP, _TRANSMISSION_METHOD - - def serialize_request(self, request): - """Serializes a request value used in this test case. - - Args: - request: A request value created by this test case. - - Returns: - A bytestring that is the serialization of the given request. - """ - return request - - def deserialize_request(self, serialized_request): - """Deserializes a request value used in this test case. - - Args: - serialized_request: A bytestring that is the serialization of some request - used in this test case. - - Returns: - The request value encoded by the given bytestring. - """ - return serialized_request - - def serialize_response(self, response): - """Serializes a response value used in this test case. - - Args: - response: A response value created by this test case. - - Returns: - A bytestring that is the serialization of the given response. - """ - return response - - def deserialize_response(self, serialized_response): - """Deserializes a response value used in this test case. - - Args: - serialized_response: A bytestring that is the serialization of some - response used in this test case. - - Returns: - The response value encoded by the given bytestring. 
- """ - return serialized_response - - def _assert_is_valid_metadata_payload_sequence( - self, ticket_sequence, payloads, initial_metadata, terminal_metadata): - initial_metadata_seen = False - seen_payloads = [] - terminal_metadata_seen = False - - for ticket in ticket_sequence: - if ticket.initial_metadata is not None: - self.assertFalse(initial_metadata_seen) - self.assertFalse(seen_payloads) - self.assertFalse(terminal_metadata_seen) - self.assertMetadataTransmitted(initial_metadata, ticket.initial_metadata) - initial_metadata_seen = True - - if ticket.payload is not None: - self.assertFalse(terminal_metadata_seen) - seen_payloads.append(ticket.payload) - - if ticket.terminal_metadata is not None: - self.assertFalse(terminal_metadata_seen) - self.assertMetadataTransmitted(terminal_metadata, ticket.terminal_metadata) - terminal_metadata_seen = True - self.assertSequenceEqual(payloads, seen_payloads) - - def _assert_is_valid_invocation_sequence( - self, ticket_sequence, group, method, payloads, initial_metadata, - terminal_metadata, termination): - self.assertLess(0, len(ticket_sequence)) - self.assertEqual(group, ticket_sequence[0].group) - self.assertEqual(method, ticket_sequence[0].method) - self._assert_is_valid_metadata_payload_sequence( - ticket_sequence, payloads, initial_metadata, terminal_metadata) - self.assertIs(termination, ticket_sequence[-1].termination) - - def _assert_is_valid_service_sequence( - self, ticket_sequence, payloads, initial_metadata, terminal_metadata, - code, message, termination): - self.assertLess(0, len(ticket_sequence)) - self._assert_is_valid_metadata_payload_sequence( - ticket_sequence, payloads, initial_metadata, terminal_metadata) - self.assertEqual(code, ticket_sequence[-1].code) - self.assertEqual(message, ticket_sequence[-1].message) - self.assertIs(termination, ticket_sequence[-1].termination) - - def setUp(self): - self._invocation_link, self._service_link = self.create_transmitting_links() - self._invocation_mate = test_utilities.RecordingLink() - self._service_mate = test_utilities.RecordingLink() - self._invocation_link.join_link(self._invocation_mate) - self._service_link.join_link(self._service_mate) - - def tearDown(self): - self.destroy_transmitting_links(self._invocation_link, self._service_link) - - def testSimplestRoundTrip(self): - """Tests transmission of one ticket in each direction.""" - invocation_operation_id = object() - invocation_payload = b'\x07' * 1023 - timeout = test_constants.LONG_TIMEOUT - invocation_initial_metadata = self.create_invocation_initial_metadata() - invocation_terminal_metadata = self.create_invocation_terminal_metadata() - invocation_code, invocation_message = self.create_invocation_completion() - service_payload = b'\x08' * 1025 - service_initial_metadata = self.create_service_initial_metadata() - service_terminal_metadata = self.create_service_terminal_metadata() - service_code, service_message = self.create_service_completion() - - original_invocation_ticket = links.Ticket( - invocation_operation_id, 0, _TRANSMISSION_GROUP, _TRANSMISSION_METHOD, - links.Ticket.Subscription.FULL, timeout, 0, invocation_initial_metadata, - invocation_payload, invocation_terminal_metadata, invocation_code, - invocation_message, links.Ticket.Termination.COMPLETION, None) - self._invocation_link.accept_ticket(original_invocation_ticket) - - self._service_mate.block_until_tickets_satisfy( - at_least_n_payloads_received_predicate(1)) - service_operation_id = self._service_mate.tickets()[0].operation_id - - 
self._service_mate.block_until_tickets_satisfy(terminated) - self._assert_is_valid_invocation_sequence( - self._service_mate.tickets(), _TRANSMISSION_GROUP, _TRANSMISSION_METHOD, - (invocation_payload,), invocation_initial_metadata, - invocation_terminal_metadata, links.Ticket.Termination.COMPLETION) - - original_service_ticket = links.Ticket( - service_operation_id, 0, None, None, links.Ticket.Subscription.FULL, - timeout, 0, service_initial_metadata, service_payload, - service_terminal_metadata, service_code, service_message, - links.Ticket.Termination.COMPLETION, None) - self._service_link.accept_ticket(original_service_ticket) - self._invocation_mate.block_until_tickets_satisfy(terminated) - self._assert_is_valid_service_sequence( - self._invocation_mate.tickets(), (service_payload,), - service_initial_metadata, service_terminal_metadata, service_code, - service_message, links.Ticket.Termination.COMPLETION) diff --git a/src/python/grpcio/tests/unit/framework/interfaces/links/test_utilities.py b/src/python/grpcio/tests/unit/framework/interfaces/links/test_utilities.py deleted file mode 100644 index 39c7f2fc63..0000000000 --- a/src/python/grpcio/tests/unit/framework/interfaces/links/test_utilities.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2015, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""State and behavior appropriate for use in tests.""" - -import logging -import threading -import time - -from grpc.framework.interfaces.links import links -from grpc.framework.interfaces.links import utilities - -# A more-or-less arbitrary limit on the length of raw data values to be logged. -_UNCOMFORTABLY_LONG = 48 - - -def _safe_for_log_ticket(ticket): - """Creates a safe-for-printing-to-the-log ticket for a given ticket. - - Args: - ticket: Any links.Ticket. - - Returns: - A links.Ticket that is as much as can be equal to the given ticket but - possibly features values like the string "<payload of length 972321>" in - place of the actual values of the given ticket. 
- """ - if isinstance(ticket.payload, (basestring,)): - payload_length = len(ticket.payload) - else: - payload_length = -1 - if payload_length < _UNCOMFORTABLY_LONG: - return ticket - else: - return links.Ticket( - ticket.operation_id, ticket.sequence_number, - ticket.group, ticket.method, ticket.subscription, ticket.timeout, - ticket.allowance, ticket.initial_metadata, - '<payload of length {}>'.format(payload_length), - ticket.terminal_metadata, ticket.code, ticket.message, - ticket.termination, None) - - -class RecordingLink(links.Link): - """A Link that records every ticket passed to it.""" - - def __init__(self): - self._condition = threading.Condition() - self._tickets = [] - - def accept_ticket(self, ticket): - with self._condition: - self._tickets.append(ticket) - self._condition.notify_all() - - def join_link(self, link): - pass - - def block_until_tickets_satisfy(self, predicate): - """Blocks until the received tickets satisfy the given predicate. - - Args: - predicate: A callable that takes a sequence of tickets and returns a - boolean value. - """ - with self._condition: - while not predicate(self._tickets): - self._condition.wait() - - def tickets(self): - """Returns a copy of the list of all tickets received by this Link.""" - with self._condition: - return tuple(self._tickets) - - -class _Pipe(object): - """A conduit that logs all tickets passed through it.""" - - def __init__(self, name): - self._lock = threading.Lock() - self._name = name - self._left_mate = utilities.NULL_LINK - self._right_mate = utilities.NULL_LINK - - def accept_left_to_right_ticket(self, ticket): - with self._lock: - logging.warning( - '%s: moving left to right through %s: %s', time.time(), self._name, - _safe_for_log_ticket(ticket)) - try: - self._right_mate.accept_ticket(ticket) - except Exception as e: # pylint: disable=broad-except - logging.exception(e) - - def accept_right_to_left_ticket(self, ticket): - with self._lock: - logging.warning( - '%s: moving right to left through %s: %s', time.time(), self._name, - _safe_for_log_ticket(ticket)) - try: - self._left_mate.accept_ticket(ticket) - except Exception as e: # pylint: disable=broad-except - logging.exception(e) - - def join_left_mate(self, left_mate): - with self._lock: - self._left_mate = utilities.NULL_LINK if left_mate is None else left_mate - - def join_right_mate(self, right_mate): - with self._lock: - self._right_mate = ( - utilities.NULL_LINK if right_mate is None else right_mate) - - -class _Facade(links.Link): - - def __init__(self, accept, join): - self._accept = accept - self._join = join - - def accept_ticket(self, ticket): - self._accept(ticket) - - def join_link(self, link): - self._join(link) - - -def logging_links(name): - """Creates a conduit that logs all tickets passed through it. - - Args: - name: A name to use for the conduit to identify itself in logging output. - - Returns: - Two links.Links, the first of which is the "left" side of the conduit - and the second of which is the "right" side of the conduit. 
- """ - pipe = _Pipe(name) - left_facade = _Facade(pipe.accept_left_to_right_ticket, pipe.join_left_mate) - right_facade = _Facade(pipe.accept_right_to_left_ticket, pipe.join_right_mate) - return left_facade, right_facade diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py index 631066f331..a7a59f6974 100644 --- a/src/python/grpcio_health_checking/health_commands.py +++ b/src/python/grpcio_health_checking/health_commands.py @@ -39,14 +39,13 @@ import sys import setuptools from setuptools.command import build_py -from setuptools.command import sdist ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__))) HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/health/v1/health.proto') -class BuildProtoModules(setuptools.Command): - """Command to generate project *_pb2.py modules from proto files.""" +class CopyProtoModules(setuptools.Command): + """Command to copy proto modules from grpc/src/proto.""" description = '' user_options = [] @@ -55,39 +54,6 @@ class BuildProtoModules(setuptools.Command): pass def finalize_options(self): - self.protoc_command = distutils.spawn.find_executable('protoc') - self.grpc_python_plugin_command = distutils.spawn.find_executable( - 'grpc_python_plugin') - - def run(self): - paths = [] - root_directory = os.getcwd() - for walk_root, directories, filenames in os.walk(root_directory): - for filename in filenames: - if filename.endswith('.proto'): - paths.append(os.path.join(walk_root, filename)) - command = [ - self.protoc_command, - '--plugin=protoc-gen-python-grpc={}'.format( - self.grpc_python_plugin_command), - '-I {}'.format(root_directory), - '--python_out={}'.format(root_directory), - '--python-grpc_out={}'.format(root_directory), - ] + paths - try: - subprocess.check_output(' '.join(command), cwd=root_directory, shell=True, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - raise Exception('{}\nOutput:\n{}'.format(e.message, e.output)) - - -class CopyProtoModules(setuptools.Command): - """Command to copy proto modules from grpc/src/proto.""" - - def initialize_options(self): - pass - - def finalize_options(self): pass def run(self): @@ -101,14 +67,5 @@ class BuildPy(build_py.build_py): """Custom project build command.""" def run(self): - self.run_command('copy_proto_modules') self.run_command('build_proto_modules') build_py.build_py.run(self) - - -class SDist(sdist.sdist): - """Custom project build command.""" - - def run(self): - self.run_command('copy_proto_modules') - sdist.sdist.run(self) diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py index d68a7ced8e..70b4575bf5 100644 --- a/src/python/grpcio_health_checking/setup.py +++ b/src/python/grpcio_health_checking/setup.py @@ -36,36 +36,44 @@ import sys from distutils import core as _core import setuptools +import grpc.tools.command + # Ensure we're in the proper directory whether or not we're being used by pip. os.chdir(os.path.dirname(os.path.abspath(__file__))) # Break import-style to ensure we can actually find our commands module. 
import health_commands -_PACKAGES = ( +PACKAGES = ( setuptools.find_packages('.') ) -_PACKAGE_DIRECTORIES = { +PACKAGE_DIRECTORIES = { '': '.', } -_INSTALL_REQUIRES = ( +SETUP_REQUIRES = ( + 'grpcio-tools>=0.14.0', +) + +INSTALL_REQUIRES = ( 'grpcio>=0.13.1', ) -_COMMAND_CLASS = { - 'copy_proto_modules': health_commands.CopyProtoModules, - 'build_proto_modules': health_commands.BuildProtoModules, +COMMAND_CLASS = { + # Run preprocess from the repository *before* doing any packaging! + 'preprocess': health_commands.CopyProtoModules, + + 'build_proto_modules': grpc.tools.command.BuildProtoModules, 'build_py': health_commands.BuildPy, - 'sdist': health_commands.SDist, } setuptools.setup( - name='grpcio_health_checking', - version='0.14.0b0', - packages=list(_PACKAGES), - package_dir=_PACKAGE_DIRECTORIES, - install_requires=_INSTALL_REQUIRES, - cmdclass=_COMMAND_CLASS + name='grpcio-health-checking', + version='0.14.0', + packages=list(PACKAGES), + package_dir=PACKAGE_DIRECTORIES, + install_requires=INSTALL_REQUIRES, + setup_requires=SETUP_REQUIRES, + cmdclass=COMMAND_CLASS ) diff --git a/src/python/grpcio_tests/.gitignore b/src/python/grpcio_tests/.gitignore new file mode 100644 index 0000000000..fc620135dc --- /dev/null +++ b/src/python/grpcio_tests/.gitignore @@ -0,0 +1,4 @@ +proto/ +src/ +*_pb2.py +*.egg-info/ diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py new file mode 100644 index 0000000000..171829b62f --- /dev/null +++ b/src/python/grpcio_tests/commands.py @@ -0,0 +1,217 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
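For orientation, a sketch of the setuptools machinery the new grpcio_health_checking setup.py above leans on (illustrative only, with hypothetical names; not part of the patch): setup_requires asks setuptools to fetch grpcio-tools before the commands run, cmdclass maps command names to command classes, and a subclassed build_py chains the extra step into the normal build.

import setuptools
from setuptools.command import build_py


class Preprocess(setuptools.Command):
    """Hypothetical pre-packaging step, analogous to 'preprocess' above."""

    description = 'gather files needed for packaging'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print('preprocessing...')


class BuildPy(build_py.build_py):
    """build_py that runs the extra step first, mirroring the patch's BuildPy."""

    def run(self):
        self.run_command('preprocess')
        build_py.build_py.run(self)


setuptools.setup(
    name='example-package',  # hypothetical distribution name
    version='0.0.0',
    packages=setuptools.find_packages('.'),
    cmdclass={'preprocess': Preprocess, 'build_py': BuildPy},
)

With this wiring, `python setup.py build_py` runs Preprocess.run() before the stock build_py logic, which is the same chaining pattern used throughout these setup files.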
+ +"""Provides distutils command classes for the gRPC Python setup process.""" + +import distutils +import glob +import os +import os.path +import platform +import re +import shutil +import subprocess +import sys +import traceback + +import setuptools +from setuptools.command import build_ext +from setuptools.command import build_py +from setuptools.command import easy_install +from setuptools.command import install +from setuptools.command import test + +PYTHON_STEM = os.path.dirname(os.path.abspath(__file__)) +GRPC_STEM = os.path.abspath(PYTHON_STEM + '../../../../') +GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto') +PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto') +PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src') + + +class CommandError(object): + pass + + +class GatherProto(setuptools.Command): + + description = 'gather proto dependencies' + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + # TODO(atash) ensure that we're running from the repository directory when + # this command is used + try: + shutil.rmtree(PROTO_STEM) + except Exception as error: + # We don't care if this command fails + pass + shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM) + for root, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL): + path = os.path.join(root, '__init__.py') + open(path, 'a').close() + + +class BuildProtoModules(setuptools.Command): + """Command to generate project *_pb2.py modules from proto files.""" + + description = 'build protobuf modules' + user_options = [ + ('include=', None, 'path patterns to include in protobuf generation'), + ('exclude=', None, 'path patterns to exclude from protobuf generation') + ] + + def initialize_options(self): + self.exclude = None + self.include = r'.*\.proto$' + + def finalize_options(self): + pass + + def run(self): + import grpc.tools.protoc as protoc + + include_regex = re.compile(self.include) + exclude_regex = re.compile(self.exclude) if self.exclude else None + paths = [] + for walk_root, directories, filenames in os.walk(PROTO_STEM): + for filename in filenames: + path = os.path.join(walk_root, filename) + if include_regex.match(path) and not ( + exclude_regex and exclude_regex.match(path)): + paths.append(path) + + # TODO(kpayson): It would be nice to do this in a batch command, + # but we currently have name conflicts in src/proto + for path in paths: + command = [ + 'grpc.tools.protoc', + '-I {}'.format(PROTO_STEM), + '--python_out={}'.format(PROTO_STEM), + '--grpc_python_out={}'.format(PROTO_STEM), + ] + [path] + if protoc.main(command) != 0: + sys.stderr.write( + 'warning: Command:\n{}\nFailed'.format( + command)) + + # Generated proto directories dont include __init__.py, but + # these are needed for python package resolution + for walk_root, _, _ in os.walk(PROTO_STEM): + path = os.path.join(walk_root, '__init__.py') + open(path, 'a').close() + + +class BuildPy(build_py.build_py): + """Custom project build command.""" + + def run(self): + try: + self.run_command('build_proto_modules') + except CommandError as error: + sys.stderr.write('warning: %s\n' % error.message) + build_py.build_py.run(self) + + +class TestLite(setuptools.Command): + """Command to run tests without fetching or building anything.""" + + description = 'run tests without fetching or building anything.' + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + # distutils requires this override. 
+ pass + + def run(self): + self._add_eggs_to_path() + + import tests + loader = tests.Loader() + loader.loadTestsFromNames(['tests']) + runner = tests.Runner() + result = runner.run(loader.suite) + if not result.wasSuccessful(): + sys.exit('Test failure') + + def _add_eggs_to_path(self): + """Fetch install and test requirements""" + self.distribution.fetch_build_eggs(self.distribution.install_requires) + self.distribution.fetch_build_eggs(self.distribution.tests_require) + + +class RunInterop(test.test): + + description = 'run interop test client/server' + user_options = [ + ('args=', 'a', 'pass-thru arguments for the client/server'), + ('client', 'c', 'flag indicating to run the client'), + ('server', 's', 'flag indicating to run the server') + ] + + def initialize_options(self): + self.args = '' + self.client = False + self.server = False + + def finalize_options(self): + if self.client and self.server: + raise DistutilsOptionError('you may only specify one of client or server') + + def run(self): + if self.distribution.install_requires: + self.distribution.fetch_build_eggs(self.distribution.install_requires) + if self.distribution.tests_require: + self.distribution.fetch_build_eggs(self.distribution.tests_require) + if self.client: + self.run_client() + elif self.server: + self.run_server() + + def run_server(self): + # We import here to ensure that our setuptools parent has had a chance to + # edit the Python system path. + from tests.interop import server + sys.argv[1:] = self.args.split() + server.serve() + + def run_client(self): + # We import here to ensure that our setuptools parent has had a chance to + # edit the Python system path. + from tests.interop import client + sys.argv[1:] = self.args.split() + client.test_interoperability() diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/__init__.py b/src/python/grpcio_tests/grpc_version.py index 7086519106..90f68a5741 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/__init__.py +++ b/src/python/grpcio_tests/grpc_version.py @@ -1,4 +1,4 @@ -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,4 +27,6 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!! +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py new file mode 100644 index 0000000000..7eef420bdb --- /dev/null +++ b/src/python/grpcio_tests/setup.py @@ -0,0 +1,124 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A setup module for the gRPC Python package.""" + +import os +import os.path +import shutil +import sys + +from distutils import core as _core +from distutils import extension as _extension +import setuptools +from setuptools.command import egg_info + +import grpc.tools.command + +PY3 = sys.version_info.major == 3 + +# Ensure we're in the proper directory whether or not we're being used by pip. +os.chdir(os.path.dirname(os.path.abspath(__file__))) + +# Break import-style to ensure we can actually find our in-repo dependencies. +import commands +import grpc_version + +LICENSE = '3-clause BSD' + +PACKAGE_DIRECTORIES = { + '': '.', +} + +INSTALL_REQUIRES = ( + 'coverage>=4.0', + 'enum34>=1.0.4', + 'futures>=2.2.0', + 'grpcio>=0.14.0', + 'grpcio-health-checking>=0.14.0', + 'oauth2client>=1.4.7', + 'protobuf>=3.0.0a3', + 'six>=1.10', +) + +SETUP_REQUIRES = ( + 'grpcio-tools>=0.14.0', +) + +COMMAND_CLASS = { + # Run `preprocess` *before* doing any packaging! + 'preprocess': commands.GatherProto, + + 'build_proto_modules': grpc.tools.command.BuildProtoModules, + 'build_py': commands.BuildPy, + 'run_interop': commands.RunInterop, + 'test_lite': commands.TestLite +} + +PACKAGE_DATA = { + 'tests.interop': [ + 'credentials/ca.pem', + 'credentials/server1.key', + 'credentials/server1.pem', + ], + 'tests.protoc_plugin': [ + 'protoc_plugin_test.proto', + ], + 'tests.unit': [ + 'credentials/ca.pem', + 'credentials/server1.key', + 'credentials/server1.pem', + ], + 'tests': [ + 'tests.json' + ], +} + +TEST_SUITE = 'tests' +TEST_LOADER = 'tests:Loader' +TEST_RUNNER = 'tests:Runner' +TESTS_REQUIRE = INSTALL_REQUIRES + +PACKAGES = setuptools.find_packages('.') + +setuptools.setup( + name='grpcio-tests', + version=grpc_version.VERSION, + license=LICENSE, + packages=list(PACKAGES), + package_dir=PACKAGE_DIRECTORIES, + package_data=PACKAGE_DATA, + install_requires=INSTALL_REQUIRES, + setup_requires=SETUP_REQUIRES, + cmdclass=COMMAND_CLASS, + tests_require=TESTS_REQUIRE, + test_suite=TEST_SUITE, + test_loader=TEST_LOADER, + test_runner=TEST_RUNNER, +) diff --git a/src/python/grpcio/tests/__init__.py b/src/python/grpcio_tests/tests/__init__.py index a70a1b1f1d..a70a1b1f1d 100644 --- a/src/python/grpcio/tests/__init__.py +++ b/src/python/grpcio_tests/tests/__init__.py diff --git a/src/python/grpcio/tests/_loader.py b/src/python/grpcio_tests/tests/_loader.py index c2f097f6c6..c2f097f6c6 100644 --- a/src/python/grpcio/tests/_loader.py +++ b/src/python/grpcio_tests/tests/_loader.py diff --git a/src/python/grpcio/tests/_result.py b/src/python/grpcio_tests/tests/_result.py index 1acec6a9b5..1acec6a9b5 100644 --- a/src/python/grpcio/tests/_result.py +++ b/src/python/grpcio_tests/tests/_result.py diff --git 
a/src/python/grpcio/tests/_runner.py b/src/python/grpcio_tests/tests/_runner.py index f0718573e2..926dcbe23a 100644 --- a/src/python/grpcio/tests/_runner.py +++ b/src/python/grpcio_tests/tests/_runner.py @@ -30,7 +30,6 @@ from __future__ import absolute_import import collections -import fcntl import multiprocessing import os import select @@ -178,15 +177,20 @@ class Runner(object): stderr_pipe.write_bypass( '\ninterrupted stderr:\n{}\n'.format(stderr_pipe.output().decode())) os._exit(1) - signal.signal(signal.SIGINT, sigint_handler) - signal.signal(signal.SIGSEGV, fault_handler) - signal.signal(signal.SIGBUS, fault_handler) - signal.signal(signal.SIGABRT, fault_handler) - signal.signal(signal.SIGFPE, fault_handler) - signal.signal(signal.SIGILL, fault_handler) + def try_set_handler(name, handler): + try: + signal.signal(getattr(signal, name), handler) + except AttributeError: + pass + try_set_handler('SIGINT', sigint_handler) + try_set_handler('SIGSEGV', fault_handler) + try_set_handler('SIGBUS', fault_handler) + try_set_handler('SIGABRT', fault_handler) + try_set_handler('SIGFPE', fault_handler) + try_set_handler('SIGILL', fault_handler) # Sometimes output will lag after a test has successfully finished; we # ignore such writes to our pipes. - signal.signal(signal.SIGPIPE, signal.SIG_IGN) + try_set_handler('SIGPIPE', signal.SIG_IGN) # Run the tests result.startTestRun() diff --git a/src/python/grpcio/tests/health_check/__init__.py b/src/python/grpcio_tests/tests/health_check/__init__.py index 100a624dc9..100a624dc9 100644 --- a/src/python/grpcio/tests/health_check/__init__.py +++ b/src/python/grpcio_tests/tests/health_check/__init__.py diff --git a/src/python/grpcio/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py index 1b63388663..1b63388663 100644 --- a/src/python/grpcio/tests/health_check/_health_servicer_test.py +++ b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py diff --git a/src/python/grpcio/grpc/_adapter/__init__.py b/src/python/grpcio_tests/tests/interop/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/grpc/_adapter/__init__.py +++ b/src/python/grpcio_tests/tests/interop/__init__.py diff --git a/src/python/grpcio/tests/interop/_insecure_interop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py index 91519b6fba..c753d6faf0 100644 --- a/src/python/grpcio/tests/interop/_insecure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py @@ -48,7 +48,7 @@ class InsecureInteropTest( port = self.server.add_insecure_port('[::]:0') self.server.start() self.stub = test_pb2.beta_create_TestService_stub( - implementations.insecure_channel('[::]', port)) + implementations.insecure_channel('localhost', port)) def tearDown(self): self.server.stop(0) diff --git a/src/python/grpcio/tests/interop/_interop_test_case.py b/src/python/grpcio_tests/tests/interop/_interop_test_case.py index ccea17a66d..ccea17a66d 100644 --- a/src/python/grpcio/tests/interop/_interop_test_case.py +++ b/src/python/grpcio_tests/tests/interop/_interop_test_case.py diff --git a/src/python/grpcio/tests/interop/_secure_interop_test.py b/src/python/grpcio_tests/tests/interop/_secure_interop_test.py index c61547b977..cb09f54a34 100644 --- a/src/python/grpcio/tests/interop/_secure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_secure_interop_test.py @@ -55,7 +55,7 @@ class SecureInteropTest( self.server.start() self.stub = 
test_pb2.beta_create_TestService_stub( test_utilities.not_really_secure_channel( - '[::]', port, implementations.ssl_channel_credentials( + 'localhost', port, implementations.ssl_channel_credentials( resources.test_root_certificates()), _SERVER_HOST_OVERRIDE)) diff --git a/src/python/grpcio/tests/interop/client.py b/src/python/grpcio_tests/tests/interop/client.py index e3d5545a02..8aa1ce30c1 100644 --- a/src/python/grpcio/tests/interop/client.py +++ b/src/python/grpcio_tests/tests/interop/client.py @@ -76,6 +76,9 @@ def _stub(args): creds = oauth2client_client.GoogleCredentials.get_application_default() scoped_creds = creds.create_scoped([args.oauth_scope]) call_creds = implementations.google_call_credentials(scoped_creds) + elif args.test_case == 'jwt_token_creds': + creds = oauth2client_client.GoogleCredentials.get_application_default() + call_creds = implementations.google_call_credentials(creds) else: call_creds = None if args.use_tls: diff --git a/src/python/grpcio/tests/interop/credentials/README b/src/python/grpcio_tests/tests/interop/credentials/README index cb20dcb49f..cb20dcb49f 100644 --- a/src/python/grpcio/tests/interop/credentials/README +++ b/src/python/grpcio_tests/tests/interop/credentials/README diff --git a/src/python/grpcio/tests/interop/credentials/ca.pem b/src/python/grpcio_tests/tests/interop/credentials/ca.pem index 6c8511a73c..6c8511a73c 100755 --- a/src/python/grpcio/tests/interop/credentials/ca.pem +++ b/src/python/grpcio_tests/tests/interop/credentials/ca.pem diff --git a/src/python/grpcio/tests/interop/credentials/server1.key b/src/python/grpcio_tests/tests/interop/credentials/server1.key index 143a5b8765..143a5b8765 100755 --- a/src/python/grpcio/tests/interop/credentials/server1.key +++ b/src/python/grpcio_tests/tests/interop/credentials/server1.key diff --git a/src/python/grpcio/tests/interop/credentials/server1.pem b/src/python/grpcio_tests/tests/interop/credentials/server1.pem index f3d43fcc5b..f3d43fcc5b 100755 --- a/src/python/grpcio/tests/interop/credentials/server1.pem +++ b/src/python/grpcio_tests/tests/interop/credentials/server1.pem diff --git a/src/python/grpcio/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py index d5ef0c68bb..97e6c9e27e 100644 --- a/src/python/grpcio/tests/interop/methods.py +++ b/src/python/grpcio_tests/tests/interop/methods.py @@ -39,6 +39,7 @@ import time from oauth2client import client as oauth2client_client +import grpc from grpc.beta import implementations from grpc.beta import interfaces from grpc.framework.common import cardinality @@ -57,12 +58,18 @@ class TestService(test_pb2.BetaTestServiceServicer): return empty_pb2.Empty() def UnaryCall(self, request, context): + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) return messages_pb2.SimpleResponse( payload=messages_pb2.Payload( type=messages_pb2.COMPRESSABLE, body=b'\x00' * request.response_size)) def StreamingOutputCall(self, request, context): + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) for response_parameters in request.response_parameters: yield messages_pb2.StreamingOutputCallResponse( payload=messages_pb2.Payload( @@ -79,10 +86,14 @@ class TestService(test_pb2.BetaTestServiceServicer): def FullDuplexCall(self, request_iterator, context): for request in request_iterator: - yield messages_pb2.StreamingOutputCallResponse( - payload=messages_pb2.Payload( - 
type=request.payload.type, - body=b'\x00' * request.response_parameters[0].size)) + if request.HasField('response_status'): + context.code(request.response_status.code) + context.details(request.response_status.message) + for response_parameters in request.response_parameters: + yield messages_pb2.StreamingOutputCallResponse( + payload=messages_pb2.Payload( + type=request.payload.type, + body=b'\x00' * response_parameters.size)) # NOTE(nathaniel): Apparently this is the same as the full-duplex call? # NOTE(atash): It isn't even called in the interop spec (Oct 22 2015)... @@ -288,6 +299,39 @@ def _empty_stream(stub): pass +def _status_code_and_message(stub): + with stub: + message = 'test status message' + code = 2 + status = grpc.StatusCode.UNKNOWN # code = 2 + request = messages_pb2.SimpleRequest( + response_type=messages_pb2.COMPRESSABLE, + response_size=1, + payload=messages_pb2.Payload(body=b'\x00'), + response_status=messages_pb2.EchoStatus(code=code, message=message) + ) + response_future = stub.UnaryCall.future(request, _TIMEOUT) + if response_future.code() != status: + raise ValueError( + 'expected code %s, got %s' % (status, response_future.code())) + if response_future.details() != message: + raise ValueError( + 'expected message %s, got %s' % (message, response_future.details())) + + request = messages_pb2.StreamingOutputCallRequest( + response_type=messages_pb2.COMPRESSABLE, + response_parameters=( + messages_pb2.ResponseParameters(size=1),), + response_status=messages_pb2.EchoStatus(code=code, message=message)) + response_iterator = stub.StreamingOutputCall(request, _TIMEOUT) + if response_future.code() != status: + raise ValueError( + 'expected code %s, got %s' % (status, response_iterator.code())) + if response_future.details() != message: + raise ValueError( + 'expected message %s, got %s' % (message, response_iterator.details())) + + def _compute_engine_creds(stub, args): response = _large_unary_common_behavior(stub, True, True) if args.default_service_account != response.username: @@ -310,6 +354,16 @@ def _oauth2_auth_token(stub, args): (response.oauth_scope, args.oauth_scope)) +def _jwt_token_creds(stub, args): + json_key_filename = os.environ[ + oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS] + wanted_email = json.load(open(json_key_filename, 'rb'))['client_email'] + response = _large_unary_common_behavior(stub, True, False) + if wanted_email != response.username: + raise ValueError( + 'expected username %s, got %s' % (wanted_email, response.username)) + + def _per_rpc_creds(stub, args): json_key_filename = os.environ[ oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS] @@ -336,8 +390,10 @@ class TestCase(enum.Enum): CANCEL_AFTER_BEGIN = 'cancel_after_begin' CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response' EMPTY_STREAM = 'empty_stream' + STATUS_CODE_AND_MESSAGE = 'status_code_and_message' COMPUTE_ENGINE_CREDS = 'compute_engine_creds' OAUTH2_AUTH_TOKEN = 'oauth2_auth_token' + JWT_TOKEN_CREDS = 'jwt_token_creds' PER_RPC_CREDS = 'per_rpc_creds' TIMEOUT_ON_SLEEPING_SERVER = 'timeout_on_sleeping_server' @@ -360,10 +416,14 @@ class TestCase(enum.Enum): _timeout_on_sleeping_server(stub) elif self is TestCase.EMPTY_STREAM: _empty_stream(stub) + elif self is TestCase.STATUS_CODE_AND_MESSAGE: + _status_code_and_message(stub) elif self is TestCase.COMPUTE_ENGINE_CREDS: _compute_engine_creds(stub, args) elif self is TestCase.OAUTH2_AUTH_TOKEN: _oauth2_auth_token(stub, args) + elif self is TestCase.JWT_TOKEN_CREDS: + _jwt_token_creds(stub, args) elif self is 
TestCase.PER_RPC_CREDS: _per_rpc_creds(stub, args) else: diff --git a/src/python/grpcio/tests/interop/resources.py b/src/python/grpcio_tests/tests/interop/resources.py index c424385cf6..c424385cf6 100644 --- a/src/python/grpcio/tests/interop/resources.py +++ b/src/python/grpcio_tests/tests/interop/resources.py diff --git a/src/python/grpcio/tests/interop/server.py b/src/python/grpcio_tests/tests/interop/server.py index ab2c3c708f..ab2c3c708f 100644 --- a/src/python/grpcio/tests/interop/server.py +++ b/src/python/grpcio_tests/tests/interop/server.py diff --git a/src/python/grpcio/grpc/_links/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/grpc/_links/__init__.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/__init__.py diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py new file mode 100644 index 0000000000..7ca2bcff38 --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py @@ -0,0 +1,493 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import collections +from concurrent import futures +import contextlib +import distutils.spawn +import errno +import os +import shutil +import subprocess +import sys +import tempfile +import threading +import unittest + +from six import moves + +import grpc +from tests.unit.framework.common import test_constants + +import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2 +import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2 +import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2 +import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2 + +# Identifiers of entities we expect to find in the generated module. 
+STUB_IDENTIFIER = 'TestServiceStub' +SERVICER_IDENTIFIER = 'TestServiceServicer' +ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server' + + +class _ServicerMethods(object): + + def __init__(self): + self._condition = threading.Condition() + self._paused = False + self._fail = False + + @contextlib.contextmanager + def pause(self): # pylint: disable=invalid-name + with self._condition: + self._paused = True + yield + with self._condition: + self._paused = False + self._condition.notify_all() + + @contextlib.contextmanager + def fail(self): # pylint: disable=invalid-name + with self._condition: + self._fail = True + yield + with self._condition: + self._fail = False + + def _control(self): # pylint: disable=invalid-name + with self._condition: + if self._fail: + raise ValueError() + while self._paused: + self._condition.wait() + + def UnaryCall(self, request, unused_rpc_context): + response = response_pb2.SimpleResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE + response.payload.payload_compressable = 'a' * request.response_size + self._control() + return response + + def StreamingOutputCall(self, request, unused_rpc_context): + for parameter in request.response_parameters: + response = response_pb2.StreamingOutputCallResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE + response.payload.payload_compressable = 'a' * parameter.size + self._control() + yield response + + def StreamingInputCall(self, request_iter, unused_rpc_context): + response = response_pb2.StreamingInputCallResponse() + aggregated_payload_size = 0 + for request in request_iter: + aggregated_payload_size += len(request.payload.payload_compressable) + response.aggregated_payload_size = aggregated_payload_size + self._control() + return response + + def FullDuplexCall(self, request_iter, unused_rpc_context): + for request in request_iter: + for parameter in request.response_parameters: + response = response_pb2.StreamingOutputCallResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE + response.payload.payload_compressable = 'a' * parameter.size + self._control() + yield response + + def HalfDuplexCall(self, request_iter, unused_rpc_context): + responses = [] + for request in request_iter: + for parameter in request.response_parameters: + response = response_pb2.StreamingOutputCallResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE + response.payload.payload_compressable = 'a' * parameter.size + self._control() + responses.append(response) + for response in responses: + yield response + + +class _Service( + collections.namedtuple( + '_Service', ('servicer_methods', 'server', 'stub',))): + """A live and running service. + + Attributes: + servicer_methods: The _ServicerMethods servicing RPCs. + server: The grpc.Server servicing RPCs. + stub: A stub on which to invoke RPCs. + """ + + +def _CreateService(): + """Provides a servicer backend and a stub. + + Returns: + A _Service with which to test RPCs. 
+ """ + servicer_methods = _ServicerMethods() + + class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)): + + def UnaryCall(self, request, context): + return servicer_methods.UnaryCall(request, context) + + def StreamingOutputCall(self, request, context): + return servicer_methods.StreamingOutputCall(request, context) + + def StreamingInputCall(self, request_iter, context): + return servicer_methods.StreamingInputCall(request_iter, context) + + def FullDuplexCall(self, request_iter, context): + return servicer_methods.FullDuplexCall(request_iter, context) + + def HalfDuplexCall(self, request_iter, context): + return servicer_methods.HalfDuplexCall(request_iter, context) + + server = grpc.server( + futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) + getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server) + port = server.add_insecure_port('[::]:0') + server.start() + channel = grpc.insecure_channel('localhost:{}'.format(port)) + stub = getattr(service_pb2, STUB_IDENTIFIER)(channel) + return _Service(servicer_methods, server, stub) + + +def _CreateIncompleteService(): + """Provides a servicer backend that fails to implement methods and its stub. + + Returns: + A _Service with which to test RPCs. The returned _Service's + servicer_methods implements none of the methods required of it. + """ + + class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)): + pass + + server = grpc.server( + futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) + getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server) + port = server.add_insecure_port('[::]:0') + server.start() + channel = grpc.insecure_channel('localhost:{}'.format(port)) + stub = getattr(service_pb2, STUB_IDENTIFIER)(channel) + return _Service(None, server, stub) + + +def _streaming_input_request_iterator(): + for _ in range(3): + request = request_pb2.StreamingInputCallRequest() + request.payload.payload_type = payload_pb2.COMPRESSABLE + request.payload.payload_compressable = 'a' + yield request + + +def _streaming_output_request(): + request = request_pb2.StreamingOutputCallRequest() + sizes = [1, 2, 3] + request.response_parameters.add(size=sizes[0], interval_us=0) + request.response_parameters.add(size=sizes[1], interval_us=0) + request.response_parameters.add(size=sizes[2], interval_us=0) + return request + + +def _full_duplex_request_iterator(): + request = request_pb2.StreamingOutputCallRequest() + request.response_parameters.add(size=1, interval_us=0) + yield request + request = request_pb2.StreamingOutputCallRequest() + request.response_parameters.add(size=2, interval_us=0) + request.response_parameters.add(size=3, interval_us=0) + yield request + + +class PythonPluginTest(unittest.TestCase): + """Test case for the gRPC Python protoc-plugin. + + While reading these tests, remember that the futures API + (`stub.method.future()`) only gives futures for the *response-unary* + methods and does not exist for response-streaming methods. + """ + + def testImportAttributes(self): + # check that we can access the generated module and its members. 
+ self.assertIsNotNone( + getattr(service_pb2, STUB_IDENTIFIER, None)) + self.assertIsNotNone( + getattr(service_pb2, SERVICER_IDENTIFIER, None)) + self.assertIsNotNone( + getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None)) + + def testUpDown(self): + service = _CreateService() + self.assertIsNotNone(service.servicer_methods) + self.assertIsNotNone(service.server) + self.assertIsNotNone(service.stub) + + def testIncompleteServicer(self): + service = _CreateIncompleteService() + request = request_pb2.SimpleRequest(response_size=13) + with self.assertRaises(grpc.RpcError) as exception_context: + service.stub.UnaryCall(request) + self.assertIs( + exception_context.exception.code(), grpc.StatusCode.UNIMPLEMENTED) + + def testUnaryCall(self): + service = _CreateService() + request = request_pb2.SimpleRequest(response_size=13) + response = service.stub.UnaryCall(request) + expected_response = service.servicer_methods.UnaryCall( + request, 'not a real context!') + self.assertEqual(expected_response, response) + + def testUnaryCallFuture(self): + service = _CreateService() + request = request_pb2.SimpleRequest(response_size=13) + # Check that the call does not block waiting for the server to respond. + with service.servicer_methods.pause(): + response_future = service.stub.UnaryCall.future(request) + response = response_future.result() + expected_response = service.servicer_methods.UnaryCall( + request, 'not a real RpcContext!') + self.assertEqual(expected_response, response) + + def testUnaryCallFutureExpired(self): + service = _CreateService() + request = request_pb2.SimpleRequest(response_size=13) + with service.servicer_methods.pause(): + response_future = service.stub.UnaryCall.future( + request, timeout=test_constants.SHORT_TIMEOUT) + with self.assertRaises(grpc.RpcError) as exception_context: + response_future.result() + self.assertIs( + exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED) + self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED) + + def testUnaryCallFutureCancelled(self): + service = _CreateService() + request = request_pb2.SimpleRequest(response_size=13) + with service.servicer_methods.pause(): + response_future = service.stub.UnaryCall.future(request) + response_future.cancel() + self.assertTrue(response_future.cancelled()) + self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED) + + def testUnaryCallFutureFailed(self): + service = _CreateService() + request = request_pb2.SimpleRequest(response_size=13) + with service.servicer_methods.fail(): + response_future = service.stub.UnaryCall.future(request) + self.assertIsNotNone(response_future.exception()) + self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN) + + def testStreamingOutputCall(self): + service = _CreateService() + request = _streaming_output_request() + responses = service.stub.StreamingOutputCall(request) + expected_responses = service.servicer_methods.StreamingOutputCall( + request, 'not a real RpcContext!') + for expected_response, response in moves.zip_longest( + expected_responses, responses): + self.assertEqual(expected_response, response) + + def testStreamingOutputCallExpired(self): + service = _CreateService() + request = _streaming_output_request() + with service.servicer_methods.pause(): + responses = service.stub.StreamingOutputCall( + request, timeout=test_constants.SHORT_TIMEOUT) + with self.assertRaises(grpc.RpcError) as exception_context: + list(responses) + self.assertIs( + exception_context.exception.code(), 
grpc.StatusCode.DEADLINE_EXCEEDED) + + def testStreamingOutputCallCancelled(self): + service = _CreateService() + request = _streaming_output_request() + responses = service.stub.StreamingOutputCall(request) + next(responses) + responses.cancel() + with self.assertRaises(grpc.RpcError) as exception_context: + next(responses) + self.assertIs(responses.code(), grpc.StatusCode.CANCELLED) + + def testStreamingOutputCallFailed(self): + service = _CreateService() + request = _streaming_output_request() + with service.servicer_methods.fail(): + responses = service.stub.StreamingOutputCall(request) + self.assertIsNotNone(responses) + with self.assertRaises(grpc.RpcError) as exception_context: + next(responses) + self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN) + + def testStreamingInputCall(self): + service = _CreateService() + response = service.stub.StreamingInputCall( + _streaming_input_request_iterator()) + expected_response = service.servicer_methods.StreamingInputCall( + _streaming_input_request_iterator(), + 'not a real RpcContext!') + self.assertEqual(expected_response, response) + + def testStreamingInputCallFuture(self): + service = _CreateService() + with service.servicer_methods.pause(): + response_future = service.stub.StreamingInputCall.future( + _streaming_input_request_iterator()) + response = response_future.result() + expected_response = service.servicer_methods.StreamingInputCall( + _streaming_input_request_iterator(), + 'not a real RpcContext!') + self.assertEqual(expected_response, response) + + def testStreamingInputCallFutureExpired(self): + service = _CreateService() + with service.servicer_methods.pause(): + response_future = service.stub.StreamingInputCall.future( + _streaming_input_request_iterator(), + timeout=test_constants.SHORT_TIMEOUT) + with self.assertRaises(grpc.RpcError) as exception_context: + response_future.result() + self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIs( + response_future.exception().code(), grpc.StatusCode.DEADLINE_EXCEEDED) + self.assertIs( + exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED) + + def testStreamingInputCallFutureCancelled(self): + service = _CreateService() + with service.servicer_methods.pause(): + response_future = service.stub.StreamingInputCall.future( + _streaming_input_request_iterator()) + response_future.cancel() + self.assertTrue(response_future.cancelled()) + with self.assertRaises(grpc.FutureCancelledError): + response_future.result() + + def testStreamingInputCallFutureFailed(self): + service = _CreateService() + with service.servicer_methods.fail(): + response_future = service.stub.StreamingInputCall.future( + _streaming_input_request_iterator()) + self.assertIsNotNone(response_future.exception()) + self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN) + + def testFullDuplexCall(self): + service = _CreateService() + responses = service.stub.FullDuplexCall( + _full_duplex_request_iterator()) + expected_responses = service.servicer_methods.FullDuplexCall( + _full_duplex_request_iterator(), + 'not a real RpcContext!') + for expected_response, response in moves.zip_longest( + expected_responses, responses): + self.assertEqual(expected_response, response) + + def testFullDuplexCallExpired(self): + request_iterator = _full_duplex_request_iterator() + service = _CreateService() + with service.servicer_methods.pause(): + responses = service.stub.FullDuplexCall( + request_iterator, timeout=test_constants.SHORT_TIMEOUT) + with 
self.assertRaises(grpc.RpcError) as exception_context: + list(responses) + self.assertIs( + exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED) + + def testFullDuplexCallCancelled(self): + service = _CreateService() + request_iterator = _full_duplex_request_iterator() + responses = service.stub.FullDuplexCall(request_iterator) + next(responses) + responses.cancel() + with self.assertRaises(grpc.RpcError) as exception_context: + next(responses) + self.assertIs( + exception_context.exception.code(), grpc.StatusCode.CANCELLED) + + def testFullDuplexCallFailed(self): + request_iterator = _full_duplex_request_iterator() + service = _CreateService() + with service.servicer_methods.fail(): + responses = service.stub.FullDuplexCall(request_iterator) + with self.assertRaises(grpc.RpcError) as exception_context: + next(responses) + self.assertIs(exception_context.exception.code(), grpc.StatusCode.UNKNOWN) + + def testHalfDuplexCall(self): + service = _CreateService() + def half_duplex_request_iterator(): + request = request_pb2.StreamingOutputCallRequest() + request.response_parameters.add(size=1, interval_us=0) + yield request + request = request_pb2.StreamingOutputCallRequest() + request.response_parameters.add(size=2, interval_us=0) + request.response_parameters.add(size=3, interval_us=0) + yield request + responses = service.stub.HalfDuplexCall(half_duplex_request_iterator()) + expected_responses = service.servicer_methods.HalfDuplexCall( + half_duplex_request_iterator(), 'not a real RpcContext!') + for expected_response, response in moves.zip_longest( + expected_responses, responses): + self.assertEqual(expected_response, response) + + def testHalfDuplexCallWedged(self): + condition = threading.Condition() + wait_cell = [False] + @contextlib.contextmanager + def wait(): # pylint: disable=invalid-name + # Where's Python 3's 'nonlocal' statement when you need it? 
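The comment above points at a standard Python 2 workaround used in this test: with no `nonlocal` statement available, a one-element list serves as a mutable cell that the inner function can update in place. A tiny self-contained illustration (not from the patch):

def make_counter():
    count = [0]            # mutable cell shared with the closure

    def increment():
        count[0] += 1      # mutate the cell; rebinding `count` would need nonlocal
        return count[0]

    return increment

counter = make_counter()
assert counter() == 1
assert counter() == 2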
+ with condition: + wait_cell[0] = True + yield + with condition: + wait_cell[0] = False + condition.notify_all() + def half_duplex_request_iterator(): + request = request_pb2.StreamingOutputCallRequest() + request.response_parameters.add(size=1, interval_us=0) + yield request + with condition: + while wait_cell[0]: + condition.wait() + service = _CreateService() + with wait(): + responses = service.stub.HalfDuplexCall( + half_duplex_request_iterator(), timeout=test_constants.SHORT_TIMEOUT) + # half-duplex waits for the client to send all info + with self.assertRaises(grpc.RpcError) as exception_context: + next(responses) + self.assertIs( + exception_context.exception.code(), grpc.StatusCode.DEADLINE_EXCEEDED) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/protoc_plugin/beta_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py index 7466f88059..1eba9c9354 100644 --- a/src/python/grpcio/tests/protoc_plugin/beta_python_plugin_test.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py @@ -50,6 +50,11 @@ from grpc.framework.foundation import future from grpc.framework.interfaces.face import face from tests.unit.framework.common import test_constants +import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2 +import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2 +import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2 +import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2 + # Identifiers of entities we expect to find in the generated module. SERVICER_IDENTIFIER = 'BetaTestServiceServicer' STUB_IDENTIFIER = 'BetaTestServiceStub' @@ -59,12 +64,10 @@ STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub' class _ServicerMethods(object): - def __init__(self, response_pb2, payload_pb2): + def __init__(self): self._condition = threading.Condition() self._paused = False self._fail = False - self._response_pb2 = response_pb2 - self._payload_pb2 = payload_pb2 @contextlib.contextmanager def pause(self): # pylint: disable=invalid-name @@ -91,22 +94,22 @@ class _ServicerMethods(object): self._condition.wait() def UnaryCall(self, request, unused_rpc_context): - response = self._response_pb2.SimpleResponse() - response.payload.payload_type = self._payload_pb2.COMPRESSABLE + response = response_pb2.SimpleResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * request.response_size self._control() return response def StreamingOutputCall(self, request, unused_rpc_context): for parameter in request.response_parameters: - response = self._response_pb2.StreamingOutputCallResponse() - response.payload.payload_type = self._payload_pb2.COMPRESSABLE + response = response_pb2.StreamingOutputCallResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * parameter.size self._control() yield response def StreamingInputCall(self, request_iter, unused_rpc_context): - response = self._response_pb2.StreamingInputCallResponse() + response = response_pb2.StreamingInputCallResponse() aggregated_payload_size = 0 for request in request_iter: aggregated_payload_size += len(request.payload.payload_compressable) @@ -117,8 +120,8 @@ class _ServicerMethods(object): def FullDuplexCall(self, request_iter, unused_rpc_context): for request in request_iter: for parameter in request.response_parameters: - response = 
self._response_pb2.StreamingOutputCallResponse() - response.payload.payload_type = self._payload_pb2.COMPRESSABLE + response = response_pb2.StreamingOutputCallResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * parameter.size self._control() yield response @@ -127,8 +130,8 @@ class _ServicerMethods(object): responses = [] for request in request_iter: for parameter in request.response_parameters: - response = self._response_pb2.StreamingOutputCallResponse() - response.payload.payload_type = self._payload_pb2.COMPRESSABLE + response = response_pb2.StreamingOutputCallResponse() + response.payload.payload_type = payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * parameter.size self._control() responses.append(response) @@ -137,23 +140,18 @@ class _ServicerMethods(object): @contextlib.contextmanager -def _CreateService(service_pb2, response_pb2, payload_pb2): +def _CreateService(): """Provides a servicer backend and a stub. The servicer is just the implementation of the actual servicer passed to the face player of the python RPC implementation; the two are detached. - Args: - service_pb2: The service_pb2 module generated by this test. - response_pb2: The response_pb2 module generated by this test - payload_pb2: The payload_pb2 module generated by this test - Yields: A (servicer_methods, stub) pair where servicer_methods is the back-end of the service bound to the stub and and stub is the stub on which to invoke RPCs. """ - servicer_methods = _ServicerMethods(response_pb2, payload_pb2) + servicer_methods = _ServicerMethods() class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)): @@ -183,7 +181,7 @@ def _CreateService(service_pb2, response_pb2, payload_pb2): @contextlib.contextmanager -def _CreateIncompleteService(service_pb2): +def _CreateIncompleteService(): """Provides a servicer backend that fails to implement methods and its stub. The servicer is just the implementation of the actual servicer passed to the @@ -209,7 +207,7 @@ def _CreateIncompleteService(service_pb2): server.stop(0) -def _streaming_input_request_iterator(request_pb2, payload_pb2): +def _streaming_input_request_iterator(): for _ in range(3): request = request_pb2.StreamingInputCallRequest() request.payload.payload_type = payload_pb2.COMPRESSABLE @@ -217,7 +215,7 @@ def _streaming_input_request_iterator(request_pb2, payload_pb2): yield request -def _streaming_output_request(request_pb2): +def _streaming_output_request(): request = request_pb2.StreamingOutputCallRequest() sizes = [1, 2, 3] request.response_parameters.add(size=sizes[0], interval_us=0) @@ -226,7 +224,7 @@ def _streaming_output_request(request_pb2): return request -def _full_duplex_request_iterator(request_pb2): +def _full_duplex_request_iterator(): request = request_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=1, interval_us=0) yield request @@ -244,101 +242,39 @@ class PythonPluginTest(unittest.TestCase): methods and does not exist for response-streaming methods. """ - def setUp(self): - # Assume that the appropriate protoc and grpc_python_plugins are on the - # path. - protoc_command = 'protoc' - protoc_plugin_filename = distutils.spawn.find_executable( - 'grpc_python_plugin') - if not os.path.isfile(protoc_command): - # Assume that if we haven't built protoc that it's on the system. - protoc_command = 'protoc' - - # Ensure that the output directory exists. 
- self.outdir = tempfile.mkdtemp() - - # Find all proto files - paths = [] - root_dir = os.path.dirname(os.path.realpath(__file__)) - proto_dir = os.path.join(root_dir, 'protos') - for walk_root, _, filenames in os.walk(proto_dir): - for filename in filenames: - if filename.endswith('.proto'): - path = os.path.join(walk_root, filename) - paths.append(path) - - # Invoke protoc with the plugin. - cmd = [ - protoc_command, - '--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename, - '-I %s' % root_dir, - '--python_out=%s' % self.outdir, - '--python-grpc_out=%s' % self.outdir - ] + paths - subprocess.check_call(' '.join(cmd), shell=True, env=os.environ, - cwd=os.path.dirname(os.path.realpath(__file__))) - - # Generated proto directories dont include __init__.py, but - # these are needed for python package resolution - for walk_root, _, _ in os.walk(os.path.join(self.outdir, 'protos')): - path = os.path.join(walk_root, '__init__.py') - open(path, 'a').close() - - sys.path.insert(0, self.outdir) - - import protos.payload.test_payload_pb2 as payload_pb2 # pylint: disable=g-import-not-at-top - import protos.requests.r.test_requests_pb2 as request_pb2 # pylint: disable=g-import-not-at-top - import protos.responses.test_responses_pb2 as response_pb2 # pylint: disable=g-import-not-at-top - import protos.service.test_service_pb2 as service_pb2 # pylint: disable=g-import-not-at-top - self._payload_pb2 = payload_pb2 - self._request_pb2 = request_pb2 - self._response_pb2 = response_pb2 - self._service_pb2 = service_pb2 - - def tearDown(self): - try: - shutil.rmtree(self.outdir) - except OSError as exc: - if exc.errno != errno.ENOENT: - raise - sys.path.remove(self.outdir) - def testImportAttributes(self): # check that we can access the generated module and its members. 
self.assertIsNotNone( - getattr(self._service_pb2, SERVICER_IDENTIFIER, None)) + getattr(service_pb2, SERVICER_IDENTIFIER, None)) self.assertIsNotNone( - getattr(self._service_pb2, STUB_IDENTIFIER, None)) + getattr(service_pb2, STUB_IDENTIFIER, None)) self.assertIsNotNone( - getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None)) + getattr(service_pb2, SERVER_FACTORY_IDENTIFIER, None)) self.assertIsNotNone( - getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None)) + getattr(service_pb2, STUB_FACTORY_IDENTIFIER, None)) def testUpDown(self): - with _CreateService( - self._service_pb2, self._response_pb2, self._payload_pb2): - self._request_pb2.SimpleRequest(response_size=13) + with _CreateService(): + request_pb2.SimpleRequest(response_size=13) def testIncompleteServicer(self): - with _CreateIncompleteService(self._service_pb2) as (_, stub): - request = self._request_pb2.SimpleRequest(response_size=13) + with _CreateIncompleteService() as (_, stub): + request = request_pb2.SimpleRequest(response_size=13) try: stub.UnaryCall(request, test_constants.LONG_TIMEOUT) except face.AbortionError as error: self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED, error.code) def testUnaryCall(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = self._request_pb2.SimpleRequest(response_size=13) + with _CreateService() as (methods, stub): + request = request_pb2.SimpleRequest(response_size=13) response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT) expected_response = methods.UnaryCall(request, 'not a real context!') self.assertEqual(expected_response, response) def testUnaryCallFuture(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = self._request_pb2.SimpleRequest(response_size=13) + with _CreateService() as (methods, stub): + request = request_pb2.SimpleRequest(response_size=13) # Check that the call does not block waiting for the server to respond. 
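As background for the .future()-based assertions in these tests (an illustrative aside, not part of the patch): the result/exception/cancel behaviour being exercised closely mirrors the standard concurrent.futures contract, for example:

from concurrent import futures
import time


def slow_add(a, b):
    time.sleep(0.1)
    return a + b


with futures.ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(slow_add, 2, 3)
    assert future.result() == 5        # blocks until the value is ready
    assert future.exception() is None  # completed without raising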
with methods.pause(): response_future = stub.UnaryCall.future( @@ -348,9 +284,8 @@ class PythonPluginTest(unittest.TestCase): self.assertEqual(expected_response, response) def testUnaryCallFutureExpired(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = self._request_pb2.SimpleRequest(response_size=13) + with _CreateService() as (methods, stub): + request = request_pb2.SimpleRequest(response_size=13) with methods.pause(): response_future = stub.UnaryCall.future( request, test_constants.SHORT_TIMEOUT) @@ -358,27 +293,24 @@ class PythonPluginTest(unittest.TestCase): response_future.result() def testUnaryCallFutureCancelled(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = self._request_pb2.SimpleRequest(response_size=13) + with _CreateService() as (methods, stub): + request = request_pb2.SimpleRequest(response_size=13) with methods.pause(): response_future = stub.UnaryCall.future(request, 1) response_future.cancel() self.assertTrue(response_future.cancelled()) def testUnaryCallFutureFailed(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = self._request_pb2.SimpleRequest(response_size=13) + with _CreateService() as (methods, stub): + request = request_pb2.SimpleRequest(response_size=13) with methods.fail(): response_future = stub.UnaryCall.future( request, test_constants.LONG_TIMEOUT) self.assertIsNotNone(response_future.exception()) def testStreamingOutputCall(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = _streaming_output_request(self._request_pb2) + with _CreateService() as (methods, stub): + request = _streaming_output_request() responses = stub.StreamingOutputCall( request, test_constants.LONG_TIMEOUT) expected_responses = methods.StreamingOutputCall( @@ -388,9 +320,8 @@ class PythonPluginTest(unittest.TestCase): self.assertEqual(expected_response, response) def testStreamingOutputCallExpired(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = _streaming_output_request(self._request_pb2) + with _CreateService() as (methods, stub): + request = _streaming_output_request() with methods.pause(): responses = stub.StreamingOutputCall( request, test_constants.SHORT_TIMEOUT) @@ -398,9 +329,8 @@ class PythonPluginTest(unittest.TestCase): list(responses) def testStreamingOutputCallCancelled(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = _streaming_output_request(self._request_pb2) + with _CreateService() as (methods, stub): + request = _streaming_output_request() responses = stub.StreamingOutputCall( request, test_constants.LONG_TIMEOUT) next(responses) @@ -409,9 +339,8 @@ class PythonPluginTest(unittest.TestCase): next(responses) def testStreamingOutputCallFailed(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request = _streaming_output_request(self._request_pb2) + with _CreateService() as (methods, stub): + request = _streaming_output_request() with methods.fail(): responses = stub.StreamingOutputCall(request, 1) self.assertIsNotNone(responses) @@ -419,38 +348,32 @@ class PythonPluginTest(unittest.TestCase): next(responses) def testStreamingInputCall(self): - with _CreateService(self._service_pb2, 
self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): response = stub.StreamingInputCall( - _streaming_input_request_iterator( - self._request_pb2, self._payload_pb2), + _streaming_input_request_iterator(), test_constants.LONG_TIMEOUT) expected_response = methods.StreamingInputCall( - _streaming_input_request_iterator(self._request_pb2, self._payload_pb2), + _streaming_input_request_iterator(), 'not a real RpcContext!') self.assertEqual(expected_response, response) def testStreamingInputCallFuture(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): with methods.pause(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator( - self._request_pb2, self._payload_pb2), + _streaming_input_request_iterator(), test_constants.LONG_TIMEOUT) response = response_future.result() expected_response = methods.StreamingInputCall( - _streaming_input_request_iterator(self._request_pb2, self._payload_pb2), + _streaming_input_request_iterator(), 'not a real RpcContext!') self.assertEqual(expected_response, response) def testStreamingInputCallFutureExpired(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): with methods.pause(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator( - self._request_pb2, self._payload_pb2), + _streaming_input_request_iterator(), test_constants.SHORT_TIMEOUT) with self.assertRaises(face.ExpirationError): response_future.result() @@ -458,12 +381,10 @@ class PythonPluginTest(unittest.TestCase): response_future.exception(), face.ExpirationError) def testStreamingInputCallFutureCancelled(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): with methods.pause(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator( - self._request_pb2, self._payload_pb2), + _streaming_input_request_iterator(), test_constants.LONG_TIMEOUT) response_future.cancel() self.assertTrue(response_future.cancelled()) @@ -471,32 +392,28 @@ class PythonPluginTest(unittest.TestCase): response_future.result() def testStreamingInputCallFutureFailed(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): with methods.fail(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator( - self._request_pb2, self._payload_pb2), + _streaming_input_request_iterator(), test_constants.LONG_TIMEOUT) self.assertIsNotNone(response_future.exception()) def testFullDuplexCall(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): responses = stub.FullDuplexCall( - _full_duplex_request_iterator(self._request_pb2), + _full_duplex_request_iterator(), test_constants.LONG_TIMEOUT) expected_responses = methods.FullDuplexCall( - _full_duplex_request_iterator(self._request_pb2), + _full_duplex_request_iterator(), 'not a real RpcContext!') for expected_response, response in moves.zip_longest( expected_responses, responses): self.assertEqual(expected_response, response) def testFullDuplexCallExpired(self): - request_iterator = _full_duplex_request_iterator(self._request_pb2) - with 
_CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + request_iterator = _full_duplex_request_iterator() + with _CreateService() as (methods, stub): with methods.pause(): responses = stub.FullDuplexCall( request_iterator, test_constants.SHORT_TIMEOUT) @@ -504,9 +421,8 @@ class PythonPluginTest(unittest.TestCase): list(responses) def testFullDuplexCallCancelled(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): - request_iterator = _full_duplex_request_iterator(self._request_pb2) + with _CreateService() as (methods, stub): + request_iterator = _full_duplex_request_iterator() responses = stub.FullDuplexCall( request_iterator, test_constants.LONG_TIMEOUT) next(responses) @@ -515,9 +431,8 @@ class PythonPluginTest(unittest.TestCase): next(responses) def testFullDuplexCallFailed(self): - request_iterator = _full_duplex_request_iterator(self._request_pb2) - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + request_iterator = _full_duplex_request_iterator() + with _CreateService() as (methods, stub): with methods.fail(): responses = stub.FullDuplexCall( request_iterator, test_constants.LONG_TIMEOUT) @@ -526,13 +441,12 @@ class PythonPluginTest(unittest.TestCase): next(responses) def testHalfDuplexCall(self): - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): def half_duplex_request_iterator(): - request = self._request_pb2.StreamingOutputCallRequest() + request = request_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=1, interval_us=0) yield request - request = self._request_pb2.StreamingOutputCallRequest() + request = request_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=2, interval_us=0) request.response_parameters.add(size=3, interval_us=0) yield request @@ -557,14 +471,13 @@ class PythonPluginTest(unittest.TestCase): wait_cell[0] = False condition.notify_all() def half_duplex_request_iterator(): - request = self._request_pb2.StreamingOutputCallRequest() + request = request_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=1, interval_us=0) yield request with condition: while wait_cell[0]: condition.wait() - with _CreateService(self._service_pb2, self._response_pb2, - self._payload_pb2) as (methods, stub): + with _CreateService() as (methods, stub): with wait(): responses = stub.HalfDuplexCall( half_duplex_request_iterator(), test_constants.SHORT_TIMEOUT) @@ -574,5 +487,4 @@ class PythonPluginTest(unittest.TestCase): if __name__ == '__main__': - #os.chdir(os.path.dirname(sys.argv[0])) unittest.main(verbosity=2) diff --git a/src/python/grpcio/grpc/framework/core/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/grpc/framework/core/__init__.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/__init__.py diff --git a/src/python/grpcio/grpc/framework/crust/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/grpc/framework/crust/__init__.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/__init__.py diff --git a/src/python/grpcio/tests/protoc_plugin/protos/payload/test_payload.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/test_payload.proto index 457543aa79..457543aa79 
100644 --- a/src/python/grpcio/tests/protoc_plugin/protos/payload/test_payload.proto +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/payload/test_payload.proto diff --git a/src/python/grpcio/grpc/framework/interfaces/links/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/grpc/framework/interfaces/links/__init__.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/__init__.py diff --git a/src/python/grpcio/tests/interop/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/interop/__init__.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/__init__.py diff --git a/src/python/grpcio/tests/protoc_plugin/protos/requests/r/test_requests.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/test_requests.proto index 54105df6a5..365ae738e1 100644 --- a/src/python/grpcio/tests/protoc_plugin/protos/requests/r/test_requests.proto +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/requests/r/test_requests.proto @@ -29,7 +29,7 @@ syntax = "proto3"; -import "protos/payload/test_payload.proto"; +import "tests/protoc_plugin/protos/payload/test_payload.proto"; package grpc_protoc_plugin; diff --git a/src/python/grpcio/tests/protoc_plugin/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/protoc_plugin/__init__.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/__init__.py diff --git a/src/python/grpcio/tests/protoc_plugin/protos/responses/test_responses.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/test_responses.proto index 734fbda86e..1d54d58db1 100644 --- a/src/python/grpcio/tests/protoc_plugin/protos/responses/test_responses.proto +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/responses/test_responses.proto @@ -29,7 +29,7 @@ syntax = "proto3"; -import "protos/payload/test_payload.proto"; +import "tests/protoc_plugin/protos/payload/test_payload.proto"; package grpc_protoc_plugin; diff --git a/src/python/grpcio/tests/unit/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/__init__.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/__init__.py diff --git a/src/python/grpcio/tests/protoc_plugin/protos/service/test_service.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/test_service.proto index fe715ee7f9..003dbbb9eb 100644 --- a/src/python/grpcio/tests/protoc_plugin/protos/service/test_service.proto +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/service/test_service.proto @@ -29,8 +29,8 @@ syntax = "proto3"; -import "protos/requests/r/test_requests.proto"; -import "protos/responses/test_responses.proto"; +import "tests/protoc_plugin/protos/requests/r/test_requests.proto"; +import "tests/protoc_plugin/protos/responses/test_responses.proto"; package grpc_protoc_plugin; diff --git a/src/python/grpcio/tests/qps/__init__.py b/src/python/grpcio_tests/tests/qps/__init__.py index 100a624dc9..100a624dc9 100644 --- a/src/python/grpcio/tests/qps/__init__.py +++ b/src/python/grpcio_tests/tests/qps/__init__.py diff --git a/src/python/grpcio/tests/qps/benchmark_client.py b/src/python/grpcio_tests/tests/qps/benchmark_client.py index b372ea01ad..83b46c914e 100644 
--- a/src/python/grpcio/tests/qps/benchmark_client.py +++ b/src/python/grpcio_tests/tests/qps/benchmark_client.py @@ -30,24 +30,30 @@ """Defines test client behaviors (UNARY/STREAMING) (SYNC/ASYNC).""" import abc +import threading import time -try: - import Queue as queue # Python 2.x -except ImportError: - import queue # Python 3 from concurrent import futures +from six.moves import queue -from grpc.beta import implementations -from grpc.framework.interfaces.face import face +import grpc from src.proto.grpc.testing import messages_pb2 from src.proto.grpc.testing import services_pb2 from tests.unit import resources -from tests.unit.beta import test_utilities +from tests.unit import test_common _TIMEOUT = 60 * 60 * 24 +class GenericStub(object): + + def __init__(self, channel): + self.UnaryCall = channel.unary_unary( + '/grpc.testing.BenchmarkService/UnaryCall') + self.StreamingCall = channel.stream_stream( + '/grpc.testing.BenchmarkService/StreamingCall') + + class BenchmarkClient: """Benchmark client interface that exposes a non-blocking send_request().""" @@ -55,19 +61,23 @@ class BenchmarkClient: def __init__(self, server, config, hist): # Create the stub - host, port = server.split(':') - port = int(port) if config.HasField('security_params'): - creds = implementations.ssl_channel_credentials( - resources.test_root_certificates()) - channel = test_utilities.not_really_secure_channel( - host, port, creds, config.security_params.server_host_override) + creds = grpc.ssl_channel_credentials(resources.test_root_certificates()) + channel = test_common.test_secure_channel( + server, creds, config.security_params.server_host_override) else: - channel = implementations.insecure_channel(host, port) + channel = grpc.insecure_channel(server) + + connected_event = threading.Event() + def wait_for_ready(connectivity): + if connectivity == grpc.ChannelConnectivity.READY: + connected_event.set() + channel.subscribe(wait_for_ready, try_to_connect=True) + connected_event.wait() if config.payload_config.WhichOneof('payload') == 'simple_params': self._generic = False - self._stub = services_pb2.beta_create_BenchmarkService_stub(channel) + self._stub = services_pb2.BenchmarkServiceStub(channel) payload = messages_pb2.Payload( body='\0' * config.payload_config.simple_params.req_size) self._request = messages_pb2.SimpleRequest( @@ -75,13 +85,14 @@ class BenchmarkClient: response_size=config.payload_config.simple_params.resp_size) else: self._generic = True - self._stub = implementations.generic_stub(channel) + self._stub = GenericStub(channel) self._request = '\0' * config.payload_config.bytebuf_params.req_size self._hist = hist self._response_callbacks = [] def add_response_callback(self, callback): + """callback will be invoked as callback(client, query_time)""" self._response_callbacks.append(callback) @abc.abstractmethod @@ -95,10 +106,10 @@ class BenchmarkClient: def stop(self): pass - def _handle_response(self, query_time): + def _handle_response(self, client, query_time): self._hist.add(query_time * 1e9) # Report times in nanoseconds for callback in self._response_callbacks: - callback(query_time) + callback(client, query_time) class UnarySyncBenchmarkClient(BenchmarkClient): @@ -121,7 +132,7 @@ class UnarySyncBenchmarkClient(BenchmarkClient): start_time = time.time() self._stub.UnaryCall(self._request, _TIMEOUT) end_time = time.time() - self._handle_response(end_time - start_time) + self._handle_response(self, end_time - start_time) class UnaryAsyncBenchmarkClient(BenchmarkClient): @@ -136,19 +147,20 
@@ class UnaryAsyncBenchmarkClient(BenchmarkClient): def _response_received(self, start_time, resp): resp.result() end_time = time.time() - self._handle_response(end_time - start_time) + self._handle_response(self, end_time - start_time) def stop(self): self._stub = None -class StreamingSyncBenchmarkClient(BenchmarkClient): +class _SyncStream(object): - def __init__(self, server, config, hist): - super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist) + def __init__(self, stub, generic, request, handle_response): + self._stub = stub + self._generic = generic + self._request = request + self._handle_response = handle_response self._is_streaming = False - self._pool = futures.ThreadPoolExecutor(max_workers=1) - # Use a thread-safe queue to put requests on the stream self._request_queue = queue.Queue() self._send_time_queue = queue.Queue() @@ -158,25 +170,14 @@ class StreamingSyncBenchmarkClient(BenchmarkClient): def start(self): self._is_streaming = True - self._pool.submit(self._request_stream) + response_stream = self._stub.StreamingCall( + self._request_generator(), _TIMEOUT) + for _ in response_stream: + self._handle_response( + self, time.time() - self._send_time_queue.get_nowait()) def stop(self): self._is_streaming = False - self._pool.shutdown(wait=True) - self._stub = None - - def _request_stream(self): - self._is_streaming = True - if self._generic: - stream_callable = self._stub.stream_stream( - 'grpc.testing.BenchmarkService', 'StreamingCall') - else: - stream_callable = self._stub.StreamingCall - - response_stream = stream_callable(self._request_generator(), _TIMEOUT) - for _ in response_stream: - end_time = time.time() - self._handle_response(end_time - self._send_time_queue.get_nowait()) def _request_generator(self): while self._is_streaming: @@ -187,46 +188,28 @@ class StreamingSyncBenchmarkClient(BenchmarkClient): pass -class AsyncReceiver(face.ResponseReceiver): - """Receiver for async stream responses.""" - - def __init__(self, send_time_queue, response_handler): - self._send_time_queue = send_time_queue - self._response_handler = response_handler - - def initial_metadata(self, initial_mdetadata): - pass - - def response(self, response): - end_time = time.time() - self._response_handler(end_time - self._send_time_queue.get_nowait()) - - def complete(self, terminal_metadata, code, details): - pass - - -class StreamingAsyncBenchmarkClient(BenchmarkClient): +class StreamingSyncBenchmarkClient(BenchmarkClient): def __init__(self, server, config, hist): - super(StreamingAsyncBenchmarkClient, self).__init__(server, config, hist) - self._send_time_queue = queue.Queue() - self._receiver = AsyncReceiver(self._send_time_queue, self._handle_response) - self._rendezvous = None + super(StreamingSyncBenchmarkClient, self).__init__(server, config, hist) + self._pool = futures.ThreadPoolExecutor( + max_workers=config.outstanding_rpcs_per_channel) + self._streams = [_SyncStream(self._stub, self._generic, + self._request, self._handle_response) + for _ in xrange(config.outstanding_rpcs_per_channel)] + self._curr_stream = 0 def send_request(self): - if self._rendezvous is not None: - self._send_time_queue.put(time.time()) - self._rendezvous.consume(self._request) + # Use a round_robin scheduler to determine what stream to send on + self._streams[self._curr_stream].send_request() + self._curr_stream = (self._curr_stream + 1) % len(self._streams) def start(self): - if self._generic: - stream_callable = self._stub.stream_stream( - 'grpc.testing.BenchmarkService', 
'StreamingCall') - else: - stream_callable = self._stub.StreamingCall - self._rendezvous = stream_callable.event( - self._receiver, lambda *args: None, _TIMEOUT) + for stream in self._streams: + self._pool.submit(stream.start) def stop(self): - self._rendezvous.terminate() - self._rendezvous = None + for stream in self._streams: + stream.stop() + self._pool.shutdown(wait=True) + self._stub = None diff --git a/src/python/grpcio/tests/qps/benchmark_server.py b/src/python/grpcio_tests/tests/qps/benchmark_server.py index 8cbf480d58..2b76b810cd 100644 --- a/src/python/grpcio/tests/qps/benchmark_server.py +++ b/src/python/grpcio_tests/tests/qps/benchmark_server.py @@ -31,7 +31,7 @@ from src.proto.grpc.testing import messages_pb2 from src.proto.grpc.testing import services_pb2 -class BenchmarkServer(services_pb2.BetaBenchmarkServiceServicer): +class BenchmarkServer(services_pb2.BenchmarkServiceServicer): """Synchronous Server implementation for the Benchmark service.""" def UnaryCall(self, request, context): @@ -44,7 +44,7 @@ class BenchmarkServer(services_pb2.BetaBenchmarkServiceServicer): yield messages_pb2.SimpleResponse(payload=payload) -class GenericBenchmarkServer(services_pb2.BetaBenchmarkServiceServicer): +class GenericBenchmarkServer(services_pb2.BenchmarkServiceServicer): """Generic Server implementation for the Benchmark service.""" def __init__(self, resp_size): diff --git a/src/python/grpcio/tests/qps/client_runner.py b/src/python/grpcio_tests/tests/qps/client_runner.py index 1ede7d2af1..1fd58687ad 100644 --- a/src/python/grpcio/tests/qps/client_runner.py +++ b/src/python/grpcio_tests/tests/qps/client_runner.py @@ -34,7 +34,7 @@ ClientRunner invokes either periodically or in response to some event. """ import abc -import thread +import threading import time @@ -61,15 +61,18 @@ class OpenLoopClientRunner(ClientRunner): super(OpenLoopClientRunner, self).__init__(client) self._is_running = False self._interval_generator = interval_generator + self._dispatch_thread = threading.Thread( + target=self._dispatch_requests, args=()) def start(self): self._is_running = True self._client.start() - thread.start_new_thread(self._dispatch_requests, ()) - + self._dispatch_thread.start() + def stop(self): self._is_running = False self._client.stop() + self._dispatch_thread.join() self._client = None def _dispatch_requests(self): @@ -98,7 +101,6 @@ class ClosedLoopClientRunner(ClientRunner): self._client.stop() self._client = None - def _send_request(self, response_time): + def _send_request(self, client, response_time): if self._is_running: - self._client.send_request() - + client.send_request() diff --git a/src/python/grpcio/tests/qps/histogram.py b/src/python/grpcio_tests/tests/qps/histogram.py index 9a7b5eb2ba..9a7b5eb2ba 100644 --- a/src/python/grpcio/tests/qps/histogram.py +++ b/src/python/grpcio_tests/tests/qps/histogram.py diff --git a/src/python/grpcio/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py index 3dda718638..2371ff0956 100644 --- a/src/python/grpcio/tests/qps/qps_worker.py +++ b/src/python/grpcio_tests/tests/qps/qps_worker.py @@ -32,19 +32,20 @@ import argparse import time +from concurrent import futures +import grpc from src.proto.grpc.testing import services_pb2 from tests.qps import worker_server def run_worker_server(port): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=5)) servicer = worker_server.WorkerServer() - server = services_pb2.beta_create_WorkerService_server(servicer) + services_pb2.add_WorkerServiceServicer_to_server(servicer, 
server) server.add_insecure_port('[::]:{}'.format(port)) server.start() servicer.wait_for_quit() - # Drain outstanding requests for clean exit - time.sleep(2) server.stop(0) diff --git a/src/python/grpcio/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py index 1f9af5482c..46d542940f 100644 --- a/src/python/grpcio/tests/qps/worker_server.py +++ b/src/python/grpcio_tests/tests/qps/worker_server.py @@ -32,8 +32,8 @@ import random import threading import time -from grpc.beta import implementations -from grpc.framework.interfaces.face import utilities +from concurrent import futures +import grpc from src.proto.grpc.testing import control_pb2 from src.proto.grpc.testing import services_pb2 from src.proto.grpc.testing import stats_pb2 @@ -45,7 +45,7 @@ from tests.qps import histogram from tests.unit import resources -class WorkerServer(services_pb2.BetaWorkerServiceServicer): +class WorkerServer(services_pb2.WorkerServiceServicer): """Python Worker Server implementation.""" def __init__(self): @@ -65,7 +65,7 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer): if request.mark.reset: start_time = end_time yield status - server.stop(0) + server.stop(None) def _get_server_status(self, start_time, end_time, port, cores): end_time = time.time() @@ -76,25 +76,35 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer): return control_pb2.ServerStatus(stats=stats, port=port, cores=cores) def _create_server(self, config): - if config.server_type == control_pb2.SYNC_SERVER: + if config.async_server_threads == 0: + # This is the default concurrent.futures thread pool size, but + # None doesn't seem to work + server_threads = multiprocessing.cpu_count() * 5 + else: + server_threads = config.async_server_threads + server = grpc.server(futures.ThreadPoolExecutor( + max_workers=server_threads)) + if config.server_type == control_pb2.ASYNC_SERVER: servicer = benchmark_server.BenchmarkServer() - server = services_pb2.beta_create_BenchmarkService_server(servicer) + services_pb2.add_BenchmarkServiceServicer_to_server(servicer, server) elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER: resp_size = config.payload_config.bytebuf_params.resp_size servicer = benchmark_server.GenericBenchmarkServer(resp_size) method_implementations = { - ('grpc.testing.BenchmarkService', 'StreamingCall'): - utilities.stream_stream_inline(servicer.StreamingCall), - ('grpc.testing.BenchmarkService', 'UnaryCall'): - utilities.unary_unary_inline(servicer.UnaryCall), + 'StreamingCall': + grpc.stream_stream_rpc_method_handler(servicer.StreamingCall), + 'UnaryCall': + grpc.unary_unary_rpc_method_handler(servicer.UnaryCall), } - server = implementations.server(method_implementations) + handler = grpc.method_handlers_generic_handler( + 'grpc.testing.BenchmarkService', method_implementations) + server.add_generic_rpc_handlers((handler,)) else: raise Exception('Unsupported server type {}'.format(config.server_type)) if config.HasField('security_params'): # Use SSL - server_creds = implementations.ssl_server_credentials([( - resources.private_key(), resources.certificate_chain())]) + server_creds = grpc.ssl_server_credentials( + ((resources.private_key(), resources.certificate_chain()),)) port = server.add_secure_port('[::]:{}'.format(config.port), server_creds) else: port = server.add_insecure_port('[::]:{}'.format(config.port)) @@ -153,9 +163,8 @@ class WorkerServer(services_pb2.BetaWorkerServiceServicer): if config.rpc_type == control_pb2.UNARY: client = 
benchmark_client.UnaryAsyncBenchmarkClient( server, config, qps_data) - elif config.rpc_type == control_pb2.STREAMING: - client = benchmark_client.StreamingAsyncBenchmarkClient( - server, config, qps_data) + else: + raise Exception('Async streaming client not supported') else: raise Exception('Unsupported client type {}'.format(config.client_type)) diff --git a/src/python/grpcio/tests/stress/__init__.py b/src/python/grpcio_tests/tests/stress/__init__.py index 100a624dc9..100a624dc9 100644 --- a/src/python/grpcio/tests/stress/__init__.py +++ b/src/python/grpcio_tests/tests/stress/__init__.py diff --git a/src/python/grpcio/tests/stress/client.py b/src/python/grpcio_tests/tests/stress/client.py index e2e016760c..0de2532cd8 100644 --- a/src/python/grpcio/tests/stress/client.py +++ b/src/python/grpcio_tests/tests/stress/client.py @@ -30,10 +30,10 @@ """Entry point for running stress tests.""" import argparse -import Queue import threading from grpc.beta import implementations +from six.moves import queue from src.proto.grpc.testing import metrics_pb2 from src.proto.grpc.testing import test_pb2 @@ -94,7 +94,7 @@ def run_test(args): test_cases = _parse_weighted_test_cases(args.test_cases) test_servers = args.server_addresses.split(',') # Propagate any client exceptions with a queue - exception_queue = Queue.Queue() + exception_queue = queue.Queue() stop_event = threading.Event() hist = histogram.Histogram(1, 1) runners = [] @@ -121,7 +121,7 @@ def run_test(args): if timeout_secs < 0: timeout_secs = None raise exception_queue.get(block=True, timeout=timeout_secs) - except Queue.Empty: + except queue.Empty: # No exceptions thrown, success pass finally: diff --git a/src/python/grpcio/tests/stress/metrics_server.py b/src/python/grpcio_tests/tests/stress/metrics_server.py index b994e4643e..b994e4643e 100644 --- a/src/python/grpcio/tests/stress/metrics_server.py +++ b/src/python/grpcio_tests/tests/stress/metrics_server.py diff --git a/src/python/grpcio/tests/stress/test_runner.py b/src/python/grpcio_tests/tests/stress/test_runner.py index 88f13727e3..88f13727e3 100644 --- a/src/python/grpcio/tests/stress/test_runner.py +++ b/src/python/grpcio_tests/tests/stress/test_runner.py diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json new file mode 100644 index 0000000000..dcaef0db1f --- /dev/null +++ b/src/python/grpcio_tests/tests/tests.json @@ -0,0 +1,43 @@ +[ + "_api_test.AllTest", + "_api_test.ChannelConnectivityTest", + "_api_test.ChannelTest", + "_auth_test.AccessTokenCallCredentialsTest", + "_auth_test.GoogleCallCredentialsTest", + "_beta_features_test.BetaFeaturesTest", + "_beta_features_test.ContextManagementAndLifecycleTest", + "_cancel_many_calls_test.CancelManyCallsTest", + "_channel_connectivity_test.ChannelConnectivityTest", + "_channel_ready_future_test.ChannelReadyFutureTest", + "_channel_test.ChannelTest", + "_compression_test.CompressionTest", + "_connectivity_channel_test.ConnectivityStatesTest", + "_credentials_test.CredentialsTest", + "_empty_message_test.EmptyMessageTest", + "_exit_test.ExitTest", + "_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", + "_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest", + "_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest", + "_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest", + "_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest", + 
"_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest", + "_health_servicer_test.HealthServicerTest", + "_implementations_test.CallCredentialsTest", + "_implementations_test.ChannelCredentialsTest", + "_insecure_interop_test.InsecureInteropTest", + "_logging_pool_test.LoggingPoolTest", + "_metadata_code_details_test.MetadataCodeDetailsTest", + "_metadata_test.MetadataTest", + "_not_found_test.NotFoundTest", + "_python_plugin_test.PythonPluginTest", + "_read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest", + "_rpc_test.RPCTest", + "_sanity_test.Sanity", + "_secure_interop_test.SecureInteropTest", + "_thread_cleanup_test.CleanupThreadTest", + "_utilities_test.ChannelConnectivityTest", + "beta_python_plugin_test.PythonPluginTest", + "cygrpc_test.InsecureServerInsecureClient", + "cygrpc_test.SecureServerSecureClient", + "cygrpc_test.TypeSmokeTest" +] diff --git a/src/python/grpcio/tests/unit/_adapter/__init__.py b/src/python/grpcio_tests/tests/unit/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/_adapter/__init__.py +++ b/src/python/grpcio_tests/tests/unit/__init__.py diff --git a/src/python/grpcio_tests/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py new file mode 100644 index 0000000000..2fe89499f5 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_api_test.py @@ -0,0 +1,111 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Test of gRPC Python's application-layer API.""" + +import unittest + +import six + +import grpc + +from tests.unit import _from_grpc_import_star + + +class AllTest(unittest.TestCase): + + def testAll(self): + expected_grpc_code_elements = ( + 'FutureTimeoutError', + 'FutureCancelledError', + 'Future', + 'ChannelConnectivity', + 'StatusCode', + 'RpcError', + 'RpcContext', + 'Call', + 'ChannelCredentials', + 'CallCredentials', + 'AuthMetadataContext', + 'AuthMetadataPluginCallback', + 'AuthMetadataPlugin', + 'ServerCredentials', + 'UnaryUnaryMultiCallable', + 'UnaryStreamMultiCallable', + 'StreamUnaryMultiCallable', + 'StreamStreamMultiCallable', + 'Channel', + 'ServicerContext', + 'RpcMethodHandler', + 'HandlerCallDetails', + 'GenericRpcHandler', + 'Server', + 'unary_unary_rpc_method_handler', + 'unary_stream_rpc_method_handler', + 'stream_unary_rpc_method_handler', + 'stream_stream_rpc_method_handler', + 'method_handlers_generic_handler', + 'ssl_channel_credentials', + 'metadata_call_credentials', + 'access_token_call_credentials', + 'composite_call_credentials', + 'composite_channel_credentials', + 'ssl_server_credentials', + 'channel_ready_future', + 'insecure_channel', + 'secure_channel', + 'server', + ) + + six.assertCountEqual( + self, expected_grpc_code_elements, + _from_grpc_import_star.GRPC_ELEMENTS) + + +class ChannelConnectivityTest(unittest.TestCase): + + def testChannelConnectivity(self): + self.assertSequenceEqual( + (grpc.ChannelConnectivity.IDLE, + grpc.ChannelConnectivity.CONNECTING, + grpc.ChannelConnectivity.READY, + grpc.ChannelConnectivity.TRANSIENT_FAILURE, + grpc.ChannelConnectivity.SHUTDOWN,), + tuple(grpc.ChannelConnectivity)) + + +class ChannelTest(unittest.TestCase): + + def test_secure_channel(self): + channel_credentials = grpc.ssl_channel_credentials() + channel = grpc.secure_channel('google.com:443', channel_credentials) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/_auth_test.py b/src/python/grpcio_tests/tests/unit/_auth_test.py index c31f7b06f7..c31f7b06f7 100644 --- a/src/python/grpcio/tests/unit/_auth_test.py +++ b/src/python/grpcio_tests/tests/unit/_auth_test.py diff --git a/src/python/grpcio/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py index a1575efada..3c00f686ce 100644 --- a/src/python/grpcio/tests/unit/_channel_connectivity_test.py +++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py @@ -104,7 +104,7 @@ class ChannelConnectivityTest(unittest.TestCase): grpc.ChannelConnectivity.READY, fifth_connectivities) def test_immediately_connectable_channel_connectivity(self): - server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0)) + server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ()) port = server.add_insecure_port('[::]:0') server.start() first_callback = _Callback() @@ -135,15 +135,15 @@ class ChannelConnectivityTest(unittest.TestCase): self.assertNotIn( grpc.ChannelConnectivity.TRANSIENT_FAILURE, third_connectivities) self.assertNotIn( - grpc.ChannelConnectivity.FATAL_FAILURE, third_connectivities) + grpc.ChannelConnectivity.SHUTDOWN, third_connectivities) self.assertNotIn( grpc.ChannelConnectivity.TRANSIENT_FAILURE, fourth_connectivities) self.assertNotIn( - grpc.ChannelConnectivity.FATAL_FAILURE, fourth_connectivities) + grpc.ChannelConnectivity.SHUTDOWN, fourth_connectivities) def test_reachable_then_unreachable_channel_connectivity(self): - server = _server.Server((), 
futures.ThreadPoolExecutor(max_workers=0)) + server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ()) port = server.add_insecure_port('[::]:0') server.start() callback = _Callback() diff --git a/src/python/grpcio/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py index b84bc0197a..e8982ed2de 100644 --- a/src/python/grpcio/tests/unit/_channel_ready_future_test.py +++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py @@ -78,7 +78,7 @@ class ChannelReadyFutureTest(unittest.TestCase): self.assertFalse(ready_future.running()) def test_immediately_connectable_channel_connectivity(self): - server = _server.Server((), futures.ThreadPoolExecutor(max_workers=0)) + server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ()) port = server.add_insecure_port('[::]:0') server.start() channel = grpc.insecure_channel('localhost:{}'.format(port)) diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py new file mode 100644 index 0000000000..83b9109466 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_compression_test.py @@ -0,0 +1,134 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
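The connectivity tests above exercise the two public ways a client can wait for a channel to become usable, and the benchmark client earlier in this diff uses the subscription form directly. A minimal standalone sketch of both idioms, pairing them with a bare `grpc.server` so the channel has something to connect to (the thread-pool size and wildcard port are arbitrary choices for illustration)::

    # Two readiness idioms from this diff: grpc.channel_ready_future (see
    # _channel_ready_future_test.py) and Channel.subscribe (see
    # tests/qps/benchmark_client.py). The server below exists only so the
    # channel can reach READY; it serves no RPCs.
    import threading
    from concurrent import futures

    import grpc

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    port = server.add_insecure_port('[::]:0')
    server.start()

    channel = grpc.insecure_channel('localhost:{}'.format(port))

    # Idiom 1: block until the channel is READY, raising on timeout.
    grpc.channel_ready_future(channel).result(timeout=10)

    # Idiom 2: subscribe to connectivity updates and wait on an event.
    ready = threading.Event()

    def _on_connectivity(connectivity):
        if connectivity == grpc.ChannelConnectivity.READY:
            ready.set()

    channel.subscribe(_on_connectivity, try_to_connect=True)
    ready.wait()
    channel.unsubscribe(_on_connectivity)

    server.stop(None)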
+"""Tests server and client side compression.""" + +import unittest + +import grpc +from grpc import _grpcio_metadata +from grpc.framework.foundation import logging_pool + +from tests.unit import test_common +from tests.unit.framework.common import test_constants + +_UNARY_UNARY = '/test/UnaryUnary' +_STREAM_STREAM = '/test/StreamStream' + + +def handle_unary(request, servicer_context): + servicer_context.send_initial_metadata([ + ('grpc-internal-encoding-request', 'gzip')]) + return request + + +def handle_stream(request_iterator, servicer_context): + # TODO(issue:#6891) We should be able to remove this loop, + # and replace with return; yield + servicer_context.send_initial_metadata([ + ('grpc-internal-encoding-request', 'gzip')]) + for request in request_iterator: + yield request + + +class _MethodHandler(grpc.RpcMethodHandler): + + def __init__(self, request_streaming, response_streaming): + self.request_streaming = request_streaming + self.response_streaming = response_streaming + self.request_deserializer = None + self.response_serializer = None + self.unary_unary = None + self.unary_stream = None + self.stream_unary = None + self.stream_stream = None + if self.request_streaming and self.response_streaming: + self.stream_stream = lambda x, y: handle_stream(x, y) + elif not self.request_streaming and not self.response_streaming: + self.unary_unary = lambda x, y: handle_unary(x, y) + + +class _GenericHandler(grpc.GenericRpcHandler): + + def service(self, handler_call_details): + if handler_call_details.method == _UNARY_UNARY: + return _MethodHandler(False, False) + elif handler_call_details.method == _STREAM_STREAM: + return _MethodHandler(True, True) + else: + return None + + +class CompressionTest(unittest.TestCase): + + def setUp(self): + self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) + self._server = grpc.server( + self._server_pool, handlers=(_GenericHandler(),)) + self._port = self._server.add_insecure_port('[::]:0') + self._server.start() + + def testUnary(self): + request = b'\x00' * 100 + + # Client -> server compressed through default client channel compression + # settings. Server -> client compressed via server-side metadata setting. + # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer + # literal with proper use of the public API. + compressed_channel = grpc.insecure_channel('localhost:%d' % self._port, + options=[('grpc.default_compression_algorithm', 1)]) + multi_callable = compressed_channel.unary_unary(_UNARY_UNARY) + response = multi_callable(request) + self.assertEqual(request, response) + + # Client -> server compressed through client metadata setting. Server -> + # client compressed via server-side metadata setting. + # TODO(https://github.com/grpc/grpc/issues/4078): replace the "0" integer + # literal with proper use of the public API. + uncompressed_channel = grpc.insecure_channel('localhost:%d' % self._port, + options=[('grpc.default_compression_algorithm', 0)]) + multi_callable = compressed_channel.unary_unary(_UNARY_UNARY) + response = multi_callable(request, metadata=[ + ('grpc-internal-encoding-request', 'gzip')]) + self.assertEqual(request, response) + + def testStreaming(self): + request = b'\x00' * 100 + + # TODO(https://github.com/grpc/grpc/issues/4078): replace the "1" integer + # literal with proper use of the public API. 
+ compressed_channel = grpc.insecure_channel('localhost:%d' % self._port, + options=[('grpc.default_compression_algorithm', 1)]) + multi_callable = compressed_channel.stream_stream(_STREAM_STREAM) + call = multi_callable([request] * test_constants.STREAM_LENGTH) + for response in call: + self.assertEqual(request, response) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/unit/_credentials_test.py b/src/python/grpcio_tests/tests/unit/_credentials_test.py new file mode 100644 index 0000000000..87af85a0b9 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_credentials_test.py @@ -0,0 +1,72 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
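The compression test above drives gzip in both directions: the server asks for compressed responses by sending `grpc-internal-encoding-request` in its initial metadata, while the client either sets a channel-wide default through the `grpc.default_compression_algorithm` channel argument or requests it per call through the same metadata key. A stripped-down client-side sketch of those two knobs, assuming a server such as the one in the test is listening at the hypothetical address used below::

    # Client-side compression knobs exercised in _compression_test.py above.
    # 'localhost:50051' is a placeholder address; the integer channel-argument
    # value stands in for a compression-algorithm constant, as the test's TODO
    # notes that a public Python constant was not yet available.
    import grpc

    # Channel-wide default: outbound messages on this channel are compressed.
    compressed_channel = grpc.insecure_channel(
        'localhost:50051',
        options=[('grpc.default_compression_algorithm', 1)])

    # Per-call request: ask the transport to compress just this message.
    plain_channel = grpc.insecure_channel('localhost:50051')
    multi_callable = plain_channel.unary_unary('/test/UnaryUnary')
    response = multi_callable(
        b'\x00' * 100,
        metadata=[('grpc-internal-encoding-request', 'gzip')])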
+ +"""Tests of credentials.""" + +import unittest + +import grpc + + +class CredentialsTest(unittest.TestCase): + + def test_call_credentials_composition(self): + first = grpc.access_token_call_credentials('abc') + second = grpc.access_token_call_credentials('def') + third = grpc.access_token_call_credentials('ghi') + + first_and_second = grpc.composite_call_credentials(first, second) + first_second_and_third = grpc.composite_call_credentials( + first, second, third) + + self.assertIsInstance(first_and_second, grpc.CallCredentials) + self.assertIsInstance(first_second_and_third, grpc.CallCredentials) + + def test_channel_credentials_composition(self): + first_call_credentials = grpc.access_token_call_credentials('abc') + second_call_credentials = grpc.access_token_call_credentials('def') + third_call_credentials = grpc.access_token_call_credentials('ghi') + channel_credentials = grpc.ssl_channel_credentials() + + channel_and_first = grpc.composite_channel_credentials( + channel_credentials, first_call_credentials) + channel_first_and_second = grpc.composite_channel_credentials( + channel_credentials, first_call_credentials, second_call_credentials) + channel_first_second_and_third = grpc.composite_channel_credentials( + channel_credentials, first_call_credentials, second_call_credentials, + third_call_credentials) + + self.assertIsInstance(channel_and_first, grpc.ChannelCredentials) + self.assertIsInstance(channel_first_and_second, grpc.ChannelCredentials) + self.assertIsInstance( + channel_first_second_and_third, grpc.ChannelCredentials) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/_cython/.gitignore b/src/python/grpcio_tests/tests/unit/_cython/.gitignore index c315029288..c315029288 100644 --- a/src/python/grpcio/tests/unit/_cython/.gitignore +++ b/src/python/grpcio_tests/tests/unit/_cython/.gitignore diff --git a/src/python/grpcio/tests/unit/_cython/__init__.py b/src/python/grpcio_tests/tests/unit/_cython/__init__.py index b89398809f..b89398809f 100644 --- a/src/python/grpcio/tests/unit/_cython/__init__.py +++ b/src/python/grpcio_tests/tests/unit/_cython/__init__.py diff --git a/src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py index c1de779014..cf212c5653 100644 --- a/src/python/grpcio/tests/unit/_cython/_cancel_many_calls_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py @@ -81,11 +81,11 @@ class _Handler(object): self._state.condition.wait() with self._lock: - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations( (cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)), _RECEIVE_CLOSE_ON_SERVER_TAG) - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)), _RECEIVE_MESSAGE_TAG) first_event = self._completion_queue.poll() @@ -101,7 +101,7 @@ class _Handler(object): _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!', _EMPTY_FLAGS), ) - self._call.start_batch( + self._call.start_server_batch( cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG) self._completion_queue.poll() self._completion_queue.poll() @@ -159,9 +159,9 @@ class CancelManyCallsTest(unittest.TestCase): server_completion_queue = cygrpc.CompletionQueue() server = cygrpc.Server() server.register_completion_queue(server_completion_queue) - port = server.add_http2_port('[::]:0') + port = server.add_http2_port(b'[::]:0') server.start() - channel = 
cygrpc.Channel('localhost:{}'.format(port)) + channel = cygrpc.Channel('localhost:{}'.format(port).encode()) state = _State() @@ -193,7 +193,7 @@ class CancelManyCallsTest(unittest.TestCase): cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) tag = 'client_complete_call_{0:04d}_tag'.format(index) - client_call.start_batch(cygrpc.Operations(operations), tag) + client_call.start_client_batch(cygrpc.Operations(operations), tag) client_due.add(tag) client_calls.append(client_call) diff --git a/src/python/grpcio/tests/unit/_cython/_channel_test.py b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py index 3dc7a246ae..f9c8a3ac62 100644 --- a/src/python/grpcio/tests/unit/_cython/_channel_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py @@ -37,7 +37,7 @@ from tests.unit.framework.common import test_constants def _channel_and_completion_queue(): - channel = cygrpc.Channel('localhost:54321', cygrpc.ChannelArgs(())) + channel = cygrpc.Channel(b'localhost:54321', cygrpc.ChannelArgs(())) completion_queue = cygrpc.CompletionQueue() return channel, completion_queue diff --git a/src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py index 6ae7a90fbe..152d8edde3 100644 --- a/src/python/grpcio/tests/unit/_cython/_read_some_but_not_all_responses_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py @@ -126,9 +126,9 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase): server_completion_queue = cygrpc.CompletionQueue() server = cygrpc.Server() server.register_completion_queue(server_completion_queue) - port = server.add_http2_port('[::]:0') + port = server.add_http2_port(b'[::]:0') server.start() - channel = cygrpc.Channel('localhost:{}'.format(port)) + channel = cygrpc.Channel('localhost:{}'.format(port).encode()) server_shutdown_tag = 'server_shutdown_tag' server_driver = _ServerDriver(server_completion_queue, server_shutdown_tag) @@ -168,12 +168,12 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase): client_complete_rpc_tag = 'client_complete_rpc_tag' with client_condition: client_receive_initial_metadata_start_batch_result = ( - client_call.start_batch(cygrpc.Operations([ + client_call.start_client_batch(cygrpc.Operations([ cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS), ]), client_receive_initial_metadata_tag)) client_due.add(client_receive_initial_metadata_tag) client_complete_rpc_start_batch_result = ( - client_call.start_batch(cygrpc.Operations([ + client_call.start_client_batch(cygrpc.Operations([ cygrpc.operation_send_initial_metadata( _EMPTY_METADATA, _EMPTY_FLAGS), cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), @@ -185,30 +185,30 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase): with server_call_condition: server_send_initial_metadata_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_send_initial_metadata( _EMPTY_METADATA, _EMPTY_FLAGS), - ]), server_send_initial_metadata_tag)) + ], server_send_initial_metadata_tag)) server_send_first_message_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS), - ]), server_send_first_message_tag)) + ], server_send_first_message_tag)) server_send_initial_metadata_event = 
server_call_driver.event_with_tag( server_send_initial_metadata_tag) server_send_first_message_event = server_call_driver.event_with_tag( server_send_first_message_tag) with server_call_condition: server_send_second_message_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS), - ]), server_send_second_message_tag)) + ], server_send_second_message_tag)) server_complete_rpc_start_batch_result = ( - server_rpc_event.operation_call.start_batch(cygrpc.Operations([ + server_rpc_event.operation_call.start_server_batch([ cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS), cygrpc.operation_send_status_from_server( cygrpc.Metadata(()), cygrpc.StatusCode.ok, b'test details', _EMPTY_FLAGS), - ]), server_complete_rpc_tag)) + ], server_complete_rpc_tag)) server_send_second_message_event = server_call_driver.event_with_tag( server_send_second_message_tag) server_complete_rpc_event = server_call_driver.event_with_tag( @@ -218,7 +218,7 @@ class ReadSomeButNotAllResponsesTest(unittest.TestCase): with client_condition: client_receive_first_message_tag = 'client_receive_first_message_tag' client_receive_first_message_start_batch_result = ( - client_call.start_batch(cygrpc.Operations([ + client_call.start_client_batch(cygrpc.Operations([ cygrpc.operation_receive_message(_EMPTY_FLAGS), ]), client_receive_first_message_tag)) client_due.add(client_receive_first_message_tag) diff --git a/src/python/grpcio/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py index 0a511101f0..f9a8e2401b 100644 --- a/src/python/grpcio/tests/unit/_cython/cygrpc_test.py +++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py @@ -37,7 +37,7 @@ from tests.unit import test_common from tests.unit import resources -_SSL_HOST_OVERRIDE = 'foo.test.google.fr' +_SSL_HOST_OVERRIDE = b'foo.test.google.fr' _CALL_CREDENTIALS_METADATA_KEY = 'call-creds-key' _CALL_CREDENTIALS_METADATA_VALUE = 'call-creds-value' _EMPTY_FLAGS = 0 @@ -46,38 +46,38 @@ def _metadata_plugin_callback(context, callback): callback(cygrpc.Metadata( [cygrpc.Metadatum(_CALL_CREDENTIALS_METADATA_KEY, _CALL_CREDENTIALS_METADATA_VALUE)]), - cygrpc.StatusCode.ok, '') + cygrpc.StatusCode.ok, b'') class TypeSmokeTest(unittest.TestCase): def testStringsInUtilitiesUpDown(self): self.assertEqual(0, cygrpc.StatusCode.ok) - metadatum = cygrpc.Metadatum('a', 'b') - self.assertEqual('a'.encode(), metadatum.key) - self.assertEqual('b'.encode(), metadatum.value) + metadatum = cygrpc.Metadatum(b'a', b'b') + self.assertEqual(b'a', metadatum.key) + self.assertEqual(b'b', metadatum.value) metadata = cygrpc.Metadata([metadatum]) self.assertEqual(1, len(metadata)) self.assertEqual(metadatum.key, metadata[0].key) def testMetadataIteration(self): metadata = cygrpc.Metadata([ - cygrpc.Metadatum('a', 'b'), cygrpc.Metadatum('c', 'd')]) + cygrpc.Metadatum(b'a', b'b'), cygrpc.Metadatum(b'c', b'd')]) iterator = iter(metadata) metadatum = next(iterator) self.assertIsInstance(metadatum, cygrpc.Metadatum) - self.assertEqual(metadatum.key, 'a'.encode()) - self.assertEqual(metadatum.value, 'b'.encode()) + self.assertEqual(metadatum.key, b'a') + self.assertEqual(metadatum.value, b'b') metadatum = next(iterator) self.assertIsInstance(metadatum, cygrpc.Metadatum) - self.assertEqual(metadatum.key, 'c'.encode()) - self.assertEqual(metadatum.value, 'd'.encode()) + self.assertEqual(metadatum.key, b'c') + 
self.assertEqual(metadatum.value, b'd') with self.assertRaises(StopIteration): next(iterator) def testOperationsIteration(self): operations = cygrpc.Operations([ - cygrpc.operation_send_message('asdf', _EMPTY_FLAGS)]) + cygrpc.operation_send_message(b'asdf', _EMPTY_FLAGS)]) iterator = iter(operations) operation = next(iterator) self.assertIsInstance(operation, cygrpc.Operation) @@ -87,7 +87,7 @@ class TypeSmokeTest(unittest.TestCase): next(iterator) def testOperationFlags(self): - operation = cygrpc.operation_send_message('asdf', + operation = cygrpc.operation_send_message(b'asdf', cygrpc.WriteFlag.no_compress) self.assertEqual(cygrpc.WriteFlag.no_compress, operation.flags) @@ -105,16 +105,16 @@ class TypeSmokeTest(unittest.TestCase): del server def testChannelUpDown(self): - channel = cygrpc.Channel('[::]:0', cygrpc.ChannelArgs([])) + channel = cygrpc.Channel(b'[::]:0', cygrpc.ChannelArgs([])) del channel def testCredentialsMetadataPluginUpDown(self): plugin = cygrpc.CredentialsMetadataPlugin( - lambda ignored_a, ignored_b: None, '') + lambda ignored_a, ignored_b: None, b'') del plugin def testCallCredentialsFromPluginUpDown(self): - plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback, '') + plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback, b'') call_credentials = cygrpc.call_credentials_metadata_plugin(plugin) del plugin del call_credentials @@ -123,7 +123,7 @@ class TypeSmokeTest(unittest.TestCase): server = cygrpc.Server() completion_queue = cygrpc.CompletionQueue() server.register_completion_queue(completion_queue) - port = server.add_http2_port('[::]:0') + port = server.add_http2_port(b'[::]:0') self.assertIsInstance(port, int) server.start() del server @@ -131,7 +131,7 @@ class TypeSmokeTest(unittest.TestCase): def testServerStartShutdown(self): completion_queue = cygrpc.CompletionQueue() server = cygrpc.Server() - server.add_http2_port('[::]:0') + server.add_http2_port(b'[::]:0') server.register_completion_queue(completion_queue) server.start() shutdown_tag = object() @@ -143,22 +143,61 @@ class TypeSmokeTest(unittest.TestCase): del completion_queue -class InsecureServerInsecureClient(unittest.TestCase): +class ServerClientMixin(object): - def setUp(self): + def setUpMixin(self, server_credentials, client_credentials, host_override): self.server_completion_queue = cygrpc.CompletionQueue() self.server = cygrpc.Server() self.server.register_completion_queue(self.server_completion_queue) - self.port = self.server.add_http2_port('[::]:0') + if server_credentials: + self.port = self.server.add_http2_port(b'[::]:0', server_credentials) + else: + self.port = self.server.add_http2_port(b'[::]:0') self.server.start() self.client_completion_queue = cygrpc.CompletionQueue() - self.client_channel = cygrpc.Channel('localhost:{}'.format(self.port)) - - def tearDown(self): + if client_credentials: + client_channel_arguments = cygrpc.ChannelArgs([ + cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override, + host_override)]) + self.client_channel = cygrpc.Channel( + 'localhost:{}'.format(self.port).encode(), client_channel_arguments, + client_credentials) + else: + self.client_channel = cygrpc.Channel('localhost:{}'.format(self.port).encode()) + if host_override: + self.host_argument = None # default host + self.expected_host = host_override + else: + # arbitrary host name necessitating no further identification + self.host_argument = b'hostess' + self.expected_host = self.host_argument + + def tearDownMixin(self): del self.server del 
self.client_completion_queue del self.server_completion_queue + def _perform_operations(self, operations, call, queue, deadline, description): + """Perform the list of operations with given call, queue, and deadline. + + Invocation errors are reported with as an exception with `description` in + the message. Performs the operations asynchronously, returning a future. + """ + def performer(): + tag = object() + try: + call_result = call.start_client_batch( + cygrpc.Operations(operations), tag) + self.assertEqual(cygrpc.CallError.ok, call_result) + event = queue.poll(deadline) + self.assertEqual(cygrpc.CompletionType.operation_complete, event.type) + self.assertTrue(event.success) + self.assertIs(tag, event.tag) + except Exception as error: + raise Exception("Error in '{}': {}".format(description, error.message)) + return event + return test_utilities.SimpleFuture(performer) + def testEcho(self): DEADLINE = time.time()+5 DEADLINE_TOLERANCE = 0.25 @@ -175,7 +214,6 @@ class InsecureServerInsecureClient(unittest.TestCase): REQUEST = b'in death a member of project mayhem has a name' RESPONSE = b'his name is robert paulson' METHOD = b'twinkies' - HOST = b'hostess' cygrpc_deadline = cygrpc.Timespec(DEADLINE) @@ -188,12 +226,13 @@ class InsecureServerInsecureClient(unittest.TestCase): client_call_tag = object() client_call = self.client_channel.create_call( - None, 0, self.client_completion_queue, METHOD, HOST, cygrpc_deadline) + None, 0, self.client_completion_queue, METHOD, self.host_argument, + cygrpc_deadline) client_initial_metadata = cygrpc.Metadata([ cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY, CLIENT_METADATA_ASCII_VALUE), cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)]) - client_start_batch_result = client_call.start_batch(cygrpc.Operations([ + client_start_batch_result = client_call.start_client_batch([ cygrpc.operation_send_initial_metadata(client_initial_metadata, _EMPTY_FLAGS), cygrpc.operation_send_message(REQUEST, _EMPTY_FLAGS), @@ -201,7 +240,7 @@ class InsecureServerInsecureClient(unittest.TestCase): cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS), cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS) - ]), client_call_tag) + ], client_call_tag) self.assertEqual(cygrpc.CallError.ok, client_start_batch_result) client_event_future = test_utilities.CompletionQueuePollFuture( self.client_completion_queue, cygrpc_deadline) @@ -216,7 +255,8 @@ class InsecureServerInsecureClient(unittest.TestCase): test_common.metadata_transmitted(client_initial_metadata, request_event.request_metadata)) self.assertEqual(METHOD, request_event.request_call_details.method) - self.assertEqual(HOST, request_event.request_call_details.host) + self.assertEqual(self.expected_host, + request_event.request_call_details.host) self.assertLess( abs(DEADLINE - float(request_event.request_call_details.deadline)), DEADLINE_TOLERANCE) @@ -229,7 +269,7 @@ class InsecureServerInsecureClient(unittest.TestCase): server_trailing_metadata = cygrpc.Metadata([ cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY, SERVER_TRAILING_METADATA_VALUE)]) - server_start_batch_result = server_call.start_batch([ + server_start_batch_result = server_call.start_server_batch([ cygrpc.operation_send_initial_metadata(server_initial_metadata, _EMPTY_FLAGS), cygrpc.operation_receive_message(_EMPTY_FLAGS), @@ -241,8 +281,8 @@ class InsecureServerInsecureClient(unittest.TestCase): ], server_call_tag) self.assertEqual(cygrpc.CallError.ok, server_start_batch_result) - 
client_event = client_event_future.result() server_event = self.server_completion_queue.poll(cygrpc_deadline) + client_event = client_event_future.result() self.assertEqual(6, len(client_event.batch_operations)) found_client_op_types = set() @@ -292,172 +332,101 @@ class InsecureServerInsecureClient(unittest.TestCase): del client_call del server_call - -class SecureServerSecureClient(unittest.TestCase): - - def setUp(self): - server_credentials = cygrpc.server_credentials_ssl( - None, [cygrpc.SslPemKeyCertPair(resources.private_key(), - resources.certificate_chain())], False) - channel_credentials = cygrpc.channel_credentials_ssl( - resources.test_root_certificates(), None) - self.server_completion_queue = cygrpc.CompletionQueue() - self.server = cygrpc.Server() - self.server.register_completion_queue(self.server_completion_queue) - self.port = self.server.add_http2_port('[::]:0', server_credentials) - self.server.start() - self.client_completion_queue = cygrpc.CompletionQueue() - client_channel_arguments = cygrpc.ChannelArgs([ - cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override, - _SSL_HOST_OVERRIDE)]) - self.client_channel = cygrpc.Channel( - 'localhost:{}'.format(self.port), client_channel_arguments, - channel_credentials) - - def tearDown(self): - del self.server - del self.client_completion_queue - del self.server_completion_queue - - def testEcho(self): + def test6522(self): DEADLINE = time.time()+5 DEADLINE_TOLERANCE = 0.25 - CLIENT_METADATA_ASCII_KEY = b'key' - CLIENT_METADATA_ASCII_VALUE = b'val' - CLIENT_METADATA_BIN_KEY = b'key-bin' - CLIENT_METADATA_BIN_VALUE = b'\0'*1000 - SERVER_INITIAL_METADATA_KEY = b'init_me_me_me' - SERVER_INITIAL_METADATA_VALUE = b'whodawha?' - SERVER_TRAILING_METADATA_KEY = b'california_is_in_a_drought' - SERVER_TRAILING_METADATA_VALUE = b'zomg it is' - SERVER_STATUS_CODE = cygrpc.StatusCode.ok - SERVER_STATUS_DETAILS = b'our work is never over' - REQUEST = b'in death a member of project mayhem has a name' - RESPONSE = b'his name is robert paulson' - METHOD = b'/twinkies' - HOST = None # Default host + METHOD = b'twinkies' cygrpc_deadline = cygrpc.Timespec(DEADLINE) + empty_metadata = cygrpc.Metadata([]) server_request_tag = object() - request_call_result = self.server.request_call( + self.server.request_call( self.server_completion_queue, self.server_completion_queue, server_request_tag) + client_call = self.client_channel.create_call( + None, 0, self.client_completion_queue, METHOD, self.host_argument, + cygrpc_deadline) - self.assertEqual(cygrpc.CallError.ok, request_call_result) + # Prologue + def perform_client_operations(operations, description): + return self._perform_operations( + operations, client_call, + self.client_completion_queue, cygrpc_deadline, description) - plugin = cygrpc.CredentialsMetadataPlugin(_metadata_plugin_callback, '') - call_credentials = cygrpc.call_credentials_metadata_plugin(plugin) - - client_call_tag = object() - client_call = self.client_channel.create_call( - None, 0, self.client_completion_queue, METHOD, HOST, cygrpc_deadline) - client_call.set_credentials(call_credentials) - client_initial_metadata = cygrpc.Metadata([ - cygrpc.Metadatum(CLIENT_METADATA_ASCII_KEY, - CLIENT_METADATA_ASCII_VALUE), - cygrpc.Metadatum(CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE)]) - client_start_batch_result = client_call.start_batch(cygrpc.Operations([ - cygrpc.operation_send_initial_metadata(client_initial_metadata, - _EMPTY_FLAGS), - cygrpc.operation_send_message(REQUEST, _EMPTY_FLAGS), - 
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), - cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS), - cygrpc.operation_receive_message(_EMPTY_FLAGS), - cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS) - ]), client_call_tag) - self.assertEqual(cygrpc.CallError.ok, client_start_batch_result) - client_event_future = test_utilities.CompletionQueuePollFuture( - self.client_completion_queue, cygrpc_deadline) + client_event_future = perform_client_operations([ + cygrpc.operation_send_initial_metadata(empty_metadata, + _EMPTY_FLAGS), + cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS), + ], "Client prologue") request_event = self.server_completion_queue.poll(cygrpc_deadline) - self.assertEqual(cygrpc.CompletionType.operation_complete, - request_event.type) - self.assertIsInstance(request_event.operation_call, cygrpc.Call) - self.assertIs(server_request_tag, request_event.tag) - self.assertEqual(0, len(request_event.batch_operations)) - client_metadata_with_credentials = list(client_initial_metadata) + [ - (_CALL_CREDENTIALS_METADATA_KEY, _CALL_CREDENTIALS_METADATA_VALUE)] - self.assertTrue( - test_common.metadata_transmitted(client_metadata_with_credentials, - request_event.request_metadata)) - self.assertEqual(METHOD, request_event.request_call_details.method) - self.assertEqual(_SSL_HOST_OVERRIDE, - request_event.request_call_details.host) - self.assertLess( - abs(DEADLINE - float(request_event.request_call_details.deadline)), - DEADLINE_TOLERANCE) - - server_call_tag = object() server_call = request_event.operation_call - server_initial_metadata = cygrpc.Metadata([ - cygrpc.Metadatum(SERVER_INITIAL_METADATA_KEY, - SERVER_INITIAL_METADATA_VALUE)]) - server_trailing_metadata = cygrpc.Metadata([ - cygrpc.Metadatum(SERVER_TRAILING_METADATA_KEY, - SERVER_TRAILING_METADATA_VALUE)]) - server_start_batch_result = server_call.start_batch([ - cygrpc.operation_send_initial_metadata(server_initial_metadata, - _EMPTY_FLAGS), - cygrpc.operation_receive_message(_EMPTY_FLAGS), - cygrpc.operation_send_message(RESPONSE, _EMPTY_FLAGS), - cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS), - cygrpc.operation_send_status_from_server( - server_trailing_metadata, SERVER_STATUS_CODE, - SERVER_STATUS_DETAILS, _EMPTY_FLAGS) - ], server_call_tag) - self.assertEqual(cygrpc.CallError.ok, server_start_batch_result) - client_event = client_event_future.result() - server_event = self.server_completion_queue.poll(cygrpc_deadline) + def perform_server_operations(operations, description): + return self._perform_operations( + operations, server_call, + self.server_completion_queue, cygrpc_deadline, description) - self.assertEqual(6, len(client_event.batch_operations)) - found_client_op_types = set() - for client_result in client_event.batch_operations: - # we expect each op type to be unique - self.assertNotIn(client_result.type, found_client_op_types) - found_client_op_types.add(client_result.type) - if client_result.type == cygrpc.OperationType.receive_initial_metadata: - self.assertTrue( - test_common.metadata_transmitted(server_initial_metadata, - client_result.received_metadata)) - elif client_result.type == cygrpc.OperationType.receive_message: - self.assertEqual(RESPONSE, client_result.received_message.bytes()) - elif client_result.type == cygrpc.OperationType.receive_status_on_client: - self.assertTrue( - test_common.metadata_transmitted(server_trailing_metadata, - client_result.received_metadata)) - self.assertEqual(SERVER_STATUS_DETAILS, - client_result.received_status_details) - 
self.assertEqual(SERVER_STATUS_CODE, client_result.received_status_code) - self.assertEqual(set([ - cygrpc.OperationType.send_initial_metadata, - cygrpc.OperationType.send_message, - cygrpc.OperationType.send_close_from_client, - cygrpc.OperationType.receive_initial_metadata, - cygrpc.OperationType.receive_message, - cygrpc.OperationType.receive_status_on_client - ]), found_client_op_types) + server_event_future = perform_server_operations([ + cygrpc.operation_send_initial_metadata(empty_metadata, + _EMPTY_FLAGS), + ], "Server prologue") - self.assertEqual(5, len(server_event.batch_operations)) - found_server_op_types = set() - for server_result in server_event.batch_operations: - self.assertNotIn(client_result.type, found_server_op_types) - found_server_op_types.add(server_result.type) - if server_result.type == cygrpc.OperationType.receive_message: - self.assertEqual(REQUEST, server_result.received_message.bytes()) - elif server_result.type == cygrpc.OperationType.receive_close_on_server: - self.assertFalse(server_result.received_cancelled) - self.assertEqual(set([ - cygrpc.OperationType.send_initial_metadata, - cygrpc.OperationType.receive_message, - cygrpc.OperationType.send_message, - cygrpc.OperationType.receive_close_on_server, - cygrpc.OperationType.send_status_from_server - ]), found_server_op_types) + client_event_future.result() # force completion + server_event_future.result() - del client_call - del server_call + # Messaging + for _ in range(10): + client_event_future = perform_client_operations([ + cygrpc.operation_send_message(b'', _EMPTY_FLAGS), + cygrpc.operation_receive_message(_EMPTY_FLAGS), + ], "Client message") + server_event_future = perform_server_operations([ + cygrpc.operation_send_message(b'', _EMPTY_FLAGS), + cygrpc.operation_receive_message(_EMPTY_FLAGS), + ], "Server receive") + + client_event_future.result() # force completion + server_event_future.result() + + # Epilogue + client_event_future = perform_client_operations([ + cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), + cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS) + ], "Client epilogue") + + server_event_future = perform_server_operations([ + cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS), + cygrpc.operation_send_status_from_server( + empty_metadata, cygrpc.StatusCode.ok, b'', _EMPTY_FLAGS) + ], "Server epilogue") + + client_event_future.result() # force completion + server_event_future.result() + + +class InsecureServerInsecureClient(unittest.TestCase, ServerClientMixin): + + def setUp(self): + self.setUpMixin(None, None, None) + + def tearDown(self): + self.tearDownMixin() + + +class SecureServerSecureClient(unittest.TestCase, ServerClientMixin): + + def setUp(self): + server_credentials = cygrpc.server_credentials_ssl( + None, [cygrpc.SslPemKeyCertPair(resources.private_key(), + resources.certificate_chain())], False) + client_credentials = cygrpc.channel_credentials_ssl( + resources.test_root_certificates(), None) + self.setUpMixin(server_credentials, client_credentials, _SSL_HOST_OVERRIDE) + + def tearDown(self): + self.tearDownMixin() if __name__ == '__main__': diff --git a/src/python/grpcio/grpc/framework/foundation/activated.py b/src/python/grpcio_tests/tests/unit/_cython/test_utilities.py index 8b8e4f45b5..6280ce74c4 100644 --- a/src/python/grpcio/grpc/framework/foundation/activated.py +++ b/src/python/grpcio_tests/tests/unit/_cython/test_utilities.py @@ -27,39 +27,40 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF 
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Interfaces related to streams of values or objects.""" +import threading -import abc +from grpc._cython import cygrpc -import six -class Activated(six.with_metaclass(abc.ABCMeta)): - """Interface for objects that may be started and stopped. +class SimpleFuture(object): + """A simple future mechanism.""" - Values implementing this type must also implement the context manager - protocol. - """ + def __init__(self, function, *args, **kwargs): + def wrapped_function(): + try: + self._result = function(*args, **kwargs) + except Exception as error: + self._error = error + self._result = None + self._error = None + self._thread = threading.Thread(target=wrapped_function) + self._thread.start() - @abc.abstractmethod - def __enter__(self): - """See the context manager protocol for specification.""" - raise NotImplementedError() + def result(self): + """The resulting value of this future. - @abc.abstractmethod - def __exit__(self, exc_type, exc_val, exc_tb): - """See the context manager protocol for specification.""" - raise NotImplementedError() + Re-raises any exceptions. + """ + self._thread.join() + if self._error: + # TODO(atash): re-raise exceptions in a way that preserves tracebacks + raise self._error + return self._result - @abc.abstractmethod - def start(self): - """Activates this object. - Returns: - A value equal to the value returned by this object's __enter__ method. - """ - raise NotImplementedError() +class CompletionQueuePollFuture(SimpleFuture): + + def __init__(self, completion_queue, deadline): + super(CompletionQueuePollFuture, self).__init__( + lambda: completion_queue.poll(deadline)) - @abc.abstractmethod - def stop(self): - """Deactivates this object.""" - raise NotImplementedError() diff --git a/src/python/grpcio_tests/tests/unit/_empty_message_test.py b/src/python/grpcio_tests/tests/unit/_empty_message_test.py new file mode 100644 index 0000000000..131f6e9452 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_empty_message_test.py @@ -0,0 +1,138 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +import grpc +from grpc.framework.foundation import logging_pool + +from tests.unit.framework.common import test_constants + +_REQUEST = b'' +_RESPONSE = b'' + +_UNARY_UNARY = '/test/UnaryUnary' +_UNARY_STREAM = '/test/UnaryStream' +_STREAM_UNARY = '/test/StreamUnary' +_STREAM_STREAM = '/test/StreamStream' + + +def handle_unary_unary(request, servicer_context): + return _RESPONSE + + +def handle_unary_stream(request, servicer_context): + for _ in range(test_constants.STREAM_LENGTH): + yield _RESPONSE + + +def handle_stream_unary(request_iterator, servicer_context): + for request in request_iterator: + pass + return _RESPONSE + + +def handle_stream_stream(request_iterator, servicer_context): + for request in request_iterator: + yield _RESPONSE + + +class _MethodHandler(grpc.RpcMethodHandler): + + def __init__(self, request_streaming, response_streaming): + self.request_streaming = request_streaming + self.response_streaming = response_streaming + self.request_deserializer = None + self.response_serializer = None + self.unary_unary = None + self.unary_stream = None + self.stream_unary = None + self.stream_stream = None + if self.request_streaming and self.response_streaming: + self.stream_stream = handle_stream_stream + elif self.request_streaming: + self.stream_unary = handle_stream_unary + elif self.response_streaming: + self.unary_stream = handle_unary_stream + else: + self.unary_unary = handle_unary_unary + + +class _GenericHandler(grpc.GenericRpcHandler): + + def service(self, handler_call_details): + if handler_call_details.method == _UNARY_UNARY: + return _MethodHandler(False, False) + elif handler_call_details.method == _UNARY_STREAM: + return _MethodHandler(False, True) + elif handler_call_details.method == _STREAM_UNARY: + return _MethodHandler(True, False) + elif handler_call_details.method == _STREAM_STREAM: + return _MethodHandler(True, True) + else: + return None + + +class EmptyMessageTest(unittest.TestCase): + + def setUp(self): + self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) + self._server = grpc.server( + self._server_pool, handlers=(_GenericHandler(),)) + port = self._server.add_insecure_port('[::]:0') + self._server.start() + self._channel = grpc.insecure_channel('localhost:%d' % port) + + def tearDown(self): + self._server.stop(0) + + def testUnaryUnary(self): + response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST) + self.assertEqual(_RESPONSE, response) + + def testUnaryStream(self): + response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST) + self.assertSequenceEqual( + [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator)) + + def testStreamUnary(self): + response = self._channel.stream_unary(_STREAM_UNARY)( + [_REQUEST] * test_constants.STREAM_LENGTH) + self.assertEqual(_RESPONSE, response) + + def testStreamStream(self): + response_iterator = self._channel.stream_stream(_STREAM_STREAM)( + [_REQUEST] * test_constants.STREAM_LENGTH) + self.assertSequenceEqual( 
+ [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator)) + + +if __name__ == '__main__': + unittest.main(verbosity=2) + diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py new file mode 100644 index 0000000000..b33802bf57 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py @@ -0,0 +1,249 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
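Before the exit scenarios below, it is worth noting how the SimpleFuture helper introduced in test_utilities.py above is meant to be used: the reworked cygrpc tests hand it a callable, keep driving the other side of the RPC on the main thread, and only later join via result(), which re-raises any exception captured on the worker thread so assertion failures still surface in the test. A minimal usage sketch, assuming only the SimpleFuture class as added in this diff and that the tests package is importable; slow_add is a hypothetical stand-in for a blocking operation:

import time

from tests.unit._cython import test_utilities


def slow_add(a, b):
  # Stand-in for a blocking operation such as a completion-queue poll.
  time.sleep(0.1)
  return a + b

future = test_utilities.SimpleFuture(slow_add, 2, 3)
# The caller is free to drive the other side of an RPC here.
assert future.result() == 5  # joins the worker thread, re-raising any captured exception

CompletionQueuePollFuture is simply a SimpleFuture whose callable is completion_queue.poll(deadline), which is how the reworked testEcho overlaps the client batch with the server-side poll.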
+ +"""Defines a number of module-scope gRPC scenarios to test clean exit.""" + +import argparse +import threading +import time + +import grpc + +from tests.unit.framework.common import test_constants + +WAIT_TIME = 1000 + +REQUEST = b'request' + +UNSTARTED_SERVER = 'unstarted_server' +RUNNING_SERVER = 'running_server' +POLL_CONNECTIVITY_NO_SERVER = 'poll_connectivity_no_server' +POLL_CONNECTIVITY = 'poll_connectivity' +IN_FLIGHT_UNARY_UNARY_CALL = 'in_flight_unary_unary_call' +IN_FLIGHT_UNARY_STREAM_CALL = 'in_flight_unary_stream_call' +IN_FLIGHT_STREAM_UNARY_CALL = 'in_flight_stream_unary_call' +IN_FLIGHT_STREAM_STREAM_CALL = 'in_flight_stream_stream_call' +IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL = 'in_flight_partial_unary_stream_call' +IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL = 'in_flight_partial_stream_unary_call' +IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL = 'in_flight_partial_stream_stream_call' + +UNARY_UNARY = b'/test/UnaryUnary' +UNARY_STREAM = b'/test/UnaryStream' +STREAM_UNARY = b'/test/StreamUnary' +STREAM_STREAM = b'/test/StreamStream' +PARTIAL_UNARY_STREAM = b'/test/PartialUnaryStream' +PARTIAL_STREAM_UNARY = b'/test/PartialStreamUnary' +PARTIAL_STREAM_STREAM = b'/test/PartialStreamStream' + +TEST_TO_METHOD = { + IN_FLIGHT_UNARY_UNARY_CALL: UNARY_UNARY, + IN_FLIGHT_UNARY_STREAM_CALL: UNARY_STREAM, + IN_FLIGHT_STREAM_UNARY_CALL: STREAM_UNARY, + IN_FLIGHT_STREAM_STREAM_CALL: STREAM_STREAM, + IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL: PARTIAL_UNARY_STREAM, + IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL: PARTIAL_STREAM_UNARY, + IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL: PARTIAL_STREAM_STREAM, +} + + +def hang_unary_unary(request, servicer_context): + time.sleep(WAIT_TIME) + + +def hang_unary_stream(request, servicer_context): + time.sleep(WAIT_TIME) + + +def hang_partial_unary_stream(request, servicer_context): + for _ in range(test_constants.STREAM_LENGTH // 2): + yield request + time.sleep(WAIT_TIME) + + +def hang_stream_unary(request_iterator, servicer_context): + time.sleep(WAIT_TIME) + + +def hang_partial_stream_unary(request_iterator, servicer_context): + for _ in range(test_constants.STREAM_LENGTH // 2): + next(request_iterator) + time.sleep(WAIT_TIME) + + +def hang_stream_stream(request_iterator, servicer_context): + time.sleep(WAIT_TIME) + + +def hang_partial_stream_stream(request_iterator, servicer_context): + for _ in range(test_constants.STREAM_LENGTH // 2): + yield next(request_iterator) + time.sleep(WAIT_TIME) + + +class MethodHandler(grpc.RpcMethodHandler): + + def __init__(self, request_streaming, response_streaming, partial_hang): + self.request_streaming = request_streaming + self.response_streaming = response_streaming + self.request_deserializer = None + self.response_serializer = None + self.unary_unary = None + self.unary_stream = None + self.stream_unary = None + self.stream_stream = None + if self.request_streaming and self.response_streaming: + if partial_hang: + self.stream_stream = hang_partial_stream_stream + else: + self.stream_stream = hang_stream_stream + elif self.request_streaming: + if partial_hang: + self.stream_unary = hang_partial_stream_unary + else: + self.stream_unary = hang_stream_unary + elif self.response_streaming: + if partial_hang: + self.unary_stream = hang_partial_unary_stream + else: + self.unary_stream = hang_unary_stream + else: + self.unary_unary = hang_unary_unary + + +class GenericHandler(grpc.GenericRpcHandler): + + def service(self, handler_call_details): + if handler_call_details.method == UNARY_UNARY: + return MethodHandler(False, False, False) + elif 
handler_call_details.method == UNARY_STREAM: + return MethodHandler(False, True, False) + elif handler_call_details.method == STREAM_UNARY: + return MethodHandler(True, False, False) + elif handler_call_details.method == STREAM_STREAM: + return MethodHandler(True, True, False) + elif handler_call_details.method == PARTIAL_UNARY_STREAM: + return MethodHandler(False, True, True) + elif handler_call_details.method == PARTIAL_STREAM_UNARY: + return MethodHandler(True, False, True) + elif handler_call_details.method == PARTIAL_STREAM_STREAM: + return MethodHandler(True, True, True) + else: + return None + + +# Traditional executors will not exit until all their +# current jobs complete. Because we submit jobs that will +# never finish, we don't want to block exit on these jobs. +class DaemonPool(object): + + def submit(self, fn, *args, **kwargs): + thread = threading.Thread(target=fn, args=args, kwargs=kwargs) + thread.daemon = True + thread.start() + + def shutdown(self, wait=True): + pass + + +def infinite_request_iterator(): + while True: + yield REQUEST + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('scenario', type=str) + parser.add_argument( + '--wait_for_interrupt', dest='wait_for_interrupt', action='store_true') + args = parser.parse_args() + + if args.scenario == UNSTARTED_SERVER: + server = grpc.server(DaemonPool()) + if args.wait_for_interrupt: + time.sleep(WAIT_TIME) + elif args.scenario == RUNNING_SERVER: + server = grpc.server(DaemonPool()) + port = server.add_insecure_port('[::]:0') + server.start() + if args.wait_for_interrupt: + time.sleep(WAIT_TIME) + elif args.scenario == POLL_CONNECTIVITY_NO_SERVER: + channel = grpc.insecure_channel('localhost:12345') + + def connectivity_callback(connectivity): + pass + + channel.subscribe(connectivity_callback, try_to_connect=True) + if args.wait_for_interrupt: + time.sleep(WAIT_TIME) + elif args.scenario == POLL_CONNECTIVITY: + server = grpc.server(DaemonPool()) + port = server.add_insecure_port('[::]:0') + server.start() + channel = grpc.insecure_channel('localhost:%d' % port) + + def connectivity_callback(connectivity): + pass + + channel.subscribe(connectivity_callback, try_to_connect=True) + if args.wait_for_interrupt: + time.sleep(WAIT_TIME) + + else: + handler = GenericHandler() + server = grpc.server(DaemonPool()) + port = server.add_insecure_port('[::]:0') + server.add_generic_rpc_handlers((handler,)) + server.start() + channel = grpc.insecure_channel('localhost:%d' % port) + + method = TEST_TO_METHOD[args.scenario] + + if args.scenario == IN_FLIGHT_UNARY_UNARY_CALL: + multi_callable = channel.unary_unary(method) + future = multi_callable.future(REQUEST) + result, call = multi_callable.with_call(REQUEST) + elif (args.scenario == IN_FLIGHT_UNARY_STREAM_CALL or + args.scenario == IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL): + multi_callable = channel.unary_stream(method) + response_iterator = multi_callable(REQUEST) + for response in response_iterator: + pass + elif (args.scenario == IN_FLIGHT_STREAM_UNARY_CALL or + args.scenario == IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL): + multi_callable = channel.stream_unary(method) + future = multi_callable.future(infinite_request_iterator()) + result, call = multi_callable.with_call( + [REQUEST] * test_constants.STREAM_LENGTH) + elif (args.scenario == IN_FLIGHT_STREAM_STREAM_CALL or + args.scenario == IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL): + multi_callable = channel.stream_stream(method) + response_iterator = multi_callable(infinite_request_iterator()) + for 
response in response_iterator: + pass diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py b/src/python/grpcio_tests/tests/unit/_exit_test.py new file mode 100644 index 0000000000..5a4a32887c --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_exit_test.py @@ -0,0 +1,186 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests clean exit of server/client on Python Interpreter exit/sigint. + +The tests in this module spawn a subprocess for each test case, the +test is considered successful if it doesn't hang/timeout. 
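Each scenario named in _exit_scenarios.py is an ordinary command-line invocation, so the behavior under test can also be reproduced by hand. The following sketch mirrors what the interrupt-style test cases below effectively do; the scenario name, the 1.0 second pause, and the use of sys.executable are taken from the constants defined in these two files, while invoking the module via its __file__ attribute is a simplification for illustration:

import os
import signal
import subprocess
import sys
import time

from tests.unit import _exit_scenarios

# Launch one scenario in its own interpreter, as each test case does.
process = subprocess.Popen(
    [sys.executable, _exit_scenarios.__file__,
     _exit_scenarios.RUNNING_SERVER, '--wait_for_interrupt'])

time.sleep(1.0)                       # let the child start its server
os.kill(process.pid, signal.SIGINT)   # simulate Ctrl-C
process.wait()                        # a clean shutdown means this returns promptly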
+""" + +import atexit +import os +import signal +import six +import subprocess +import sys +import threading +import time +import unittest + +from tests.unit import _exit_scenarios + +SCENARIO_FILE = os.path.abspath(os.path.join( + os.path.dirname(os.path.realpath(__file__)), '_exit_scenarios.py')) +INTERPRETER = sys.executable +BASE_COMMAND = [INTERPRETER, SCENARIO_FILE] +BASE_SIGTERM_COMMAND = BASE_COMMAND + ['--wait_for_interrupt'] + +INIT_TIME = 1.0 + + +processes = [] +process_lock = threading.Lock() + + +# Make sure we attempt to clean up any +# processes we may have left running +def cleanup_processes(): + with process_lock: + for process in processes: + try: + process.kill() + except Exception: + pass +atexit.register(cleanup_processes) + + +def interrupt_and_wait(process): + with process_lock: + processes.append(process) + time.sleep(INIT_TIME) + os.kill(process.pid, signal.SIGINT) + process.wait() + + +def wait(process): + with process_lock: + processes.append(process) + process.wait() + + +@unittest.skip('https://github.com/grpc/grpc/issues/7311') +class ExitTest(unittest.TestCase): + + def test_unstarted_server(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.UNSTARTED_SERVER], + stdout=sys.stdout, stderr=sys.stderr) + wait(process) + + def test_unstarted_server_terminate(self): + process = subprocess.Popen( + BASE_SIGTERM_COMMAND + [_exit_scenarios.UNSTARTED_SERVER], + stdout=sys.stdout) + interrupt_and_wait(process) + + def test_running_server(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.RUNNING_SERVER], + stdout=sys.stdout, stderr=sys.stderr) + wait(process) + + def test_running_server_terminate(self): + process = subprocess.Popen( + BASE_SIGTERM_COMMAND + [_exit_scenarios.RUNNING_SERVER], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + def test_poll_connectivity_no_server(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER], + stdout=sys.stdout, stderr=sys.stderr) + wait(process) + + def test_poll_connectivity_no_server_terminate(self): + process = subprocess.Popen( + BASE_SIGTERM_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY_NO_SERVER], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + def test_poll_connectivity(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY], + stdout=sys.stdout, stderr=sys.stderr) + wait(process) + + def test_poll_connectivity_terminate(self): + process = subprocess.Popen( + BASE_SIGTERM_COMMAND + [_exit_scenarios.POLL_CONNECTIVITY], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + def test_in_flight_unary_unary_call(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_UNARY_CALL], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999') + def test_in_flight_unary_stream_call(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_UNARY_STREAM_CALL], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + def test_in_flight_stream_unary_call(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_STREAM_UNARY_CALL], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999') + def test_in_flight_stream_stream_call(self): + process = subprocess.Popen( + BASE_COMMAND + 
[_exit_scenarios.IN_FLIGHT_STREAM_STREAM_CALL], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999') + def test_in_flight_partial_unary_stream_call(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + def test_in_flight_partial_stream_unary_call(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + @unittest.skipIf(six.PY2, 'https://github.com/grpc/grpc/issues/6999') + def test_in_flight_partial_stream_stream_call(self): + process = subprocess.Popen( + BASE_COMMAND + [_exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL], + stdout=sys.stdout, stderr=sys.stderr) + interrupt_and_wait(process) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/framework/interfaces/links/__init__.py b/src/python/grpcio_tests/tests/unit/_from_grpc_import_star.py index 7086519106..78d2fb7dc5 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/links/__init__.py +++ b/src/python/grpcio_tests/tests/unit/_from_grpc_import_star.py @@ -1,4 +1,4 @@ -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -27,4 +27,12 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +_BEFORE_IMPORT = tuple(globals()) +from grpc import * + +_AFTER_IMPORT = tuple(globals()) + +GRPC_ELEMENTS = tuple( + element for element in _AFTER_IMPORT + if element not in _BEFORE_IMPORT and element != '_BEFORE_IMPORT') diff --git a/src/python/grpcio/tests/unit/_junkdrawer/__init__.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/_junkdrawer/__init__.py +++ b/src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py diff --git a/src/python/grpcio/tests/unit/_junkdrawer/stock_pb2.py b/src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py index eef18f82d6..eef18f82d6 100644 --- a/src/python/grpcio/tests/unit/_junkdrawer/stock_pb2.py +++ b/src/python/grpcio_tests/tests/unit/_junkdrawer/stock_pb2.py diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py new file mode 100644 index 0000000000..fb3e547781 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py @@ -0,0 +1,523 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests application-provided metadata, status code, and details.""" + +import threading +import unittest + +import grpc +from grpc.framework.foundation import logging_pool + +from tests.unit import test_common +from tests.unit.framework.common import test_constants +from tests.unit.framework.common import test_control + +_SERIALIZED_REQUEST = b'\x46\x47\x48' +_SERIALIZED_RESPONSE = b'\x49\x50\x51' + +_REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST +_REQUEST_DESERIALIZER = lambda unused_serialized_request: object() +_RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE +_RESPONSE_DESERIALIZER = lambda unused_serialized_resopnse: object() + +_SERVICE = 'test.TestService' +_UNARY_UNARY = 'UnaryUnary' +_UNARY_STREAM = 'UnaryStream' +_STREAM_UNARY = 'StreamUnary' +_STREAM_STREAM = 'StreamStream' + +_CLIENT_METADATA = ( + ('client-md-key', 'client-md-key'), + ('client-md-key-bin', b'\x00\x01') +) + +_SERVER_INITIAL_METADATA = ( + ('server-initial-md-key', 'server-initial-md-value'), + ('server-initial-md-key-bin', b'\x00\x02') +) + +_SERVER_TRAILING_METADATA = ( + ('server-trailing-md-key', 'server-trailing-md-value'), + ('server-trailing-md-key-bin', b'\x00\x03') +) + +_NON_OK_CODE = grpc.StatusCode.NOT_FOUND +_DETAILS = 'Test details!' 
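The constants above are exercised end to end: the _Servicer below sets metadata, status code, and details on its ServicerContext, and the test methods read them back off the client-side call (or off the raised grpc.RpcError for non-OK codes). A condensed, hypothetical wiring of that round trip, reusing the constants just defined and the GA API calls that appear throughout this diff; the real test uses the _Servicer class and the serializers defined below rather than this inline handler:

import grpc
from grpc.framework.foundation import logging_pool


def unary_unary(request, context):
  # Server side: everything the client later inspects is set on the context.
  context.send_initial_metadata(_SERVER_INITIAL_METADATA)
  context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
  context.set_details(_DETAILS)
  return _SERIALIZED_RESPONSE

handler = grpc.method_handlers_generic_handler(
    'test.TestService',
    {'UnaryUnary': grpc.unary_unary_rpc_method_handler(unary_unary)})
server = grpc.server(logging_pool.pool(1), handlers=(handler,))
port = server.add_insecure_port('[::]:0')
server.start()

channel = grpc.insecure_channel('localhost:{}'.format(port))
multi_callable = channel.unary_unary('/test.TestService/UnaryUnary')
response, call = multi_callable.with_call(
    _SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
assert call.code() is grpc.StatusCode.OK
assert call.details() == _DETAILS
assert call.trailing_metadata() is not None

With no code explicitly set, the status is OK but the details string still comes back, which is what the testSuccessful* cases assert; the testCustomCode* cases additionally call set_code(_NON_OK_CODE) and then expect the same metadata and details on the resulting grpc.RpcError instead.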
+ + +class _Servicer(object): + + def __init__(self): + self._lock = threading.Lock() + self._code = None + self._details = None + self._exception = False + self._return_none = False + self._received_client_metadata = None + + def unary_unary(self, request, context): + with self._lock: + self._received_client_metadata = context.invocation_metadata() + context.send_initial_metadata(_SERVER_INITIAL_METADATA) + context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + if self._code is not None: + context.set_code(self._code) + if self._details is not None: + context.set_details(self._details) + if self._exception: + raise test_control.Defect() + else: + return None if self._return_none else object() + + def unary_stream(self, request, context): + with self._lock: + self._received_client_metadata = context.invocation_metadata() + context.send_initial_metadata(_SERVER_INITIAL_METADATA) + context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + if self._code is not None: + context.set_code(self._code) + if self._details is not None: + context.set_details(self._details) + for _ in range(test_constants.STREAM_LENGTH // 2): + yield _SERIALIZED_RESPONSE + if self._exception: + raise test_control.Defect() + + def stream_unary(self, request_iterator, context): + with self._lock: + self._received_client_metadata = context.invocation_metadata() + context.send_initial_metadata(_SERVER_INITIAL_METADATA) + context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + if self._code is not None: + context.set_code(self._code) + if self._details is not None: + context.set_details(self._details) + # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the + # request iterator. + for ignored_request in request_iterator: + pass + if self._exception: + raise test_control.Defect() + else: + return None if self._return_none else _SERIALIZED_RESPONSE + + def stream_stream(self, request_iterator, context): + with self._lock: + self._received_client_metadata = context.invocation_metadata() + context.send_initial_metadata(_SERVER_INITIAL_METADATA) + context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + if self._code is not None: + context.set_code(self._code) + if self._details is not None: + context.set_details(self._details) + # TODO(https://github.com/grpc/grpc/issues/6891): just ignore the + # request iterator. 
+ for ignored_request in request_iterator: + pass + for _ in range(test_constants.STREAM_LENGTH // 3): + yield object() + if self._exception: + raise test_control.Defect() + + def set_code(self, code): + with self._lock: + self._code = code + + def set_details(self, details): + with self._lock: + self._details = details + + def set_exception(self): + with self._lock: + self._exception = True + + def set_return_none(self): + with self._lock: + self._return_none = True + + def received_client_metadata(self): + with self._lock: + return self._received_client_metadata + + +def _generic_handler(servicer): + method_handlers = { + _UNARY_UNARY: grpc.unary_unary_rpc_method_handler( + servicer.unary_unary, request_deserializer=_REQUEST_DESERIALIZER, + response_serializer=_RESPONSE_SERIALIZER), + _UNARY_STREAM: grpc.unary_stream_rpc_method_handler( + servicer.unary_stream), + _STREAM_UNARY: grpc.stream_unary_rpc_method_handler( + servicer.stream_unary), + _STREAM_STREAM: grpc.stream_stream_rpc_method_handler( + servicer.stream_stream, request_deserializer=_REQUEST_DESERIALIZER, + response_serializer=_RESPONSE_SERIALIZER), + } + return grpc.method_handlers_generic_handler(_SERVICE, method_handlers) + + +class MetadataCodeDetailsTest(unittest.TestCase): + + def setUp(self): + self._servicer = _Servicer() + self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) + self._server = grpc.server( + self._server_pool, handlers=(_generic_handler(self._servicer),)) + port = self._server.add_insecure_port('[::]:0') + self._server.start() + + channel = grpc.insecure_channel('localhost:{}'.format(port)) + self._unary_unary = channel.unary_unary( + '/'.join(('', _SERVICE, _UNARY_UNARY,)), + request_serializer=_REQUEST_SERIALIZER, + response_deserializer=_RESPONSE_DESERIALIZER,) + self._unary_stream = channel.unary_stream( + '/'.join(('', _SERVICE, _UNARY_STREAM,)),) + self._stream_unary = channel.stream_unary( + '/'.join(('', _SERVICE, _STREAM_UNARY,)),) + self._stream_stream = channel.stream_stream( + '/'.join(('', _SERVICE, _STREAM_STREAM,)), + request_serializer=_REQUEST_SERIALIZER, + response_deserializer=_RESPONSE_DESERIALIZER,) + + + def testSuccessfulUnaryUnary(self): + self._servicer.set_details(_DETAILS) + + unused_response, call = self._unary_unary.with_call( + object(), metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, call.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + self.assertIs(grpc.StatusCode.OK, call.code()) + self.assertEqual(_DETAILS, call.details()) + + def testSuccessfulUnaryStream(self): + self._servicer.set_details(_DETAILS) + + call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA) + received_initial_metadata = call.initial_metadata() + for _ in call: + pass + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, received_initial_metadata)) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + self.assertIs(grpc.StatusCode.OK, call.code()) + self.assertEqual(_DETAILS, call.details()) + + def testSuccessfulStreamUnary(self): + self._servicer.set_details(_DETAILS) + + unused_response, call 
= self._stream_unary.with_call( + iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH), + metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, call.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + self.assertIs(grpc.StatusCode.OK, call.code()) + self.assertEqual(_DETAILS, call.details()) + + def testSuccessfulStreamStream(self): + self._servicer.set_details(_DETAILS) + + call = self._stream_stream( + iter([object()] * test_constants.STREAM_LENGTH), + metadata=_CLIENT_METADATA) + received_initial_metadata = call.initial_metadata() + for _ in call: + pass + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, received_initial_metadata)) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + self.assertIs(grpc.StatusCode.OK, call.code()) + self.assertEqual(_DETAILS, call.details()) + + def testCustomCodeUnaryUnary(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + + with self.assertRaises(grpc.RpcError) as exception_context: + self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, + exception_context.exception.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, + exception_context.exception.trailing_metadata())) + self.assertIs(_NON_OK_CODE, exception_context.exception.code()) + self.assertEqual(_DETAILS, exception_context.exception.details()) + + def testCustomCodeUnaryStream(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + + call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA) + received_initial_metadata = call.initial_metadata() + with self.assertRaises(grpc.RpcError): + for _ in call: + pass + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, received_initial_metadata)) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + self.assertIs(_NON_OK_CODE, call.code()) + self.assertEqual(_DETAILS, call.details()) + + def testCustomCodeStreamUnary(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + + with self.assertRaises(grpc.RpcError) as exception_context: + self._stream_unary.with_call( + iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH), + metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, + exception_context.exception.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, + exception_context.exception.trailing_metadata())) + self.assertIs(_NON_OK_CODE, exception_context.exception.code()) + 
self.assertEqual(_DETAILS, exception_context.exception.details()) + + def testCustomCodeStreamStream(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + + call = self._stream_stream( + iter([object()] * test_constants.STREAM_LENGTH), + metadata=_CLIENT_METADATA) + received_initial_metadata = call.initial_metadata() + with self.assertRaises(grpc.RpcError) as exception_context: + for _ in call: + pass + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, received_initial_metadata)) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, + exception_context.exception.trailing_metadata())) + self.assertIs(_NON_OK_CODE, exception_context.exception.code()) + self.assertEqual(_DETAILS, exception_context.exception.details()) + + def testCustomCodeExceptionUnaryUnary(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + self._servicer.set_exception() + + with self.assertRaises(grpc.RpcError) as exception_context: + self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, + exception_context.exception.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, + exception_context.exception.trailing_metadata())) + self.assertIs(_NON_OK_CODE, exception_context.exception.code()) + self.assertEqual(_DETAILS, exception_context.exception.details()) + + def testCustomCodeExceptionUnaryStream(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + self._servicer.set_exception() + + call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA) + received_initial_metadata = call.initial_metadata() + with self.assertRaises(grpc.RpcError): + for _ in call: + pass + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, received_initial_metadata)) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + self.assertIs(_NON_OK_CODE, call.code()) + self.assertEqual(_DETAILS, call.details()) + + def testCustomCodeExceptionStreamUnary(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + self._servicer.set_exception() + + with self.assertRaises(grpc.RpcError) as exception_context: + self._stream_unary.with_call( + iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH), + metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, + exception_context.exception.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, + exception_context.exception.trailing_metadata())) + self.assertIs(_NON_OK_CODE, exception_context.exception.code()) + self.assertEqual(_DETAILS, exception_context.exception.details()) + + def testCustomCodeExceptionStreamStream(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + 
self._servicer.set_exception() + + call = self._stream_stream( + iter([object()] * test_constants.STREAM_LENGTH), + metadata=_CLIENT_METADATA) + received_initial_metadata = call.initial_metadata() + with self.assertRaises(grpc.RpcError): + for _ in call: + pass + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, received_initial_metadata)) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + self.assertIs(_NON_OK_CODE, call.code()) + self.assertEqual(_DETAILS, call.details()) + + def testCustomCodeReturnNoneUnaryUnary(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + self._servicer.set_return_none() + + with self.assertRaises(grpc.RpcError) as exception_context: + self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, + exception_context.exception.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, + exception_context.exception.trailing_metadata())) + self.assertIs(_NON_OK_CODE, exception_context.exception.code()) + self.assertEqual(_DETAILS, exception_context.exception.details()) + + def testCustomCodeReturnNoneStreamUnary(self): + self._servicer.set_code(_NON_OK_CODE) + self._servicer.set_details(_DETAILS) + self._servicer.set_return_none() + + with self.assertRaises(grpc.RpcError) as exception_context: + self._stream_unary.with_call( + iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH), + metadata=_CLIENT_METADATA) + + self.assertTrue( + test_common.metadata_transmitted( + _CLIENT_METADATA, self._servicer.received_client_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, + exception_context.exception.initial_metadata())) + self.assertTrue( + test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, + exception_context.exception.trailing_metadata())) + self.assertIs(_NON_OK_CODE, exception_context.exception.code()) + self.assertEqual(_DETAILS, exception_context.exception.details()) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py new file mode 100644 index 0000000000..da73476929 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py @@ -0,0 +1,216 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"""Tests server and client side metadata API.""" + +import unittest +import weakref + +import grpc +from grpc import _grpcio_metadata +from grpc.framework.foundation import logging_pool + +from tests.unit import test_common +from tests.unit.framework.common import test_constants + +_CHANNEL_ARGS = (('grpc.primary_user_agent', 'primary-agent'), + ('grpc.secondary_user_agent', 'secondary-agent')) + +_REQUEST = b'\x00\x00\x00' +_RESPONSE = b'\x00\x00\x00' + +_UNARY_UNARY = '/test/UnaryUnary' +_UNARY_STREAM = '/test/UnaryStream' +_STREAM_UNARY = '/test/StreamUnary' +_STREAM_STREAM = '/test/StreamStream' + +_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__) + +_CLIENT_METADATA = ( + ('client-md-key', 'client-md-key'), + ('client-md-key-bin', b'\x00\x01') +) + +_SERVER_INITIAL_METADATA = ( + ('server-initial-md-key', 'server-initial-md-value'), + ('server-initial-md-key-bin', b'\x00\x02') +) + +_SERVER_TRAILING_METADATA = ( + ('server-trailing-md-key', 'server-trailing-md-value'), + ('server-trailing-md-key-bin', b'\x00\x03') +) + + +def user_agent(metadata): + for key, val in metadata: + if key == 'user-agent': + return val + raise KeyError('No user agent!') + + +def validate_client_metadata(test, servicer_context): + test.assertTrue(test_common.metadata_transmitted( + _CLIENT_METADATA, servicer_context.invocation_metadata())) + test.assertTrue(user_agent(servicer_context.invocation_metadata()) + .startswith('primary-agent ' + _USER_AGENT)) + test.assertTrue(user_agent(servicer_context.invocation_metadata()) + .endswith('secondary-agent')) + + +def handle_unary_unary(test, request, servicer_context): + validate_client_metadata(test, servicer_context) + servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA) + servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + return _RESPONSE + + +def handle_unary_stream(test, request, servicer_context): + validate_client_metadata(test, servicer_context) + servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA) + servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + for _ in range(test_constants.STREAM_LENGTH): + yield _RESPONSE + + +def handle_stream_unary(test, request_iterator, servicer_context): + validate_client_metadata(test, servicer_context) + servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA) + servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + # TODO(issue:#6891) We should be able to remove this loop + for request in request_iterator: + pass + return _RESPONSE + + +def handle_stream_stream(test, request_iterator, servicer_context): + validate_client_metadata(test, servicer_context) + servicer_context.send_initial_metadata(_SERVER_INITIAL_METADATA) + 
servicer_context.set_trailing_metadata(_SERVER_TRAILING_METADATA) + # TODO(issue:#6891) We should be able to remove this loop, + # and replace with return; yield + for request in request_iterator: + yield _RESPONSE + + +class _MethodHandler(grpc.RpcMethodHandler): + + def __init__(self, test, request_streaming, response_streaming): + self.request_streaming = request_streaming + self.response_streaming = response_streaming + self.request_deserializer = None + self.response_serializer = None + self.unary_unary = None + self.unary_stream = None + self.stream_unary = None + self.stream_stream = None + if self.request_streaming and self.response_streaming: + self.stream_stream = lambda x, y: handle_stream_stream(test, x, y) + elif self.request_streaming: + self.stream_unary = lambda x, y: handle_stream_unary(test, x, y) + elif self.response_streaming: + self.unary_stream = lambda x, y: handle_unary_stream(test, x, y) + else: + self.unary_unary = lambda x, y: handle_unary_unary(test, x, y) + + +class _GenericHandler(grpc.GenericRpcHandler): + + def __init__(self, test): + self._test = test + + def service(self, handler_call_details): + if handler_call_details.method == _UNARY_UNARY: + return _MethodHandler(self._test, False, False) + elif handler_call_details.method == _UNARY_STREAM: + return _MethodHandler(self._test, False, True) + elif handler_call_details.method == _STREAM_UNARY: + return _MethodHandler(self._test, True, False) + elif handler_call_details.method == _STREAM_STREAM: + return _MethodHandler(self._test, True, True) + else: + return None + + +class MetadataTest(unittest.TestCase): + + def setUp(self): + self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) + self._server = grpc.server( + self._server_pool, handlers=(_GenericHandler(weakref.proxy(self)),)) + port = self._server.add_insecure_port('[::]:0') + self._server.start() + self._channel = grpc.insecure_channel('localhost:%d' % port, + options=_CHANNEL_ARGS) + + def tearDown(self): + self._server.stop(0) + + def testUnaryUnary(self): + multi_callable = self._channel.unary_unary(_UNARY_UNARY) + unused_response, call = multi_callable.with_call( + _REQUEST, metadata=_CLIENT_METADATA) + self.assertTrue(test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, call.initial_metadata())) + self.assertTrue(test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + + def testUnaryStream(self): + multi_callable = self._channel.unary_stream(_UNARY_STREAM) + call = multi_callable(_REQUEST, metadata=_CLIENT_METADATA) + self.assertTrue(test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, call.initial_metadata())) + for _ in call: + pass + self.assertTrue(test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + + def testStreamUnary(self): + multi_callable = self._channel.stream_unary(_STREAM_UNARY) + unused_response, call = multi_callable.with_call( + [_REQUEST] * test_constants.STREAM_LENGTH, + metadata=_CLIENT_METADATA) + self.assertTrue(test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, call.initial_metadata())) + self.assertTrue(test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + + def testStreamStream(self): + multi_callable = self._channel.stream_stream(_STREAM_STREAM) + call = multi_callable([_REQUEST] * test_constants.STREAM_LENGTH, + metadata=_CLIENT_METADATA) + self.assertTrue(test_common.metadata_transmitted( + _SERVER_INITIAL_METADATA, call.initial_metadata())) + for _ in call: 
+ pass + self.assertTrue(test_common.metadata_transmitted( + _SERVER_TRAILING_METADATA, call.trailing_metadata())) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py index 1c7a14c5d0..ab6546bf87 100644 --- a/src/python/grpcio/tests/unit/_rpc_test.py +++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py @@ -27,7 +27,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Test of gRPC Python's application-layer API.""" +"""Test of RPCs made against gRPC Python's application-layer API.""" import itertools import threading @@ -41,14 +41,14 @@ from tests.unit.framework.common import test_constants from tests.unit.framework.common import test_control _SERIALIZE_REQUEST = lambda bytestring: bytestring * 2 -_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) / 2:] +_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:] _SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3 -_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) / 3] +_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3] -_UNARY_UNARY = b'/test/UnaryUnary' -_UNARY_STREAM = b'/test/UnaryStream' -_STREAM_UNARY = b'/test/StreamUnary' -_STREAM_STREAM = b'/test/StreamStream' +_UNARY_UNARY = '/test/UnaryUnary' +_UNARY_STREAM = '/test/UnaryStream' +_STREAM_UNARY = '/test/StreamUnary' +_STREAM_STREAM = '/test/StreamStream' class _Callback(object): @@ -79,7 +79,7 @@ class _Handler(object): def handle_unary_unary(self, request, servicer_context): self._control.control() if servicer_context is not None: - servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),)) + servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) return request def handle_unary_stream(self, request, servicer_context): @@ -88,7 +88,7 @@ class _Handler(object): yield request self._control.control() if servicer_context is not None: - servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),)) + servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) def handle_stream_unary(self, request_iterator, servicer_context): if servicer_context is not None: @@ -100,13 +100,13 @@ class _Handler(object): response_elements.append(request) self._control.control() if servicer_context is not None: - servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),)) + servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) return b''.join(response_elements) def handle_stream_stream(self, request_iterator, servicer_context): self._control.control() if servicer_context is not None: - servicer_context.set_trailing_metadata(((b'testkey', b'testvalue',),)) + servicer_context.set_trailing_metadata((('testkey', 'testvalue',),)) for request in request_iterator: self._control.control() yield request @@ -184,25 +184,18 @@ class RPCTest(unittest.TestCase): self._handler = _Handler(self._control) self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) - self._server = grpc.server((), self._server_pool) - port = self._server.add_insecure_port(b'[::]:0') + self._server = grpc.server(self._server_pool) + port = self._server.add_insecure_port('[::]:0') self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),)) self._server.start() - self._channel = grpc.insecure_channel(b'localhost:%d' % port) - - # TODO(nathaniel): Why is 
this necessary, and only in some development - # environments? - def tearDown(self): - del self._channel - del self._server - del self._server_pool + self._channel = grpc.insecure_channel('localhost:%d' % port) def testUnrecognizedMethod(self): request = b'abc' with self.assertRaises(grpc.RpcError) as exception_context: - self._channel.unary_unary(b'NoSuchMethod')(request) + self._channel.unary_unary('NoSuchMethod')(request) self.assertEqual( grpc.StatusCode.UNIMPLEMENTED, exception_context.exception.code()) @@ -214,7 +207,7 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_unary_multi_callable(self._channel) response = multi_callable( request, metadata=( - (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponse'),)) + ('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),)) self.assertEqual(expected_response, response) @@ -223,10 +216,9 @@ class RPCTest(unittest.TestCase): expected_response = self._handler.handle_unary_unary(request, None) multi_callable = _unary_unary_multi_callable(self._channel) - response, call = multi_callable( + response, call = multi_callable.with_call( request, metadata=( - (b'test', b'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),), - with_call=True) + ('test', 'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),)) self.assertEqual(expected_response, response) self.assertIs(grpc.StatusCode.OK, call.code()) @@ -238,10 +230,14 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_unary_multi_callable(self._channel) response_future = multi_callable.future( request, metadata=( - (b'test', b'SuccessfulUnaryRequestFutureUnaryResponse'),)) + ('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),)) response = response_future.result() + self.assertIsInstance(response_future, grpc.Future) + self.assertIsInstance(response_future, grpc.Call) self.assertEqual(expected_response, response) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulUnaryRequestStreamResponse(self): request = b'\x37\x58' @@ -250,7 +246,7 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_stream_multi_callable(self._channel) response_iterator = multi_callable( request, - metadata=((b'test', b'SuccessfulUnaryRequestStreamResponse'),)) + metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),)) responses = tuple(response_iterator) self.assertSequenceEqual(expected_responses, responses) @@ -263,7 +259,7 @@ class RPCTest(unittest.TestCase): multi_callable = _stream_unary_multi_callable(self._channel) response = multi_callable( request_iterator, - metadata=((b'test', b'SuccessfulStreamRequestBlockingUnaryResponse'),)) + metadata=(('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),)) self.assertEqual(expected_response, response) @@ -273,11 +269,11 @@ class RPCTest(unittest.TestCase): request_iterator = iter(requests) multi_callable = _stream_unary_multi_callable(self._channel) - response, call = multi_callable( + response, call = multi_callable.with_call( request_iterator, metadata=( - (b'test', b'SuccessfulStreamRequestBlockingUnaryResponseWithCall'), - ), with_call=True) + ('test', 'SuccessfulStreamRequestBlockingUnaryResponseWithCall'), + )) self.assertEqual(expected_response, response) self.assertIs(grpc.StatusCode.OK, call.code()) @@ -291,10 +287,12 @@ class RPCTest(unittest.TestCase): response_future = multi_callable.future( request_iterator, metadata=( - (b'test', b'SuccessfulStreamRequestFutureUnaryResponse'),)) + ('test', 'SuccessfulStreamRequestFutureUnaryResponse'),)) response = 
response_future.result() self.assertEqual(expected_response, response) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulStreamRequestStreamResponse(self): requests = tuple(b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH)) @@ -305,7 +303,7 @@ class RPCTest(unittest.TestCase): multi_callable = _stream_stream_multi_callable(self._channel) response_iterator = multi_callable( request_iterator, - metadata=((b'test', b'SuccessfulStreamRequestStreamResponse'),)) + metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),)) responses = tuple(response_iterator) self.assertSequenceEqual(expected_responses, responses) @@ -320,9 +318,9 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_unary_multi_callable(self._channel) first_response = multi_callable( - first_request, metadata=((b'test', b'SequentialInvocations'),)) + first_request, metadata=(('test', 'SequentialInvocations'),)) second_response = multi_callable( - second_request, metadata=((b'test', b'SequentialInvocations'),)) + second_request, metadata=(('test', 'SequentialInvocations'),)) self.assertEqual(expected_first_response, first_response) self.assertEqual(expected_second_response, second_response) @@ -339,7 +337,7 @@ class RPCTest(unittest.TestCase): request_iterator = iter(requests) response_future = pool.submit( multi_callable, request_iterator, - metadata=((b'test', b'ConcurrentBlockingInvocations'),)) + metadata=(('test', 'ConcurrentBlockingInvocations'),)) response_futures[index] = response_future responses = tuple( response_future.result() for response_future in response_futures) @@ -358,7 +356,7 @@ class RPCTest(unittest.TestCase): request_iterator = iter(requests) response_future = multi_callable.future( request_iterator, - metadata=((b'test', b'ConcurrentFutureInvocations'),)) + metadata=(('test', 'ConcurrentFutureInvocations'),)) response_futures[index] = response_future responses = tuple( response_future.result() for response_future in response_futures) @@ -388,8 +386,8 @@ class RPCTest(unittest.TestCase): inner_response_future = multi_callable.future( request, metadata=( - (b'test', - b'WaitingForSomeButNotAllConcurrentFutureInvocations'),)) + ('test', + 'WaitingForSomeButNotAllConcurrentFutureInvocations'),)) outer_response_future = pool.submit(wrap_future(inner_response_future)) response_futures[index] = outer_response_future @@ -408,7 +406,7 @@ class RPCTest(unittest.TestCase): response_iterator = multi_callable( request, metadata=( - (b'test', b'ConsumingOneStreamResponseUnaryRequest'),)) + ('test', 'ConsumingOneStreamResponseUnaryRequest'),)) next(response_iterator) def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self): @@ -418,7 +416,7 @@ class RPCTest(unittest.TestCase): response_iterator = multi_callable( request, metadata=( - (b'test', b'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),)) + ('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),)) for _ in range(test_constants.STREAM_LENGTH // 2): next(response_iterator) @@ -430,7 +428,7 @@ class RPCTest(unittest.TestCase): response_iterator = multi_callable( request_iterator, metadata=( - (b'test', b'ConsumingSomeButNotAllStreamResponsesStreamRequest'),)) + ('test', 'ConsumingSomeButNotAllStreamResponsesStreamRequest'),)) for _ in range(test_constants.STREAM_LENGTH // 2): next(response_iterator) @@ -442,7 +440,7 @@ class RPCTest(unittest.TestCase): response_iterator = multi_callable( request_iterator, metadata=( - (b'test', 
b'ConsumingTooManyStreamResponsesStreamRequest'),)) + ('test', 'ConsumingTooManyStreamResponsesStreamRequest'),)) for _ in range(test_constants.STREAM_LENGTH): next(response_iterator) for _ in range(test_constants.STREAM_LENGTH): @@ -461,12 +459,16 @@ class RPCTest(unittest.TestCase): with self._control.pause(): response_future = multi_callable.future( request, - metadata=((b'test', b'CancelledUnaryRequestUnaryResponse'),)) + metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),)) response_future.cancel() self.assertTrue(response_future.cancelled()) with self.assertRaises(grpc.FutureCancelledError): response_future.result() + with self.assertRaises(grpc.FutureCancelledError): + response_future.exception() + with self.assertRaises(grpc.FutureCancelledError): + response_future.traceback() self.assertIs(grpc.StatusCode.CANCELLED, response_future.code()) def testCancelledUnaryRequestStreamResponse(self): @@ -476,7 +478,7 @@ class RPCTest(unittest.TestCase): with self._control.pause(): response_iterator = multi_callable( request, - metadata=((b'test', b'CancelledUnaryRequestStreamResponse'),)) + metadata=(('test', 'CancelledUnaryRequestStreamResponse'),)) self._control.block_until_paused() response_iterator.cancel() @@ -496,13 +498,17 @@ class RPCTest(unittest.TestCase): with self._control.pause(): response_future = multi_callable.future( request_iterator, - metadata=((b'test', b'CancelledStreamRequestUnaryResponse'),)) + metadata=(('test', 'CancelledStreamRequestUnaryResponse'),)) self._control.block_until_paused() response_future.cancel() self.assertTrue(response_future.cancelled()) with self.assertRaises(grpc.FutureCancelledError): response_future.result() + with self.assertRaises(grpc.FutureCancelledError): + response_future.exception() + with self.assertRaises(grpc.FutureCancelledError): + response_future.traceback() self.assertIsNotNone(response_future.initial_metadata()) self.assertIs(grpc.StatusCode.CANCELLED, response_future.code()) self.assertIsNotNone(response_future.details()) @@ -516,7 +522,7 @@ class RPCTest(unittest.TestCase): with self._control.pause(): response_iterator = multi_callable( request_iterator, - metadata=((b'test', b'CancelledStreamRequestStreamResponse'),)) + metadata=(('test', 'CancelledStreamRequestStreamResponse'),)) response_iterator.cancel() with self.assertRaises(grpc.RpcError): @@ -532,11 +538,11 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_unary_multi_callable(self._channel) with self._control.pause(): with self.assertRaises(grpc.RpcError) as exception_context: - multi_callable( + multi_callable.with_call( request, timeout=test_constants.SHORT_TIMEOUT, - metadata=((b'test', b'ExpiredUnaryRequestBlockingUnaryResponse'),), - with_call=True) + metadata=(('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),)) + self.assertIsInstance(exception_context.exception, grpc.Call) self.assertIsNotNone(exception_context.exception.initial_metadata()) self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) @@ -551,7 +557,7 @@ class RPCTest(unittest.TestCase): with self._control.pause(): response_future = multi_callable.future( request, timeout=test_constants.SHORT_TIMEOUT, - metadata=((b'test', b'ExpiredUnaryRequestFutureUnaryResponse'),)) + metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),)) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() @@ -565,6 +571,7 @@ class RPCTest(unittest.TestCase): self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, 
exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, response_future.exception().code()) @@ -576,7 +583,7 @@ class RPCTest(unittest.TestCase): with self.assertRaises(grpc.RpcError) as exception_context: response_iterator = multi_callable( request, timeout=test_constants.SHORT_TIMEOUT, - metadata=((b'test', b'ExpiredUnaryRequestStreamResponse'),)) + metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),)) next(response_iterator) self.assertIs( @@ -592,8 +599,10 @@ class RPCTest(unittest.TestCase): with self.assertRaises(grpc.RpcError) as exception_context: multi_callable( request_iterator, timeout=test_constants.SHORT_TIMEOUT, - metadata=((b'test', b'ExpiredStreamRequestBlockingUnaryResponse'),)) + metadata=(('test', 'ExpiredStreamRequestBlockingUnaryResponse'),)) + self.assertIsInstance(exception_context.exception, grpc.RpcError) + self.assertIsInstance(exception_context.exception, grpc.Call) self.assertIsNotNone(exception_context.exception.initial_metadata()) self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) @@ -609,7 +618,9 @@ class RPCTest(unittest.TestCase): with self._control.pause(): response_future = multi_callable.future( request_iterator, timeout=test_constants.SHORT_TIMEOUT, - metadata=((b'test', b'ExpiredStreamRequestFutureUnaryResponse'),)) + metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),)) + with self.assertRaises(grpc.FutureTimeoutError): + response_future.result(timeout=test_constants.SHORT_TIMEOUT / 2.0) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() @@ -619,6 +630,7 @@ class RPCTest(unittest.TestCase): self.assertIs( grpc.StatusCode.DEADLINE_EXCEEDED, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs(response_future, value_passed_to_callback) self.assertIsNotNone(response_future.initial_metadata()) self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code()) @@ -634,7 +646,7 @@ class RPCTest(unittest.TestCase): with self.assertRaises(grpc.RpcError) as exception_context: response_iterator = multi_callable( request_iterator, timeout=test_constants.SHORT_TIMEOUT, - metadata=((b'test', b'ExpiredStreamRequestStreamResponse'),)) + metadata=(('test', 'ExpiredStreamRequestStreamResponse'),)) next(response_iterator) self.assertIs( @@ -647,10 +659,9 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_unary_multi_callable(self._channel) with self._control.fail(): with self.assertRaises(grpc.RpcError) as exception_context: - multi_callable( + multi_callable.with_call( request, - metadata=((b'test', b'FailedUnaryRequestBlockingUnaryResponse'),), - with_call=True) + metadata=(('test', 'FailedUnaryRequestBlockingUnaryResponse'),)) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) @@ -662,15 +673,18 @@ class RPCTest(unittest.TestCase): with self._control.fail(): response_future = multi_callable.future( request, - metadata=((b'test', b'FailedUnaryRequestFutureUnaryResponse'),)) + metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),)) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() + self.assertIsInstance(response_future, grpc.Future) + self.assertIsInstance(response_future, grpc.Call) with self.assertRaises(grpc.RpcError) as 
exception_context: response_future.result() self.assertIs( grpc.StatusCode.UNKNOWN, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs(grpc.StatusCode.UNKNOWN, response_future.exception().code()) self.assertIs(response_future, value_passed_to_callback) @@ -682,7 +696,7 @@ class RPCTest(unittest.TestCase): with self._control.fail(): response_iterator = multi_callable( request, - metadata=((b'test', b'FailedUnaryRequestStreamResponse'),)) + metadata=(('test', 'FailedUnaryRequestStreamResponse'),)) next(response_iterator) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) @@ -696,7 +710,7 @@ class RPCTest(unittest.TestCase): with self.assertRaises(grpc.RpcError) as exception_context: multi_callable( request_iterator, - metadata=((b'test', b'FailedStreamRequestBlockingUnaryResponse'),)) + metadata=(('test', 'FailedStreamRequestBlockingUnaryResponse'),)) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) @@ -709,7 +723,7 @@ class RPCTest(unittest.TestCase): with self._control.fail(): response_future = multi_callable.future( request_iterator, - metadata=((b'test', b'FailedStreamRequestFutureUnaryResponse'),)) + metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),)) response_future.add_done_callback(callback) value_passed_to_callback = callback.value() @@ -719,6 +733,7 @@ class RPCTest(unittest.TestCase): self.assertIs( grpc.StatusCode.UNKNOWN, exception_context.exception.code()) self.assertIsInstance(response_future.exception(), grpc.RpcError) + self.assertIsNotNone(response_future.traceback()) self.assertIs(response_future, value_passed_to_callback) def testFailedStreamRequestStreamResponse(self): @@ -730,7 +745,7 @@ class RPCTest(unittest.TestCase): with self.assertRaises(grpc.RpcError) as exception_context: response_iterator = multi_callable( request_iterator, - metadata=((b'test', b'FailedStreamRequestStreamResponse'),)) + metadata=(('test', 'FailedStreamRequestStreamResponse'),)) tuple(response_iterator) self.assertIs(grpc.StatusCode.UNKNOWN, exception_context.exception.code()) @@ -742,7 +757,7 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_unary_multi_callable(self._channel) multi_callable.future( request, - metadata=((b'test', b'IgnoredUnaryRequestFutureUnaryResponse'),)) + metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),)) def testIgnoredUnaryRequestStreamResponse(self): request = b'\x37\x17' @@ -750,7 +765,7 @@ class RPCTest(unittest.TestCase): multi_callable = _unary_stream_multi_callable(self._channel) multi_callable( request, - metadata=((b'test', b'IgnoredUnaryRequestStreamResponse'),)) + metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),)) def testIgnoredStreamRequestFutureUnaryResponse(self): requests = tuple(b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH)) @@ -759,7 +774,7 @@ class RPCTest(unittest.TestCase): multi_callable = _stream_unary_multi_callable(self._channel) multi_callable.future( request_iterator, - metadata=((b'test', b'IgnoredStreamRequestFutureUnaryResponse'),)) + metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),)) def testIgnoredStreamRequestStreamResponse(self): requests = tuple(b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH)) @@ -768,7 +783,7 @@ class RPCTest(unittest.TestCase): multi_callable = _stream_stream_multi_callable(self._channel) multi_callable( request_iterator, - metadata=((b'test', 
b'IgnoredStreamRequestStreamResponse'),)) + metadata=(('test', 'IgnoredStreamRequestStreamResponse'),)) if __name__ == '__main__': diff --git a/src/python/grpcio/tests/unit/_sanity/__init__.py b/src/python/grpcio_tests/tests/unit/_sanity/__init__.py index 2f88fa0412..2f88fa0412 100644 --- a/src/python/grpcio/tests/unit/_sanity/__init__.py +++ b/src/python/grpcio_tests/tests/unit/_sanity/__init__.py diff --git a/src/python/grpcio/tests/unit/_sanity/_sanity_test.py b/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py index 0a5a715c0e..e9fdf217ae 100644 --- a/src/python/grpcio/tests/unit/_sanity/_sanity_test.py +++ b/src/python/grpcio_tests/tests/unit/_sanity/_sanity_test.py @@ -30,6 +30,9 @@ import json import unittest +import pkg_resources +import six + import tests @@ -44,8 +47,10 @@ class Sanity(unittest.TestCase): for test_case_class in tests._loader.iterate_suite_cases(loader.suite)] test_suite_names = sorted(set(test_suite_names)) - with open('src/python/grpcio/tests/tests.json') as tests_json_file: - tests_json = json.load(tests_json_file) + tests_json_string = pkg_resources.resource_string('tests', 'tests.json') + if six.PY3: + tests_json_string = tests_json_string.decode() + tests_json = json.loads(tests_json_string) self.assertListEqual(test_suite_names, tests_json) diff --git a/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py new file mode 100644 index 0000000000..3e4f317edc --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py @@ -0,0 +1,117 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+"""Tests for CleanupThread.""" + +import threading +import time +import unittest + +from grpc import _common + +_SHORT_TIME = 0.5 +_LONG_TIME = 2.0 +_EPSILON = 0.1 + + +def cleanup(timeout): + if timeout is not None: + time.sleep(timeout) + else: + time.sleep(_LONG_TIME) + + +def slow_cleanup(timeout): + # Don't respect timeout + time.sleep(_LONG_TIME) + + +class CleanupThreadTest(unittest.TestCase): + + def testTargetInvocation(self): + event = threading.Event() + def target(arg1, arg2, arg3=None): + self.assertEqual('arg1', arg1) + self.assertEqual('arg2', arg2) + self.assertEqual('arg3', arg3) + event.set() + + cleanup_thread = _common.CleanupThread(behavior=lambda x: None, + target=target, name='test-name', + args=('arg1', 'arg2'), kwargs={'arg3': 'arg3'}) + cleanup_thread.start() + cleanup_thread.join() + self.assertEqual(cleanup_thread.name, 'test-name') + self.assertTrue(event.is_set()) + + def testJoinNoTimeout(self): + cleanup_thread = _common.CleanupThread(behavior=cleanup) + cleanup_thread.start() + start_time = time.time() + cleanup_thread.join() + end_time = time.time() + self.assertAlmostEqual(_LONG_TIME, end_time - start_time, delta=_EPSILON) + + def testJoinTimeout(self): + cleanup_thread = _common.CleanupThread(behavior=cleanup) + cleanup_thread.start() + start_time = time.time() + cleanup_thread.join(_SHORT_TIME) + end_time = time.time() + self.assertAlmostEqual(_SHORT_TIME, end_time - start_time, delta=_EPSILON) + + def testJoinTimeoutSlowBehavior(self): + cleanup_thread = _common.CleanupThread(behavior=slow_cleanup) + cleanup_thread.start() + start_time = time.time() + cleanup_thread.join(_SHORT_TIME) + end_time = time.time() + self.assertAlmostEqual(_LONG_TIME, end_time - start_time, delta=_EPSILON) + + def testJoinTimeoutSlowTarget(self): + event = threading.Event() + def target(): + event.wait(_LONG_TIME) + cleanup_thread = _common.CleanupThread(behavior=cleanup, target=target) + cleanup_thread.start() + start_time = time.time() + cleanup_thread.join(_SHORT_TIME) + end_time = time.time() + self.assertAlmostEqual(_SHORT_TIME, end_time - start_time, delta=_EPSILON) + event.set() + + def testJoinZeroTimeout(self): + cleanup_thread = _common.CleanupThread(behavior=cleanup) + cleanup_thread.start() + start_time = time.time() + cleanup_thread.join(0) + end_time = time.time() + self.assertAlmostEqual(0, end_time - start_time, delta=_EPSILON) + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/_links/__init__.py b/src/python/grpcio_tests/tests/unit/beta/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/_links/__init__.py +++ b/src/python/grpcio_tests/tests/unit/beta/__init__.py diff --git a/src/python/grpcio/tests/unit/beta/_beta_features_test.py b/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py index bb2893a21b..3a9701b8eb 100644 --- a/src/python/grpcio/tests/unit/beta/_beta_features_test.py +++ b/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py @@ -42,8 +42,8 @@ from tests.unit.framework.common import test_constants _SERVER_HOST_OVERRIDE = 'foo.test.google.fr' -_PER_RPC_CREDENTIALS_METADATA_KEY = 'my-call-credentials-metadata-key' -_PER_RPC_CREDENTIALS_METADATA_VALUE = 'my-call-credentials-metadata-value' +_PER_RPC_CREDENTIALS_METADATA_KEY = b'my-call-credentials-metadata-key' +_PER_RPC_CREDENTIALS_METADATA_VALUE = b'my-call-credentials-metadata-value' _GROUP = 'group' _UNARY_UNARY = 'unary-unary' diff --git a/src/python/grpcio/grpc/_links/_constants.py 
b/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py index 117fc5a639..5d826a269d 100644 --- a/src/python/grpcio/grpc/_links/_constants.py +++ b/src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py @@ -27,16 +27,22 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Constants for use within this package.""" +"""Tests of grpc.beta._connectivity_channel.""" -from grpc._adapter import _intermediary_low -from grpc.beta import interfaces as beta_interfaces +import unittest -LOW_STATUS_CODE_TO_HIGH_STATUS_CODE = { - low: high for low, high in zip( - _intermediary_low.Code, beta_interfaces.StatusCode) -} +from grpc.beta import interfaces -HIGH_STATUS_CODE_TO_LOW_STATUS_CODE = { - high: low for low, high in LOW_STATUS_CODE_TO_HIGH_STATUS_CODE.items() -} + +class ConnectivityStatesTest(unittest.TestCase): + + def testBetaConnectivityStates(self): + self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE) + self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING) + self.assertIsNotNone(interfaces.ChannelConnectivity.READY) + self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE) + self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio/tests/unit/beta/_face_interface_test.py b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py index 3a67516906..3a67516906 100644 --- a/src/python/grpcio/tests/unit/beta/_face_interface_test.py +++ b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py diff --git a/src/python/grpcio/tests/unit/beta/_implementations_test.py b/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py index 127f93e9bb..127f93e9bb 100644 --- a/src/python/grpcio/tests/unit/beta/_implementations_test.py +++ b/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py diff --git a/src/python/grpcio/tests/unit/beta/_not_found_test.py b/src/python/grpcio_tests/tests/unit/beta/_not_found_test.py index 44fcd1e13c..37b8c49120 100644 --- a/src/python/grpcio/tests/unit/beta/_not_found_test.py +++ b/src/python/grpcio_tests/tests/unit/beta/_not_found_test.py @@ -61,7 +61,7 @@ class NotFoundTest(unittest.TestCase): def test_future_stream_unary_not_found(self): rpc_future = self._generic_stub.future_stream_unary( - 'grupe', 'mevvod', b'def', test_constants.LONG_TIMEOUT) + 'grupe', 'mevvod', [b'def'], test_constants.LONG_TIMEOUT) with self.assertRaises(face.LocalError) as exception_assertion_context: rpc_future.result() self.assertIs( diff --git a/src/python/grpcio/tests/unit/beta/_utilities_test.py b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py index 08ce98e751..90fe10c77c 100644 --- a/src/python/grpcio/tests/unit/beta/_utilities_test.py +++ b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py @@ -33,21 +33,12 @@ import threading import time import unittest -from grpc._adapter import _low -from grpc._adapter import _types from grpc.beta import implementations from grpc.beta import utilities from grpc.framework.foundation import future from tests.unit.framework.common import test_constants -def _drive_completion_queue(completion_queue): - while True: - event = completion_queue.next(time.time() + 24 * 60 * 60) - if event.type == _types.EventType.QUEUE_SHUTDOWN: - break - - class _Callback(object): def __init__(self): @@ -87,13 +78,9 @@ class ChannelConnectivityTest(unittest.TestCase): 
self.assertFalse(ready_future.running()) def test_immediately_connectable_channel_connectivity(self): - server_completion_queue = _low.CompletionQueue() - server = _low.Server(server_completion_queue, []) - port = server.add_http2_port('[::]:0') + server = implementations.server({}) + port = server.add_insecure_port('[::]:0') server.start() - server_completion_queue_thread = threading.Thread( - target=_drive_completion_queue, args=(server_completion_queue,)) - server_completion_queue_thread.start() channel = implementations.insecure_channel('localhost', port) callback = _Callback() @@ -114,9 +101,7 @@ class ChannelConnectivityTest(unittest.TestCase): self.assertFalse(ready_future.running()) finally: ready_future.cancel() - server.shutdown() - server_completion_queue.shutdown() - server_completion_queue_thread.join() + server.stop(0) if __name__ == '__main__': diff --git a/src/python/grpcio/tests/unit/beta/test_utilities.py b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py index 66b5f72897..692da9c97d 100644 --- a/src/python/grpcio/tests/unit/beta/test_utilities.py +++ b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py @@ -50,6 +50,6 @@ def not_really_secure_channel( """ target = '%s:%d' % (host, port) channel = grpc.secure_channel( - target, ((b'grpc.ssl_target_name_override', server_host_override,),), - channel_credentials._credentials) + target, channel_credentials, + (('grpc.ssl_target_name_override', server_host_override,),)) return implementations.Channel(channel) diff --git a/src/python/grpcio/tests/unit/credentials/README b/src/python/grpcio_tests/tests/unit/credentials/README index cb20dcb49f..cb20dcb49f 100644 --- a/src/python/grpcio/tests/unit/credentials/README +++ b/src/python/grpcio_tests/tests/unit/credentials/README diff --git a/src/python/grpcio/tests/unit/credentials/ca.pem b/src/python/grpcio_tests/tests/unit/credentials/ca.pem index 6c8511a73c..6c8511a73c 100755 --- a/src/python/grpcio/tests/unit/credentials/ca.pem +++ b/src/python/grpcio_tests/tests/unit/credentials/ca.pem diff --git a/src/python/grpcio/tests/unit/credentials/server1.key b/src/python/grpcio_tests/tests/unit/credentials/server1.key index 143a5b8765..143a5b8765 100755 --- a/src/python/grpcio/tests/unit/credentials/server1.key +++ b/src/python/grpcio_tests/tests/unit/credentials/server1.key diff --git a/src/python/grpcio/tests/unit/credentials/server1.pem b/src/python/grpcio_tests/tests/unit/credentials/server1.pem index f3d43fcc5b..f3d43fcc5b 100755 --- a/src/python/grpcio/tests/unit/credentials/server1.pem +++ b/src/python/grpcio_tests/tests/unit/credentials/server1.pem diff --git a/src/python/grpcio/tests/unit/beta/__init__.py b/src/python/grpcio_tests/tests/unit/framework/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/beta/__init__.py +++ b/src/python/grpcio_tests/tests/unit/framework/__init__.py diff --git a/src/python/grpcio/tests/unit/framework/__init__.py b/src/python/grpcio_tests/tests/unit/framework/common/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/framework/__init__.py +++ b/src/python/grpcio_tests/tests/unit/framework/common/__init__.py diff --git a/src/python/grpcio/tests/unit/framework/common/test_constants.py b/src/python/grpcio_tests/tests/unit/framework/common/test_constants.py index b6682d396c..b6682d396c 100644 --- a/src/python/grpcio/tests/unit/framework/common/test_constants.py +++ b/src/python/grpcio_tests/tests/unit/framework/common/test_constants.py diff --git 
a/src/python/grpcio/tests/unit/framework/common/test_control.py b/src/python/grpcio_tests/tests/unit/framework/common/test_control.py index 088e2f8b88..088e2f8b88 100644 --- a/src/python/grpcio/tests/unit/framework/common/test_control.py +++ b/src/python/grpcio_tests/tests/unit/framework/common/test_control.py diff --git a/src/python/grpcio/tests/unit/framework/common/test_coverage.py b/src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py index ea2d2812ce..ea2d2812ce 100644 --- a/src/python/grpcio/tests/unit/framework/common/test_coverage.py +++ b/src/python/grpcio_tests/tests/unit/framework/common/test_coverage.py diff --git a/src/python/grpcio/tests/unit/framework/common/__init__.py b/src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/framework/common/__init__.py +++ b/src/python/grpcio_tests/tests/unit/framework/foundation/__init__.py diff --git a/src/python/grpcio/tests/unit/framework/foundation/_logging_pool_test.py b/src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py index 330e445d43..330e445d43 100644 --- a/src/python/grpcio/tests/unit/framework/foundation/_logging_pool_test.py +++ b/src/python/grpcio_tests/tests/unit/framework/foundation/_logging_pool_test.py diff --git a/src/python/grpcio/tests/unit/framework/foundation/stream_testing.py b/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py index 098a53d5e7..098a53d5e7 100644 --- a/src/python/grpcio/tests/unit/framework/foundation/stream_testing.py +++ b/src/python/grpcio_tests/tests/unit/framework/foundation/stream_testing.py diff --git a/src/python/grpcio/tests/unit/framework/core/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/framework/core/__init__.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/__init__.py diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_3069_test_constant.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py index 1ea356c0bf..1ea356c0bf 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_3069_test_constant.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_3069_test_constant.py diff --git a/src/python/grpcio/tests/unit/framework/foundation/__init__.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio/tests/unit/framework/foundation/__init__.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/__init__.py diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py index e338aaa396..e338aaa396 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_digest.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py index f0befb0b27..f0befb0b27 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_digest.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py diff --git 
a/src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py index 791620307b..d32208f9eb 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py @@ -41,6 +41,7 @@ from concurrent import futures import six # test_interfaces is referenced from specification in this module. +from grpc.framework.foundation import future from grpc.framework.foundation import logging_pool from grpc.framework.interfaces.face import face from tests.unit.framework.common import test_constants @@ -159,6 +160,8 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. test_messages.verify(request, response, self) self.assertIs(callback.future(), response_future) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -191,6 +194,8 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. test_messages.verify(requests, response, self) self.assertIs(future_passed_to_callback, response_future) + self.assertIsNone(response_future.exception()) + self.assertIsNone(response_future.traceback()) def testSuccessfulStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -301,6 +306,12 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. self.assertIs(callback.future(), response_future) self.assertFalse(cancel_method_return_value) self.assertTrue(response_future.cancelled()) + with self.assertRaises(future.CancelledError): + response_future.result() + with self.assertRaises(future.CancelledError): + response_future.exception() + with self.assertRaises(future.CancelledError): + response_future.traceback() def testCancelledUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -332,6 +343,12 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. self.assertIs(callback.future(), response_future) self.assertFalse(cancel_method_return_value) self.assertTrue(response_future.cancelled()) + with self.assertRaises(future.CancelledError): + response_future.result() + with self.assertRaises(future.CancelledError): + response_future.exception() + with self.assertRaises(future.CancelledError): + response_future.traceback() def testCancelledStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -363,6 +380,9 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsInstance( + response_future.exception(), face.AbortionError) + self.assertIsNotNone(response_future.traceback()) def testExpiredUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -392,6 +412,9 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. 
response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsInstance( + response_future.exception(), face.AbortionError) + self.assertIsNotNone(response_future.traceback()) def testExpiredStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -426,6 +449,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsNotNone(response_future.traceback()) def testFailedUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -463,6 +487,7 @@ class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest. response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): response_future.result() + self.assertIsNotNone(response_future.traceback()) def testFailedStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_invocation.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py index ac487bed4f..ac487bed4f 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_invocation.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py index f13dff0558..f13dff0558 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_service.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_service.py diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/_stock_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py index 5299655bb3..5299655bb3 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/_stock_service.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py index 71de9d835e..71de9d835e 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/test_cases.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py diff --git a/src/python/grpcio/tests/unit/framework/interfaces/face/test_interfaces.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py index 40f38e68ba..40f38e68ba 100644 --- a/src/python/grpcio/tests/unit/framework/interfaces/face/test_interfaces.py +++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_interfaces.py diff --git a/src/python/grpcio/tests/unit/resources.py b/src/python/grpcio_tests/tests/unit/resources.py index 023cdb155f..023cdb155f 100644 --- a/src/python/grpcio/tests/unit/resources.py +++ b/src/python/grpcio_tests/tests/unit/resources.py diff --git a/src/python/grpcio/tests/unit/test_common.py b/src/python/grpcio_tests/tests/unit/test_common.py index 7b4d20dccb..cd71bd80d7 100644 --- a/src/python/grpcio/tests/unit/test_common.py +++ b/src/python/grpcio_tests/tests/unit/test_common.py @@ -31,12 +31,13 @@ import collections +import grpc import six -INVOCATION_INITIAL_METADATA = ((b'0', b'abc'), (b'1', b'def'), (b'2', b'ghi'),) -SERVICE_INITIAL_METADATA = ((b'3', 
b'jkl'), (b'4', b'mno'), (b'5', b'pqr'),) -SERVICE_TERMINAL_METADATA = ((b'6', b'stu'), (b'7', b'vwx'), (b'8', b'yza'),) -DETAILS = b'test details' +INVOCATION_INITIAL_METADATA = (('0', 'abc'), ('1', 'def'), ('2', 'ghi'),) +SERVICE_INITIAL_METADATA = (('3', 'jkl'), ('4', 'mno'), ('5', 'pqr'),) +SERVICE_TERMINAL_METADATA = (('6', 'stu'), ('7', 'vwx'), ('8', 'yza'),) +DETAILS = 'test details' def metadata_transmitted(original_metadata, transmitted_metadata): @@ -59,12 +60,10 @@ def metadata_transmitted(original_metadata, transmitted_metadata): original_metadata after having been transmitted via gRPC. """ original = collections.defaultdict(list) - for key_value_pair in original_metadata: - key, value = tuple(key_value_pair) + for key, value in original_metadata: original[key].append(value) transmitted = collections.defaultdict(list) - for key_value_pair in transmitted_metadata: - key, value = tuple(key_value_pair) + for key, value in transmitted_metadata: transmitted[key].append(value) for key, values in six.iteritems(original): @@ -80,3 +79,24 @@ def metadata_transmitted(original_metadata, transmitted_metadata): return False else: return True + + +def test_secure_channel( + target, channel_credentials, server_host_override): + """Creates an insecure Channel to a remote host. + + Args: + host: The name of the remote host to which to connect. + port: The port of the remote host to which to connect. + channel_credentials: The implementations.ChannelCredentials with which to + connect. + server_host_override: The target name used for SSL host name checking. + + Returns: + An implementations.Channel to the remote host through which RPCs may be + conducted. + """ + channel = grpc.secure_channel( + target, channel_credentials, + (('grpc.ssl_target_name_override', server_host_override,),)) + return channel |
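
The test migrations above (bytes metadata keys to str, and the with_call=True flag to the .with_call() method) reduce to a small client-side pattern. A minimal sketch, assuming a local server that registers a '/test/UnaryUnary' handler the way these tests do; the address is an assumption, since the tests bind '[::]:0' and use whatever port is returned:

    import grpc

    channel = grpc.insecure_channel('localhost:50051')  # port assumed
    multi_callable = channel.unary_unary('/test/UnaryUnary')

    # .with_call() returns the response together with a grpc.Call whose status,
    # details and metadata can be inspected, replacing the old with_call=True flag.
    response, call = multi_callable.with_call(
        b'\x00\x00\x00', metadata=(('test', 'WithCallSketch'),))
    assert call.code() is grpc.StatusCode.OK
    print(call.initial_metadata(), call.trailing_metadata())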
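
The _rpc_test additions also pin down how the object returned by .future() behaves. A rough illustration under the same assumptions (the channel and test server from the previous sketch):

    multi_callable = channel.unary_unary('/test/UnaryUnary')
    response_future = multi_callable.future(
        b'\x00\x00\x00', metadata=(('test', 'FutureSketch'),))

    # The future is both a grpc.Future and a grpc.Call; on success its
    # exception() and traceback() return None.
    assert isinstance(response_future, grpc.Future)
    assert isinstance(response_future, grpc.Call)

    # If the RPC is cancelled before it completes, result(), exception() and
    # traceback() all raise grpc.FutureCancelledError, which is what the new
    # assertions check (the tests pause the server to guarantee cancellation).
    if response_future.cancel():
        try:
            response_future.result()
        except grpc.FutureCancelledError:
            pass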
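
The changes to test_utilities.py and the new test_common.test_secure_channel reflect the grpc.secure_channel argument order: the channel credentials are the second positional argument, and channel options follow as a sequence of key/value pairs. A hedged sketch using the test CA and the host override named in this diff; the ca.pem path and target address are assumptions, and `grpc` is imported as above:

    with open('tests/unit/credentials/ca.pem', 'rb') as ca_file:
        credentials = grpc.ssl_channel_credentials(ca_file.read())
    channel = grpc.secure_channel(
        'localhost:50051', credentials,
        (('grpc.ssl_target_name_override', 'foo.test.google.fr',),))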