Diffstat (limited to 'src')
-rw-r--r--  src/c-ares/CMakeLists.txt | 49
-rwxr-xr-x  src/c-ares/gen_build_yaml.py | 149
-rw-r--r--  src/compiler/csharp_generator.cc | 77
-rw-r--r--  src/compiler/php_generator.cc | 2
-rw-r--r--  src/compiler/python_generator.cc | 8
-rw-r--r--  src/core/ext/census/grpc_filter.c | 4
-rw-r--r--  src/core/ext/client_channel/channel_connectivity.c | 4
-rw-r--r--  src/core/ext/client_channel/client_channel.c | 188
-rw-r--r--  src/core/ext/client_channel/client_channel_plugin.c | 3
-rw-r--r--  src/core/ext/client_channel/connector.h | 2
-rw-r--r--  src/core/ext/client_channel/http_connect_handshaker.c | 4
-rw-r--r--  src/core/ext/client_channel/proxy_mapper_registry.c | 12
-rw-r--r--  src/core/ext/client_channel/resolver_registry.c | 1
-rw-r--r--  src/core/ext/client_channel/retry_throttle.c | 210
-rw-r--r--  src/core/ext/client_channel/retry_throttle.h | 65
-rw-r--r--  src/core/ext/client_channel/subchannel.c | 70
-rw-r--r--  src/core/ext/client_channel/subchannel.h | 18
-rw-r--r--  src/core/ext/lb_policy/grpclb/grpclb.c | 22
-rw-r--r--  src/core/ext/lb_policy/pick_first/pick_first.c | 27
-rw-r--r--  src/core/ext/lb_policy/round_robin/round_robin.c | 29
-rw-r--r--  src/core/ext/load_reporting/load_reporting_filter.c | 6
-rw-r--r--  src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c | 350
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h | 65
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c | 319
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c | 289
-rw-r--r--  src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h | 63
-rw-r--r--  src/core/ext/resolver/dns/native/dns_resolver.c | 22
-rw-r--r--  src/core/ext/transport/chttp2/client/chttp2_connector.c | 41
-rw-r--r--  src/core/ext/transport/chttp2/server/chttp2_server.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 262
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_data.c | 17
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_goaway.c | 5
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.c | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_rst_stream.c | 7
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_settings.c | 11
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_window_update.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_parser.c | 73
-rw-r--r--  src/core/ext/transport/chttp2/transport/hpack_table.c | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/incoming_metadata.c | 65
-rw-r--r--  src/core/ext/transport/chttp2/transport/incoming_metadata.h | 18
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 48
-rw-r--r--  src/core/ext/transport/cronet/transport/cronet_transport.c | 115
-rw-r--r--  src/core/lib/channel/channel_stack.c | 36
-rw-r--r--  src/core/lib/channel/channel_stack.h | 23
-rw-r--r--  src/core/lib/channel/compress_filter.c | 2
-rw-r--r--  src/core/lib/channel/connected_channel.c | 9
-rw-r--r--  src/core/lib/channel/deadline_filter.c | 8
-rw-r--r--  src/core/lib/channel/handshaker.c | 5
-rw-r--r--  src/core/lib/channel/http_client_filter.c | 67
-rw-r--r--  src/core/lib/channel/http_server_filter.c | 113
-rw-r--r--  src/core/lib/channel/message_size_filter.c | 13
-rw-r--r--  src/core/lib/http/httpcli.c | 14
-rw-r--r--  src/core/lib/http/httpcli_security_connector.c | 2
-rw-r--r--  src/core/lib/http/parser.c | 88
-rw-r--r--  src/core/lib/iomgr/closure.c | 4
-rw-r--r--  src/core/lib/iomgr/combiner.c | 2
-rw-r--r--  src/core/lib/iomgr/error.c | 88
-rw-r--r--  src/core/lib/iomgr/error.h | 53
-rw-r--r--  src/core/lib/iomgr/ev_epoll_linux.c | 14
-rw-r--r--  src/core/lib/iomgr/ev_poll_posix.c | 12
-rw-r--r--  src/core/lib/iomgr/executor.c | 4
-rw-r--r--  src/core/lib/iomgr/load_file.c | 9
-rw-r--r--  src/core/lib/iomgr/port.h | 4
-rw-r--r--  src/core/lib/iomgr/resolve_address_posix.c | 24
-rw-r--r--  src/core/lib/iomgr/resolve_address_uv.c | 14
-rw-r--r--  src/core/lib/iomgr/resolve_address_windows.c | 4
-rw-r--r--  src/core/lib/iomgr/socket_factory_posix.c | 110
-rw-r--r--  src/core/lib/iomgr/socket_factory_posix.h | 90
-rw-r--r--  src/core/lib/iomgr/socket_utils_common_posix.c | 34
-rw-r--r--  src/core/lib/iomgr/socket_utils_posix.h | 7
-rw-r--r--  src/core/lib/iomgr/tcp_client_posix.c | 22
-rw-r--r--  src/core/lib/iomgr/tcp_client_uv.c | 16
-rw-r--r--  src/core/lib/iomgr/tcp_client_windows.c | 7
-rw-r--r--  src/core/lib/iomgr/tcp_posix.c | 19
-rw-r--r--  src/core/lib/iomgr/tcp_server_posix.c | 445
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix.h | 134
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_common.c | 221
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c | 196
-rw-r--r--  src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c (renamed from src/core/ext/client_channel/initial_connect_string.c) | 27
-rw-r--r--  src/core/lib/iomgr/tcp_server_uv.c | 27
-rw-r--r--  src/core/lib/iomgr/tcp_server_windows.c | 11
-rw-r--r--  src/core/lib/iomgr/tcp_uv.c | 15
-rw-r--r--  src/core/lib/iomgr/tcp_windows.c | 22
-rw-r--r--  src/core/lib/iomgr/timer_generic.c | 13
-rw-r--r--  src/core/lib/iomgr/timer_uv.c | 4
-rw-r--r--  src/core/lib/iomgr/udp_server.c | 62
-rw-r--r--  src/core/lib/iomgr/udp_server.h | 2
-rw-r--r--  src/core/lib/iomgr/unix_sockets_posix.c | 2
-rw-r--r--  src/core/lib/iomgr/unix_sockets_posix_noop.c | 3
-rw-r--r--  src/core/lib/profiling/basic_timers.c | 2
-rw-r--r--  src/core/lib/security/credentials/google_default/google_default_credentials.c | 19
-rw-r--r--  src/core/lib/security/transport/client_auth_filter.c | 21
-rw-r--r--  src/core/lib/security/transport/secure_endpoint.c | 11
-rw-r--r--  src/core/lib/security/transport/security_connector.c | 21
-rw-r--r--  src/core/lib/security/transport/security_handshaker.c | 27
-rw-r--r--  src/core/lib/security/transport/server_auth_filter.c | 12
-rw-r--r--  src/core/lib/security/transport/tsi_error.c | 8
-rw-r--r--  src/core/lib/security/util/b64.c | 25
-rw-r--r--  src/core/lib/security/util/b64.h | 14
-rw-r--r--  src/core/lib/support/atm.c (renamed from src/core/ext/client_channel/default_initial_connect_string.c) | 19
-rw-r--r--  src/core/lib/surface/call.c | 88
-rw-r--r--  src/core/lib/surface/channel.c | 78
-rw-r--r--  src/core/lib/surface/channel.h | 8
-rw-r--r--  src/core/lib/surface/completion_queue_factory.c | 77
-rw-r--r--  src/core/lib/surface/completion_queue_factory.h (renamed from src/core/ext/client_channel/initial_connect_string.h) | 29
-rw-r--r--  src/core/lib/surface/lame_client.c | 12
-rw-r--r--  src/core/lib/surface/server.c | 40
-rw-r--r--  src/core/lib/surface/validate_metadata.c | 12
-rw-r--r--  src/core/lib/surface/version.c | 2
-rw-r--r--  src/core/lib/transport/connectivity_state.c | 3
-rw-r--r--  src/core/lib/transport/error_utils.c | 12
-rw-r--r--  src/core/lib/transport/error_utils.h | 2
-rw-r--r--  src/core/lib/transport/metadata_batch.c | 13
-rw-r--r--  src/core/lib/transport/service_config.c | 12
-rw-r--r--  src/core/lib/transport/service_config.h | 6
-rw-r--r--  src/core/lib/transport/transport.c | 9
-rw-r--r--  src/core/lib/transport/transport.h | 6
-rw-r--r--  src/core/lib/transport/transport_impl.h | 5
-rw-r--r--  src/core/plugin_registry/grpc_plugin_registry.c | 4
-rw-r--r--  src/core/plugin_registry/grpc_unsecure_plugin_registry.c | 4
-rw-r--r--  src/cpp/common/channel_arguments.cc | 31
-rw-r--r--  src/cpp/common/channel_filter.h | 3
-rw-r--r--  src/csharp/Grpc.Core/ChannelOptions.cs | 9
-rw-r--r--  src/csharp/Grpc.Examples/MathGrpc.cs | 82
-rw-r--r--  src/csharp/Grpc.HealthCheck/HealthGrpc.cs | 36
-rw-r--r--  src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs | 52
-rw-r--r--  src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs | 142
-rw-r--r--  src/csharp/Grpc.IntegrationTesting/TestGrpc.cs | 250
-rw-r--r--  src/csharp/Grpc.Reflection/ReflectionGrpc.cs | 30
-rw-r--r--  src/node/ext/server_uv.cc | 2
-rw-r--r--  src/node/src/common.js | 1
-rw-r--r--  src/node/src/server.js | 13
-rw-r--r--  src/node/test/surface_test.js | 26
-rw-r--r--  src/objective-c/!ProtoCompiler-gRPCPlugin.podspec | 4
-rw-r--r--  src/objective-c/!ProtoCompiler.podspec | 4
-rw-r--r--  src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m | 3
-rw-r--r--  src/objective-c/tests/CronetUnitTests/CronetUnitTests.m | 45
-rw-r--r--  src/php/lib/Grpc/AbstractCall.php | 8
-rw-r--r--  src/python/grpcio/grpc/__init__.py | 11
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/security.pxd.pxi | 2
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi | 18
-rw-r--r--  src/python/grpcio/grpc/_cython/cygrpc.pyx | 10
-rw-r--r--  src/python/grpcio/grpc/_server.py | 94
-rw-r--r--  src/python/grpcio/grpc_core_dependencies.py | 63
-rw-r--r--  src/python/grpcio_health_checking/setup.py | 4
-rw-r--r--  src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py | 59
-rw-r--r--  src/python/grpcio_reflection/setup.py | 4
-rw-r--r--  src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py | 8
-rw-r--r--  src/python/grpcio_tests/tests/interop/_secure_intraop_test.py | 8
-rw-r--r--  src/python/grpcio_tests/tests/interop/methods.py | 4
-rw-r--r--  src/python/grpcio_tests/tests/interop/server.py | 5
-rw-r--r--  src/python/grpcio_tests/tests/qps/qps_worker.py | 4
-rw-r--r--  src/python/grpcio_tests/tests/qps/worker_server.py | 8
-rw-r--r--  src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py | 43
-rw-r--r--  src/python/grpcio_tests/tests/stress/client.py | 4
-rw-r--r--  src/python/grpcio_tests/tests/tests.json | 1
-rw-r--r--  src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py | 270
-rw-r--r--  src/ruby/.rubocop.yml | 1
-rw-r--r--  src/ruby/end2end/README.md | 18
-rwxr-xr-x  src/ruby/end2end/channel_closing_client.rb | 84
-rwxr-xr-x  src/ruby/end2end/channel_closing_driver.rb | 67
-rwxr-xr-x  src/ruby/end2end/channel_state_client.rb | 54
-rwxr-xr-x  src/ruby/end2end/channel_state_driver.rb | 64
-rwxr-xr-x  src/ruby/end2end/end2end_common.rb | 109
-rw-r--r--  src/ruby/end2end/gen_protos.sh | 32
-rw-r--r--  src/ruby/end2end/lib/client_control_pb.rb | 17
-rw-r--r--  src/ruby/end2end/lib/client_control_services_pb.rb | 53
-rw-r--r--  src/ruby/end2end/lib/echo_pb.rb | 18
-rw-r--r--  src/ruby/end2end/lib/echo_services_pb.rb | 52
-rw-r--r--  src/ruby/end2end/protos/client_control.proto | 43
-rw-r--r--  src/ruby/end2end/protos/echo.proto | 46
-rwxr-xr-x  src/ruby/end2end/sig_handling_client.rb | 89
-rwxr-xr-x  src/ruby/end2end/sig_handling_driver.rb | 61
-rwxr-xr-x  src/ruby/end2end/sig_int_during_channel_watch_client.rb | 70
-rwxr-xr-x  src/ruby/end2end/sig_int_during_channel_watch_driver.rb | 69
-rw-r--r--  src/ruby/ext/grpc/extconf.rb | 1
-rw-r--r--  src/ruby/ext/grpc/rb_channel.c | 331
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.c | 6
-rw-r--r--  src/ruby/ext/grpc/rb_grpc_imports.generated.h | 9
-rw-r--r--  src/ruby/spec/channel_connection_spec.rb | 141
-rw-r--r--  src/ruby/spec/channel_spec.rb | 29
183 files changed, 6356 insertions, 1911 deletions
diff --git a/src/c-ares/CMakeLists.txt b/src/c-ares/CMakeLists.txt
new file mode 100644
index 0000000000..22acd51048
--- /dev/null
+++ b/src/c-ares/CMakeLists.txt
@@ -0,0 +1,49 @@
+# c-ares cmake file for gRPC
+#
+# This is currently very experimental, and unsupported.
+#
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+string(TOLOWER ${CMAKE_SYSTEM_NAME} cares_system_name)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/cares)
+
+if(${cares_system_name} MATCHES windows)
+ add_definitions(-DCARES_STATICLIB=1)
+ add_definitions(-DWIN32_LEAN_AND_MEAN=1)
+else()
+ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/config_${cares_system_name})
+ add_definitions(-DHAVE_CONFIG_H=1)
+ add_definitions(-D_GNU_SOURCE=1)
+endif()
+
+file(GLOB lib_sources ../../third_party/cares/cares/*.c)
+add_library(cares ${lib_sources})
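The CMake glue above compiles the vendored c-ares sources into a single static `cares` library that the new ares-based DNS resolver files in the diffstat (src/core/ext/resolver/dns/c_ares/*) link against. For orientation, a minimal, self-contained sketch of the plain c-ares API that library provides follows; the hostname and the blocking select() loop are placeholders for illustration only, since the resolver added in this commit drives the same calls through grpc's pollers instead.

/* Hedged sketch: resolve one name with the vendored c-ares library.
 * Not code from this commit; it only shows the ares_* entry points the
 * new resolver builds on. */
#include <ares.h>
#include <netdb.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/socket.h>

static void on_resolved(void *arg, int status, int timeouts,
                        struct hostent *host) {
  (void)arg;
  (void)timeouts;
  if (status != ARES_SUCCESS || host == NULL) {
    fprintf(stderr, "resolution failed: %s\n", ares_strerror(status));
    return;
  }
  printf("resolved %s\n", host->h_name);
}

int main(void) {
  ares_library_init(ARES_LIB_INIT_ALL);
  ares_channel channel;
  if (ares_init(&channel) != ARES_SUCCESS) return 1;
  ares_gethostbyname(channel, "example.com", AF_INET, on_resolved, NULL);
  for (;;) {
    fd_set read_fds, write_fds;
    FD_ZERO(&read_fds);
    FD_ZERO(&write_fds);
    int nfds = ares_fds(channel, &read_fds, &write_fds);
    if (nfds == 0) break; /* no queries left in flight */
    struct timeval tv;
    struct timeval *tvp = ares_timeout(channel, NULL, &tv);
    select(nfds, &read_fds, &write_fds, NULL, tvp);
    ares_process(channel, &read_fds, &write_fds);
  }
  ares_destroy(channel);
  ares_library_cleanup();
  return 0;
}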
diff --git a/src/c-ares/gen_build_yaml.py b/src/c-ares/gen_build_yaml.py
new file mode 100755
index 0000000000..b2ae971f37
--- /dev/null
+++ b/src/c-ares/gen_build_yaml.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python2.7
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import os
+import sys
+import yaml
+
+os.chdir(os.path.dirname(sys.argv[0])+'/../..')
+
+out = {}
+
+try:
+ def gen_ares_build(x):
+ subprocess.call("third_party/cares/cares/buildconf", shell=True)
+ subprocess.call("third_party/cares/cares/configure", shell=True)
+
+ def config_platform(x):
+ if 'linux' in sys.platform:
+ return 'src/cares/cares/config_linux/ares_config.h'
+ if 'darwin' in sys.platform:
+ return 'src/cares/cares/config_darwin/ares_config.h'
+ if not os.path.isfile('third_party/cares/cares/ares_config.h'):
+ gen_ares_build(x)
+ return 'third_party/cares/cares/ares_config.h'
+
+ def ares_build(x):
+ if os.path.isfile('src/cares/cares/ares_build.h'):
+ return 'src/cares/cares/ares_build.h'
+ if not os.path.isfile('third_party/cares/cares/ares_build.h'):
+ gen_ares_build(x)
+ return 'third_party/cares/cares/ares_build.h'
+
+ out['libs'] = [{
+ 'name': 'ares',
+ 'defaults': 'ares',
+ 'build': 'private',
+ 'language': 'c',
+ 'secure': 'no',
+ 'src': [
+ "third_party/cares/cares/ares__close_sockets.c",
+ "third_party/cares/cares/ares__get_hostent.c",
+ "third_party/cares/cares/ares__read_line.c",
+ "third_party/cares/cares/ares__timeval.c",
+ "third_party/cares/cares/ares_cancel.c",
+ "third_party/cares/cares/ares_create_query.c",
+ "third_party/cares/cares/ares_data.c",
+ "third_party/cares/cares/ares_destroy.c",
+ "third_party/cares/cares/ares_expand_name.c",
+ "third_party/cares/cares/ares_expand_string.c",
+ "third_party/cares/cares/ares_fds.c",
+ "third_party/cares/cares/ares_free_hostent.c",
+ "third_party/cares/cares/ares_free_string.c",
+ "third_party/cares/cares/ares_getenv.c",
+ "third_party/cares/cares/ares_gethostbyaddr.c",
+ "third_party/cares/cares/ares_gethostbyname.c",
+ "third_party/cares/cares/ares_getnameinfo.c",
+ "third_party/cares/cares/ares_getopt.c",
+ "third_party/cares/cares/ares_getsock.c",
+ "third_party/cares/cares/ares_init.c",
+ "third_party/cares/cares/ares_library_init.c",
+ "third_party/cares/cares/ares_llist.c",
+ "third_party/cares/cares/ares_mkquery.c",
+ "third_party/cares/cares/ares_nowarn.c",
+ "third_party/cares/cares/ares_options.c",
+ "third_party/cares/cares/ares_parse_a_reply.c",
+ "third_party/cares/cares/ares_parse_aaaa_reply.c",
+ "third_party/cares/cares/ares_parse_mx_reply.c",
+ "third_party/cares/cares/ares_parse_naptr_reply.c",
+ "third_party/cares/cares/ares_parse_ns_reply.c",
+ "third_party/cares/cares/ares_parse_ptr_reply.c",
+ "third_party/cares/cares/ares_parse_soa_reply.c",
+ "third_party/cares/cares/ares_parse_srv_reply.c",
+ "third_party/cares/cares/ares_parse_txt_reply.c",
+ "third_party/cares/cares/ares_platform.c",
+ "third_party/cares/cares/ares_process.c",
+ "third_party/cares/cares/ares_query.c",
+ "third_party/cares/cares/ares_search.c",
+ "third_party/cares/cares/ares_send.c",
+ "third_party/cares/cares/ares_strcasecmp.c",
+ "third_party/cares/cares/ares_strdup.c",
+ "third_party/cares/cares/ares_strerror.c",
+ "third_party/cares/cares/ares_timeout.c",
+ "third_party/cares/cares/ares_version.c",
+ "third_party/cares/cares/ares_writev.c",
+ "third_party/cares/cares/bitncmp.c",
+ "third_party/cares/cares/inet_net_pton.c",
+ "third_party/cares/cares/inet_ntop.c",
+ "third_party/cares/cares/windows_port.c",
+ ],
+ 'headers': [
+ "third_party/cares/cares/ares.h",
+ "third_party/cares/cares/ares_data.h",
+ "third_party/cares/cares/ares_dns.h",
+ "third_party/cares/cares/ares_getenv.h",
+ "third_party/cares/cares/ares_getopt.h",
+ "third_party/cares/cares/ares_inet_net_pton.h",
+ "third_party/cares/cares/ares_iphlpapi.h",
+ "third_party/cares/cares/ares_ipv6.h",
+ "third_party/cares/cares/ares_library_init.h",
+ "third_party/cares/cares/ares_llist.h",
+ "third_party/cares/cares/ares_nowarn.h",
+ "third_party/cares/cares/ares_platform.h",
+ "third_party/cares/cares/ares_private.h",
+ "third_party/cares/cares/ares_rules.h",
+ "third_party/cares/cares/ares_setup.h",
+ "third_party/cares/cares/ares_strcasecmp.h",
+ "third_party/cares/cares/ares_strdup.h",
+ "third_party/cares/cares/ares_version.h",
+ "third_party/cares/cares/bitncmp.h",
+ "third_party/cares/cares/config-win32.h",
+ "third_party/cares/cares/setup_once.h",
+ "third_party/cares/ares_build.h",
+ "third_party/cares/config_linux/ares_config.h",
+ "third_party/cares/config_darwin/ares_config.h"
+ ],
+ }]
+except:
+ pass
+
+print yaml.dump(out)
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index cc7a7a96ae..ce1e6b94c3 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -203,13 +203,13 @@ std::string GetServerClassName(const ServiceDescriptor *service) {
std::string GetCSharpMethodType(MethodType method_type) {
switch (method_type) {
case METHODTYPE_NO_STREAMING:
- return "MethodType.Unary";
+ return "grpc::MethodType.Unary";
case METHODTYPE_CLIENT_STREAMING:
- return "MethodType.ClientStreaming";
+ return "grpc::MethodType.ClientStreaming";
case METHODTYPE_SERVER_STREAMING:
- return "MethodType.ServerStreaming";
+ return "grpc::MethodType.ServerStreaming";
case METHODTYPE_BIDI_STREAMING:
- return "MethodType.DuplexStreaming";
+ return "grpc::MethodType.DuplexStreaming";
}
GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
@@ -243,16 +243,19 @@ std::string GetAccessLevel(bool internal_access) {
std::string GetMethodReturnTypeClient(const MethodDescriptor *method) {
switch (GetMethodType(method)) {
case METHODTYPE_NO_STREAMING:
- return "AsyncUnaryCall<" + GetClassName(method->output_type()) + ">";
+ return "grpc::AsyncUnaryCall<" + GetClassName(method->output_type()) +
+ ">";
case METHODTYPE_CLIENT_STREAMING:
- return "AsyncClientStreamingCall<" + GetClassName(method->input_type()) +
- ", " + GetClassName(method->output_type()) + ">";
+ return "grpc::AsyncClientStreamingCall<" +
+ GetClassName(method->input_type()) + ", " +
+ GetClassName(method->output_type()) + ">";
case METHODTYPE_SERVER_STREAMING:
- return "AsyncServerStreamingCall<" + GetClassName(method->output_type()) +
- ">";
+ return "grpc::AsyncServerStreamingCall<" +
+ GetClassName(method->output_type()) + ">";
case METHODTYPE_BIDI_STREAMING:
- return "AsyncDuplexStreamingCall<" + GetClassName(method->input_type()) +
- ", " + GetClassName(method->output_type()) + ">";
+ return "grpc::AsyncDuplexStreamingCall<" +
+ GetClassName(method->input_type()) + ", " +
+ GetClassName(method->output_type()) + ">";
}
GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
@@ -265,7 +268,7 @@ std::string GetMethodRequestParamServer(const MethodDescriptor *method) {
return GetClassName(method->input_type()) + " request";
case METHODTYPE_CLIENT_STREAMING:
case METHODTYPE_BIDI_STREAMING:
- return "IAsyncStreamReader<" + GetClassName(method->input_type()) +
+ return "grpc::IAsyncStreamReader<" + GetClassName(method->input_type()) +
"> requestStream";
}
GOOGLE_LOG(FATAL) << "Can't get here.";
@@ -293,8 +296,8 @@ std::string GetMethodResponseStreamMaybe(const MethodDescriptor *method) {
return "";
case METHODTYPE_SERVER_STREAMING:
case METHODTYPE_BIDI_STREAMING:
- return ", IServerStreamWriter<" + GetClassName(method->output_type()) +
- "> responseStream";
+ return ", grpc::IServerStreamWriter<" +
+ GetClassName(method->output_type()) + "> responseStream";
}
GOOGLE_LOG(FATAL) << "Can't get here.";
return "";
@@ -325,8 +328,8 @@ void GenerateMarshallerFields(Printer *out, const ServiceDescriptor *service) {
for (size_t i = 0; i < used_messages.size(); i++) {
const Descriptor *message = used_messages[i];
out->Print(
- "static readonly Marshaller<$type$> $fieldname$ = "
- "Marshallers.Create((arg) => "
+ "static readonly grpc::Marshaller<$type$> $fieldname$ = "
+ "grpc::Marshallers.Create((arg) => "
"global::Google.Protobuf.MessageExtensions.ToByteArray(arg), "
"$type$.Parser.ParseFrom);\n",
"fieldname", GetMarshallerFieldName(message), "type",
@@ -337,8 +340,8 @@ void GenerateMarshallerFields(Printer *out, const ServiceDescriptor *service) {
void GenerateStaticMethodField(Printer *out, const MethodDescriptor *method) {
out->Print(
- "static readonly Method<$request$, $response$> $fieldname$ = new "
- "Method<$request$, $response$>(\n",
+ "static readonly grpc::Method<$request$, $response$> $fieldname$ = new "
+ "grpc::Method<$request$, $response$>(\n",
"fieldname", GetMethodFieldName(method), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
@@ -389,7 +392,7 @@ void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
out->Print(
"public virtual $returntype$ "
"$methodname$($request$$response_stream_maybe$, "
- "ServerCallContext context)\n",
+ "grpc::ServerCallContext context)\n",
"methodname", method->name(), "returntype",
GetMethodReturnTypeServer(method), "request",
GetMethodRequestParamServer(method), "response_stream_maybe",
@@ -397,8 +400,8 @@ void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
out->Print("{\n");
out->Indent();
out->Print(
- "throw new RpcException("
- "new Status(StatusCode.Unimplemented, \"\"));\n");
+ "throw new grpc::RpcException("
+ "new grpc::Status(grpc::StatusCode.Unimplemented, \"\"));\n");
out->Outdent();
out->Print("}\n\n");
}
@@ -410,7 +413,7 @@ void GenerateServerClass(Printer *out, const ServiceDescriptor *service) {
void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
out->Print("/// <summary>Client for $servicename$</summary>\n", "servicename",
GetServiceClassName(service));
- out->Print("public partial class $name$ : ClientBase<$name$>\n", "name",
+ out->Print("public partial class $name$ : grpc::ClientBase<$name$>\n", "name",
GetClientClassName(service));
out->Print("{\n");
out->Indent();
@@ -421,7 +424,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
"/// <param name=\"channel\">The channel to use to make remote "
"calls.</param>\n",
"servicename", GetServiceClassName(service));
- out->Print("public $name$(Channel channel) : base(channel)\n", "name",
+ out->Print("public $name$(grpc::Channel channel) : base(channel)\n", "name",
GetClientClassName(service));
out->Print("{\n");
out->Print("}\n");
@@ -431,8 +434,9 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
"/// <param name=\"callInvoker\">The callInvoker to use to make remote "
"calls.</param>\n",
"servicename", GetServiceClassName(service));
- out->Print("public $name$(CallInvoker callInvoker) : base(callInvoker)\n",
- "name", GetClientClassName(service));
+ out->Print(
+ "public $name$(grpc::CallInvoker callInvoker) : base(callInvoker)\n",
+ "name", GetClientClassName(service));
out->Print("{\n");
out->Print("}\n");
out->Print(
@@ -461,7 +465,8 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
// unary calls have an extra synchronous stub method
GenerateDocCommentClientMethod(out, method, true, false);
out->Print(
- "public virtual $response$ $methodname$($request$ request, Metadata "
+ "public virtual $response$ $methodname$($request$ request, "
+ "grpc::Metadata "
"headers = null, DateTime? deadline = null, CancellationToken "
"cancellationToken = default(CancellationToken))\n",
"methodname", method->name(), "request",
@@ -470,7 +475,8 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
out->Print("{\n");
out->Indent();
out->Print(
- "return $methodname$(request, new CallOptions(headers, deadline, "
+ "return $methodname$(request, new grpc::CallOptions(headers, "
+ "deadline, "
"cancellationToken));\n",
"methodname", method->name());
out->Outdent();
@@ -480,7 +486,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
GenerateDocCommentClientMethod(out, method, true, true);
out->Print(
"public virtual $response$ $methodname$($request$ request, "
- "CallOptions options)\n",
+ "grpc::CallOptions options)\n",
"methodname", method->name(), "request",
GetClassName(method->input_type()), "response",
GetClassName(method->output_type()));
@@ -500,7 +506,8 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
}
GenerateDocCommentClientMethod(out, method, false, false);
out->Print(
- "public virtual $returntype$ $methodname$($request_maybe$Metadata "
+ "public virtual $returntype$ "
+ "$methodname$($request_maybe$grpc::Metadata "
"headers = null, DateTime? deadline = null, CancellationToken "
"cancellationToken = default(CancellationToken))\n",
"methodname", method_name, "request_maybe",
@@ -510,7 +517,8 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
out->Indent();
out->Print(
- "return $methodname$($request_maybe$new CallOptions(headers, deadline, "
+ "return $methodname$($request_maybe$new grpc::CallOptions(headers, "
+ "deadline, "
"cancellationToken));\n",
"methodname", method_name, "request_maybe",
GetMethodRequestParamMaybe(method, true));
@@ -520,7 +528,8 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) {
// overload taking CallOptions as a param
GenerateDocCommentClientMethod(out, method, false, true);
out->Print(
- "public virtual $returntype$ $methodname$($request_maybe$CallOptions "
+ "public virtual $returntype$ "
+ "$methodname$($request_maybe$grpc::CallOptions "
"options)\n",
"methodname", method_name, "request_maybe",
GetMethodRequestParamMaybe(method), "returntype",
@@ -587,13 +596,13 @@ void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) {
"/// <param name=\"serviceImpl\">An object implementing the server-side"
" handling logic.</param>\n");
out->Print(
- "public static ServerServiceDefinition BindService($implclass$ "
+ "public static grpc::ServerServiceDefinition BindService($implclass$ "
"serviceImpl)\n",
"implclass", GetServerClassName(service));
out->Print("{\n");
out->Indent();
- out->Print("return ServerServiceDefinition.CreateBuilder()\n");
+ out->Print("return grpc::ServerServiceDefinition.CreateBuilder()\n");
out->Indent();
out->Indent();
for (int i = 0; i < service->method_count(); i++) {
@@ -681,7 +690,7 @@ grpc::string GetServices(const FileDescriptor *file, bool generate_client,
out.Print("using System;\n");
out.Print("using System.Threading;\n");
out.Print("using System.Threading.Tasks;\n");
- out.Print("using Grpc.Core;\n");
+ out.Print("using grpc = global::Grpc.Core;\n");
out.Print("\n");
out.Print("namespace $namespace$ {\n", "namespace", GetFileNamespace(file));
diff --git a/src/compiler/php_generator.cc b/src/compiler/php_generator.cc
index fba8cbaa97..7d51d40301 100644
--- a/src/compiler/php_generator.cc
+++ b/src/compiler/php_generator.cc
@@ -118,7 +118,7 @@ void PrintService(const ServiceDescriptor *service, Printer *out) {
out->Print(
"/**\n * @param string $$hostname hostname\n"
" * @param array $$opts channel options\n"
- " * @param Grpc\\Channel $$channel (optional) re-use channel "
+ " * @param \\Grpc\\Channel $$channel (optional) re-use channel "
"object\n */\n"
"public function __construct($$hostname, $$opts, "
"$$channel = null) {\n");
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index 242ce06a16..49d90fd36e 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -647,15 +647,15 @@ bool PrivateGenerator::PrintBetaPreamble() {
"Package", config.beta_package_root);
out->Print("from $Package$ import interfaces as beta_interfaces\n", "Package",
config.beta_package_root);
+ out->Print("from grpc.framework.common import cardinality\n");
+ out->Print(
+ "from grpc.framework.interfaces.face import utilities as "
+ "face_utilities\n");
return true;
}
bool PrivateGenerator::PrintPreamble() {
out->Print("import $Package$\n", "Package", config.grpc_package_root);
- out->Print("from grpc.framework.common import cardinality\n");
- out->Print(
- "from grpc.framework.interfaces.face import utilities as "
- "face_utilities\n");
if (generate_in_pb2_grpc) {
out->Print("\n");
StringPairSet imports_set;
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index b80d831557..fc29dbd454 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -138,7 +138,7 @@ static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx,
static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc client stats and census_rpc_end_op here */
@@ -160,7 +160,7 @@ static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *d = elem->call_data;
GPR_ASSERT(d != NULL);
/* TODO(hongyu): record rpc server stats and census_tracing_end_op here */
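The two hunks above retype the census filter's destroy_call_elem callbacks from `void *ignored` to `grpc_closure *ignored`; the same signature change appears in client_channel.c further down, where the closure (named then_schedule_closure there) is either handed to the subchannel call for deferred cleanup or scheduled once per-call teardown is done. As a rough, hypothetical illustration of the new contract (not code from this commit, and the dispatch logic in channel_stack.c is not shown in this excerpt), a filter with no deferred cleanup could simply schedule the closure when it receives one:

/* Hypothetical filter callback sketching the destroy_call_elem signature
 * introduced by this diff. Whether a given element is handed a non-NULL
 * closure is decided by channel_stack.c, so the schedule call is guarded. */
static void example_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_final_info *final_info,
                                      grpc_closure *then_schedule_closure) {
  /* ...release any per-call state owned by this filter... */
  if (then_schedule_closure != NULL) {
    grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
  }
}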
diff --git a/src/core/ext/client_channel/channel_connectivity.c b/src/core/ext/client_channel/channel_connectivity.c
index dd70bc2c6c..f6cb3b9115 100644
--- a/src/core/ext/client_channel/channel_connectivity.c
+++ b/src/core/ext/client_channel/channel_connectivity.c
@@ -139,8 +139,8 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
error = GRPC_ERROR_NONE;
} else {
if (error == GRPC_ERROR_NONE) {
- error =
- GRPC_ERROR_CREATE("Timed out waiting for connection state change");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Timed out waiting for connection state change");
} else if (error == GRPC_ERROR_CANCELLED) {
error = GRPC_ERROR_NONE;
}
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index f2475cf6ae..435a3ab0fe 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -47,6 +47,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/client_channel/retry_throttle.h"
#include "src/core/ext/client_channel/subchannel.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
@@ -189,6 +190,8 @@ typedef struct client_channel_channel_data {
grpc_combiner *combiner;
/** currently active load balancer */
grpc_lb_policy *lb_policy;
+ /** retry throttle data */
+ grpc_server_retry_throttle_data *retry_throttle_data;
/** maps method names to method_parameters structs */
grpc_slice_hash_table *method_params_table;
/** incoming resolver result - set by resolver.next() */
@@ -284,6 +287,65 @@ static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
&w->on_changed);
}
+typedef struct {
+ char *server_name;
+ grpc_server_retry_throttle_data *retry_throttle_data;
+} service_config_parsing_state;
+
+static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
+ service_config_parsing_state *parsing_state = arg;
+ if (strcmp(field->key, "retryThrottling") == 0) {
+ if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
+ if (field->type != GRPC_JSON_OBJECT) return;
+ int max_milli_tokens = 0;
+ int milli_token_ratio = 0;
+ for (grpc_json *sub_field = field->child; sub_field != NULL;
+ sub_field = sub_field->next) {
+ if (sub_field->key == NULL) return;
+ if (strcmp(sub_field->key, "maxTokens") == 0) {
+ if (max_milli_tokens != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
+ if (max_milli_tokens == -1) return;
+ max_milli_tokens *= 1000;
+ } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
+ if (milli_token_ratio != 0) return; // Duplicate.
+ if (sub_field->type != GRPC_JSON_NUMBER) return;
+ // We support up to 3 decimal digits.
+ size_t whole_len = strlen(sub_field->value);
+ uint32_t multiplier = 1;
+ uint32_t decimal_value = 0;
+ const char *decimal_point = strchr(sub_field->value, '.');
+ if (decimal_point != NULL) {
+ whole_len = (size_t)(decimal_point - sub_field->value);
+ multiplier = 1000;
+ size_t decimal_len = strlen(decimal_point + 1);
+ if (decimal_len > 3) decimal_len = 3;
+ if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
+ &decimal_value)) {
+ return;
+ }
+ uint32_t decimal_multiplier = 1;
+ for (size_t i = 0; i < (3 - decimal_len); ++i) {
+ decimal_multiplier *= 10;
+ }
+ decimal_value *= decimal_multiplier;
+ }
+ uint32_t whole_value;
+ if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
+ &whole_value)) {
+ return;
+ }
+ milli_token_ratio = (int)((whole_value * multiplier) + decimal_value);
+ if (milli_token_ratio <= 0) return;
+ }
+ }
+ parsing_state->retry_throttle_data =
+ grpc_retry_throttle_map_get_data_for_server(
+ parsing_state->server_name, max_milli_tokens, milli_token_ratio);
+ }
+}
+
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
channel_data *chand = arg;
@@ -293,8 +355,11 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_slice_hash_table *method_params_table = NULL;
grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
bool exit_idle = false;
- grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");
+ grpc_error *state_error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
char *service_config_json = NULL;
+ service_config_parsing_state parsing_state;
+ memset(&parsing_state, 0, sizeof(parsing_state));
if (chand->resolver_result != NULL) {
// Find LB policy name.
@@ -355,6 +420,19 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_service_config *service_config =
grpc_service_config_create(service_config_json);
if (service_config != NULL) {
+ channel_arg =
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
+ GPR_ASSERT(channel_arg != NULL);
+ GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+ grpc_uri *uri =
+ grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
+ GPR_ASSERT(uri->path[0] != '\0');
+ parsing_state.server_name =
+ uri->path[0] == '/' ? uri->path + 1 : uri->path;
+ grpc_service_config_parse_global_params(
+ service_config, parse_retry_throttle_params, &parsing_state);
+ parsing_state.server_name = NULL;
+ grpc_uri_destroy(uri);
method_params_table = grpc_service_config_create_method_config_table(
exec_ctx, service_config, method_parameters_create_from_json,
&method_parameters_vtable);
@@ -386,6 +464,11 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
chand->info_service_config_json = service_config_json;
}
gpr_mu_unlock(&chand->info_mu);
+
+ if (chand->retry_throttle_data != NULL) {
+ grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
+ }
+ chand->retry_throttle_data = parsing_state.retry_throttle_data;
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
@@ -393,9 +476,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
if (lb_policy != NULL) {
grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
} else if (chand->resolver == NULL /* disconnected */) {
- grpc_closure_list_fail_all(
- &chand->waiting_for_config_closures,
- GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
+ grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Channel disconnected", &error, 1));
grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
}
if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
@@ -423,8 +506,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
grpc_error *refs[] = {error, state_error};
set_channel_connectivity_state_locked(
exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
- GPR_ARRAY_SIZE(refs)),
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Got config after disconnection", refs, GPR_ARRAY_SIZE(refs)),
"resolver_gone");
}
@@ -463,8 +546,9 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
- grpc_closure_sched(exec_ctx, op->send_ping,
- GRPC_ERROR_CREATE("Ping with no load balancing"));
+ grpc_closure_sched(
+ exec_ctx, op->send_ping,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
} else {
grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
@@ -579,7 +663,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
if (proxy_name != NULL) gpr_free(proxy_name);
if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
if (chand->resolver == NULL) {
- return GRPC_ERROR_CREATE("resolver creation failed");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
}
return GRPC_ERROR_NONE;
}
@@ -613,6 +697,9 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
gpr_free(chand->info_lb_policy_name);
gpr_free(chand->info_service_config_json);
+ if (chand->retry_throttle_data != NULL) {
+ grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
+ }
if (chand->method_params_table != NULL) {
grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
}
@@ -654,6 +741,7 @@ typedef struct client_channel_call_data {
grpc_slice path; // Request path.
gpr_timespec call_start_time;
gpr_timespec deadline;
+ grpc_server_retry_throttle_data *retry_throttle_data;
method_parameters *method_params;
grpc_error *cancel_error;
@@ -661,6 +749,7 @@ typedef struct client_channel_call_data {
/** either 0 for no call, 1 for cancelled, or a pointer to a
grpc_subchannel_call */
gpr_atm subchannel_call;
+ gpr_arena *arena;
subchannel_creation_phase creation_phase;
grpc_connected_subchannel *connected_subchannel;
@@ -675,6 +764,9 @@ typedef struct client_channel_call_data {
grpc_call_stack *owning_call;
grpc_linked_mdelem lb_token_mdelem;
+
+ grpc_closure on_complete;
+ grpc_closure *original_on_complete;
} call_data;
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
@@ -727,7 +819,7 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
gpr_free(ops);
}
-// Sets calld->method_params.
+// Sets calld->method_params and calld->retry_throttle_data.
// If the method params specify a timeout, populates
// *per_method_deadline and returns true.
static bool set_call_method_params_from_service_config_locked(
@@ -735,6 +827,10 @@ static bool set_call_method_params_from_service_config_locked(
gpr_timespec *per_method_deadline) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
+ if (chand->retry_throttle_data != NULL) {
+ calld->retry_throttle_data =
+ grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
+ }
if (chand->method_params_table != NULL) {
calld->method_params = grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
@@ -780,12 +876,14 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (calld->connected_subchannel == NULL) {
gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
- fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
- "Failed to create subchannel", &error, 1));
+ fail_locked(exec_ctx, calld,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to create subchannel", &error, 1));
} else if (GET_CALL(calld) == CANCELLED_CALL) {
/* already cancelled before subchannel became ready */
- grpc_error *cancellation_error = GRPC_ERROR_CREATE_REFERENCING(
- "Cancelled before creating subchannel", &error, 1);
+ grpc_error *cancellation_error =
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Cancelled before creating subchannel", &error, 1);
/* if due to deadline, attach the deadline exceeded status to the error */
if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
cancellation_error =
@@ -796,9 +894,14 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
/* Create call on subchannel. */
grpc_subchannel_call *subchannel_call = NULL;
+ const grpc_connected_subchannel_call_args call_args = {
+ .pollent = calld->pollent,
+ .path = calld->path,
+ .start_time = calld->call_start_time,
+ .deadline = calld->deadline,
+ .arena = calld->arena};
grpc_error *new_error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
- calld->call_start_time, calld->deadline, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
if (new_error != GRPC_ERROR_NONE) {
new_error = grpc_error_add_child(new_error, error);
subchannel_call = CANCELLED_CALL;
@@ -882,9 +985,9 @@ static bool pick_subchannel_locked(
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
- grpc_closure_sched(
- exec_ctx, cpa->on_ready,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, cpa->on_ready,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
}
}
GPR_TIMER_END("pick_subchannel", 0);
@@ -941,7 +1044,8 @@ static bool pick_subchannel_locked(
grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
GRPC_ERROR_NONE);
} else {
- grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
+ grpc_closure_sched(exec_ctx, on_ready,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
}
GPR_TIMER_END("pick_subchannel", 0);
@@ -1025,9 +1129,14 @@ static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx,
if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
calld->connected_subchannel != NULL) {
grpc_subchannel_call *subchannel_call = NULL;
+ const grpc_connected_subchannel_call_args call_args = {
+ .pollent = calld->pollent,
+ .path = calld->path,
+ .start_time = calld->call_start_time,
+ .deadline = calld->deadline,
+ .arena = calld->arena};
grpc_error *error = grpc_connected_subchannel_create_call(
- exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
- calld->call_start_time, calld->deadline, &subchannel_call);
+ exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
subchannel_call = CANCELLED_CALL;
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
@@ -1045,6 +1154,26 @@ static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx,
add_waiting_locked(calld, op);
}
+static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+ grpc_call_element *elem = arg;
+ call_data *calld = elem->call_data;
+ if (calld->retry_throttle_data != NULL) {
+ if (error == GRPC_ERROR_NONE) {
+ grpc_server_retry_throttle_data_record_success(
+ calld->retry_throttle_data);
+ } else {
+ // TODO(roth): In a subsequent PR, check the return value here and
+ // decide whether or not to retry. Note that we should only
+ // record failures whose statuses match the configured retryable
+ // or non-fatal status codes.
+ grpc_server_retry_throttle_data_record_failure(
+ calld->retry_throttle_data);
+ }
+ }
+ grpc_closure_run(exec_ctx, calld->original_on_complete,
+ GRPC_ERROR_REF(error));
+}
+
static void start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("start_transport_stream_op_locked", 0);
@@ -1053,6 +1182,14 @@ static void start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element *elem = op->handler_private.args[0];
call_data *calld = elem->call_data;
+ if (op->recv_trailing_metadata != NULL) {
+ GPR_ASSERT(op->on_complete != NULL);
+ calld->original_on_complete = op->on_complete;
+ grpc_closure_init(&calld->on_complete, on_complete, elem,
+ grpc_schedule_on_exec_ctx);
+ op->on_complete = &calld->on_complete;
+ }
+
start_transport_stream_op_locked_inner(exec_ctx, op, elem);
GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
@@ -1114,6 +1251,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
calld->call_start_time = args->start_time;
calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
calld->owning_call = args->call_stack;
+ calld->arena = args->arena;
grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
return GRPC_ERROR_NONE;
}
@@ -1122,7 +1260,7 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *and_free_memory) {
+ grpc_closure *then_schedule_closure) {
call_data *calld = elem->call_data;
grpc_deadline_state_destroy(exec_ctx, elem);
grpc_slice_unref_internal(exec_ctx, calld->path);
@@ -1132,6 +1270,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_UNREF(calld->cancel_error);
grpc_subchannel_call *call = GET_CALL(calld);
if (call != NULL && call != CANCELLED_CALL) {
+ grpc_subchannel_call_set_cleanup_closure(call, then_schedule_closure);
+ then_schedule_closure = NULL;
GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
}
GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
@@ -1141,7 +1281,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
"picked");
}
gpr_free(calld->waiting_ops);
- gpr_free(and_free_memory);
+ grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
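parse_retry_throttle_params above keeps both retryThrottling parameters in fixed-point milli-token units: maxTokens is scaled by 1000, and tokenRatio keeps at most three decimal digits (so "0.316" becomes 316). The sketch below is a hypothetical, single-threaded rendering of that arithmetic together with the bucket rules implemented in retry_throttle.c further down (a failure costs 1000 milli-tokens, a success refunds milli_token_ratio, and retries stay allowed only while the bucket is above half its capacity); the real code uses clamped atomic adds rather than plain ints.

#include <stdbool.h>
#include <stdio.h>

/* Fixed-point view of a retryThrottling config, mirroring the units used
 * by parse_retry_throttle_params(): 1 token == 1000 milli-tokens. */
typedef struct {
  int max_milli_tokens;  /* maxTokens * 1000, e.g. 10 -> 10000 */
  int milli_token_ratio; /* tokenRatio * 1000, e.g. "0.316" -> 316 */
  int milli_tokens;      /* current bucket level, starts full */
} throttle_example;

/* Each failure drains one token; retries remain allowed only while the
 * bucket is above max/2, the same threshold retry_throttle.c uses. */
static bool record_failure(throttle_example *t) {
  t->milli_tokens -= 1000;
  if (t->milli_tokens < 0) t->milli_tokens = 0;
  return t->milli_tokens > t->max_milli_tokens / 2;
}

/* Each success refunds milli_token_ratio, clamped to the bucket size. */
static void record_success(throttle_example *t) {
  t->milli_tokens += t->milli_token_ratio;
  if (t->milli_tokens > t->max_milli_tokens) {
    t->milli_tokens = t->max_milli_tokens;
  }
}

int main(void) {
  /* Hypothetical config: {"maxTokens": 10, "tokenRatio": "0.316"} */
  throttle_example t = {10000, 316, 10000};
  for (int i = 1; i <= 6; i++) {
    bool allowed = record_failure(&t);
    printf("failure %d: %d milli-tokens, retries %s\n", i, t.milli_tokens,
           allowed ? "allowed" : "throttled");
  }
  record_success(&t);
  printf("after one success: %d milli-tokens\n", t.milli_tokens);
  return 0;
}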
diff --git a/src/core/ext/client_channel/client_channel_plugin.c b/src/core/ext/client_channel/client_channel_plugin.c
index 28d3b63f99..f51277d0b2 100644
--- a/src/core/ext/client_channel/client_channel_plugin.c
+++ b/src/core/ext/client_channel/client_channel_plugin.c
@@ -43,6 +43,7 @@
#include "src/core/ext/client_channel/lb_policy_registry.h"
#include "src/core/ext/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/client_channel/retry_throttle.h"
#include "src/core/ext/client_channel/subchannel_index.h"
#include "src/core/lib/surface/channel_init.h"
@@ -82,6 +83,7 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
void grpc_client_channel_init(void) {
grpc_lb_policy_registry_init();
grpc_resolver_registry_init();
+ grpc_retry_throttle_map_init();
grpc_proxy_mapper_registry_init();
grpc_register_http_proxy_mapper();
grpc_subchannel_index_init();
@@ -96,6 +98,7 @@ void grpc_client_channel_shutdown(void) {
grpc_subchannel_index_shutdown();
grpc_channel_init_shutdown();
grpc_proxy_mapper_registry_shutdown();
+ grpc_retry_throttle_map_shutdown();
grpc_resolver_registry_shutdown();
grpc_lb_policy_registry_shutdown();
}
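This hunk wires the new retry-throttle map into the client-channel plugin's lifecycle: grpc_retry_throttle_map_init() runs during plugin init, and grpc_retry_throttle_map_shutdown() runs in the mirrored position during shutdown, with the shutdown sequence executing the init steps in reverse order. For context, a minimal, illustrative registration of a core plugin (hypothetical names, assuming the public grpc_register_plugin() entry point) might look like:

/* Illustrative only: a plugin that brings some global map up and down,
 * paired in the same init/shutdown style as the client-channel plugin. */
static void my_plugin_init(void) { /* e.g. create a global registry/map */ }
static void my_plugin_shutdown(void) { /* tear it down again */ }

void register_my_plugin(void) {
  grpc_register_plugin(my_plugin_init, my_plugin_shutdown);
}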
diff --git a/src/core/ext/client_channel/connector.h b/src/core/ext/client_channel/connector.h
index 9bff41f003..94b5fb5c9e 100644
--- a/src/core/ext/client_channel/connector.h
+++ b/src/core/ext/client_channel/connector.h
@@ -48,8 +48,6 @@ struct grpc_connector {
typedef struct {
/** set of pollsets interested in this connection */
grpc_pollset_set *interested_parties;
- /** initial connect string to send */
- grpc_slice initial_connect_string;
/** deadline for connection */
gpr_timespec deadline;
/** channel arguments (to be passed to transport) */
diff --git a/src/core/ext/client_channel/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c
index 58ab233f1b..778d76c39f 100644
--- a/src/core/ext/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -116,7 +116,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
// If we were shut down after an endpoint operation succeeded but
// before the endpoint callback was invoked, we need to generate our
// own error.
- error = GRPC_ERROR_CREATE("Handshaker shutdown");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
}
if (!handshaker->shutdown) {
// TODO(ctiller): It is currently necessary to shutdown endpoints
@@ -226,7 +226,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
char* msg;
gpr_asprintf(&msg, "HTTP proxy returned response code %d",
handshaker->http_response.status);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
handshake_failed_locked(exec_ctx, handshaker, error);
goto done;
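The two replacements above show the split this diff applies across the tree when retiring the old GRPC_ERROR_CREATE macro: GRPC_ERROR_CREATE_FROM_STATIC_STRING is used when the description is a string literal that outlives the error, and GRPC_ERROR_CREATE_FROM_COPIED_STRING when the description is a heap-allocated buffer the caller frees right away, as with the gpr_asprintf message here. A small sketch of the two idioms, with made-up error text:

/* Sketch of the two error-creation idioms used throughout this diff; the
 * messages and status value are hypothetical, only the macro usage mirrors
 * the changes above. */
static grpc_error *make_static_error(void) {
  /* String literal: nothing to copy, the literal lives forever. */
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("example operation failed");
}

static grpc_error *make_copied_error(int http_status) {
  /* Dynamically built message: the error must copy it, because the caller
   * frees the buffer immediately after creating the error. */
  char *msg;
  gpr_asprintf(&msg, "example operation failed with status %d", http_status);
  grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
  gpr_free(msg);
  return error;
}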
diff --git a/src/core/ext/client_channel/proxy_mapper_registry.c b/src/core/ext/client_channel/proxy_mapper_registry.c
index 2c44b9d490..0935ddbdbd 100644
--- a/src/core/ext/client_channel/proxy_mapper_registry.c
+++ b/src/core/ext/client_channel/proxy_mapper_registry.c
@@ -94,6 +94,14 @@ static void grpc_proxy_mapper_list_destroy(grpc_proxy_mapper_list* list) {
grpc_proxy_mapper_destroy(list->list[i]);
}
gpr_free(list->list);
+ // Clean up in case we re-initialize later.
+ // TODO(ctiller): This should ideally live in
+ // grpc_proxy_mapper_registry_init(). However, if we did this there,
+ // then we would do it AFTER we start registering proxy mappers from
+ // third-party plugins, so they'd never show up (and would leak memory).
+ // We probably need some sort of dependency system for plugins to fix
+ // this.
+ memset(list, 0, sizeof(*list));
}
//
@@ -102,9 +110,7 @@ static void grpc_proxy_mapper_list_destroy(grpc_proxy_mapper_list* list) {
static grpc_proxy_mapper_list g_proxy_mapper_list;
-void grpc_proxy_mapper_registry_init() {
- memset(&g_proxy_mapper_list, 0, sizeof(g_proxy_mapper_list));
-}
+void grpc_proxy_mapper_registry_init() {}
void grpc_proxy_mapper_registry_shutdown() {
grpc_proxy_mapper_list_destroy(&g_proxy_mapper_list);
diff --git a/src/core/ext/client_channel/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c
index 3c5a6fb3ff..0f074a3386 100644
--- a/src/core/ext/client_channel/resolver_registry.c
+++ b/src/core/ext/client_channel/resolver_registry.c
@@ -93,7 +93,6 @@ static grpc_resolver_factory *lookup_factory(const char *name) {
return g_all_of_the_resolvers[i];
}
}
-
return NULL;
}
diff --git a/src/core/ext/client_channel/retry_throttle.c b/src/core/ext/client_channel/retry_throttle.c
new file mode 100644
index 0000000000..8926c3d782
--- /dev/null
+++ b/src/core/ext/client_channel/retry_throttle.c
@@ -0,0 +1,210 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/client_channel/retry_throttle.h"
+
+#include <limits.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/avl.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+
+//
+// server_retry_throttle_data
+//
+
+struct grpc_server_retry_throttle_data {
+ gpr_refcount refs;
+ int max_milli_tokens;
+ int milli_token_ratio;
+ gpr_atm milli_tokens;
+ // A pointer to the replacement for this grpc_server_retry_throttle_data
+ // entry. If non-NULL, then this entry is stale and must not be used.
+ // We hold a reference to the replacement.
+ gpr_atm replacement;
+};
+
+static void get_replacement_throttle_data_if_needed(
+ grpc_server_retry_throttle_data** throttle_data) {
+ while (true) {
+ grpc_server_retry_throttle_data* new_throttle_data =
+ (grpc_server_retry_throttle_data*)gpr_atm_acq_load(
+ &(*throttle_data)->replacement);
+ if (new_throttle_data == NULL) return;
+ *throttle_data = new_throttle_data;
+ }
+}
+
+bool grpc_server_retry_throttle_data_record_failure(
+ grpc_server_retry_throttle_data* throttle_data) {
+ // First, check if we are stale and need to be replaced.
+ get_replacement_throttle_data_if_needed(&throttle_data);
+ // We decrement milli_tokens by 1000 (1 token) for each failure.
+ const int new_value = (int)gpr_atm_no_barrier_clamped_add(
+ &throttle_data->milli_tokens, (gpr_atm)-1000, (gpr_atm)0,
+ (gpr_atm)throttle_data->max_milli_tokens);
+ // Retries are allowed as long as the new value is above the threshold
+ // (max_milli_tokens / 2).
+ return new_value > throttle_data->max_milli_tokens / 2;
+}
+
+void grpc_server_retry_throttle_data_record_success(
+ grpc_server_retry_throttle_data* throttle_data) {
+ // First, check if we are stale and need to be replaced.
+ get_replacement_throttle_data_if_needed(&throttle_data);
+ // We increment milli_tokens by milli_token_ratio for each success.
+ gpr_atm_no_barrier_clamped_add(
+ &throttle_data->milli_tokens, (gpr_atm)throttle_data->milli_token_ratio,
+ (gpr_atm)0, (gpr_atm)throttle_data->max_milli_tokens);
+}
+
+grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref(
+ grpc_server_retry_throttle_data* throttle_data) {
+ gpr_ref(&throttle_data->refs);
+ return throttle_data;
+}
+
+void grpc_server_retry_throttle_data_unref(
+ grpc_server_retry_throttle_data* throttle_data) {
+ if (gpr_unref(&throttle_data->refs)) {
+ grpc_server_retry_throttle_data* replacement =
+ (grpc_server_retry_throttle_data*)gpr_atm_acq_load(
+ &throttle_data->replacement);
+ if (replacement != NULL) {
+ grpc_server_retry_throttle_data_unref(replacement);
+ }
+ gpr_free(throttle_data);
+ }
+}
+
+static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
+ int max_milli_tokens, int milli_token_ratio,
+ grpc_server_retry_throttle_data* old_throttle_data) {
+ grpc_server_retry_throttle_data* throttle_data =
+ gpr_malloc(sizeof(*throttle_data));
+ memset(throttle_data, 0, sizeof(*throttle_data));
+ gpr_ref_init(&throttle_data->refs, 1);
+ throttle_data->max_milli_tokens = max_milli_tokens;
+ throttle_data->milli_token_ratio = milli_token_ratio;
+ int initial_milli_tokens = max_milli_tokens;
+ // If there was a pre-existing entry for this server name, initialize
+ // the token count by scaling proportionately to the old data. This
+ // ensures that if we're already throttling retries on the old scale,
+ // we will start out doing the same thing on the new one.
+ if (old_throttle_data != NULL) {
+ double token_fraction =
+ (int)gpr_atm_acq_load(&old_throttle_data->milli_tokens) /
+ (double)old_throttle_data->max_milli_tokens;
+ initial_milli_tokens = (int)(token_fraction * max_milli_tokens);
+ }
+ gpr_atm_rel_store(&throttle_data->milli_tokens,
+ (gpr_atm)initial_milli_tokens);
+ // If there was a pre-existing entry, mark it as stale and give it a
+ // pointer to the new entry, which is its replacement.
+ if (old_throttle_data != NULL) {
+ grpc_server_retry_throttle_data_ref(throttle_data);
+ gpr_atm_rel_store(&old_throttle_data->replacement, (gpr_atm)throttle_data);
+ }
+ return throttle_data;
+}
+
+//
+// avl vtable for string -> server_retry_throttle_data map
+//
+
+static void* copy_server_name(void* key) { return gpr_strdup(key); }
+
+static long compare_server_name(void* key1, void* key2) {
+ return strcmp(key1, key2);
+}
+
+static void destroy_server_retry_throttle_data(void* value) {
+ grpc_server_retry_throttle_data* throttle_data = value;
+ grpc_server_retry_throttle_data_unref(throttle_data);
+}
+
+static void* copy_server_retry_throttle_data(void* value) {
+ grpc_server_retry_throttle_data* throttle_data = value;
+ return grpc_server_retry_throttle_data_ref(throttle_data);
+}
+
+static const gpr_avl_vtable avl_vtable = {
+ gpr_free /* destroy_key */, copy_server_name, compare_server_name,
+ destroy_server_retry_throttle_data, copy_server_retry_throttle_data};
+
+//
+// server_retry_throttle_map
+//
+
+static gpr_mu g_mu;
+static gpr_avl g_avl;
+
+void grpc_retry_throttle_map_init() {
+ gpr_mu_init(&g_mu);
+ g_avl = gpr_avl_create(&avl_vtable);
+}
+
+void grpc_retry_throttle_map_shutdown() {
+ gpr_mu_destroy(&g_mu);
+ gpr_avl_unref(g_avl);
+}
+
+grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
+ const char* server_name, int max_milli_tokens, int milli_token_ratio) {
+ gpr_mu_lock(&g_mu);
+ grpc_server_retry_throttle_data* throttle_data =
+ gpr_avl_get(g_avl, (char*)server_name);
+ if (throttle_data == NULL) {
+ // Entry not found. Create a new one.
+ throttle_data = grpc_server_retry_throttle_data_create(
+ max_milli_tokens, milli_token_ratio, NULL);
+ g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data);
+ } else {
+ if (throttle_data->max_milli_tokens != max_milli_tokens ||
+ throttle_data->milli_token_ratio != milli_token_ratio) {
+ // Entry found but with old parameters. Create a new one based on
+ // the original one.
+ throttle_data = grpc_server_retry_throttle_data_create(
+ max_milli_tokens, milli_token_ratio, throttle_data);
+ g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data);
+ } else {
+ // Entry found. Increase refcount.
+ grpc_server_retry_throttle_data_ref(throttle_data);
+ }
+ }
+ gpr_mu_unlock(&g_mu);
+ return throttle_data;
+}
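
The token arithmetic above reads most easily with concrete numbers: a failure always costs 1000 milli-tokens (one whole token), a success refunds only milli_token_ratio milli-tokens, the balance is clamped to [0, max_milli_tokens], and retries stay allowed only while the balance is strictly above max_milli_tokens / 2. The standalone C sketch below reproduces just that arithmetic (it is not the gRPC code; the parameter values 10000 and 100 are arbitrary examples):

#include <stdio.h>

/* Standalone illustration of the retry-throttle milli-token rules. */
static int clamp(int v, int lo, int hi) {
  if (v < lo) return lo;
  if (v > hi) return hi;
  return v;
}

int main(void) {
  const int max_milli_tokens = 10000;  /* 10 tokens */
  const int milli_token_ratio = 100;   /* 0.1 token refunded per success */
  int milli_tokens = max_milli_tokens; /* the bucket starts full */
  for (int failures = 1; failures <= 6; ++failures) {
    milli_tokens = clamp(milli_tokens - 1000, 0, max_milli_tokens);
    printf("failure %d: %5d milli-tokens, retries %s\n", failures, milli_tokens,
           milli_tokens > max_milli_tokens / 2 ? "allowed" : "throttled");
  }
  milli_tokens = clamp(milli_tokens + milli_token_ratio, 0, max_milli_tokens);
  printf("one success: %5d milli-tokens\n", milli_tokens);
  return 0;
}

With these example numbers the fifth consecutive failure drops the balance to exactly 5000, which is no longer above the max_milli_tokens / 2 threshold, so retries are throttled; each success then claws back only 100 milli-tokens, so it takes ten successes to undo one failure.
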
diff --git a/src/core/ext/client_channel/retry_throttle.h b/src/core/ext/client_channel/retry_throttle.h
new file mode 100644
index 0000000000..f9971faf65
--- /dev/null
+++ b/src/core/ext/client_channel/retry_throttle.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_RETRY_THROTTLE_H
+#define GRPC_CORE_EXT_CLIENT_CHANNEL_RETRY_THROTTLE_H
+
+#include <stdbool.h>
+
+/// Tracks retry throttling data for an individual server name.
+typedef struct grpc_server_retry_throttle_data grpc_server_retry_throttle_data;
+
+/// Records a failure. Returns true if it's okay to send a retry.
+bool grpc_server_retry_throttle_data_record_failure(
+ grpc_server_retry_throttle_data* throttle_data);
+/// Records a success.
+void grpc_server_retry_throttle_data_record_success(
+ grpc_server_retry_throttle_data* throttle_data);
+
+grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref(
+ grpc_server_retry_throttle_data* throttle_data);
+void grpc_server_retry_throttle_data_unref(
+ grpc_server_retry_throttle_data* throttle_data);
+
+/// Initializes global map of failure data for each server name.
+void grpc_retry_throttle_map_init();
+/// Shuts down global map of failure data for each server name.
+void grpc_retry_throttle_map_shutdown();
+
+/// Returns a reference to the failure data for \a server_name, creating
+/// a new entry if needed.
+/// Caller must eventually unref via \a grpc_server_retry_throttle_data_unref().
+grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
+ const char* server_name, int max_milli_tokens, int milli_token_ratio);
+
+#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RETRY_THROTTLE_H */
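
A minimal usage sketch for the API declared above, assuming grpc_retry_throttle_map_init() has already been called during plugin initialization and that the gRPC core is linked; the server name and throttle parameters are made-up examples:

#include <stdbool.h>

#include "src/core/ext/client_channel/retry_throttle.h"

/* Hypothetical helper: record a failed call against its server's throttle
   data and report whether a retry is still permitted. */
static bool record_failure_and_check_retry(const char *server_name) {
  grpc_server_retry_throttle_data *throttle_data =
      grpc_retry_throttle_map_get_data_for_server(
          server_name, 10000 /* max_milli_tokens */,
          100 /* milli_token_ratio */);
  const bool retry_allowed =
      grpc_server_retry_throttle_data_record_failure(throttle_data);
  grpc_server_retry_throttle_data_unref(throttle_data);
  return retry_allowed;
}

Successful calls would be reported the same way via grpc_server_retry_throttle_data_record_success(); a longer-lived caller would presumably hold the reference instead of looking it up on every call.
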
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index 5df0a9060d..063c0badff 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -41,7 +41,6 @@
#include <grpc/support/string_util.h>
#include "src/core/ext/client_channel/client_channel.h"
-#include "src/core/ext/client_channel/initial_connect_string.h"
#include "src/core/ext/client_channel/parse_address.h"
#include "src/core/ext/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/client_channel/subchannel_index.h"
@@ -103,9 +102,6 @@ struct grpc_subchannel {
grpc_subchannel_key *key;
- /** initial string to send to peer */
- grpc_slice initial_connect_string;
-
/** set during connection */
grpc_connect_out_args connecting_result;
@@ -148,6 +144,7 @@ struct grpc_subchannel {
struct grpc_subchannel_call {
grpc_connected_subchannel *connection;
+ grpc_closure *schedule_closure_after_destroy;
};
#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1))
@@ -214,7 +211,6 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_subchannel *c = arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
- grpc_slice_unref_internal(exec_ctx, c->initial_connect_string);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
grpc_pollset_set_destroy(exec_ctx, c->pollset_set);
@@ -273,8 +269,9 @@ static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
gpr_mu_lock(&c->mu);
GPR_ASSERT(!c->disconnected);
c->disconnected = true;
- grpc_connector_shutdown(exec_ctx, c->connector,
- GRPC_ERROR_CREATE("Subchannel disconnected"));
+ grpc_connector_shutdown(
+ exec_ctx, c->connector,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Subchannel disconnected"));
con = GET_CONNECTED_SUBCHANNEL(c, no_barrier);
if (con != NULL) {
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, con, "connection");
@@ -332,7 +329,6 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
c->pollset_set = grpc_pollset_set_create();
grpc_resolved_address *addr = gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
- grpc_set_initial_connect_string(&addr, &c->initial_connect_string);
grpc_resolved_address *new_address = NULL;
grpc_channel_args *new_args = NULL;
if (grpc_proxy_mappers_map_address(exec_ctx, addr, args->args, &new_address,
@@ -340,17 +336,15 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(new_address != NULL);
gpr_free(addr);
addr = new_address;
- if (new_args != NULL) c->args = new_args;
- }
- if (c->args == NULL) {
- static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
- grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
- c->args = grpc_channel_args_copy_and_add_and_remove(
- args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &new_arg,
- 1);
- gpr_free(new_arg.value.string);
}
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
+ grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
gpr_free(addr);
+ c->args = grpc_channel_args_copy_and_add_and_remove(
+ new_args != NULL ? new_args : args->args, keys_to_remove,
+ GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1);
+ gpr_free(new_arg.value.string);
+ if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
&c->root_external_state_watcher;
grpc_closure_init(&c->connected, subchannel_connected, c,
@@ -405,7 +399,6 @@ static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
args.interested_parties = c->pollset_set;
args.deadline = c->next_attempt;
args.channel_args = c->args;
- args.initial_connect_string = c->initial_connect_string;
grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
@@ -445,7 +438,8 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
- error = GRPC_ERROR_CREATE_REFERENCING("Disconnected", &error, 1);
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
+ &error, 1);
} else {
GRPC_ERROR_REF(error);
}
@@ -696,9 +690,9 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
} else {
grpc_connectivity_state_set(
exec_ctx, &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
- grpc_error_set_int(
- GRPC_ERROR_CREATE_REFERENCING("Connect Failed", &error, 1),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+ grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Connect Failed", &error, 1),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
"connect_failed");
const char *errmsg = grpc_error_string(error);
@@ -719,13 +713,22 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
grpc_subchannel_call *c = call;
+ GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel *connection = c->connection;
- grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL, c);
+ grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL,
+ c->schedule_closure_after_destroy);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call");
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
+void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call *call,
+ grpc_closure *closure) {
+ GPR_ASSERT(call->schedule_closure_after_destroy == NULL);
+ GPR_ASSERT(closure != NULL);
+ call->schedule_closure_after_destroy = closure;
+}
+
void grpc_subchannel_call_ref(
grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
@@ -761,15 +764,22 @@ grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con,
- grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time,
- gpr_timespec deadline, grpc_subchannel_call **call) {
+ const grpc_connected_subchannel_call_args *args,
+ grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
- *call = gpr_zalloc(sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
+ *call = gpr_arena_alloc(
+ args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = con; // Ref is added below.
- grpc_error *error =
- grpc_call_stack_init(exec_ctx, chanstk, 1, subchannel_call_destroy, *call,
- NULL, NULL, path, start_time, deadline, callstk);
+ const grpc_call_element_args call_args = {.call_stack = callstk,
+ .server_transport_data = NULL,
+ .context = NULL,
+ .path = args->path,
+ .start_time = args->start_time,
+ .deadline = args->deadline,
+ .arena = args->arena};
+ grpc_error *error = grpc_call_stack_init(
+ exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
if (error != GRPC_ERROR_NONE) {
const char *error_string = grpc_error_string(error);
gpr_log(GPR_ERROR, "error: %s", error_string);
@@ -778,7 +788,7 @@ grpc_error *grpc_connected_subchannel_create_call(
return error;
}
GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");
- grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, pollent);
+ grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, args->pollent);
return GRPC_ERROR_NONE;
}
diff --git a/src/core/ext/client_channel/subchannel.h b/src/core/ext/client_channel/subchannel.h
index 6a70a76467..3e64a2507c 100644
--- a/src/core/ext/client_channel/subchannel.h
+++ b/src/core/ext/client_channel/subchannel.h
@@ -37,6 +37,7 @@
#include "src/core/ext/client_channel/connector.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/iomgr/polling_entity.h"
+#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata.h"
@@ -112,10 +113,18 @@ void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a subchannel call */
+typedef struct {
+ grpc_polling_entity *pollent;
+ grpc_slice path;
+ gpr_timespec start_time;
+ gpr_timespec deadline;
+ gpr_arena *arena;
+} grpc_connected_subchannel_call_args;
+
grpc_error *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
- grpc_polling_entity *pollent, grpc_slice path, gpr_timespec start_time,
- gpr_timespec deadline, grpc_subchannel_call **subchannel_call);
+ const grpc_connected_subchannel_call_args *args,
+ grpc_subchannel_call **subchannel_call);
/** process a transport level op */
void grpc_connected_subchannel_process_transport_op(
@@ -154,6 +163,11 @@ void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
char *grpc_subchannel_call_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call);
+/** Must be called once per call. Sets the 'then_schedule_closure' argument for
+ call stack destruction. */
+void grpc_subchannel_call_set_cleanup_closure(
+ grpc_subchannel_call *subchannel_call, grpc_closure *closure);
+
grpc_call_stack *grpc_subchannel_call_get_call_stack(
grpc_subchannel_call *subchannel_call);
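
For callers, the reworked grpc_connected_subchannel_create_call() takes its per-call inputs through the new args struct instead of positional parameters, and the call object itself now comes out of the caller-supplied arena. A hedged fragment showing the shape of such a call; exec_ctx, connected_subchannel, pollent, path, start_time, deadline and arena are assumed to already exist in the caller, as they would in the client channel code:

grpc_subchannel_call *subchannel_call = NULL;
const grpc_connected_subchannel_call_args call_args = {
    .pollent = pollent, /* polling entity driving this call */
    .path = path,       /* method path slice */
    .start_time = start_time,
    .deadline = deadline,
    .arena = arena};    /* gpr_arena the call object is allocated from */
grpc_error *error = grpc_connected_subchannel_create_call(
    exec_ctx, connected_subchannel, &call_args, &subchannel_call);
if (error != GRPC_ERROR_NONE) {
  /* creation failed; propagate or log the error as the caller sees fit */
}
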
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index d612591f2e..601b0e643b 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -925,7 +925,7 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "glb_shutdown");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
/* We need a copy of the lb_call pointer because we can't cancel the call
* while holding glb_policy->mu: lb_on_server_status_received, invoked due to
* the cancel, needs to acquire that same lock */
@@ -965,9 +965,9 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
} else {
pp->next = glb_policy->pending_picks;
glb_policy->pending_picks = pp;
@@ -989,9 +989,9 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
pending_pick *next = pp->next;
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_closure_sched(
- exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
} else {
pp->next = glb_policy->pending_picks;
glb_policy->pending_picks = pp;
@@ -1023,10 +1023,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *on_complete) {
if (pick_args->lb_token_mdelem_storage == NULL) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, on_complete,
- GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting "
- "won't work without it. Failing"));
+ grpc_closure_sched(exec_ctx, on_complete,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "No mdelem storage for the LB token. Load reporting "
+ "won't work without it. Failing"));
return 0;
}
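
Many of the hunks in this change are a mechanical move from GRPC_ERROR_CREATE / GRPC_ERROR_CREATE_REFERENCING to the _FROM_STATIC_STRING spellings. Judging purely from how the two variants are used within this diff, the static form is meant for string literals, while the _FROM_COPIED_STRING form (used with gpr_asprintf-built messages in the c-ares code later in this diff) copies its argument so the caller can free it immediately. A small sketch mirroring those usages; the attempts variable is a hypothetical local:

/* Literal message: the static-string form is used directly. */
grpc_error *cancelled =
    GRPC_ERROR_CREATE_FROM_STATIC_STRING("Pick Cancelled");

/* Dynamically built message: the copied-string form is used and the buffer
   freed right after, as in grpc_ares_ev_driver_create() later in this diff. */
char *msg;
gpr_asprintf(&msg, "Connect failed after %d attempts", attempts);
grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg); /* safe: the error holds its own copy */
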
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index e2a66d16bd..fc65dfdcb9 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -101,7 +101,7 @@ static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
p->pending_picks = NULL;
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel shutdown"), "shutdown");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown");
/* cancel subscription */
if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
@@ -131,9 +131,9 @@ static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -156,9 +156,9 @@ static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick Cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -325,8 +325,8 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE_REFERENCING("Pick first exhausted channels",
- &error, 1),
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick first exhausted channels", &error, 1),
"no_more_channels");
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
@@ -373,7 +373,8 @@ static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
+ grpc_closure_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
}
}
@@ -423,11 +424,13 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
"This LB policy doesn't support user data. It will be ignored");
}
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args =
- grpc_channel_args_copy_and_add(args->args, &addr_arg, 1);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
+ 1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index f2d1d46179..a62082a2ff 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -320,13 +320,14 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = NULL;
- grpc_closure_sched(exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE("Channel Shutdown"));
+ grpc_closure_sched(
+ exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
gpr_free(pp);
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
- GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
@@ -345,9 +346,9 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *next = pp->next;
if (pp->target == target) {
*target = NULL;
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -371,9 +372,9 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
*pp->target = NULL;
- grpc_closure_sched(
- exec_ctx, pp->on_complete,
- GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
+ grpc_closure_sched(exec_ctx, pp->on_complete,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
gpr_free(pp);
} else {
pp->next = p->pending_picks;
@@ -661,8 +662,8 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
} else {
- grpc_closure_sched(exec_ctx, closure,
- GRPC_ERROR_CREATE("Round Robin not connected"));
+ grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Round Robin not connected"));
}
}
@@ -709,11 +710,13 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
/* Skip balancer addresses, since we only know how to handle backends. */
if (addresses->addresses[i].is_balancer) continue;
+ static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
memset(&sc_args, 0, sizeof(grpc_subchannel_args));
grpc_arg addr_arg =
grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
- grpc_channel_args *new_args =
- grpc_channel_args_copy_and_add(args->args, &addr_arg, 1);
+ grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove(
+ args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg,
+ 1);
gpr_free(addr_arg.value.string);
sc_args.args = new_args;
grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index c2750634a5..ea57c85c3a 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -78,8 +78,8 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md));
calld->have_service_method = true;
} else {
- err =
- grpc_error_add_child(err, GRPC_ERROR_CREATE("Missing :path header"));
+ err = grpc_error_add_child(
+ err, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing :path header"));
}
if (calld->recv_initial_metadata->idx.named.lb_token != NULL) {
calld->initial_md_string = grpc_slice_ref_internal(
@@ -123,7 +123,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *calld = elem->call_data;
/* TODO(dgq): do something with the data
diff --git a/src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c
new file mode 100644
index 0000000000..f27da231f5
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c
@@ -0,0 +1,350 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+#if GRPC_ARES == 1 && !defined(GRPC_UV)
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/ext/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/resolver_registry.h"
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/support/backoff.h"
+#include "src/core/lib/support/env.h"
+#include "src/core/lib/support/string.h"
+
+#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1
+#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1
+#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6
+#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120
+#define GRPC_DNS_RECONNECT_JITTER 0.2
+
+typedef struct {
+ /** base class: must be first */
+ grpc_resolver base;
+ /** name to resolve (usually the same as target_name) */
+ char *name_to_resolve;
+ /** default port to use */
+ char *default_port;
+ /** channel args. */
+ grpc_channel_args *channel_args;
+ /** pollset_set to drive the name resolution process */
+ grpc_pollset_set *interested_parties;
+
+ /** Closures used by the combiner */
+ grpc_closure dns_ares_on_retry_timer_locked;
+ grpc_closure dns_ares_on_resolved_locked;
+
+ /** Combiner guarding the rest of the state */
+ grpc_combiner *combiner;
+ /** are we currently resolving? */
+ bool resolving;
+ /** which version of the result have we published? */
+ int published_version;
+ /** which version of the result is current? */
+ int resolved_version;
+ /** pending next completion, or NULL */
+ grpc_closure *next_completion;
+ /** target result address for next completion */
+ grpc_channel_args **target_result;
+ /** current (fully resolved) result */
+ grpc_channel_args *resolved_result;
+ /** retry timer */
+ bool have_retry_timer;
+ grpc_timer retry_timer;
+ /** retry backoff state */
+ gpr_backoff backoff_state;
+
+ /** currently resolving addresses */
+ grpc_resolved_addresses *addresses;
+} ares_dns_resolver;
+
+static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+
+static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r);
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r);
+
+static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *r);
+static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete);
+
+static const grpc_resolver_vtable dns_ares_resolver_vtable = {
+ dns_ares_destroy, dns_ares_shutdown_locked,
+ dns_ares_channel_saw_error_locked, dns_ares_next_locked};
+
+static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ if (r->have_retry_timer) {
+ grpc_timer_cancel(exec_ctx, &r->retry_timer);
+ }
+ if (r->next_completion != NULL) {
+ *r->target_result = NULL;
+ grpc_closure_sched(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
+ r->next_completion = NULL;
+ }
+}
+
+static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver) {
+ ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ if (!r->resolving) {
+ gpr_backoff_reset(&r->backoff_state);
+ dns_ares_start_resolving_locked(exec_ctx, r);
+ }
+}
+
+static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ ares_dns_resolver *r = arg;
+ r->have_retry_timer = false;
+ if (error == GRPC_ERROR_NONE) {
+ if (!r->resolving) {
+ dns_ares_start_resolving_locked(exec_ctx, r);
+ }
+ }
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
+}
+
+static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ ares_dns_resolver *r = arg;
+ grpc_channel_args *result = NULL;
+ GPR_ASSERT(r->resolving);
+ r->resolving = false;
+ if (r->addresses != NULL) {
+ grpc_lb_addresses *addresses = grpc_lb_addresses_create(
+ r->addresses->naddrs, NULL /* user_data_vtable */);
+ for (size_t i = 0; i < r->addresses->naddrs; ++i) {
+ grpc_lb_addresses_set_address(
+ addresses, i, &r->addresses->addrs[i].addr,
+ r->addresses->addrs[i].len, false /* is_balancer */,
+ NULL /* balancer_name */, NULL /* user_data */);
+ }
+ grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses);
+ result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1);
+ grpc_resolved_addresses_destroy(r->addresses);
+ grpc_lb_addresses_destroy(exec_ctx, addresses);
+ } else {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now);
+ gpr_timespec timeout = gpr_time_sub(next_try, now);
+ gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
+ grpc_error_string(error));
+ GPR_ASSERT(!r->have_retry_timer);
+ r->have_retry_timer = true;
+ GRPC_RESOLVER_REF(&r->base, "retry-timer");
+ if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
+ gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
+ timeout.tv_nsec);
+ } else {
+ gpr_log(GPR_DEBUG, "retrying immediately");
+ }
+ grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
+ &r->dns_ares_on_retry_timer_locked, now);
+ }
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+ }
+ r->resolved_result = result;
+ r->resolved_version++;
+ dns_ares_maybe_finish_next_locked(exec_ctx, r);
+ GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
+}
+
+static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx,
+ grpc_resolver *resolver,
+ grpc_channel_args **target_result,
+ grpc_closure *on_complete) {
+ gpr_log(GPR_DEBUG, "dns_ares_next is called.");
+ ares_dns_resolver *r = (ares_dns_resolver *)resolver;
+ GPR_ASSERT(!r->next_completion);
+ r->next_completion = on_complete;
+ r->target_result = target_result;
+ if (r->resolved_version == 0 && !r->resolving) {
+ gpr_backoff_reset(&r->backoff_state);
+ dns_ares_start_resolving_locked(exec_ctx, r);
+ } else {
+ dns_ares_maybe_finish_next_locked(exec_ctx, r);
+ }
+}
+
+static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r) {
+ GRPC_RESOLVER_REF(&r->base, "dns-resolving");
+ GPR_ASSERT(!r->resolving);
+ r->resolving = true;
+ r->addresses = NULL;
+ grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port,
+ r->interested_parties, &r->dns_ares_on_resolved_locked,
+ &r->addresses);
+}
+
+static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
+ ares_dns_resolver *r) {
+ if (r->next_completion != NULL &&
+ r->resolved_version != r->published_version) {
+ *r->target_result = r->resolved_result == NULL
+ ? NULL
+ : grpc_channel_args_copy(r->resolved_result);
+ grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+ r->next_completion = NULL;
+ r->published_version = r->resolved_version;
+ }
+}
+
+static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) {
+ gpr_log(GPR_DEBUG, "dns_ares_destroy");
+ ares_dns_resolver *r = (ares_dns_resolver *)gr;
+ if (r->resolved_result != NULL) {
+ grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+ }
+ grpc_pollset_set_destroy(exec_ctx, r->interested_parties);
+ gpr_free(r->name_to_resolve);
+ gpr_free(r->default_port);
+ grpc_channel_args_destroy(exec_ctx, r->channel_args);
+ gpr_free(r);
+}
+
+static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
+ grpc_resolver_args *args,
+ const char *default_port) {
+ // Get name from args.
+ const char *path = args->uri->path;
+ if (0 != strcmp(args->uri->authority, "")) {
+ gpr_log(GPR_ERROR, "authority based dns uri's not supported");
+ return NULL;
+ }
+ if (path[0] == '/') ++path;
+ // Create resolver.
+ ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
+ grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
+ r->name_to_resolve = gpr_strdup(path);
+ r->default_port = gpr_strdup(default_port);
+ r->channel_args = grpc_channel_args_copy(args->args);
+ r->interested_parties = grpc_pollset_set_create();
+ if (args->pollset_set != NULL) {
+ grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
+ args->pollset_set);
+ }
+ gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
+ GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
+ GRPC_DNS_RECONNECT_JITTER,
+ GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+ GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+ grpc_closure_init(&r->dns_ares_on_retry_timer_locked,
+ dns_ares_on_retry_timer_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false));
+ grpc_closure_init(&r->dns_ares_on_resolved_locked,
+ dns_ares_on_resolved_locked, r,
+ grpc_combiner_scheduler(r->base.combiner, false));
+ return &r->base;
+}
+
+/*
+ * FACTORY
+ */
+
+static void dns_ares_factory_ref(grpc_resolver_factory *factory) {}
+
+static void dns_ares_factory_unref(grpc_resolver_factory *factory) {}
+
+static grpc_resolver *dns_factory_create_resolver(
+ grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory,
+ grpc_resolver_args *args) {
+ return dns_ares_create(exec_ctx, args, "https");
+}
+
+static char *dns_ares_factory_get_default_host_name(
+ grpc_resolver_factory *factory, grpc_uri *uri) {
+ const char *path = uri->path;
+ if (path[0] == '/') ++path;
+ return gpr_strdup(path);
+}
+
+static const grpc_resolver_factory_vtable dns_ares_factory_vtable = {
+ dns_ares_factory_ref, dns_ares_factory_unref, dns_factory_create_resolver,
+ dns_ares_factory_get_default_host_name, "dns"};
+static grpc_resolver_factory dns_resolver_factory = {&dns_ares_factory_vtable};
+
+static grpc_resolver_factory *dns_ares_resolver_factory_create() {
+ return &dns_resolver_factory;
+}
+
+void grpc_resolver_dns_ares_init(void) {
+ char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ /* TODO(zyc): Turn on c-ares based resolver by default after the address
+ sorter and the CNAME support are added. */
+ if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
+ grpc_error *error = grpc_ares_init();
+ if (error != GRPC_ERROR_NONE) {
+ GRPC_LOG_IF_ERROR("ares_library_init() failed", error);
+ return;
+ }
+ grpc_resolve_address = grpc_resolve_address_ares;
+ grpc_register_resolver_type(dns_ares_resolver_factory_create());
+ }
+ gpr_free(resolver);
+}
+
+void grpc_resolver_dns_ares_shutdown(void) {
+ char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) {
+ grpc_ares_cleanup();
+ }
+ gpr_free(resolver);
+}
+
+#else /* GRPC_ARES == 1 && !defined(GRPC_UV) */
+
+void grpc_resolver_dns_ares_init(void) {}
+
+void grpc_resolver_dns_ares_shutdown(void) {}
+
+#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
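
The retry delay above is driven by gpr_backoff configured from the GRPC_DNS_* constants. Ignoring the 0.2 jitter and the minimum-timeout floor that gpr_backoff also applies, the delay between failed resolutions is a geometric series starting at one second, multiplied by 1.6 each time and capped at 120 seconds. A standalone sketch of just that arithmetic (not the gpr_backoff implementation):

#include <stdio.h>

int main(void) {
  /* GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS = 1,
     GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER = 1.6,
     GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS = 120 (jitter omitted). */
  double delay_s = 1.0;
  for (int attempt = 1; attempt <= 14; ++attempt) {
    printf("failed attempt %2d -> next retry in ~%.1f s\n", attempt, delay_s);
    delay_s *= 1.6;
    if (delay_s > 120.0) delay_s = 120.0;
  }
  return 0;
}

Repeated failures therefore back off from roughly one second to the 120-second cap after about a dozen attempts.
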
diff --git a/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h
new file mode 100644
index 0000000000..334feaa2ab
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
+#define GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H
+
+#include <ares.h>
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/pollset_set.h"
+
+typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
+
+/* Start \a ev_driver. It will keep working until all IO on its ares_channel is
+ done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks
+ bound to its ares_channel when necessary. */
+void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver);
+
+/* Returns the ares_channel owned by \a ev_driver. To bind a c-ares query to
+ \a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the
+ query. */
+ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver);
+
+/* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is
+ created successfully. */
+grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
+ grpc_pollset_set *pollset_set);
+
+/* Destroys \a ev_driver asynchronously. Pending lookups made on \a ev_driver
+ will be cancelled and their on_done callbacks will be invoked with a status
+ of ARES_ECANCELLED. */
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver);
+
+#endif /* GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H */
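
A hedged sketch of the call sequence this header implies: create a driver for the resolver's pollset_set, issue a c-ares query against the driver's channel, start the driver so the query's IO gets processed, and destroy the driver once its owner is done with it. ares_gethostbyname() and its callback signature are standard c-ares; everything prefixed my_ is a placeholder, not gRPC API:

#include <ares.h>

#include "src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h"

/* Placeholder c-ares completion callback. */
static void my_on_hostbyname_done(void *arg, int status, int timeouts,
                                  struct hostent *hostent) {
  /* inspect status and hostent, record results for the owner of arg */
}

static void my_start_lookup(grpc_exec_ctx *exec_ctx,
                            grpc_pollset_set *pollset_set, void *my_request) {
  grpc_ares_ev_driver *ev_driver = NULL;
  grpc_error *error = grpc_ares_ev_driver_create(&ev_driver, pollset_set);
  if (error != GRPC_ERROR_NONE) return; /* a real caller would surface this */
  /* Bind the query to the driver's channel, then let the driver poll it. */
  ares_gethostbyname(*grpc_ares_ev_driver_get_channel(ev_driver), "example.org",
                     AF_INET, my_on_hostbyname_done, my_request);
  grpc_ares_ev_driver_start(exec_ctx, ev_driver);
  /* Once the owner no longer needs the driver (for instance after the
     callback has run), it calls grpc_ares_ev_driver_destroy(ev_driver). */
}
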
diff --git a/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
new file mode 100644
index 0000000000..fab4f0c977
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c
@@ -0,0 +1,319 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <grpc/support/port_platform.h>
+#include "src/core/lib/iomgr/port.h"
+#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET)
+
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h"
+#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/support/string.h"
+
+typedef struct fd_node {
+ /** the owner of this fd node */
+ grpc_ares_ev_driver *ev_driver;
+ /** the grpc_fd owned by this fd node */
+ grpc_fd *grpc_fd;
+ /** a closure wrapping on_readable_cb, which should be invoked when the
+ grpc_fd in this node becomes readable. */
+ grpc_closure read_closure;
+ /** a closure wrapping on_writable_cb, which should be invoked when the
+ grpc_fd in this node becomes writable. */
+ grpc_closure write_closure;
+ /** next fd node in the list */
+ struct fd_node *next;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** if the readable closure has been registered */
+ bool readable_registered;
+ /** if the writable closure has been registered */
+ bool writable_registered;
+} fd_node;
+
+struct grpc_ares_ev_driver {
+ /** the ares_channel owned by this event driver */
+ ares_channel channel;
+ /** pollset set for driving the IO events of the channel */
+ grpc_pollset_set *pollset_set;
+ /** refcount of the event driver */
+ gpr_refcount refs;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** a list of grpc_fd that this event driver is currently using. */
+ fd_node *fds;
+ /** is this event driver currently working? */
+ bool working;
+ /** is this event driver being shut down */
+ bool shutting_down;
+};
+
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver);
+
+static grpc_ares_ev_driver *grpc_ares_ev_driver_ref(
+ grpc_ares_ev_driver *ev_driver) {
+ gpr_log(GPR_DEBUG, "Ref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
+ gpr_ref(&ev_driver->refs);
+ return ev_driver;
+}
+
+static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) {
+ gpr_log(GPR_DEBUG, "Unref ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
+ if (gpr_unref(&ev_driver->refs)) {
+ gpr_log(GPR_DEBUG, "destroy ev_driver %" PRIuPTR, (uintptr_t)ev_driver);
+ GPR_ASSERT(ev_driver->fds == NULL);
+ gpr_mu_destroy(&ev_driver->mu);
+ ares_destroy(ev_driver->channel);
+ gpr_free(ev_driver);
+ }
+}
+
+static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
+ gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ GPR_ASSERT(!fdn->readable_registered);
+ GPR_ASSERT(!fdn->writable_registered);
+ gpr_mu_destroy(&fdn->mu);
+ grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->grpc_fd);
+ grpc_fd_shutdown(exec_ctx, fdn->grpc_fd,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("fd node destroyed"));
+ grpc_fd_orphan(exec_ctx, fdn->grpc_fd, NULL, NULL, "c-ares query finished");
+ gpr_free(fdn);
+}
+
+grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
+ grpc_pollset_set *pollset_set) {
+ *ev_driver = gpr_malloc(sizeof(grpc_ares_ev_driver));
+ int status = ares_init(&(*ev_driver)->channel);
+ gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
+ if (status != ARES_SUCCESS) {
+ char *err_msg;
+ gpr_asprintf(&err_msg, "Failed to init ares channel. C-ares error: %s",
+ ares_strerror(status));
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
+ gpr_free(err_msg);
+ gpr_free(*ev_driver);
+ return err;
+ }
+ gpr_mu_init(&(*ev_driver)->mu);
+ gpr_ref_init(&(*ev_driver)->refs, 1);
+ (*ev_driver)->pollset_set = pollset_set;
+ (*ev_driver)->fds = NULL;
+ (*ev_driver)->working = false;
+ (*ev_driver)->shutting_down = false;
+ return GRPC_ERROR_NONE;
+}
+
+void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) {
+ // It's not safe to shut down remaining fds here directly, because
+ // ares_host_callback does not provide an exec_ctx. We mark the event driver
+ // as being shut down. If the event driver is working,
+ // grpc_ares_notify_on_event_locked will shut down the fds; if it's not
+ // working, there are no fds to shut down.
+ gpr_mu_lock(&ev_driver->mu);
+ ev_driver->shutting_down = true;
+ gpr_mu_unlock(&ev_driver->mu);
+ grpc_ares_ev_driver_unref(ev_driver);
+}
+
+// Search for fd in the fd_node list pointed to by head. This is an O(n)
+// search; the max possible value of n is ARES_GETSOCK_MAXNUM (16), and n is
+// typically 1-2 in our tests.
+static fd_node *pop_fd_node(fd_node **head, int fd) {
+ fd_node dummy_head;
+ dummy_head.next = *head;
+ fd_node *node = &dummy_head;
+ while (node->next != NULL) {
+ if (grpc_fd_wrapped_fd(node->next->grpc_fd) == fd) {
+ fd_node *ret = node->next;
+ node->next = node->next->next;
+ *head = dummy_head.next;
+ return ret;
+ }
+ node = node->next;
+ }
+ return NULL;
+}
+
+static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ fd_node *fdn = arg;
+ grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+ gpr_mu_lock(&fdn->mu);
+ fdn->readable_registered = false;
+ gpr_mu_unlock(&fdn->mu);
+
+ gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ if (error == GRPC_ERROR_NONE) {
+ ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->grpc_fd),
+ ARES_SOCKET_BAD);
+ } else {
+ // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
+ // timed out. The pending lookups made on this ev_driver will be cancelled
+ // by the following ares_cancel() and the on_done callbacks will be invoked
+ // with a status of ARES_ECANCELLED. The remaining file descriptors in this
+ // ev_driver will be cleaned up in the following
+ // grpc_ares_notify_on_event_locked().
+ ares_cancel(ev_driver->channel);
+ }
+ gpr_mu_lock(&ev_driver->mu);
+ grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+ gpr_mu_unlock(&ev_driver->mu);
+ grpc_ares_ev_driver_unref(ev_driver);
+}
+
+static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ fd_node *fdn = arg;
+ grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
+ gpr_mu_lock(&fdn->mu);
+ fdn->writable_registered = false;
+ gpr_mu_unlock(&fdn->mu);
+
+ gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->grpc_fd));
+ if (error == GRPC_ERROR_NONE) {
+ ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD,
+ grpc_fd_wrapped_fd(fdn->grpc_fd));
+ } else {
+ // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
+ // timed out. The pending lookups made on this ev_driver will be cancelled
+ // by the following ares_cancel() and the on_done callbacks will be invoked
+ // with a status of ARES_ECANCELLED. The remaining file descriptors in this
+ // ev_driver will be cleaned up in the following
+ // grpc_ares_notify_on_event_locked().
+ ares_cancel(ev_driver->channel);
+ }
+ gpr_mu_lock(&ev_driver->mu);
+ grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+ gpr_mu_unlock(&ev_driver->mu);
+ grpc_ares_ev_driver_unref(ev_driver);
+}
+
+ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver) {
+ return &ev_driver->channel;
+}
+
+// Get the file descriptors used by the ev_driver's ares channel, and register
+// the corresponding fd_node read/write closures with those file descriptors.
+static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver) {
+ fd_node *new_list = NULL;
+ if (!ev_driver->shutting_down) {
+ ares_socket_t socks[ARES_GETSOCK_MAXNUM];
+ int socks_bitmask =
+ ares_getsock(ev_driver->channel, socks, ARES_GETSOCK_MAXNUM);
+ for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
+ if (ARES_GETSOCK_READABLE(socks_bitmask, i) ||
+ ARES_GETSOCK_WRITABLE(socks_bitmask, i)) {
+ fd_node *fdn = pop_fd_node(&ev_driver->fds, socks[i]);
+ // Create a new fd_node if sock[i] is not in the fd_node list.
+ if (fdn == NULL) {
+ char *fd_name;
+ gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
+ fdn = gpr_malloc(sizeof(fd_node));
+ gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
+ fdn->grpc_fd = grpc_fd_create(socks[i], fd_name);
+ fdn->ev_driver = ev_driver;
+ fdn->readable_registered = false;
+ fdn->writable_registered = false;
+ gpr_mu_init(&fdn->mu);
+ grpc_closure_init(&fdn->read_closure, on_readable_cb, fdn,
+ grpc_schedule_on_exec_ctx);
+ grpc_closure_init(&fdn->write_closure, on_writable_cb, fdn,
+ grpc_schedule_on_exec_ctx);
+ grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set,
+ fdn->grpc_fd);
+ gpr_free(fd_name);
+ }
+ fdn->next = new_list;
+ new_list = fdn;
+ gpr_mu_lock(&fdn->mu);
+ // Register read_closure if the socket is readable and read_closure has
+ // not been registered with this socket.
+ if (ARES_GETSOCK_READABLE(socks_bitmask, i) &&
+ !fdn->readable_registered) {
+ grpc_ares_ev_driver_ref(ev_driver);
+ gpr_log(GPR_DEBUG, "notify read on: %d",
+ grpc_fd_wrapped_fd(fdn->grpc_fd));
+ grpc_fd_notify_on_read(exec_ctx, fdn->grpc_fd, &fdn->read_closure);
+ fdn->readable_registered = true;
+ }
+ // Register write_closure if the socket is writable and write_closure
+ // has not been registered with this socket.
+ if (ARES_GETSOCK_WRITABLE(socks_bitmask, i) &&
+ !fdn->writable_registered) {
+ gpr_log(GPR_DEBUG, "notify write on: %d",
+ grpc_fd_wrapped_fd(fdn->grpc_fd));
+ grpc_ares_ev_driver_ref(ev_driver);
+ grpc_fd_notify_on_write(exec_ctx, fdn->grpc_fd, &fdn->write_closure);
+ fdn->writable_registered = true;
+ }
+ gpr_mu_unlock(&fdn->mu);
+ }
+ }
+ }
+ // Any remaining fds in ev_driver->fds were not returned by ares_getsock() and
+ // are therefore no longer in use, so they can be shut down and removed from
+ // the list.
+ while (ev_driver->fds != NULL) {
+ fd_node *cur = ev_driver->fds;
+ ev_driver->fds = ev_driver->fds->next;
+ fd_node_destroy(exec_ctx, cur);
+ }
+ ev_driver->fds = new_list;
+ // If the ev driver has no working fd, all the tasks are done.
+ if (new_list == NULL) {
+ ev_driver->working = false;
+ gpr_log(GPR_DEBUG, "ev driver stop working");
+ }
+}
+
+void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx,
+ grpc_ares_ev_driver *ev_driver) {
+ gpr_mu_lock(&ev_driver->mu);
+ if (!ev_driver->working) {
+ ev_driver->working = true;
+ grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+ }
+ gpr_mu_unlock(&ev_driver->mu);
+}
+
+#endif /* GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET) */
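
At the heart of grpc_ares_notify_on_event_locked() is the ares_getsock() bitmask walk. Stripped of the grpc_fd and fd_node bookkeeping, the c-ares side of that pattern looks like the fragment below (channel is assumed to be an initialized ares_channel; this is an illustration of the library calls the driver relies on, not a drop-in replacement for the driver):

#include <ares.h>

/* Illustrative only: ask c-ares which sockets it currently wants watched. */
static void inspect_ares_sockets(ares_channel channel) {
  ares_socket_t socks[ARES_GETSOCK_MAXNUM];
  int bits = ares_getsock(channel, socks, ARES_GETSOCK_MAXNUM);
  for (int i = 0; i < ARES_GETSOCK_MAXNUM; i++) {
    if (ARES_GETSOCK_READABLE(bits, i)) {
      /* the ev driver registers a read closure here; when it fires it calls
         ares_process_fd(channel, socks[i], ARES_SOCKET_BAD) */
    }
    if (ARES_GETSOCK_WRITABLE(bits, i)) {
      /* symmetric on the write side:
         ares_process_fd(channel, ARES_SOCKET_BAD, socks[i]) */
    }
  }
}

Sockets that stop appearing in the bitmask are exactly the ones fd_node_destroy() cleans up on the next pass.
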
diff --git a/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c
new file mode 100644
index 0000000000..3eee8e3513
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c
@@ -0,0 +1,289 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+#if GRPC_ARES == 1 && !defined(GRPC_UV)
+
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils_posix.h"
+
+#include <string.h>
+#include <sys/types.h>
+
+#include <ares.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+#include "src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver.h"
+#include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/iomgr_internal.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/support/string.h"
+
+static gpr_once g_basic_init = GPR_ONCE_INIT;
+static gpr_mu g_init_mu;
+
+typedef struct grpc_ares_request {
+ /** following members are set in grpc_resolve_address_ares_impl */
+ /** host to resolve, parsed from the name to resolve */
+ char *host;
+ /** port to fill in sockaddr_in, parsed from the name to resolve */
+ char *port;
+ /** default port to use */
+ char *default_port;
+ /** closure to call when the request completes */
+ grpc_closure *on_done;
+ /** the pointer to receive the resolved addresses */
+ grpc_resolved_addresses **addrs_out;
+ /** the event driver used by this request */
+ grpc_ares_ev_driver *ev_driver;
+ /** number of ongoing queries */
+ gpr_refcount pending_queries;
+
+ /** mutex guarding the rest of the state */
+ gpr_mu mu;
+ /** whether at least one query has completed successfully; set in on_done_cb */
+ bool success;
+ /** the error(s) explaining the request failure; set in on_done_cb */
+ grpc_error *error;
+} grpc_ares_request;
+
+static void do_basic_init(void) { gpr_mu_init(&g_init_mu); }
+
+static uint16_t strhtons(const char *port) {
+ if (strcmp(port, "http") == 0) {
+ return htons(80);
+ } else if (strcmp(port, "https") == 0) {
+ return htons(443);
+ }
+ return htons((unsigned short)atoi(port));
+}
+
+static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx,
+ grpc_ares_request *r) {
+ /* If there are no more pending queries, invoke the on_done callback and
+ destroy the request */
+ if (gpr_unref(&r->pending_queries)) {
+ /* TODO(zyc): Sort results with RFC6724 before invoking on_done. */
+ if (exec_ctx == NULL) {
+ /* A new exec_ctx is created here, as the c-ares interface does not
+ provide one in ares_host_callback. It's safe to schedule on_done with
+ the newly created exec_ctx, since the caller has been warned not to
+ acquire locks in on_done. The ares_dns_resolver uses a combiner to
+ protect the resources needed by on_done. */
+ grpc_exec_ctx new_exec_ctx = GRPC_EXEC_CTX_INIT;
+ grpc_closure_sched(&new_exec_ctx, r->on_done, r->error);
+ grpc_exec_ctx_finish(&new_exec_ctx);
+ } else {
+ grpc_closure_sched(exec_ctx, r->on_done, r->error);
+ }
+ gpr_mu_destroy(&r->mu);
+ grpc_ares_ev_driver_destroy(r->ev_driver);
+ gpr_free(r->host);
+ gpr_free(r->port);
+ gpr_free(r->default_port);
+ gpr_free(r);
+ }
+}
+
+static void on_done_cb(void *arg, int status, int timeouts,
+ struct hostent *hostent) {
+ grpc_ares_request *r = (grpc_ares_request *)arg;
+ gpr_mu_lock(&r->mu);
+ if (status == ARES_SUCCESS) {
+ GRPC_ERROR_UNREF(r->error);
+ r->error = GRPC_ERROR_NONE;
+ r->success = true;
+ grpc_resolved_addresses **addresses = r->addrs_out;
+ if (*addresses == NULL) {
+ *addresses = gpr_malloc(sizeof(grpc_resolved_addresses));
+ (*addresses)->naddrs = 0;
+ (*addresses)->addrs = NULL;
+ }
+ size_t prev_naddr = (*addresses)->naddrs;
+ size_t i;
+ for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
+ }
+ (*addresses)->naddrs += i;
+ (*addresses)->addrs =
+ gpr_realloc((*addresses)->addrs,
+ sizeof(grpc_resolved_address) * (*addresses)->naddrs);
+ for (i = prev_naddr; i < (*addresses)->naddrs; i++) {
+ memset(&(*addresses)->addrs[i], 0, sizeof(grpc_resolved_address));
+ if (hostent->h_addrtype == AF_INET6) {
+ (*addresses)->addrs[i].len = sizeof(struct sockaddr_in6);
+ struct sockaddr_in6 *addr =
+ (struct sockaddr_in6 *)&(*addresses)->addrs[i].addr;
+ addr->sin6_family = (sa_family_t)hostent->h_addrtype;
+ addr->sin6_port = strhtons(r->port);
+
+ char output[INET6_ADDRSTRLEN];
+ memcpy(&addr->sin6_addr, hostent->h_addr_list[i - prev_naddr],
+ sizeof(struct in6_addr));
+ ares_inet_ntop(AF_INET6, &addr->sin6_addr, output, INET6_ADDRSTRLEN);
+ gpr_log(GPR_DEBUG,
+ "c-ares resolver gets a AF_INET6 result: \n"
+ " addr: %s\n port: %s\n sin6_scope_id: %d\n",
+ output, r->port, addr->sin6_scope_id);
+ } else {
+ (*addresses)->addrs[i].len = sizeof(struct sockaddr_in);
+ struct sockaddr_in *addr =
+ (struct sockaddr_in *)&(*addresses)->addrs[i].addr;
+ memcpy(&addr->sin_addr, hostent->h_addr_list[i - prev_naddr],
+ sizeof(struct in_addr));
+ addr->sin_family = (sa_family_t)hostent->h_addrtype;
+ addr->sin_port = strhtons(r->port);
+
+ char output[INET_ADDRSTRLEN];
+ ares_inet_ntop(AF_INET, &addr->sin_addr, output, INET_ADDRSTRLEN);
+ gpr_log(GPR_DEBUG,
+ "c-ares resolver gets a AF_INET result: \n"
+ " addr: %s\n port: %s\n",
+ output, r->port);
+ }
+ }
+ } else if (!r->success) {
+ char *error_msg;
+ gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s",
+ ares_strerror(status));
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ gpr_free(error_msg);
+ if (r->error == GRPC_ERROR_NONE) {
+ r->error = error;
+ } else {
+ r->error = grpc_error_add_child(error, r->error);
+ }
+ }
+ gpr_mu_unlock(&r->mu);
+ grpc_ares_request_unref(NULL, r);
+}
+
+void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, const char *name,
+ const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) {
+ /* TODO(zyc): Enable tracing after #9603 is checked in */
+ /* if (grpc_dns_trace) {
+ gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s",
+ name, default_port);
+ } */
+
+ /* parse name, splitting it into host and port parts */
+ char *host;
+ char *port;
+ gpr_split_host_port(name, &host, &port);
+ if (host == NULL) {
+ grpc_error *err = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("unparseable host:port"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
+ grpc_closure_sched(exec_ctx, on_done, err);
+ goto error_cleanup;
+ } else if (port == NULL) {
+ if (default_port == NULL) {
+ grpc_error *err = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("no port in name"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
+ grpc_closure_sched(exec_ctx, on_done, err);
+ goto error_cleanup;
+ }
+ port = gpr_strdup(default_port);
+ }
+
+ grpc_ares_ev_driver *ev_driver;
+ grpc_error *err = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
+ if (err != GRPC_ERROR_NONE) {
+ GRPC_LOG_IF_ERROR("grpc_ares_ev_driver_create() failed", err);
+ goto error_cleanup;
+ }
+
+ grpc_ares_request *r = gpr_malloc(sizeof(grpc_ares_request));
+ gpr_mu_init(&r->mu);
+ r->ev_driver = ev_driver;
+ r->on_done = on_done;
+ r->addrs_out = addrs;
+ r->default_port = gpr_strdup(default_port);
+ r->port = port;
+ r->host = host;
+ r->success = false;
+ r->error = GRPC_ERROR_NONE;
+ ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver);
+ gpr_ref_init(&r->pending_queries, 2);
+ if (grpc_ipv6_loopback_available()) {
+ gpr_ref(&r->pending_queries);
+ ares_gethostbyname(*channel, r->host, AF_INET6, on_done_cb, r);
+ }
+ ares_gethostbyname(*channel, r->host, AF_INET, on_done_cb, r);
+ /* TODO(zyc): Handle CNAME records here. */
+ grpc_ares_ev_driver_start(exec_ctx, r->ev_driver);
+ grpc_ares_request_unref(exec_ctx, r);
+ return;
+
+error_cleanup:
+ gpr_free(host);
+ gpr_free(port);
+}
+
+void (*grpc_resolve_address_ares)(
+ grpc_exec_ctx *exec_ctx, const char *name, const char *default_port,
+ grpc_pollset_set *interested_parties, grpc_closure *on_done,
+ grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl;
+
+grpc_error *grpc_ares_init(void) {
+ gpr_once_init(&g_basic_init, do_basic_init);
+ gpr_mu_lock(&g_init_mu);
+ int status = ares_library_init(ARES_LIB_INIT_ALL);
+ gpr_mu_unlock(&g_init_mu);
+
+ if (status != ARES_SUCCESS) {
+ char *error_msg;
+ gpr_asprintf(&error_msg, "ares_library_init failed: %s",
+ ares_strerror(status));
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg);
+ gpr_free(error_msg);
+ return error;
+ }
+ return GRPC_ERROR_NONE;
+}
+
+void grpc_ares_cleanup(void) {
+ gpr_mu_lock(&g_init_mu);
+ ares_library_cleanup();
+ gpr_mu_unlock(&g_init_mu);
+}
+
+#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
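The wrapper above issues one ares_gethostbyname() query per address family (AF_INET6 only when an IPv6 loopback is available), ref-counts the pending queries, and merges each hostent into the shared grpc_resolved_addresses inside on_done_cb. For reference, the underlying c-ares callback contract in isolation, stripped of all gRPC types; a minimal sketch where print_host is an illustrative name, not part of the tree:

#include <stdio.h>
#include <netdb.h>
#include <netinet/in.h>
#include <ares.h>

/* ares_host_callback: invoked once per ares_gethostbyname() query. The
   hostent is only valid for the duration of the callback, which is why
   on_done_cb above copies every h_addr_list entry into its own
   grpc_resolved_address. */
static void print_host(void *arg, int status, int timeouts,
                       struct hostent *hostent) {
  (void)arg;
  (void)timeouts;
  if (status != ARES_SUCCESS) {
    fprintf(stderr, "lookup failed: %s\n", ares_strerror(status));
    return;
  }
  for (size_t i = 0; hostent->h_addr_list[i] != NULL; i++) {
    char buf[INET6_ADDRSTRLEN];
    ares_inet_ntop(hostent->h_addrtype, hostent->h_addr_list[i], buf,
                   sizeof(buf));
    printf("%s result: %s\n",
           hostent->h_addrtype == AF_INET6 ? "AF_INET6" : "AF_INET", buf);
  }
}

/* usage: ares_gethostbyname(channel, "example.com", AF_INET, print_host,
   NULL); then drive the channel's sockets as in the ev_driver above. */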
diff --git a/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h
new file mode 100644
index 0000000000..ab00a26b36
--- /dev/null
+++ b/src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
+#define GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H
+
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/polling_entity.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+
+/* Asynchronously resolve \a addr. Use \a default_port if no port is designated
+ in \a addr; otherwise use the port in \a addr. grpc_ares_init() must be called
+ at least once before this function. \a on_done may be called directly from
+ this function without being scheduled with \a exec_ctx, so it must not try to
+ acquire locks that are held by the caller. */
+extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx,
+ const char *addr,
+ const char *default_port,
+ grpc_pollset_set *interested_parties,
+ grpc_closure *on_done,
+ grpc_resolved_addresses **addresses);
+
+/* Initializes the gRPC ares wrapper. Must be called at least once before
+ grpc_resolve_address_ares(). */
+grpc_error *grpc_ares_init(void);
+
+/* Uninitializes the gRPC ares wrapper. If grpc_ares_init() has been called
+ more than once, this function must be called the same number of times before
+ the wrapper is actually uninitialized. */
+void grpc_ares_cleanup(void);
+
+#endif /* GRPC_CORE_EXT_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H */
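A rough caller sketch for the API declared above, assuming the iomgr closure helpers visible elsewhere in this diff (grpc_closure_create, grpc_schedule_on_exec_ctx, GRPC_LOG_IF_ERROR); the names on_resolved, resolve_target, and g_addrs are illustrative and exact signatures may differ slightly in other revisions:

/* Illustrative only: not part of the tree. */
static grpc_resolved_addresses *g_addrs = NULL;

static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  (void)arg;
  if (error == GRPC_ERROR_NONE && g_addrs != NULL) {
    for (size_t i = 0; i < g_addrs->naddrs; i++) {
      /* each g_addrs->addrs[i] holds a sockaddr and its length */
    }
    grpc_resolved_addresses_destroy(g_addrs);
    g_addrs = NULL;
  }
}

static void resolve_target(grpc_exec_ctx *exec_ctx, const char *target,
                           grpc_pollset_set *interested_parties) {
  GRPC_LOG_IF_ERROR("ares_init", grpc_ares_init());
  grpc_resolve_address_ares(
      exec_ctx, target, "https" /* default port */, interested_parties,
      grpc_closure_create(on_resolved, NULL, grpc_schedule_on_exec_ctx),
      &g_addrs);
}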
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index d227c19c43..97cd0486a9 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -44,6 +44,7 @@
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
+#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1
@@ -113,8 +114,9 @@ static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx,
}
if (r->next_completion != NULL) {
*r->target_result = NULL;
- grpc_closure_sched(exec_ctx, r->next_completion,
- GRPC_ERROR_CREATE("Resolver Shutdown"));
+ grpc_closure_sched(
+ exec_ctx, r->next_completion,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
r->next_completion = NULL;
}
}
@@ -303,7 +305,21 @@ static grpc_resolver_factory *dns_resolver_factory_create() {
}
void grpc_resolver_dns_native_init(void) {
- grpc_register_resolver_type(dns_resolver_factory_create());
+ char *resolver = gpr_getenv("GRPC_DNS_RESOLVER");
+ if (resolver != NULL && gpr_stricmp(resolver, "native") == 0) {
+ gpr_log(GPR_DEBUG, "Using native dns resolver");
+ grpc_register_resolver_type(dns_resolver_factory_create());
+ } else {
+ grpc_resolver_factory *existing_factory =
+ grpc_resolver_factory_lookup("dns");
+ if (existing_factory == NULL) {
+ gpr_log(GPR_DEBUG, "Using native dns resolver");
+ grpc_register_resolver_type(dns_resolver_factory_create());
+ } else {
+ grpc_resolver_factory_unref(existing_factory);
+ }
+ }
+ gpr_free(resolver);
}
void grpc_resolver_dns_native_shutdown(void) {}
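With this change the native resolver only registers itself as the "dns" factory when GRPC_DNS_RESOLVER=native is set, or when no other "dns" factory (i.e. the c-ares one) registered first. A process that wants to force the native path can set the variable before grpc_init(); a minimal sketch using POSIX setenv (the main function here is hypothetical; setting the variable in the shell works equally well):

#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  /* Must be set before grpc_init() runs the resolver plugins. */
  setenv("GRPC_DNS_RESOLVER", "native", 1 /* overwrite */);
  grpc_init();
  /* ... create channels as usual; DNS targets now use the native resolver. */
  grpc_shutdown();
  return 0;
}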
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index eae0145ecc..2b226c1bf7 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -63,8 +63,6 @@ typedef struct {
grpc_closure *notify;
grpc_connect_in_args args;
grpc_connect_out_args *result;
- grpc_closure initial_string_sent;
- grpc_slice_buffer initial_string_buffer;
grpc_endpoint *endpoint; // Non-NULL until handshaking starts.
@@ -82,7 +80,6 @@ static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx,
grpc_connector *con) {
chttp2_connector *c = (chttp2_connector *)con;
if (gpr_unref(&c->refs)) {
- /* c->initial_string_buffer does not need to be destroyed */
gpr_mu_destroy(&c->mu);
// If handshaking is not yet in progress, destroy the endpoint.
// Otherwise, the handshaker will do this for us.
@@ -116,7 +113,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&c->mu);
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
- error = GRPC_ERROR_CREATE("connector shutdown");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
// We were shut down after handshaking completed successfully, so
// destroy the endpoint here.
// TODO(ctiller): It is currently necessary to shutdown endpoints
@@ -160,28 +157,6 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
c->endpoint = NULL; // Endpoint handed off to handshake manager.
}
-static void on_initial_connect_string_sent(grpc_exec_ctx *exec_ctx, void *arg,
- grpc_error *error) {
- chttp2_connector *c = arg;
- gpr_mu_lock(&c->mu);
- if (error != GRPC_ERROR_NONE || c->shutdown) {
- if (error == GRPC_ERROR_NONE) {
- error = GRPC_ERROR_CREATE("connector shutdown");
- } else {
- error = GRPC_ERROR_REF(error);
- }
- memset(c->result, 0, sizeof(*c->result));
- grpc_closure *notify = c->notify;
- c->notify = NULL;
- grpc_closure_sched(exec_ctx, notify, error);
- gpr_mu_unlock(&c->mu);
- chttp2_connector_unref(exec_ctx, arg);
- } else {
- start_handshake_locked(exec_ctx, c);
- gpr_mu_unlock(&c->mu);
- }
-}
-
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
chttp2_connector *c = arg;
gpr_mu_lock(&c->mu);
@@ -189,7 +164,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
c->connecting = false;
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
- error = GRPC_ERROR_CREATE("connector shutdown");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
} else {
error = GRPC_ERROR_REF(error);
}
@@ -204,17 +179,7 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
chttp2_connector_unref(exec_ctx, arg);
} else {
GPR_ASSERT(c->endpoint != NULL);
- if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
- grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
- c, grpc_schedule_on_exec_ctx);
- grpc_slice_buffer_init(&c->initial_string_buffer);
- grpc_slice_buffer_add(&c->initial_string_buffer,
- c->args.initial_connect_string);
- grpc_endpoint_write(exec_ctx, c->endpoint, &c->initial_string_buffer,
- &c->initial_string_sent);
- } else {
- start_handshake_locked(exec_ctx, c);
- }
+ start_handshake_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
}
}
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index 837b9fd03d..6433968ca5 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -256,7 +256,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved",
naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
+ err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);
goto error;
} else if (count != naddrs) {
@@ -264,7 +264,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&msg, "Only %" PRIuPTR
" addresses added out of total %" PRIuPTR " resolved",
count, naddrs);
- err = GRPC_ERROR_CREATE_REFERENCING(msg, errors, naddrs);
+ err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);
const char *warning_message = grpc_error_string(err);
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
index cb2b3f5502..88c813f3be 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c
@@ -61,7 +61,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
3, (server, addr, creds));
// Create security context.
if (creds == NULL) {
- err = GRPC_ERROR_CREATE(
+ err = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No credentials specified for secure server port (creds==NULL)");
goto done;
}
@@ -72,7 +72,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
gpr_asprintf(&msg,
"Unable to create secure server with credentials of type %s.",
creds->type);
- err = grpc_error_set_int(GRPC_ERROR_CREATE(msg),
+ err = grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg),
GRPC_ERROR_INT_SECURITY_STATUS, status);
gpr_free(msg);
goto done;
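The GRPC_ERROR_CREATE to GRPC_ERROR_CREATE_FROM_STATIC_STRING / GRPC_ERROR_CREATE_FROM_COPIED_STRING migration that runs through the hunks above and below follows one rule: a string literal with static lifetime uses the STATIC variant (no copy is taken), while a gpr_asprintf-built message uses the COPIED variant and the buffer is freed immediately afterwards. In sketch form, using only calls that appear in this diff:

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/error.h"

/* literal with static lifetime: the error references it without copying */
static grpc_error *shutdown_error(void) {
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
}

/* formatted message: the error keeps its own copy, so free the buffer */
static grpc_error *bad_ping_error(int length, int flags) {
  char *msg;
  gpr_asprintf(&msg, "invalid ping: length=%d, flags=%02x", length, flags);
  grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
  gpr_free(msg);
  return err;
}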
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index a3684535ff..8676a3752e 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -187,7 +187,8 @@ static void destruct_transport(grpc_exec_ctx *exec_ctx,
GRPC_COMBINER_UNREF(exec_ctx, t->combiner, "chttp2_transport");
- cancel_pings(exec_ctx, t, GRPC_ERROR_CREATE("Transport destroyed"));
+ cancel_pings(exec_ctx, t,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
while (t->write_cb_pool) {
grpc_chttp2_write_cb *next = t->write_cb_pool->next;
@@ -494,8 +495,9 @@ static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
- grpc_error_set_int(GRPC_ERROR_CREATE("Transport destroyed"),
- GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destroy");
}
@@ -518,7 +520,8 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) {
if (t->close_transport_on_writes_finished == NULL) {
t->close_transport_on_writes_finished =
- GRPC_ERROR_CREATE("Delayed close due to in-progress write");
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Delayed close due to in-progress write");
}
t->close_transport_on_writes_finished =
grpc_error_add_child(t->close_transport_on_writes_finished, error);
@@ -575,7 +578,7 @@ void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s) {
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
- const void *server_data) {
+ const void *server_data, gpr_arena *arena) {
GPR_TIMER_BEGIN("init_stream", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
@@ -588,8 +591,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
gpr_ref_init(&s->active_streams, 1);
GRPC_CHTTP2_STREAM_REF(s, "chttp2");
- grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0]);
- grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1]);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0], arena);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena);
grpc_chttp2_data_parser_init(&s->data_parser);
grpc_slice_buffer_init(&s->flow_controlled_buffer);
s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
@@ -665,16 +668,17 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
GPR_TIMER_END("destroy_stream", 0);
- gpr_free(s->destroy_stream_arg);
+ grpc_closure_sched(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE);
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, void *and_free_memory) {
+ grpc_stream *gs,
+ grpc_closure *then_schedule_closure) {
GPR_TIMER_BEGIN("destroy_stream", 0);
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
- s->destroy_stream_arg = and_free_memory;
+ s->destroy_stream_arg = then_schedule_closure;
grpc_closure_sched(
exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s,
grpc_combiner_scheduler(t->combiner, false)),
@@ -833,7 +837,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) {
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT;
if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
- close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE("goaway sent"));
+ close_transport_locked(
+ exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("goaway sent"));
}
}
@@ -897,22 +902,19 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
uint32_t goaway_error,
grpc_slice goaway_text) {
- char *msg = grpc_dump_slice(goaway_text, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- GRPC_CHTTP2_IF_TRACING(
- gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
- grpc_slice_unref_internal(exec_ctx, goaway_text);
+ // GRPC_CHTTP2_IF_TRACING(
+ // gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg));
t->seen_goaway = 1;
/* lie: use transient failure from the transport to indicate goaway has been
* received */
connectivity_state_set(
exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_str(
- grpc_error_set_int(GRPC_ERROR_CREATE("GOAWAY received"),
- GRPC_ERROR_INT_HTTP2_ERROR,
- (intptr_t)goaway_error),
- GRPC_ERROR_STR_RAW_BYTES, msg),
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"),
+ GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)goaway_error),
+ GRPC_ERROR_STR_RAW_BYTES, goaway_text),
"got_goaway");
- gpr_free(msg);
}
static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
@@ -935,9 +937,10 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
t->next_stream_id += 2;
if (t->next_stream_id >= MAX_CLIENT_STREAM_ID) {
- connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
- GRPC_ERROR_CREATE("Stream IDs exhausted"),
- "no_more_stream_ids");
+ connectivity_state_set(
+ exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"),
+ "no_more_stream_ids");
}
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
@@ -951,9 +954,9 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
- grpc_error_set_int(GRPC_ERROR_CREATE("Stream IDs exhausted"),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
}
@@ -1002,11 +1005,11 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
- closure->error_data.error =
- GRPC_ERROR_CREATE("Error in HTTP transport completing operation");
- closure->error_data.error =
- grpc_error_set_str(closure->error_data.error,
- GRPC_ERROR_STR_TARGET_ADDRESS, t->peer_string);
+ closure->error_data.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Error in HTTP transport completing operation");
+ closure->error_data.error = grpc_error_set_str(
+ closure->error_data.error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(t->peer_string));
}
closure->error_data.error =
grpc_error_add_child(closure->error_data.error, error);
@@ -1181,10 +1184,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
- grpc_error_set_int(
- GRPC_ERROR_CREATE("to-be-sent initial metadata size "
- "exceeds peer limit"),
- GRPC_ERROR_INT_SIZE, (intptr_t)metadata_size),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "to-be-sent initial metadata size "
+ "exceeds peer limit"),
+ GRPC_ERROR_INT_SIZE,
+ (intptr_t)metadata_size),
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
@@ -1200,9 +1204,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else {
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
- grpc_error_set_int(GRPC_ERROR_CREATE("Transport closed"),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
} else {
GPR_ASSERT(s->id != 0);
@@ -1214,7 +1218,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
s->send_initial_metadata = NULL;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished,
- GRPC_ERROR_CREATE_REFERENCING(
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Attempt to send initial metadata after stream was closed",
&s->write_closed_error, 1),
"send_initial_metadata_finished");
@@ -1228,7 +1232,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if (s->write_closed) {
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished,
- GRPC_ERROR_CREATE_REFERENCING(
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Attempt to send message after stream was closed",
&s->write_closed_error, 1),
"fetching_send_message_finished");
@@ -1276,10 +1280,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
exec_ctx, t, s,
grpc_error_set_int(
grpc_error_set_int(
- grpc_error_set_int(
- GRPC_ERROR_CREATE("to-be-sent trailing metadata size "
- "exceeds peer limit"),
- GRPC_ERROR_INT_SIZE, (intptr_t)metadata_size),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "to-be-sent trailing metadata size "
+ "exceeds peer limit"),
+ GRPC_ERROR_INT_SIZE,
+ (intptr_t)metadata_size),
GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
} else {
@@ -1292,8 +1297,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
exec_ctx, t, s, &s->send_trailing_metadata_finished,
grpc_metadata_batch_is_empty(op->send_trailing_metadata)
? GRPC_ERROR_NONE
- : GRPC_ERROR_CREATE("Attempt to send trailing metadata after "
- "stream was closed"),
+ : GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Attempt to send trailing metadata after "
+ "stream was closed"),
"send_trailing_metadata_finished");
} else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
@@ -1408,11 +1414,11 @@ static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_error *error) {
t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
grpc_http2_error_code http_error;
- const char *msg;
- grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL, &msg,
- &http_error);
+ grpc_slice slice;
+ grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL,
+ &slice, &http_error);
grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
- grpc_slice_from_copied_string(msg), &t->qbuf);
+ grpc_slice_ref_internal(slice), &t->qbuf);
grpc_chttp2_initiate_write(exec_ctx, t, false, "goaway_sent");
GRPC_ERROR_UNREF(error);
}
@@ -1572,7 +1578,7 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) {
close_transport_locked(
exec_ctx, t,
- GRPC_ERROR_CREATE_REFERENCING(
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Last stream closed after sending GOAWAY", &error, 1));
}
}
@@ -1613,8 +1619,8 @@ void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
grpc_status_code status;
- const char *msg;
- grpc_error_get_status(error, s->deadline, &status, &msg, NULL);
+ grpc_slice slice;
+ grpc_error_get_status(error, s->deadline, &status, &slice, NULL);
if (status != GRPC_STATUS_OK) {
s->seen_error = true;
@@ -1629,15 +1635,19 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
s->recv_trailing_metadata_finished != NULL) {
char status_string[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(status, status_string);
- grpc_chttp2_incoming_metadata_buffer_replace_or_add(
- exec_ctx, &s->metadata_buffer[1],
- grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS,
- grpc_slice_from_copied_string(status_string)));
- if (msg != NULL) {
- grpc_chttp2_incoming_metadata_buffer_replace_or_add(
- exec_ctx, &s->metadata_buffer[1],
- grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
- grpc_slice_from_copied_string(msg)));
+ GRPC_LOG_IF_ERROR("add_status",
+ grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ exec_ctx, &s->metadata_buffer[1],
+ grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_GRPC_STATUS,
+ grpc_slice_from_copied_string(status_string))));
+ if (!GRPC_SLICE_IS_EMPTY(slice)) {
+ GRPC_LOG_IF_ERROR(
+ "add_status_message",
+ grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ exec_ctx, &s->metadata_buffer[1],
+ grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+ grpc_slice_ref_internal(slice))));
}
s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
@@ -1666,7 +1676,8 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
add_error(extra_error, refs, &nrefs);
grpc_error *error = GRPC_ERROR_NONE;
if (nrefs > 0) {
- error = GRPC_ERROR_CREATE_REFERENCING(master_error_msg, refs, nrefs);
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(master_error_msg,
+ refs, nrefs);
}
GRPC_ERROR_UNREF(extra_error);
return error;
@@ -1760,12 +1771,13 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
grpc_slice hdr;
grpc_slice status_hdr;
+ grpc_slice http_status_hdr;
grpc_slice message_pfx;
uint8_t *p;
uint32_t len = 0;
grpc_status_code grpc_status;
- const char *msg;
- grpc_error_get_status(error, s->deadline, &grpc_status, &msg, NULL);
+ grpc_slice slice;
+ grpc_error_get_status(error, s->deadline, &grpc_status, &slice, NULL);
GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
@@ -1775,6 +1787,26 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
It's complicated by the fact that our send machinery would be dead by
the time we got around to sending this, so instead we ignore HPACK
compression and just write the uncompressed bytes onto the wire. */
+ if (!s->sent_initial_metadata) {
+ http_status_hdr = grpc_slice_malloc(13);
+ p = GRPC_SLICE_START_PTR(http_status_hdr);
+ *p++ = 0x00;
+ *p++ = 7;
+ *p++ = ':';
+ *p++ = 's';
+ *p++ = 't';
+ *p++ = 'a';
+ *p++ = 't';
+ *p++ = 'u';
+ *p++ = 's';
+ *p++ = 3;
+ *p++ = '2';
+ *p++ = '0';
+ *p++ = '0';
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(http_status_hdr));
+ len += (uint32_t)GRPC_SLICE_LENGTH(http_status_hdr);
+ }
+
status_hdr = grpc_slice_malloc(15 + (grpc_status >= 10));
p = GRPC_SLICE_START_PTR(status_hdr);
*p++ = 0x00; /* literal header, not indexed */
@@ -1801,32 +1833,30 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(p == GRPC_SLICE_END_PTR(status_hdr));
len += (uint32_t)GRPC_SLICE_LENGTH(status_hdr);
- if (msg != NULL) {
- size_t msg_len = strlen(msg);
- GPR_ASSERT(msg_len <= UINT32_MAX);
- uint32_t msg_len_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)msg_len, 0);
- message_pfx = grpc_slice_malloc(14 + msg_len_len);
- p = GRPC_SLICE_START_PTR(message_pfx);
- *p++ = 0x00; /* literal header, not indexed */
- *p++ = 12; /* len(grpc-message) */
- *p++ = 'g';
- *p++ = 'r';
- *p++ = 'p';
- *p++ = 'c';
- *p++ = '-';
- *p++ = 'm';
- *p++ = 'e';
- *p++ = 's';
- *p++ = 's';
- *p++ = 'a';
- *p++ = 'g';
- *p++ = 'e';
- GRPC_CHTTP2_WRITE_VARINT((uint32_t)msg_len, 0, 0, p, (uint32_t)msg_len_len);
- p += msg_len_len;
- GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx));
- len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx);
- len += (uint32_t)msg_len;
- }
+ size_t msg_len = GRPC_SLICE_LENGTH(slice);
+ GPR_ASSERT(msg_len <= UINT32_MAX);
+ uint32_t msg_len_len = GRPC_CHTTP2_VARINT_LENGTH((uint32_t)msg_len, 1);
+ message_pfx = grpc_slice_malloc(14 + msg_len_len);
+ p = GRPC_SLICE_START_PTR(message_pfx);
+ *p++ = 0x00; /* literal header, not indexed */
+ *p++ = 12; /* len(grpc-message) */
+ *p++ = 'g';
+ *p++ = 'r';
+ *p++ = 'p';
+ *p++ = 'c';
+ *p++ = '-';
+ *p++ = 'm';
+ *p++ = 'e';
+ *p++ = 's';
+ *p++ = 's';
+ *p++ = 'a';
+ *p++ = 'g';
+ *p++ = 'e';
+ GRPC_CHTTP2_WRITE_VARINT((uint32_t)msg_len, 1, 0, p, (uint32_t)msg_len_len);
+ p += msg_len_len;
+ GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx));
+ len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx);
+ len += (uint32_t)msg_len;
hdr = grpc_slice_malloc(9);
p = GRPC_SLICE_START_PTR(hdr);
@@ -1842,11 +1872,12 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(p == GRPC_SLICE_END_PTR(hdr));
grpc_slice_buffer_add(&t->qbuf, hdr);
- grpc_slice_buffer_add(&t->qbuf, status_hdr);
- if (msg != NULL) {
- grpc_slice_buffer_add(&t->qbuf, message_pfx);
- grpc_slice_buffer_add(&t->qbuf, grpc_slice_from_copied_string(msg));
+ if (!s->sent_initial_metadata) {
+ grpc_slice_buffer_add(&t->qbuf, http_status_hdr);
}
+ grpc_slice_buffer_add(&t->qbuf, status_hdr);
+ grpc_slice_buffer_add(&t->qbuf, message_pfx);
+ grpc_slice_buffer_add(&t->qbuf, grpc_slice_ref_internal(slice));
grpc_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
&s->stats.outgoing));
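close_from_api now always emits an uncompressed grpc-message header and, when initial metadata was never sent, a synthetic ":status: 200", writing raw HPACK "literal header field, not indexed" entries byte by byte: a 0x00 opcode byte, a length-prefixed name, then a length-prefixed value (these lengths fit in a single byte, so the varint machinery is only needed for the message value). A hedged sketch of that encoding as a helper; append_literal_header is illustrative and omits the varint/huffman cases the real code handles via GRPC_CHTTP2_WRITE_VARINT:

#include <stdint.h>
#include <string.h>
#include <grpc/slice.h>
#include <grpc/support/log.h>

/* Encode one HPACK "literal header field without indexing, new name" entry
   into a fresh slice. Assumes name and value are each under 127 bytes. */
static grpc_slice append_literal_header(const char *name, const char *value) {
  size_t name_len = strlen(name);
  size_t value_len = strlen(value);
  grpc_slice out = grpc_slice_malloc(3 + name_len + value_len);
  uint8_t *p = GRPC_SLICE_START_PTR(out);
  *p++ = 0x00;              /* literal header, not indexed, new name */
  *p++ = (uint8_t)name_len; /* name length, huffman bit clear */
  memcpy(p, name, name_len);
  p += name_len;
  *p++ = (uint8_t)value_len; /* value length, huffman bit clear */
  memcpy(p, value, value_len);
  p += value_len;
  GPR_ASSERT(p == GRPC_SLICE_END_PTR(out));
  return out;
}

/* e.g. append_literal_header(":status", "200") reproduces the 13-byte
   http_status_hdr built above. */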
@@ -1921,9 +1952,9 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
if (parse_error == GRPC_ERROR_NONE &&
(parse_error = grpc_http_parser_eof(&parser)) == GRPC_ERROR_NONE) {
error = grpc_error_set_int(
- grpc_error_set_int(
- GRPC_ERROR_CREATE("Trying to connect an http1.x server"),
- GRPC_ERROR_INT_HTTP_STATUS, response.status),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Trying to connect an http1.x server"),
+ GRPC_ERROR_INT_HTTP_STATUS, response.status),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
}
GRPC_ERROR_UNREF(parse_error);
@@ -1944,9 +1975,10 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *err = error;
if (err != GRPC_ERROR_NONE) {
- err = grpc_error_set_int(
- GRPC_ERROR_CREATE_REFERENCING("Endpoint read failed", &err, 1),
- GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state);
+ err = grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Endpoint read failed", &err, 1),
+ GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
+ t->write_state);
}
GPR_SWAP(grpc_error *, err, error);
GRPC_ERROR_UNREF(err);
@@ -1967,8 +1999,8 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (errors[1] != GRPC_ERROR_NONE) {
errors[2] = try_http_parsing(exec_ctx, t);
GRPC_ERROR_UNREF(error);
- error = GRPC_ERROR_CREATE_REFERENCING("Failed parsing HTTP/2", errors,
- GPR_ARRAY_SIZE(errors));
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed parsing HTTP/2", errors, GPR_ARRAY_SIZE(errors));
}
for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) {
GRPC_ERROR_UNREF(errors[i]);
@@ -1993,7 +2025,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
GPR_TIMER_BEGIN("post_reading_action_locked", 0);
bool keep_reading = false;
if (error == GRPC_ERROR_NONE && t->closed) {
- error = GRPC_ERROR_CREATE("Transport closed");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed");
}
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@@ -2128,8 +2160,8 @@ static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
- close_transport_locked(exec_ctx, t,
- GRPC_ERROR_CREATE("keepalive watchdog timeout"));
+ close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "keepalive watchdog timeout"));
}
} else {
/** The watchdog timer should have been cancelled by
@@ -2328,7 +2360,8 @@ void grpc_chttp2_incoming_byte_stream_push(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&bs->slice_mu);
if (bs->remaining_bytes < GRPC_SLICE_LENGTH(slice)) {
incoming_byte_stream_publish_error(
- exec_ctx, bs, GRPC_ERROR_CREATE("Too many bytes in stream"));
+ exec_ctx, bs,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream"));
} else {
bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice);
if (bs->on_next != NULL) {
@@ -2348,7 +2381,7 @@ void grpc_chttp2_incoming_byte_stream_finished(
if (error == GRPC_ERROR_NONE) {
gpr_mu_lock(&bs->slice_mu);
if (bs->remaining_bytes != 0) {
- error = GRPC_ERROR_CREATE("Truncated message");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
}
gpr_mu_unlock(&bs->slice_mu);
}
@@ -2428,9 +2461,9 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
t->peer_string);
}
send_goaway(exec_ctx, t,
- grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
- GRPC_ERROR_INT_HTTP2_ERROR,
- GRPC_HTTP2_ENHANCE_YOUR_CALM));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
@@ -2457,9 +2490,10 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
s->id);
}
grpc_chttp2_cancel_stream(
- exec_ctx, t, s, grpc_error_set_int(GRPC_ERROR_CREATE("Buffers full"),
- GRPC_ERROR_INT_HTTP2_ERROR,
- GRPC_HTTP2_ENHANCE_YOUR_CALM));
+ exec_ctx, t, s,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
+ GRPC_ERROR_INT_HTTP2_ERROR,
+ GRPC_HTTP2_ENHANCE_YOUR_CALM));
if (n > 1) {
/* Since we cancel one stream per destructive reclamation, if
there are more streams left, we can immediately post a new
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c
index f9b9e1b309..6e9258ee7e 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.c
+++ b/src/core/ext/transport/chttp2/transport/frame_data.c
@@ -54,7 +54,8 @@ void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_data_parser *parser) {
if (parser->parsing_frame != NULL) {
grpc_chttp2_incoming_byte_stream_finished(
- exec_ctx, parser->parsing_frame, GRPC_ERROR_CREATE("Parser destroyed"));
+ exec_ctx, parser->parsing_frame,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Parser destroyed"));
}
GRPC_ERROR_UNREF(parser->error);
}
@@ -65,8 +66,9 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser,
if (flags & ~GRPC_CHTTP2_DATA_FLAG_END_STREAM) {
char *msg;
gpr_asprintf(&msg, "unsupported data flags: 0x%02x", flags);
- grpc_error *err = grpc_error_set_int(
- GRPC_ERROR_CREATE(msg), GRPC_ERROR_INT_STREAM_ID, (intptr_t)stream_id);
+ grpc_error *err =
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg),
+ GRPC_ERROR_INT_STREAM_ID, (intptr_t)stream_id);
gpr_free(msg);
return err;
}
@@ -173,13 +175,13 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
break;
default:
gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type);
- p->error = GRPC_ERROR_CREATE(msg);
+ p->error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID,
(intptr_t)s->id);
gpr_free(msg);
msg = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- p->error =
- grpc_error_set_str(p->error, GRPC_ERROR_STR_RAW_BYTES, msg);
+ p->error = grpc_error_set_str(p->error, GRPC_ERROR_STR_RAW_BYTES,
+ grpc_slice_from_copied_string(msg));
gpr_free(msg);
p->error =
grpc_error_set_int(p->error, GRPC_ERROR_INT_OFFSET, cur - beg);
@@ -265,7 +267,8 @@ static grpc_error *parse_inner(grpc_exec_ctx *exec_ctx,
}
}
- GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
+ GPR_UNREACHABLE_CODE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"));
}
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.c b/src/core/ext/transport/chttp2/transport/frame_goaway.c
index d99d486c1b..001271dd22 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.c
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.c
@@ -54,7 +54,7 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
if (length < 8) {
char *msg;
gpr_asprintf(&msg, "goaway frame too short (%d bytes)", length);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -156,7 +156,8 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
}
return GRPC_ERROR_NONE;
}
- GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
+ GPR_UNREACHABLE_CODE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"));
}
void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 9b4b1a7b84..de8462a17e 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -71,7 +71,7 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser,
if (flags & 0xfe || length != 8) {
char *msg;
gpr_asprintf(&msg, "invalid ping: length=%d, flags=%02x", length, flags);
- grpc_error *error = GRPC_ERROR_CREATE(msg);
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
index cb017e75f0..225f15c77c 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.c
@@ -76,7 +76,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame(
char *msg;
gpr_asprintf(&msg, "invalid rst_stream: length=%d, flags=%02x", length,
flags);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -112,8 +112,9 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
char *message;
gpr_asprintf(&message, "Received RST_STREAM with error code %d", reason);
error = grpc_error_set_int(
- grpc_error_set_str(GRPC_ERROR_CREATE("RST_STREAM"),
- GRPC_ERROR_STR_GRPC_MESSAGE, message),
+ grpc_error_set_str(GRPC_ERROR_CREATE_FROM_STATIC_STRING("RST_STREAM"),
+ GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_slice_from_copied_string(message)),
GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
gpr_free(message);
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.c b/src/core/ext/transport/chttp2/transport/frame_settings.c
index 82290e34cd..16881c0707 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.c
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.c
@@ -131,13 +131,16 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame(
if (flags == GRPC_CHTTP2_FLAG_ACK) {
parser->is_ack = 1;
if (length != 0) {
- return GRPC_ERROR_CREATE("non-empty settings ack frame received");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "non-empty settings ack frame received");
}
return GRPC_ERROR_NONE;
} else if (flags != 0) {
- return GRPC_ERROR_CREATE("invalid flags on settings frame");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "invalid flags on settings frame");
} else if (length % 6 != 0) {
- return GRPC_ERROR_CREATE("settings frames must be a multiple of six bytes");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "settings frames must be a multiple of six bytes");
} else {
return GRPC_ERROR_NONE;
}
@@ -229,7 +232,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
&t->qbuf);
gpr_asprintf(&msg, "invalid value %u passed for %s",
parser->value, sp->name);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.c b/src/core/ext/transport/chttp2/transport/frame_window_update.c
index 8fa0bb471a..b76b6f6f47 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.c
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.c
@@ -70,7 +70,7 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame(
char *msg;
gpr_asprintf(&msg, "invalid window update: length=%d, flags=%02x", length,
flags);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -102,7 +102,7 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
if (received_update == 0 || (received_update & 0x80000000u)) {
char *msg;
gpr_asprintf(&msg, "invalid window update bytes: %d", p->amount);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 40f5120308..5099d736bf 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -693,7 +693,7 @@ static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p,
}
if (p->on_header == NULL) {
GRPC_MDELEM_UNREF(exec_ctx, md);
- return GRPC_ERROR_CREATE("on_header callback not set");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("on_header callback not set");
}
p->on_header(exec_ctx, p->on_header_user_data, md);
return GRPC_ERROR_NONE;
@@ -810,7 +810,8 @@ static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx,
grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
if (GRPC_MDISNULL(md)) {
return grpc_error_set_int(
- grpc_error_set_int(GRPC_ERROR_CREATE("Invalid HPACK index received"),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Invalid HPACK index received"),
GRPC_ERROR_INT_INDEX, (intptr_t)p->index),
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
@@ -1074,7 +1075,7 @@ static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx,
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"More than two max table size changes in a single frame"));
}
p->dynamic_table_update_allowed--;
@@ -1092,7 +1093,7 @@ static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx,
if (p->dynamic_table_update_allowed == 0) {
return parse_error(
exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"More than two max table size changes in a single frame"));
}
p->dynamic_table_update_allowed--;
@@ -1126,7 +1127,7 @@ static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(cur != end);
char *msg;
gpr_asprintf(&msg, "Illegal hpack op code %d", *cur);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1246,7 +1247,7 @@ error:
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x on byte 5",
*p->parsing.value, *cur);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1275,7 +1276,7 @@ static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx,
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x sometime after byte 5",
*p->parsing.value, *cur);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1333,8 +1334,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte0;
p->base64_buffer = bits << 18;
@@ -1348,8 +1350,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte1;
p->base64_buffer |= bits << 12;
@@ -1363,8 +1366,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte2;
p->base64_buffer |= bits << 6;
@@ -1378,8 +1382,9 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
bits = inverse_base64[*cur];
++cur;
if (bits == 255)
- return parse_error(exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("Illegal base64 character"));
+ return parse_error(
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
else if (bits == 64)
goto b64_byte3;
p->base64_buffer |= bits;
@@ -1391,7 +1396,8 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx,
goto b64_byte0;
}
GPR_UNREACHABLE_CODE(return parse_error(
- exec_ctx, p, cur, end, GRPC_ERROR_CREATE("Should never reach here")));
+ exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here")));
}
static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
@@ -1406,16 +1412,16 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
case B64_BYTE0:
break;
case B64_BYTE1:
- return parse_error(
- exec_ctx, p, cur, end,
- GRPC_ERROR_CREATE("illegal base64 encoding")); /* illegal encoding */
+ return parse_error(exec_ctx, p, cur, end,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "illegal base64 encoding")); /* illegal encoding */
case B64_BYTE2:
bits = p->base64_buffer;
if (bits & 0xffff) {
char *msg;
gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%04x",
bits & 0xffff);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1428,7 +1434,7 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%02x",
bits & 0xff);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return parse_error(exec_ctx, p, cur, end, err);
}
@@ -1550,7 +1556,8 @@ static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p,
grpc_mdelem elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
if (GRPC_MDISNULL(elem)) {
return grpc_error_set_int(
- grpc_error_set_int(GRPC_ERROR_CREATE("Invalid HPACK index received"),
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Invalid HPACK index received"),
GRPC_ERROR_INT_INDEX, (intptr_t)p->index),
GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
}
@@ -1620,13 +1627,18 @@ void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx,
grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_hpack_parser *p,
grpc_slice slice) {
- /* TODO(ctiller): limit the distance of end from beg, and perform multiple
- steps in the event of a large chunk of data to limit
- stack space usage when no tail call optimization is
- available */
+/* max number of bytes to parse at a time... limits call stack depth on
+ * compilers without TCO */
+#define MAX_PARSE_LENGTH 1024
p->current_slice_refcount = slice.refcount;
- grpc_error *error = p->state(exec_ctx, p, GRPC_SLICE_START_PTR(slice),
- GRPC_SLICE_END_PTR(slice));
+ uint8_t *start = GRPC_SLICE_START_PTR(slice);
+ uint8_t *end = GRPC_SLICE_END_PTR(slice);
+ grpc_error *error = GRPC_ERROR_NONE;
+ while (start != end && error == GRPC_ERROR_NONE) {
+ uint8_t *target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
+ error = p->state(exec_ctx, p, start, target);
+ start = target;
+ }
p->current_slice_refcount = NULL;
return error;
}
@@ -1670,7 +1682,7 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
if (is_last) {
if (parser->is_boundary && parser->state != parse_begin) {
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
- return GRPC_ERROR_CREATE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"end of header frame not aligned with a hpack record boundary");
}
/* need to check for null stream: this can occur if we receive an invalid
@@ -1678,7 +1690,8 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
if (s != NULL) {
if (parser->is_boundary) {
if (s->header_frames_received == GPR_ARRAY_SIZE(s->metadata_buffer)) {
- return GRPC_ERROR_CREATE("Too many trailer frames");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Too many trailer frames");
}
s->published_metadata[s->header_frames_received] =
GRPC_METADATA_PUBLISHED_FROM_WIRE;
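grpc_chttp2_hpack_parser_parse now feeds the state machine at most MAX_PARSE_LENGTH (1024) bytes per call, so the mutually recursive parse_* functions cannot grow the call stack without bound on compilers that do not perform tail-call optimization. The pattern, abstracted away from the parser itself (parse_in_chunks and chunk_parser are hypothetical stand-ins for p->state):

#include <stddef.h>
#include <stdint.h>

#define MAX_PARSE_LENGTH 1024

typedef int (*chunk_parser)(void *ctx, const uint8_t *beg, const uint8_t *end);

/* Walk [start, end) in bounded slices; each call to `parse` sees at most
   MAX_PARSE_LENGTH bytes, so its recursion depth is bounded regardless of
   the total input size. */
static int parse_in_chunks(chunk_parser parse, void *ctx, const uint8_t *start,
                           const uint8_t *end) {
  int ok = 1;
  while (start != end && ok) {
    size_t remaining = (size_t)(end - start);
    const uint8_t *target =
        start + (remaining < MAX_PARSE_LENGTH ? remaining : MAX_PARSE_LENGTH);
    ok = parse(ctx, start, target);
    start = target;
  }
  return ok;
}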
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.c b/src/core/ext/transport/chttp2/transport/hpack_table.c
index 62dd1b8cab..9dd41fdbe1 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.c
@@ -280,7 +280,7 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx,
gpr_asprintf(&msg,
"Attempt to make hpack table %d bytes when max is %d bytes",
bytes, tbl->max_bytes);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -317,7 +317,7 @@ grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx,
"HPACK max table size reduced to %d but not reflected by hpack "
"stream (still at %d)",
tbl->max_bytes, tbl->current_table_bytes);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
index c91b019aa0..da0a34d32a 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.c
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.c
@@ -41,69 +41,48 @@
#include <grpc/support/log.h>
void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer *buffer) {
- buffer->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) {
+ buffer->arena = arena;
+ grpc_metadata_batch_init(&buffer->batch);
+ buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer) {
- size_t i;
- if (!buffer->published) {
- for (i = 0; i < buffer->count; i++) {
- GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
- }
- }
- gpr_free(buffer->elems);
+ grpc_metadata_batch_destroy(exec_ctx, &buffer->batch);
}
-void grpc_chttp2_incoming_metadata_buffer_add(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem) {
- GPR_ASSERT(!buffer->published);
- if (buffer->capacity == buffer->count) {
- buffer->capacity = GPR_MAX(8, 2 * buffer->capacity);
- buffer->elems =
- gpr_realloc(buffer->elems, sizeof(*buffer->elems) * buffer->capacity);
- }
- buffer->elems[buffer->count++].md = elem;
+grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
+ return grpc_metadata_batch_add_tail(
+ exec_ctx, &buffer->batch,
+ gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)), elem);
}
-void grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_mdelem elem) {
- for (size_t i = 0; i < buffer->count; i++) {
- if (grpc_slice_eq(GRPC_MDKEY(buffer->elems[i].md), GRPC_MDKEY(elem))) {
- GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
- buffer->elems[i].md = elem;
- return;
+ for (grpc_linked_mdelem *l = buffer->batch.list.head; l != NULL;
+ l = l->next) {
+ if (grpc_slice_eq(GRPC_MDKEY(l->md), GRPC_MDKEY(elem))) {
+ GRPC_MDELEM_UNREF(exec_ctx, l->md);
+ l->md = elem;
+ return GRPC_ERROR_NONE;
}
}
- grpc_chttp2_incoming_metadata_buffer_add(buffer, elem);
+ return grpc_chttp2_incoming_metadata_buffer_add(exec_ctx, buffer, elem);
}
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) {
- GPR_ASSERT(!buffer->published);
- buffer->deadline = deadline;
+ buffer->batch.deadline = deadline;
}
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_metadata_batch *batch) {
- GPR_ASSERT(!buffer->published);
- buffer->published = 1;
- if (buffer->count > 0) {
- size_t i;
- for (i = 0; i < buffer->count; i++) {
- /* TODO(ctiller): do something better here */
- if (!GRPC_LOG_IF_ERROR("grpc_chttp2_incoming_metadata_buffer_publish",
- grpc_metadata_batch_link_tail(
- exec_ctx, batch, &buffer->elems[i]))) {
- GRPC_MDELEM_UNREF(exec_ctx, buffer->elems[i].md);
- }
- }
- } else {
- batch->list.head = batch->list.tail = NULL;
- }
- batch->deadline = buffer->deadline;
+ *batch = buffer->batch;
+ grpc_metadata_batch_init(&buffer->batch);
}
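
With the buffer now wrapping an arena-backed grpc_metadata_batch, callers initialize it with the stream's arena, check the grpc_error returned by add, and publish by moving the whole batch. A rough sketch of that sequence in a hypothetical helper; the error handling mirrors what parsing.c does further down:

#include "src/core/ext/transport/chttp2/transport/incoming_metadata.h"

/* Hypothetical helper: collect one element and hand the whole batch over. */
static grpc_error *collect_one(grpc_exec_ctx *exec_ctx, gpr_arena *arena,
                               grpc_mdelem elem,
                               grpc_metadata_batch *recv_batch) {
  grpc_chttp2_incoming_metadata_buffer buf;
  grpc_chttp2_incoming_metadata_buffer_init(&buf, arena);
  grpc_error *err =
      grpc_chttp2_incoming_metadata_buffer_add(exec_ctx, &buf, elem);
  if (err != GRPC_ERROR_NONE) {
    GRPC_MDELEM_UNREF(exec_ctx, elem); /* not linked: drop the caller's ref */
    grpc_chttp2_incoming_metadata_buffer_destroy(exec_ctx, &buf);
    return err;
  }
  grpc_chttp2_incoming_metadata_buffer_publish(exec_ctx, &buf, recv_batch);
  grpc_chttp2_incoming_metadata_buffer_destroy(exec_ctx, &buf);
  return GRPC_ERROR_NONE;
}
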
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index 1eac6fc150..288c917e65 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -37,28 +37,26 @@
#include "src/core/lib/transport/transport.h"
typedef struct {
- grpc_linked_mdelem *elems;
- size_t count;
- size_t capacity;
- gpr_timespec deadline;
- int published;
+ gpr_arena *arena;
+ grpc_metadata_batch batch;
size_t size; // total size of metadata
} grpc_chttp2_incoming_metadata_buffer;
/** assumes everything initially zeroed */
void grpc_chttp2_incoming_metadata_buffer_init(
- grpc_chttp2_incoming_metadata_buffer *buffer);
+ grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena);
void grpc_chttp2_incoming_metadata_buffer_destroy(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer);
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
grpc_metadata_batch *batch);
-void grpc_chttp2_incoming_metadata_buffer_add(
- grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem elem);
-void grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
- grpc_mdelem elem);
+ grpc_mdelem elem) GRPC_MUST_USE_RESULT;
+grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add(
+ grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer,
+ grpc_mdelem elem) GRPC_MUST_USE_RESULT;
void grpc_chttp2_incoming_metadata_buffer_set_deadline(
grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline);
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index d26812ad6b..3c56c21599 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -425,7 +425,7 @@ struct grpc_chttp2_stream {
grpc_stream_refcount *refcount;
grpc_closure destroy_stream;
- void *destroy_stream_arg;
+ grpc_closure *destroy_stream_arg;
grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
uint8_t included[STREAM_LIST_COUNT];
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index e7f2597f89..7e457ced27 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -116,7 +116,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
(int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state],
*cur, (int)*cur, t->deframe_state);
- err = GRPC_ERROR_CREATE(msg);
+ err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -219,7 +219,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
t->incoming_frame_size,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]);
- err = GRPC_ERROR_CREATE(msg);
+ err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -278,7 +278,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
gpr_asprintf(
&msg, "Expected SETTINGS frame as the first frame, got frame type %d",
t->incoming_frame_type);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -288,7 +288,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "Expected CONTINUATION frame, got frame type %02x",
t->incoming_frame_type);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -299,7 +299,7 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
"Expected CONTINUATION frame for grpc_chttp2_stream %08x, got "
"grpc_chttp2_stream %08x",
t->expect_continuation_stream_id, t->incoming_stream_id);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -311,7 +311,8 @@ static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_FRAME_HEADER:
return init_header_frame_parser(exec_ctx, t, 0);
case GRPC_CHTTP2_FRAME_CONTINUATION:
- return GRPC_ERROR_CREATE("Unexpected CONTINUATION frame");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Unexpected CONTINUATION frame");
case GRPC_CHTTP2_FRAME_RST_STREAM:
return init_rst_stream_parser(exec_ctx, t);
case GRPC_CHTTP2_FRAME_SETTINGS:
@@ -371,7 +372,7 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "frame of size %d overflows incoming window of %" PRId64,
t->incoming_frame_size, t->incoming_window);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -409,7 +410,7 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
s->incoming_window_delta +
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
- grpc_error *err = GRPC_ERROR_CREATE(msg);
+ grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return err;
}
@@ -542,13 +543,21 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
grpc_error_set_int(
- GRPC_ERROR_CREATE("received initial metadata size exceeds limit"),
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "received initial metadata size exceeds limit"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
s->seen_error = true;
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md);
+ grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add(
+ exec_ctx, &s->metadata_buffer[0], md);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
+ GRPC_MDELEM_UNREF(exec_ctx, md);
+ }
}
}
@@ -591,14 +600,22 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
new_size, metadata_size_limit);
grpc_chttp2_cancel_stream(
exec_ctx, t, s,
- grpc_error_set_int(
- GRPC_ERROR_CREATE("received trailing metadata size exceeds limit"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "received trailing metadata size exceeds limit"),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_RESOURCE_EXHAUSTED));
grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
s->seen_error = true;
GRPC_MDELEM_UNREF(exec_ctx, md);
} else {
- grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[1], md);
+ grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add(
+ exec_ctx, &s->metadata_buffer[1], md);
+ if (error != GRPC_ERROR_NONE) {
+ grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
+ grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+ s->seen_error = true;
+ GRPC_MDELEM_UNREF(exec_ctx, md);
+ }
}
GPR_TIMER_END("on_trailing_header", 0);
@@ -757,7 +774,8 @@ static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx,
static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t) {
if (t->incoming_stream_id != 0) {
- return GRPC_ERROR_CREATE("Settings frame received for grpc_chttp2_stream");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Settings frame received for grpc_chttp2_stream");
}
grpc_error *err = grpc_chttp2_settings_parser_begin_frame(
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index fabfaf8a27..952690eba7 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -128,6 +128,7 @@ struct read_state {
int received_bytes;
int remaining_bytes;
int length_field;
+ bool compressed;
char grpc_header_bytes[GRPC_HEADER_SIZE_IN_BYTES];
char *payload_field;
bool read_stream_closed;
@@ -184,6 +185,7 @@ struct op_storage {
};
struct stream_obj {
+ gpr_arena *arena;
struct op_and_state *oas;
grpc_transport_stream_op *curr_op;
grpc_cronet_transport *curr_ct;
@@ -272,7 +274,7 @@ static void maybe_flush_read(stream_obj *s) {
/* Whenever the evaluation of either of the two conditions changes, we check
 * whether we should enter the flush read state. */
if (s->state.pending_recv_trailing_metadata && s->state.fail_state) {
- if (!s->state.flush_read) {
+ if (!s->state.flush_read && !s->state.rs.read_stream_closed) {
CRONET_LOG(GPR_DEBUG, "%p: Flush read", s);
s->state.flush_read = true;
null_and_maybe_free_read_buffer(s);
@@ -288,7 +290,7 @@ static void maybe_flush_read(stream_obj *s) {
}
static grpc_error *make_error_with_desc(int error_code, const char *desc) {
- grpc_error *error = GRPC_ERROR_CREATE(desc);
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error_code);
return error;
}
@@ -483,18 +485,31 @@ static void on_response_headers_received(
CRONET_LOG(GPR_DEBUG, "R: on_response_headers_received(%p, %p, %s)", stream,
headers, negotiated_protocol);
stream_obj *s = (stream_obj *)stream->annotation;
+
+ /* Identify if this is a header or a trailer (in a trailer-only response case)
+ */
+ for (size_t i = 0; i < headers->count; i++) {
+ if (0 == strcmp("grpc-status", headers->headers[i].key)) {
+ on_response_trailers_received(stream, headers);
+ return;
+ }
+ }
+
gpr_mu_lock(&s->mu);
memset(&s->state.rs.initial_metadata, 0,
sizeof(s->state.rs.initial_metadata));
- grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata,
+ s->arena);
for (size_t i = 0; i < headers->count; i++) {
- grpc_chttp2_incoming_metadata_buffer_add(
- &s->state.rs.initial_metadata,
- grpc_mdelem_from_slices(
- &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
- headers->headers[i].key)),
- grpc_slice_intern(
- grpc_slice_from_static_string(headers->headers[i].value))));
+ GRPC_LOG_IF_ERROR(
+ "on_response_headers_received",
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &exec_ctx, &s->state.rs.initial_metadata,
+ grpc_mdelem_from_slices(
+ &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+ headers->headers[i].key)),
+ grpc_slice_intern(grpc_slice_from_static_string(
+ headers->headers[i].value)))));
}
s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true;
if (!(s->state.state_op_done[OP_CANCEL_ERROR] ||
@@ -503,6 +518,7 @@ static void on_response_headers_received(
is closed */
GPR_ASSERT(s->state.rs.length_field_received == false);
s->state.rs.read_buffer = s->state.rs.grpc_header_bytes;
+ s->state.rs.compressed = false;
s->state.rs.received_bytes = 0;
s->state.rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
@@ -586,17 +602,20 @@ static void on_response_trailers_received(
memset(&s->state.rs.trailing_metadata, 0,
sizeof(s->state.rs.trailing_metadata));
s->state.rs.trailing_metadata_valid = false;
- grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata);
+ grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata,
+ s->arena);
for (size_t i = 0; i < trailers->count; i++) {
CRONET_LOG(GPR_DEBUG, "trailer key=%s, value=%s", trailers->headers[i].key,
trailers->headers[i].value);
- grpc_chttp2_incoming_metadata_buffer_add(
- &s->state.rs.trailing_metadata,
- grpc_mdelem_from_slices(
- &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
- trailers->headers[i].key)),
- grpc_slice_intern(
- grpc_slice_from_static_string(trailers->headers[i].value))));
+ GRPC_LOG_IF_ERROR(
+ "on_response_trailers_received",
+ grpc_chttp2_incoming_metadata_buffer_add(
+ &exec_ctx, &s->state.rs.trailing_metadata,
+ grpc_mdelem_from_slices(
+ &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+ trailers->headers[i].key)),
+ grpc_slice_intern(grpc_slice_from_static_string(
+ trailers->headers[i].value)))));
s->state.rs.trailing_metadata_valid = true;
if (0 == strcmp(trailers->headers[i].key, "grpc-status") &&
0 != strcmp(trailers->headers[i].value, "0")) {
@@ -634,7 +653,7 @@ static void on_response_trailers_received(
*/
static void create_grpc_frame(grpc_slice_buffer *write_slice_buffer,
char **pp_write_buffer,
- size_t *p_write_buffer_size) {
+ size_t *p_write_buffer_size, uint32_t flags) {
grpc_slice slice = grpc_slice_buffer_take_first(write_slice_buffer);
size_t length = GRPC_SLICE_LENGTH(slice);
*p_write_buffer_size = length + GRPC_HEADER_SIZE_IN_BYTES;
@@ -643,7 +662,9 @@ static void create_grpc_frame(grpc_slice_buffer *write_slice_buffer,
*pp_write_buffer = write_buffer;
uint8_t *p = (uint8_t *)write_buffer;
/* Append 5 byte header */
- *p++ = 0;
+ /* Compressed flag */
+ *p++ = (flags & GRPC_WRITE_INTERNAL_COMPRESS) ? 1 : 0;
+ /* Message length */
*p++ = (uint8_t)(length >> 24);
*p++ = (uint8_t)(length >> 16);
*p++ = (uint8_t)(length >> 8);
@@ -721,14 +742,16 @@ static void convert_metadata_to_cronet_headers(
*p_num_headers = (size_t)num_headers;
}
-static int parse_grpc_header(const uint8_t *data) {
+static void parse_grpc_header(const uint8_t *data, int *length,
+ bool *compressed) {
+ const uint8_t c = *data;
const uint8_t *p = data + 1;
- int length = 0;
- length |= ((uint8_t)*p++) << 24;
- length |= ((uint8_t)*p++) << 16;
- length |= ((uint8_t)*p++) << 8;
- length |= ((uint8_t)*p++);
- return length;
+ *compressed = ((c & 0x01) == 0x01);
+ *length = 0;
+ *length |= ((uint8_t)*p++) << 24;
+ *length |= ((uint8_t)*p++) << 16;
+ *length |= ((uint8_t)*p++) << 8;
+ *length |= ((uint8_t)*p++);
}
static bool header_has_authority(grpc_linked_mdelem *head) {
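
create_grpc_frame and parse_grpc_header above now carry the compressed bit of the 5-byte gRPC message prefix: one flag byte (bit 0 = compressed) followed by a 4-byte big-endian payload length. A standalone sketch of that framing, with illustrative helper names:

#include <stdbool.h>
#include <stdint.h>

static void write_grpc_frame_header(uint8_t hdr[5], bool compressed,
                                    uint32_t length) {
  hdr[0] = compressed ? 1 : 0; /* flag byte */
  hdr[1] = (uint8_t)(length >> 24);
  hdr[2] = (uint8_t)(length >> 16);
  hdr[3] = (uint8_t)(length >> 8);
  hdr[4] = (uint8_t)(length);
}

static void read_grpc_frame_header(const uint8_t hdr[5], bool *compressed,
                                   uint32_t *length) {
  *compressed = (hdr[0] & 0x01) == 0x01;
  *length = ((uint32_t)hdr[1] << 24) | ((uint32_t)hdr[2] << 16) |
            ((uint32_t)hdr[3] << 8) | (uint32_t)hdr[4];
}
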
@@ -781,7 +804,8 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op,
else if (!stream_state->state_callback_received[OP_SEND_INITIAL_METADATA])
result = false;
/* we haven't received headers yet. */
- else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA])
+ else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA] &&
+ !stream_state->state_op_done[OP_RECV_TRAILING_METADATA])
result = false;
} else if (op_id == OP_SEND_MESSAGE) {
/* already executed (note we're checking op specific state, not stream
@@ -794,7 +818,8 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op,
/* already executed */
if (op_state->state_op_done[OP_RECV_MESSAGE]) result = false;
/* we haven't received headers yet. */
- else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA])
+ else if (!stream_state->state_callback_received[OP_RECV_INITIAL_METADATA] &&
+ !stream_state->state_op_done[OP_RECV_TRAILING_METADATA])
result = false;
} else if (op_id == OP_RECV_TRAILING_METADATA) {
/* already executed */
@@ -948,12 +973,6 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&write_slice_buffer);
grpc_byte_stream_next(NULL, stream_op->send_message, &slice,
stream_op->send_message->length, NULL);
- /* Check that compression flag is OFF. We don't support compression yet.
- */
- if (stream_op->send_message->flags != 0) {
- gpr_log(GPR_ERROR, "Compression is not supported");
- GPR_ASSERT(stream_op->send_message->flags == 0);
- }
grpc_slice_buffer_add(&write_slice_buffer, slice);
if (write_slice_buffer.count != 1) {
/* Empty request not handled yet */
@@ -963,7 +982,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
if (write_slice_buffer.count > 0) {
size_t write_buffer_size;
create_grpc_frame(&write_slice_buffer, &stream_state->ws.write_buffer,
- &write_buffer_size);
+ &write_buffer_size, stream_op->send_message->flags);
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, %p)", s->cbs,
stream_state->ws.write_buffer);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
@@ -1015,6 +1034,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else if (stream_state->state_callback_received[OP_FAILED]) {
grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
GRPC_ERROR_NONE);
+ } else if (stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
+ grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+ GRPC_ERROR_NONE);
} else {
grpc_chttp2_incoming_metadata_buffer_publish(
exec_ctx, &oas->s->state.rs.initial_metadata,
@@ -1059,8 +1081,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.remaining_bytes == 0) {
/* Start a read operation for data */
stream_state->rs.length_field_received = true;
- stream_state->rs.length_field = stream_state->rs.remaining_bytes =
- parse_grpc_header((const uint8_t *)stream_state->rs.read_buffer);
+ parse_grpc_header((const uint8_t *)stream_state->rs.read_buffer,
+ &stream_state->rs.length_field,
+ &stream_state->rs.compressed);
CRONET_LOG(GPR_DEBUG, "length field = %d",
stream_state->rs.length_field);
if (stream_state->rs.length_field > 0) {
@@ -1082,6 +1105,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer);
grpc_slice_buffer_stream_init(&stream_state->rs.sbs,
&stream_state->rs.read_slice_buffer, 0);
+ if (stream_state->rs.compressed) {
+ stream_state->rs.sbs.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
+ }
*((grpc_byte_buffer **)stream_op->recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
@@ -1093,6 +1119,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.received_bytes = 0;
+ stream_state->rs.compressed = false;
stream_state->rs.length_field_received = false;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
@@ -1107,6 +1134,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.received_bytes = 0;
+ stream_state->rs.compressed = false;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
true; /* Indicates that at least one read request has been made */
@@ -1130,6 +1158,9 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
read_data_slice);
grpc_slice_buffer_stream_init(&stream_state->rs.sbs,
&stream_state->rs.read_slice_buffer, 0);
+ if (stream_state->rs.compressed) {
+ stream_state->rs.sbs.base.flags = GRPC_WRITE_INTERNAL_COMPRESS;
+ }
*((grpc_byte_buffer **)stream_op->recv_message) =
(grpc_byte_buffer *)&stream_state->rs.sbs;
grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
@@ -1139,6 +1170,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
/* Do an extra read to trigger on_succeeded() callback in case connection
is closed */
stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes;
+ stream_state->rs.compressed = false;
stream_state->rs.received_bytes = 0;
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.length_field_received = false;
@@ -1215,7 +1247,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_stream *gs, grpc_stream_refcount *refcount,
- const void *server_data) {
+ const void *server_data, gpr_arena *arena) {
stream_obj *s = (stream_obj *)gs;
memset(&s->storage, 0, sizeof(s->storage));
s->storage.head = NULL;
@@ -1237,6 +1269,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->curr_gs = gs;
s->curr_ct = (grpc_cronet_transport *)gt;
+ s->arena = arena;
gpr_mu_init(&s->mu);
return 0;
@@ -1273,10 +1306,12 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
}
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
- grpc_stream *gs, void *and_free_memory) {
+ grpc_stream *gs,
+ grpc_closure *then_schedule_closure) {
stream_obj *s = (stream_obj *)gs;
null_and_maybe_free_read_buffer(s);
GRPC_ERROR_UNREF(s->state.cancel_error);
+ grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {}
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 3fb2a60ac7..6d53b0576e 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -166,41 +166,32 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
}
}
-grpc_error *grpc_call_stack_init(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
- int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
- grpc_call_context_element *context, const void *transport_server_data,
- grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
- grpc_call_stack *call_stack) {
+grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
+ int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg,
+ const grpc_call_element_args *elem_args) {
grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
size_t count = channel_stack->count;
grpc_call_element *call_elems;
char *user_data;
size_t i;
- call_stack->count = count;
- GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
+ elem_args->call_stack->count = count;
+ GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy,
destroy_arg, "CALL_STACK");
- call_elems = CALL_ELEMS_FROM_STACK(call_stack);
+ call_elems = CALL_ELEMS_FROM_STACK(elem_args->call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
grpc_error *first_error = GRPC_ERROR_NONE;
- const grpc_call_element_args args = {
- .start_time = start_time,
- .call_stack = call_stack,
- .server_transport_data = transport_server_data,
- .context = context,
- .path = path,
- .deadline = deadline,
- };
for (i = 0; i < count; i++) {
call_elems[i].filter = channel_elems[i].filter;
call_elems[i].channel_data = channel_elems[i].channel_data;
call_elems[i].call_data = user_data;
- grpc_error *error =
- call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
+ grpc_error *error = call_elems[i].filter->init_call_elem(
+ exec_ctx, &call_elems[i], elem_args);
if (error != GRPC_ERROR_NONE) {
if (first_error == GRPC_ERROR_NONE) {
first_error = error;
@@ -241,15 +232,16 @@ void grpc_call_stack_ignore_set_pollset_or_pollset_set(
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
const grpc_call_final_info *final_info,
- void *and_free_memory) {
+ grpc_closure *then_schedule_closure) {
grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack);
size_t count = stack->count;
size_t i;
/* destroy per-filter data */
for (i = 0; i < count; i++) {
- elems[i].filter->destroy_call_elem(exec_ctx, &elems[i], final_info,
- i == count - 1 ? and_free_memory : NULL);
+ elems[i].filter->destroy_call_elem(
+ exec_ctx, &elems[i], final_info,
+ i == count - 1 ? then_schedule_closure : NULL);
}
}
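
destroy_call_elem and the transport's destroy_stream now receive a grpc_closure *then_schedule_closure in place of and_free_memory: instead of freeing call memory directly, the bottom of the stack schedules the closure once its cleanup is done (as the cronet transport does above). A sketch of a hypothetical bottom-of-stack filter following that convention:

#include "src/core/lib/channel/channel_stack.h"

/* Hypothetical filter destructor: only the bottom element receives a
   non-NULL closure, which it must schedule when destruction is complete. */
static void example_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_final_info *final_info,
                                      grpc_closure *then_schedule_closure) {
  /* release per-call resources owned by this element here */
  if (then_schedule_closure != NULL) {
    grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
  }
}
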
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 6d3340bcbf..80e3603e8d 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -56,6 +56,7 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/polling_entity.h"
+#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/transport.h"
#ifdef __cplusplus
@@ -84,6 +85,7 @@ typedef struct {
grpc_slice path;
gpr_timespec start_time;
gpr_timespec deadline;
+ gpr_arena *arena;
} grpc_call_element_args;
typedef struct {
@@ -139,12 +141,12 @@ typedef struct {
/* Destroy per call data.
The filter does not need to do any chaining.
The bottom filter of a stack will be passed a non-NULL pointer to
- \a and_free_memory that should be passed to gpr_free when destruction
- is complete. \a final_info contains data about the completed call, mainly
- for reporting purposes. */
+ \a then_schedule_closure that should be passed to grpc_closure_sched when
+ destruction is complete. \a final_info contains data about the completed
+ call, mainly for reporting purposes. */
void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *and_free_memory);
+ grpc_closure *then_schedule_closure);
/* sizeof(per channel data) */
size_t sizeof_channel_data;
@@ -236,12 +238,11 @@ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
/* Initialize a call stack given a channel stack. transport_server_data is
expected to be NULL on a client, or an opaque transport owned pointer on the
server. */
-grpc_error *grpc_call_stack_init(
- grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
- int initial_refs, grpc_iomgr_cb_func destroy, void *destroy_arg,
- grpc_call_context_element *context, const void *transport_server_data,
- grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
- grpc_call_stack *call_stack);
+grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
+ grpc_channel_stack *channel_stack,
+ int initial_refs, grpc_iomgr_cb_func destroy,
+ void *destroy_arg,
+ const grpc_call_element_args *elem_args);
/* Set a pollset or a pollset_set for a call stack: must occur before the first
* op is started */
void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -271,7 +272,7 @@ void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
/* Destroy a call stack */
void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack,
const grpc_call_final_info *final_info,
- void *and_free_memory);
+ grpc_closure *then_schedule_closure);
/* Ignore set pollset{_set} - used by filters if they don't care about pollsets
* at all. Does nothing. */
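
grpc_call_stack_init now takes a caller-built grpc_call_element_args (which gains the arena field) instead of the individual parameters. A rough sketch of the new call-site shape, wrapped in a hypothetical helper so the argument sources are explicit:

#include "src/core/lib/channel/channel_stack.h"

/* Hypothetical wrapper showing the new call-site shape. */
static grpc_error *example_call_stack_init(
    grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack,
    grpc_call_stack *call_stack, grpc_iomgr_cb_func destroy, void *destroy_arg,
    grpc_call_context_element *context, const void *transport_server_data,
    grpc_slice path, gpr_timespec start_time, gpr_timespec deadline,
    gpr_arena *arena) {
  const grpc_call_element_args args = {.call_stack = call_stack,
                                       .server_transport_data =
                                           transport_server_data,
                                       .context = context,
                                       .path = path,
                                       .start_time = start_time,
                                       .deadline = deadline,
                                       .arena = arena};
  return grpc_call_stack_init(exec_ctx, channel_stack, 1 /* initial_refs */,
                              destroy, destroy_arg, &args);
}
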
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index aa41014a21..02dc479f3a 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -292,7 +292,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c
index 29796f7ca7..75c68a5534 100644
--- a/src/core/lib/channel/connected_channel.c
+++ b/src/core/lib/channel/connected_channel.c
@@ -88,9 +88,10 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
channel_data *chand = elem->channel_data;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
- &args->call_stack->refcount, args->server_transport_data);
+ &args->call_stack->refcount, args->server_transport_data, args->arena);
return r == 0 ? GRPC_ERROR_NONE
- : GRPC_ERROR_CREATE("transport stream initialization failed");
+ : GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "transport stream initialization failed");
}
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
@@ -105,12 +106,12 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *and_free_memory) {
+ grpc_closure *then_schedule_closure) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
- and_free_memory);
+ then_schedule_closure);
}
/* Constructor for channel_data */
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index 5a12d62f1d..ca701ed457 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -55,9 +55,9 @@ static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
if (error != GRPC_ERROR_CANCELLED) {
grpc_call_element_signal_error(
exec_ctx, elem,
- grpc_error_set_int(GRPC_ERROR_CREATE("Deadline Exceeded"),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_DEADLINE_EXCEEDED));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED));
}
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
}
@@ -256,7 +256,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
// Destructor for call_data. Used for both client and server filters.
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
const grpc_call_final_info* final_info,
- void* and_free_memory) {
+ grpc_closure* ignored) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 1b4240bb10..5861fa6f54 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -236,8 +236,9 @@ static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_handshake_manager* mgr = arg;
if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
- grpc_handshake_manager_shutdown(exec_ctx, mgr,
- GRPC_ERROR_CREATE("Handshake timed out"));
+ grpc_handshake_manager_shutdown(
+ exec_ctx, mgr,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake timed out"));
}
grpc_handshake_manager_unref(exec_ctx, mgr);
}
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index c031533dd8..967904df1e 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -36,6 +36,7 @@
#include <grpc/support/string_util.h>
#include <string.h>
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/security/util/b64.h"
#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -56,7 +57,6 @@ typedef struct call_data {
grpc_linked_mdelem te_trailers;
grpc_linked_mdelem content_type;
grpc_linked_mdelem user_agent;
- grpc_linked_mdelem payload_bin;
grpc_metadata_batch *recv_initial_metadata;
grpc_metadata_batch *recv_trailing_metadata;
@@ -108,11 +108,11 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_error *e = grpc_error_set_str(
grpc_error_set_int(
grpc_error_set_str(
- GRPC_ERROR_CREATE(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Received http2 :status header with non-200 OK status"),
- GRPC_ERROR_STR_VALUE, val),
+ GRPC_ERROR_STR_VALUE, grpc_slice_from_copied_string(val)),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED),
- GRPC_ERROR_STR_GRPC_MESSAGE, msg);
+ GRPC_ERROR_STR_GRPC_MESSAGE, grpc_slice_from_copied_string(msg));
gpr_free(val);
gpr_free(msg);
return e;
@@ -292,19 +292,58 @@ static grpc_error *hc_mutate_op(grpc_exec_ctx *exec_ctx,
continue_send_message(exec_ctx, elem);
if (calld->send_message_blocked == false) {
- /* when all the send_message data is available, then create a MDELEM and
- append to headers */
- grpc_mdelem payload_bin = grpc_mdelem_from_slices(
- exec_ctx, GRPC_MDSTR_GRPC_PAYLOAD_BIN,
- grpc_slice_from_copied_buffer((const char *)calld->payload_bytes,
- op->send_message->length));
- error =
- grpc_metadata_batch_add_tail(exec_ctx, op->send_initial_metadata,
- &calld->payload_bin, payload_bin);
+ /* when all the send_message data is available, then modify the path
+ * MDELEM by appending base64 encoded query to the path */
+ const int k_url_safe = 1;
+ const int k_multi_line = 0;
+ const unsigned char k_query_separator = '?';
+
+ grpc_slice path_slice =
+ GRPC_MDVALUE(op->send_initial_metadata->idx.named.path->md);
+ /* sum up individual component's lengths and allocate enough memory to
+ * hold combined path+query */
+ size_t estimated_len = GRPC_SLICE_LENGTH(path_slice);
+ estimated_len++; /* for the '?' */
+ estimated_len += grpc_base64_estimate_encoded_size(
+ op->send_message->length, k_url_safe, k_multi_line);
+ estimated_len += 1; /* for the trailing 0 */
+ grpc_slice path_with_query_slice = grpc_slice_malloc(estimated_len);
+
+ /* memcopy individual pieces into this slice */
+ uint8_t *write_ptr =
+ (uint8_t *)GRPC_SLICE_START_PTR(path_with_query_slice);
+ uint8_t *original_path = (uint8_t *)GRPC_SLICE_START_PTR(path_slice);
+ memcpy(write_ptr, original_path, GRPC_SLICE_LENGTH(path_slice));
+ write_ptr += GRPC_SLICE_LENGTH(path_slice);
+
+ *write_ptr = k_query_separator;
+ write_ptr++; /* for the '?' */
+
+ grpc_base64_encode_core((char *)write_ptr, calld->payload_bytes,
+ op->send_message->length, k_url_safe,
+ k_multi_line);
+
+ /* remove trailing unused memory and add trailing 0 to terminate string
+ */
+ char *t = (char *)GRPC_SLICE_START_PTR(path_with_query_slice);
+ /* safe to use strlen since base64_encode will always add '\0' */
+ size_t path_length = strlen(t) + 1;
+ *(t + path_length) = '\0';
+ path_with_query_slice =
+ grpc_slice_sub(path_with_query_slice, 0, path_length);
+
+ /* substitute previous path with the new path+query */
+ grpc_mdelem mdelem_path_and_query = grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_PATH, path_with_query_slice);
+ grpc_metadata_batch *b = op->send_initial_metadata;
+ error = grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
+ mdelem_path_and_query);
if (error != GRPC_ERROR_NONE) return error;
+
calld->on_complete = op->on_complete;
op->on_complete = &calld->hc_on_complete;
op->send_message = NULL;
+ grpc_slice_unref_internal(exec_ctx, path_with_query_slice);
} else {
/* Not all data is available. Fall back to POST. */
gpr_log(GPR_DEBUG,
@@ -412,7 +451,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *calld = elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
}
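
For a cacheable GET, the filter above now encodes the whole request payload as URL-safe base64 and appends it to :path after a '?', instead of attaching a grpc-payload-bin header. A standalone sketch of that layout; base64url_encode is a hypothetical stand-in for the in-tree grpc_base64_encode_core:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in: encodes `len` bytes of `data` as URL-safe base64
   into `out` and NUL-terminates it. */
void base64url_encode(char *out, const unsigned char *data, size_t len);

/* Build "<path>?<base64url(payload)>"; caller frees the returned string. */
static char *path_with_query(const char *path, const unsigned char *payload,
                             size_t payload_len) {
  size_t path_len = strlen(path);
  /* worst-case base64 output: 4 bytes out per 3 bytes in, rounded up */
  size_t b64_len = 4 * ((payload_len + 2) / 3);
  char *out = malloc(path_len + 1 /* '?' */ + b64_len + 1 /* NUL */);
  if (out == NULL) return NULL;
  memcpy(out, path, path_len);
  out[path_len] = '?';
  base64url_encode(out + path_len + 1, payload, payload_len);
  return out;
}
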
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index fb70de8e96..8d3c488ea0 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -37,6 +37,7 @@
#include <grpc/support/log.h>
#include <string.h>
#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/security/util/b64.h"
#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@@ -51,8 +52,8 @@ typedef struct call_data {
grpc_linked_mdelem status;
grpc_linked_mdelem content_type;
- /* did this request come with payload-bin */
- bool seen_payload_bin;
+ /* did this request come with path query containing request payload */
+ bool seen_path_with_query;
/* flag to ensure payload_bin is delivered only once */
bool payload_bin_delivered;
@@ -61,7 +62,7 @@ typedef struct call_data {
bool *recv_cacheable_request;
/** Closure to call when finished with the hs_on_recv hook */
grpc_closure *on_done_recv;
- /** Closure to call when we retrieve read message from the payload-bin header
+ /** Closure to call when we retrieve read message from the path URI
*/
grpc_closure *recv_message_ready;
grpc_closure *on_complete;
@@ -101,7 +102,7 @@ static void add_error(const char *error_name, grpc_error **cumulative,
grpc_error *new) {
if (new == GRPC_ERROR_NONE) return;
if (*cumulative == GRPC_ERROR_NONE) {
- *cumulative = GRPC_ERROR_CREATE(error_name);
+ *cumulative = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_name);
}
*cumulative = grpc_error_add_child(*cumulative, new);
}
@@ -125,27 +126,32 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
*calld->recv_cacheable_request = true;
} else {
add_error(error_name, &error,
- grpc_attach_md_to_error(GRPC_ERROR_CREATE("Bad header"),
- b->idx.named.method->md));
+ grpc_attach_md_to_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"),
+ b->idx.named.method->md));
}
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.method);
} else {
- add_error(error_name, &error,
- grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
- GRPC_ERROR_STR_KEY, ":method"));
+ add_error(
+ error_name, &error,
+ grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing header"),
+ GRPC_ERROR_STR_KEY, grpc_slice_from_static_string(":method")));
}
if (b->idx.named.te != NULL) {
if (!grpc_mdelem_eq(b->idx.named.te->md, GRPC_MDELEM_TE_TRAILERS)) {
add_error(error_name, &error,
- grpc_attach_md_to_error(GRPC_ERROR_CREATE("Bad header"),
- b->idx.named.te->md));
+ grpc_attach_md_to_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"),
+ b->idx.named.te->md));
}
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.te);
} else {
add_error(error_name, &error,
- grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
- GRPC_ERROR_STR_KEY, "te"));
+ grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing header"),
+ GRPC_ERROR_STR_KEY, grpc_slice_from_static_string("te")));
}
if (b->idx.named.scheme != NULL) {
@@ -153,14 +159,17 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
!grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_HTTPS) &&
!grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_GRPC)) {
add_error(error_name, &error,
- grpc_attach_md_to_error(GRPC_ERROR_CREATE("Bad header"),
- b->idx.named.scheme->md));
+ grpc_attach_md_to_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"),
+ b->idx.named.scheme->md));
}
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.scheme);
} else {
- add_error(error_name, &error,
- grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
- GRPC_ERROR_STR_KEY, ":scheme"));
+ add_error(
+ error_name, &error,
+ grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing header"),
+ GRPC_ERROR_STR_KEY, grpc_slice_from_static_string(":scheme")));
}
if (b->idx.named.content_type != NULL) {
@@ -194,8 +203,46 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
if (b->idx.named.path == NULL) {
add_error(error_name, &error,
- grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
- GRPC_ERROR_STR_KEY, ":path"));
+ grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing header"),
+ GRPC_ERROR_STR_KEY, grpc_slice_from_static_string(":path")));
+ } else if (*calld->recv_cacheable_request == true) {
+ /* We have a cacheable request made with GET verb. The path contains the
+ * query parameter which is base64 encoded request payload. */
+ const char k_query_separator = '?';
+ grpc_slice path_slice = GRPC_MDVALUE(b->idx.named.path->md);
+ uint8_t *path_ptr = (uint8_t *)GRPC_SLICE_START_PTR(path_slice);
+ size_t path_length = GRPC_SLICE_LENGTH(path_slice);
+ /* offset of the character '?' */
+ size_t offset = 0;
+ for (offset = 0; *path_ptr != k_query_separator && offset < path_length;
+ path_ptr++, offset++)
+ ;
+ if (offset < path_length) {
+ grpc_slice query_slice =
+ grpc_slice_sub(path_slice, offset + 1, path_length);
+
+ /* substitute path metadata with just the path (not query) */
+ grpc_mdelem mdelem_path_without_query = grpc_mdelem_from_slices(
+ exec_ctx, GRPC_MDSTR_PATH, grpc_slice_sub(path_slice, 0, offset));
+
+ grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
+ mdelem_path_without_query);
+
+ /* decode payload from query and add to the slice buffer to be returned */
+ const int k_url_safe = 1;
+ grpc_slice_buffer_add(
+ &calld->read_slice_buffer,
+ grpc_base64_decode(exec_ctx,
+ (const char *)GRPC_SLICE_START_PTR(query_slice),
+ k_url_safe));
+ grpc_slice_buffer_stream_init(&calld->read_stream,
+ &calld->read_slice_buffer, 0);
+ calld->seen_path_with_query = true;
+ grpc_slice_unref_internal(exec_ctx, query_slice);
+ } else {
+ gpr_log(GPR_ERROR, "GET request without QUERY");
+ }
}
if (b->idx.named.host != NULL && b->idx.named.authority == NULL) {
@@ -212,19 +259,11 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
}
if (b->idx.named.authority == NULL) {
- add_error(error_name, &error,
- grpc_error_set_str(GRPC_ERROR_CREATE("Missing header"),
- GRPC_ERROR_STR_KEY, ":authority"));
- }
-
- if (b->idx.named.grpc_payload_bin != NULL) {
- calld->seen_payload_bin = true;
- grpc_slice_buffer_add(&calld->read_slice_buffer,
- grpc_slice_ref_internal(
- GRPC_MDVALUE(b->idx.named.grpc_payload_bin->md)));
- grpc_slice_buffer_stream_init(&calld->read_stream,
- &calld->read_slice_buffer, 0);
- grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_payload_bin);
+ add_error(
+ error_name, &error,
+ grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing header"),
+ GRPC_ERROR_STR_KEY, grpc_slice_from_static_string(":authority")));
}
return error;
@@ -247,8 +286,8 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- /* Call recv_message_ready if we got the payload via the header field */
- if (calld->seen_payload_bin && calld->recv_message_ready != NULL) {
+ /* Call recv_message_ready if we got the payload via the path field */
+ if (calld->seen_path_with_query && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
? NULL
: (grpc_byte_stream *)&calld->read_stream;
@@ -263,7 +302,7 @@ static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
- if (calld->seen_payload_bin) {
+ if (calld->seen_path_with_query) {
/* do nothing. This is probably a GET request, and payload will be returned
in hs_on_complete callback. */
} else {
@@ -358,7 +397,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *calld = elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
}
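
The server side reverses the transformation: it scans :path for '?', keeps the prefix as the real path, and base64-decodes the query into the read slice buffer. A standalone sketch of the split step (the decode itself is left to the in-tree grpc_base64_decode):

#include <stddef.h>

/* Returns the path length; if a '?' is found, *query and *query_len describe
   the bytes just past it, otherwise *query is NULL. */
static size_t split_path_query(const char *path, size_t len,
                               const char **query, size_t *query_len) {
  size_t offset = 0;
  while (offset < len && path[offset] != '?') offset++;
  if (offset < len) {
    *query = path + offset + 1;
    *query_len = len - offset - 1;
  } else {
    *query = NULL;
    *query_len = 0;
  }
  return offset;
}
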
diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c
index b424c0d2ac..63136650a5 100644
--- a/src/core/lib/channel/message_size_filter.c
+++ b/src/core/lib/channel/message_size_filter.c
@@ -121,8 +121,8 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
"Received message larger than max (%u vs. %d)",
(*calld->recv_message)->length, calld->max_recv_size);
grpc_error* new_error = grpc_error_set_int(
- GRPC_ERROR_CREATE(message_string), GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_INVALID_ARGUMENT);
+ GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INVALID_ARGUMENT);
if (error == GRPC_ERROR_NONE) {
error = new_error;
} else {
@@ -147,9 +147,10 @@ static void start_transport_stream_op(grpc_exec_ctx* exec_ctx,
gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
op->send_message->length, calld->max_send_size);
grpc_transport_stream_op_finish_with_failure(
- exec_ctx, op, grpc_error_set_int(GRPC_ERROR_CREATE(message_string),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_INVALID_ARGUMENT));
+ exec_ctx, op,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_INVALID_ARGUMENT));
gpr_free(message_string);
return;
}
@@ -200,7 +201,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
// Destructor for call_data.
static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
const grpc_call_final_info* final_info,
- void* ignored) {}
+ grpc_closure* ignored) {}
// Constructor for channel_data.
static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 6d7aa43b81..453a64b049 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -126,13 +126,15 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
static void append_error(internal_request *req, grpc_error *error) {
if (req->overall_error == GRPC_ERROR_NONE) {
- req->overall_error = GRPC_ERROR_CREATE("Failed HTTP/1 client request");
+ req->overall_error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed HTTP/1 client request");
}
grpc_resolved_address *addr = &req->addresses->addrs[req->next_address - 1];
char *addr_text = grpc_sockaddr_to_uri(addr);
req->overall_error = grpc_error_add_child(
req->overall_error,
- grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, addr_text));
+ grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(addr_text)));
gpr_free(addr_text);
}
@@ -190,8 +192,8 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
internal_request *req = arg;
if (!ep) {
- next_address(exec_ctx, req,
- GRPC_ERROR_CREATE("Unexplained handshake failure"));
+ next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Unexplained handshake failure"));
return;
}
@@ -221,8 +223,8 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
if (req->next_address == req->addresses->naddrs) {
finish(exec_ctx, req,
- GRPC_ERROR_CREATE_REFERENCING("Failed HTTP requests to all targets",
- &req->overall_error, 1));
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed HTTP requests to all targets", &req->overall_error, 1));
return;
}
addr = &req->addresses->addrs[req->next_address++];
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 354d2f4a09..be6a6d618a 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -95,7 +95,7 @@ static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate",
c->secure_peer_name);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
}
grpc_closure_sched(exec_ctx, on_peer_checked, error);
diff --git a/src/core/lib/http/parser.c b/src/core/lib/http/parser.c
index b9c56c103c..aac506b800 100644
--- a/src/core/lib/http/parser.c
+++ b/src/core/lib/http/parser.c
@@ -54,26 +54,36 @@ static grpc_error *handle_response_line(grpc_http_parser *parser) {
uint8_t *cur = beg;
uint8_t *end = beg + parser->cur_line_length;
- if (cur == end || *cur++ != 'H') return GRPC_ERROR_CREATE("Expected 'H'");
- if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
- if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
- if (cur == end || *cur++ != 'P') return GRPC_ERROR_CREATE("Expected 'P'");
- if (cur == end || *cur++ != '/') return GRPC_ERROR_CREATE("Expected '/'");
- if (cur == end || *cur++ != '1') return GRPC_ERROR_CREATE("Expected '1'");
- if (cur == end || *cur++ != '.') return GRPC_ERROR_CREATE("Expected '.'");
+ if (cur == end || *cur++ != 'H')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'H'");
+ if (cur == end || *cur++ != 'T')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'T'");
+ if (cur == end || *cur++ != 'T')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'T'");
+ if (cur == end || *cur++ != 'P')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'P'");
+ if (cur == end || *cur++ != '/')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected '/'");
+ if (cur == end || *cur++ != '1')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected '1'");
+ if (cur == end || *cur++ != '.')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected '.'");
if (cur == end || *cur < '0' || *cur++ > '1') {
- return GRPC_ERROR_CREATE("Expected HTTP/1.0 or HTTP/1.1");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Expected HTTP/1.0 or HTTP/1.1");
}
- if (cur == end || *cur++ != ' ') return GRPC_ERROR_CREATE("Expected ' '");
+ if (cur == end || *cur++ != ' ')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected ' '");
if (cur == end || *cur < '1' || *cur++ > '9')
- return GRPC_ERROR_CREATE("Expected status code");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected status code");
if (cur == end || *cur < '0' || *cur++ > '9')
- return GRPC_ERROR_CREATE("Expected status code");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected status code");
if (cur == end || *cur < '0' || *cur++ > '9')
- return GRPC_ERROR_CREATE("Expected status code");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected status code");
parser->http.response->status =
(cur[-3] - '0') * 100 + (cur[-2] - '0') * 10 + (cur[-1] - '0');
- if (cur == end || *cur++ != ' ') return GRPC_ERROR_CREATE("Expected ' '");
+ if (cur == end || *cur++ != ' ')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected ' '");
/* we don't really care about the status code message */
@@ -89,24 +99,33 @@ static grpc_error *handle_request_line(grpc_http_parser *parser) {
while (cur != end && *cur++ != ' ')
;
- if (cur == end) return GRPC_ERROR_CREATE("No method on HTTP request line");
+ if (cur == end)
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "No method on HTTP request line");
parser->http.request->method = buf2str(beg, (size_t)(cur - beg - 1));
beg = cur;
while (cur != end && *cur++ != ' ')
;
- if (cur == end) return GRPC_ERROR_CREATE("No path on HTTP request line");
+ if (cur == end)
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("No path on HTTP request line");
parser->http.request->path = buf2str(beg, (size_t)(cur - beg - 1));
- if (cur == end || *cur++ != 'H') return GRPC_ERROR_CREATE("Expected 'H'");
- if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
- if (cur == end || *cur++ != 'T') return GRPC_ERROR_CREATE("Expected 'T'");
- if (cur == end || *cur++ != 'P') return GRPC_ERROR_CREATE("Expected 'P'");
- if (cur == end || *cur++ != '/') return GRPC_ERROR_CREATE("Expected '/'");
+ if (cur == end || *cur++ != 'H')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'H'");
+ if (cur == end || *cur++ != 'T')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'T'");
+ if (cur == end || *cur++ != 'T')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'T'");
+ if (cur == end || *cur++ != 'P')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'P'");
+ if (cur == end || *cur++ != '/')
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected '/'");
vers_major = (uint8_t)(*cur++ - '1' + 1);
++cur;
if (cur == end)
- return GRPC_ERROR_CREATE("End of line in HTTP version string");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "End of line in HTTP version string");
vers_minor = (uint8_t)(*cur++ - '1' + 1);
if (vers_major == 1) {
@@ -115,18 +134,19 @@ static grpc_error *handle_request_line(grpc_http_parser *parser) {
} else if (vers_minor == 1) {
parser->http.request->version = GRPC_HTTP_HTTP11;
} else {
- return GRPC_ERROR_CREATE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0");
}
} else if (vers_major == 2) {
if (vers_minor == 0) {
parser->http.request->version = GRPC_HTTP_HTTP20;
} else {
- return GRPC_ERROR_CREATE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0");
}
} else {
- return GRPC_ERROR_CREATE("Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Expected one of HTTP/1.0, HTTP/1.1, or HTTP/2.0");
}
return GRPC_ERROR_NONE;
@@ -139,7 +159,8 @@ static grpc_error *handle_first_line(grpc_http_parser *parser) {
case GRPC_HTTP_RESPONSE:
return handle_response_line(parser);
}
- GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
+ GPR_UNREACHABLE_CODE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"));
}
static grpc_error *add_header(grpc_http_parser *parser) {
@@ -154,7 +175,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
GPR_ASSERT(cur != end);
if (*cur == ' ' || *cur == '\t') {
- error = GRPC_ERROR_CREATE("Continued header lines not supported yet");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Continued header lines not supported yet");
goto done;
}
@@ -162,7 +184,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
cur++;
}
if (cur == end) {
- error = GRPC_ERROR_CREATE("Didn't find ':' in header string");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Didn't find ':' in header string");
goto done;
}
GPR_ASSERT(cur >= beg);
@@ -222,7 +245,8 @@ static grpc_error *finish_line(grpc_http_parser *parser,
}
break;
case GRPC_HTTP_BODY:
- GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
+ GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Should never reach here"));
}
parser->cur_line_length = 0;
@@ -240,7 +264,8 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
body_length = &parser->http.request->body_length;
body = &parser->http.request->body;
} else {
- GPR_UNREACHABLE_CODE(return GRPC_ERROR_CREATE("Should never reach here"));
+ GPR_UNREACHABLE_CODE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"));
}
if (*body_length == parser->body_capacity) {
@@ -286,7 +311,8 @@ static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte,
if (grpc_http1_trace)
gpr_log(GPR_ERROR, "HTTP header max line length (%d) exceeded",
GRPC_HTTP_PARSER_MAX_HEADER_LENGTH);
- return GRPC_ERROR_CREATE("HTTP header max line length exceeded");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "HTTP header max line length exceeded");
}
parser->cur_line[parser->cur_line_length] = byte;
parser->cur_line_length++;
@@ -347,7 +373,7 @@ grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice,
grpc_error *grpc_http_parser_eof(grpc_http_parser *parser) {
if (parser->state != GRPC_HTTP_BODY) {
- return GRPC_ERROR_CREATE("Did not finish headers");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Did not finish headers");
}
return GRPC_ERROR_NONE;
}
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 509c1ff95d..6633fb68ec 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -33,6 +33,7 @@
#include "src/core/lib/iomgr/closure.h"
+#include <assert.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -124,6 +125,7 @@ void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_closure_run", 0);
if (c != NULL) {
+ assert(c->cb);
c->scheduler->vtable->run(exec_ctx, c, error);
} else {
GRPC_ERROR_UNREF(error);
@@ -135,6 +137,7 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
grpc_error *error) {
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
+ assert(c->cb);
c->scheduler->vtable->sched(exec_ctx, c, error);
} else {
GRPC_ERROR_UNREF(error);
@@ -146,6 +149,7 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
grpc_closure *c = list->head;
while (c != NULL) {
grpc_closure *next = c->next_data.next;
+ assert(c->cb);
c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
c = next;
}
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index fa9966c3a6..2bc476bbef 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -33,6 +33,7 @@
#include "src/core/lib/iomgr/combiner.h"
+#include <assert.h>
#include <string.h>
#include <grpc/support/alloc.h>
@@ -216,6 +217,7 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
cl, covered_by_poller, last));
GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed
+ assert(cl->cb);
cl->error_data.scratch =
pack_error_data((error_data){error, covered_by_poller});
if (covered_by_poller) {
diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c
index 1127fff756..1dbb64e8f3 100644
--- a/src/core/lib/iomgr/error.c
+++ b/src/core/lib/iomgr/error.c
@@ -35,7 +35,6 @@
#include <string.h>
-#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@@ -287,7 +286,7 @@ static void internal_add_error(grpc_error **err, grpc_error *new) {
// It is very common to include an extra int and string in an error
#define SURPLUS_CAPACITY (2 * SLOTS_PER_INT + SLOTS_PER_TIME)
-grpc_error *grpc_error_create(const char *file, int line, const char *desc,
+grpc_error *grpc_error_create(grpc_slice file, int line, grpc_slice desc,
grpc_error **referencing,
size_t num_referencing) {
GPR_TIMER_BEGIN("grpc_error_create", 0);
@@ -313,14 +312,8 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc,
memset(err->times, UINT8_MAX, GRPC_ERROR_TIME_MAX);
internal_set_int(&err, GRPC_ERROR_INT_FILE_LINE, line);
- internal_set_str(&err, GRPC_ERROR_STR_FILE,
- grpc_slice_from_static_string(file));
- internal_set_str(
- &err, GRPC_ERROR_STR_DESCRIPTION,
- grpc_slice_from_copied_buffer(
- desc,
- strlen(desc) +
- 1)); // TODO, pull this up. // TODO(ncteisen), pull this up.
+ internal_set_str(&err, GRPC_ERROR_STR_FILE, file);
+ internal_set_str(&err, GRPC_ERROR_STR_DESCRIPTION, desc);
for (size_t i = 0; i < num_referencing; ++i) {
if (referencing[i] == GRPC_ERROR_NONE) continue;
@@ -360,7 +353,7 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
GPR_TIMER_BEGIN("copy_error_and_unref", 0);
grpc_error *out;
if (grpc_error_is_special(in)) {
- out = GRPC_ERROR_CREATE("unknown");
+ out = GRPC_ERROR_CREATE_FROM_STATIC_STRING("unknown");
if (in == GRPC_ERROR_NONE) {
internal_set_str(&out, GRPC_ERROR_STR_DESCRIPTION,
grpc_slice_from_static_string("no error"));
@@ -417,7 +410,7 @@ typedef struct {
const char *msg;
} special_error_status_map;
static special_error_status_map error_status_map[] = {
- {GRPC_ERROR_NONE, GRPC_STATUS_OK, NULL},
+ {GRPC_ERROR_NONE, GRPC_STATUS_OK, ""},
{GRPC_ERROR_CANCELLED, GRPC_STATUS_CANCELLED, "Cancelled"},
{GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"},
};
@@ -448,33 +441,33 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
}
grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
- const char *value) {
+ grpc_slice str) {
GPR_TIMER_BEGIN("grpc_error_set_str", 0);
grpc_error *new = copy_error_and_unref(src);
- internal_set_str(&new, which,
- grpc_slice_from_copied_buffer(
- value, strlen(value) + 1)); // TODO, pull this up.
+ internal_set_str(&new, which, str);
GPR_TIMER_END("grpc_error_set_str", 0);
return new;
}
-const char *grpc_error_get_str(grpc_error *err, grpc_error_strs which) {
+bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
+ grpc_slice *str) {
if (grpc_error_is_special(err)) {
if (which == GRPC_ERROR_STR_GRPC_MESSAGE) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) {
if (error_status_map[i].error == err) {
- return error_status_map[i].msg;
+ *str = grpc_slice_from_static_string(error_status_map[i].msg);
+ return true;
}
}
}
- return NULL;
+ return false;
}
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
- return (const char *)GRPC_SLICE_START_PTR(
- *(grpc_slice *)(err->arena + slot));
+ *str = *(grpc_slice *)(err->arena + slot);
+ return true;
} else {
- return NULL;
+ return false;
}
}
@@ -515,13 +508,14 @@ static void append_str(const char *str, char **s, size_t *sz, size_t *cap) {
}
}
-static void append_esc_str(const char *str, char **s, size_t *sz, size_t *cap) {
+static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
+ size_t *cap) {
static const char *hex = "0123456789abcdef";
append_chr('"', s, sz, cap);
- for (const uint8_t *c = (const uint8_t *)str; *c; c++) {
- if (*c < 32 || *c >= 127) {
+ for (size_t i = 0; i < len; i++, str++) {
+ if (*str < 32 || *str >= 127) {
append_chr('\\', s, sz, cap);
- switch (*c) {
+ switch (*str) {
case '\b':
append_chr('b', s, sz, cap);
break;
@@ -541,12 +535,12 @@ static void append_esc_str(const char *str, char **s, size_t *sz, size_t *cap) {
append_chr('u', s, sz, cap);
append_chr('0', s, sz, cap);
append_chr('0', s, sz, cap);
- append_chr(hex[*c >> 4], s, sz, cap);
- append_chr(hex[*c & 0x0f], s, sz, cap);
+ append_chr(hex[*str >> 4], s, sz, cap);
+ append_chr(hex[*str & 0x0f], s, sz, cap);
break;
}
} else {
- append_chr((char)*c, s, sz, cap);
+ append_chr((char)*str, s, sz, cap);
}
}
append_chr('"', s, sz, cap);
@@ -586,11 +580,12 @@ static char *key_str(grpc_error_strs which) {
return gpr_strdup(error_str_name(which));
}
-static char *fmt_str(void *p) {
+static char *fmt_str(grpc_slice slice) {
char *s = NULL;
size_t sz = 0;
size_t cap = 0;
- append_esc_str(p, &s, &sz, &cap);
+ append_esc_str((const uint8_t *)GRPC_SLICE_START_PTR(slice),
+ GRPC_SLICE_LENGTH(slice), &s, &sz, &cap);
append_chr(0, &s, &sz, &cap);
return s;
}
@@ -599,9 +594,8 @@ static void collect_strs_kvs(grpc_error *err, kv_pairs *kvs) {
for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
uint8_t slot = err->strs[which];
if (slot != UINT8_MAX) {
- append_kv(
- kvs, key_str((grpc_error_strs)which),
- fmt_str(GRPC_SLICE_START_PTR(*(grpc_slice *)(err->arena + slot))));
+ append_kv(kvs, key_str((grpc_error_strs)which),
+ fmt_str(*(grpc_slice *)(err->arena + slot)));
}
}
}
@@ -681,7 +675,8 @@ static char *finish_kvs(kv_pairs *kvs) {
append_chr('{', &s, &sz, &cap);
for (size_t i = 0; i < kvs->num_kvs; i++) {
if (i != 0) append_chr(',', &s, &sz, &cap);
- append_esc_str(kvs->kvs[i].key, &s, &sz, &cap);
+ append_esc_str((const uint8_t *)kvs->kvs[i].key, strlen(kvs->kvs[i].key),
+ &s, &sz, &cap);
gpr_free(kvs->kvs[i].key);
append_chr(':', &s, &sz, &cap);
append_str(kvs->kvs[i].value, &s, &sz, &cap);
@@ -733,10 +728,14 @@ grpc_error *grpc_os_error(const char *file, int line, int err,
const char *call_name) {
return grpc_error_set_str(
grpc_error_set_str(
- grpc_error_set_int(grpc_error_create(file, line, "OS Error", NULL, 0),
- GRPC_ERROR_INT_ERRNO, err),
- GRPC_ERROR_STR_OS_ERROR, strerror(err)),
- GRPC_ERROR_STR_SYSCALL, call_name);
+ grpc_error_set_int(
+ grpc_error_create(grpc_slice_from_static_string(file), line,
+ grpc_slice_from_static_string("OS Error"), NULL,
+ 0),
+ GRPC_ERROR_INT_ERRNO, err),
+ GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(strerror(err))),
+ GRPC_ERROR_STR_SYSCALL, grpc_slice_from_static_string(call_name));
}
#ifdef GPR_WINDOWS
@@ -745,10 +744,13 @@ grpc_error *grpc_wsa_error(const char *file, int line, int err,
char *utf8_message = gpr_format_message(err);
grpc_error *error = grpc_error_set_str(
grpc_error_set_str(
- grpc_error_set_int(grpc_error_create(file, line, "OS Error", NULL, 0),
- GRPC_ERROR_INT_WSA_ERROR, err),
- GRPC_ERROR_STR_OS_ERROR, utf8_message),
- GRPC_ERROR_STR_SYSCALL, call_name);
+ grpc_error_set_int(
+ grpc_error_create(grpc_slice_from_static_string(file), line,
+ grpc_slice_from_static_string("OS Error"), NULL,
+ 0),
+ GRPC_ERROR_INT_WSA_ERROR, err),
+ GRPC_ERROR_STR_OS_ERROR, grpc_slice_from_copied_string(utf8_message)),
+ GRPC_ERROR_STR_SYSCALL, grpc_slice_from_static_string(call_name));
gpr_free(utf8_message);
return error;
}
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index eb953947ae..2a44fcfe25 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -37,6 +37,7 @@
#include <stdbool.h>
#include <stdint.h>
+#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/time.h>
@@ -45,28 +46,9 @@ extern "C" {
#endif
/// Opaque representation of an error.
-/// Errors are refcounted objects that represent the result of an operation.
-/// Ownership laws:
-/// if a grpc_error is returned by a function, the caller owns a ref to that
-/// instance
-/// if a grpc_error is passed to a grpc_closure callback function (functions
-/// with the signature:
-/// void (*f)(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error))
-/// then those functions do not own a ref to error (but are free to manually
-/// take a reference).
-/// if a grpc_error is passed to *ANY OTHER FUNCTION* then that function takes
-/// ownership of the error
-/// Errors have:
-/// a set of ints, strings, and timestamps that describe the error
-/// always present are:
-/// GRPC_ERROR_STR_FILE, GRPC_ERROR_INT_FILE_LINE - source location the error
-/// was generated
-/// GRPC_ERROR_STR_DESCRIPTION - a human readable description of the error
-/// GRPC_ERROR_TIME_CREATED - a timestamp indicating when the error happened
-/// an error can also have children; these are other errors that are believed
-/// to have contributed to this one. By accumulating children, we can begin
-/// to root cause high level failures from low level failures, without having
-/// to derive execution paths from log lines
+/// See https://github.com/grpc/grpc/blob/master/doc/core/grpc-error.md for a
+/// full write up of this object.
+
typedef struct grpc_error grpc_error;
typedef enum {
@@ -156,7 +138,7 @@ typedef enum {
const char *grpc_error_string(grpc_error *error);
/// Create an error - but use the GRPC_ERROR_CREATE_FROM_* macros instead
-grpc_error *grpc_error_create(const char *file, int line, const char *desc,
+grpc_error *grpc_error_create(grpc_slice file, int line, grpc_slice desc,
grpc_error **referencing, size_t num_referencing);
/// Create an error (this is the preferred way of generating an error that is
/// not due to a system call - for system calls, use GRPC_OS_ERROR or
@@ -166,13 +148,21 @@ grpc_error *grpc_error_create(const char *file, int line, const char *desc,
/// err = grpc_error_create(x, y, z, r, nr) is equivalent to:
/// err = grpc_error_create(x, y, z, NULL, 0);
/// for (i=0; i<nr; i++) err = grpc_error_add_child(err, r[i]);
-#define GRPC_ERROR_CREATE(desc) \
- grpc_error_create(__FILE__, __LINE__, desc, NULL, 0)
+#define GRPC_ERROR_CREATE_FROM_STATIC_STRING(desc) \
+ grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
+ grpc_slice_from_static_string(desc), NULL, 0)
+#define GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc) \
+ grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
+ grpc_slice_from_copied_string(desc), NULL, 0)
// Create an error that references some other errors. This function adds a
// reference to each error in errs - it does not consume an existing reference
-#define GRPC_ERROR_CREATE_REFERENCING(desc, errs, count) \
- grpc_error_create(__FILE__, __LINE__, desc, errs, count)
+#define GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(desc, errs, count) \
+ grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
+ grpc_slice_from_static_string(desc), errs, count)
+#define GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(desc, errs, count) \
+ grpc_error_create(grpc_slice_from_static_string(__FILE__), __LINE__, \
+ grpc_slice_from_copied_string(desc), errs, count)
//#define GRPC_ERROR_REFCOUNT_DEBUG
#ifdef GRPC_ERROR_REFCOUNT_DEBUG
@@ -194,10 +184,11 @@ grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
intptr_t value) GRPC_MUST_USE_RESULT;
bool grpc_error_get_int(grpc_error *error, grpc_error_ints which, intptr_t *p);
grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
- const char *value) GRPC_MUST_USE_RESULT;
-/// Returns NULL if the specified string is not set.
-/// Caller does NOT own return value.
-const char *grpc_error_get_str(grpc_error *error, grpc_error_strs which);
+ grpc_slice str) GRPC_MUST_USE_RESULT;
+/// Returns false if the specified string is not set.
+/// Caller does NOT own the slice.
+bool grpc_error_get_str(grpc_error *error, grpc_error_strs which,
+ grpc_slice *s);
/// Add a child error: an error that is believed to have contributed to this
/// error occurring. Allows root causing high level errors from lower level
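A minimal sketch of consuming the new bool/out-slice form of grpc_error_get_str; the helper name and log message below are illustrative, but the call pattern matches the one used in tcp_client_posix.c later in this diff.

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/error.h"

/* Illustrative sketch only; not part of this patch. */
static void log_error_description(grpc_error *error) {
  grpc_slice desc;
  if (grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION, &desc)) {
    /* The slice is owned by the error; copy it out before using it as a
       C string. */
    char *desc_str = grpc_slice_to_c_string(desc);
    gpr_log(GPR_ERROR, "operation failed: %s", desc_str);
    gpr_free(desc_str);
  } else {
    gpr_log(GPR_ERROR, "operation failed (no description set)");
  }
}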
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index 11208b9ad1..1924e76f13 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -321,7 +321,7 @@ static bool append_error(grpc_error **composite, grpc_error *error,
const char *desc) {
if (error == GRPC_ERROR_NONE) return true;
if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE(desc);
+ *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
}
*composite = grpc_error_add_child(*composite, error);
return false;
@@ -1146,9 +1146,9 @@ static void notify_on(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
schedule the closure with the shutdown error */
if ((curr & FD_SHUTDOWN_BIT) > 0) {
grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT);
- grpc_closure_sched(
- exec_ctx, closure,
- GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+ grpc_closure_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "FD Shutdown", &shutdown_err, 1));
return;
}
@@ -1203,9 +1203,9 @@ static void set_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm *state,
notify_on to ensure that the closure it schedules 'happens-after'
the set_shutdown is called on the fd */
if (gpr_atm_rel_cas(state, curr, new_state)) {
- grpc_closure_sched(
- exec_ctx, (grpc_closure *)curr,
- GRPC_ERROR_CREATE_REFERENCING("FD Shutdown", &shutdown_err, 1));
+ grpc_closure_sched(exec_ctx, (grpc_closure *)curr,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "FD Shutdown", &shutdown_err, 1));
return;
}
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 5ddd5313e2..ca6e855611 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -451,14 +451,16 @@ static grpc_error *fd_shutdown_error(grpc_fd *fd) {
if (!fd->shutdown) {
return GRPC_ERROR_NONE;
} else {
- return GRPC_ERROR_CREATE_REFERENCING("FD shutdown", &fd->shutdown_error, 1);
+ return GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "FD shutdown", &fd->shutdown_error, 1);
}
}
static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure **st, grpc_closure *closure) {
if (fd->shutdown) {
- grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
+ grpc_closure_sched(exec_ctx, closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
} else if (*st == CLOSURE_NOT_READY) {
/* not ready ==> switch to a waiting state by setting the closure */
*st = closure;
@@ -696,7 +698,7 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
static void kick_append_error(grpc_error **composite, grpc_error *error) {
if (error == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE("Kick Failure");
+ *composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Kick Failure");
}
*composite = grpc_error_add_child(*composite, error);
}
@@ -859,7 +861,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
static void work_combine_error(grpc_error **composite, grpc_error *error) {
if (error == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE("pollset_work");
+ *composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("pollset_work");
}
*composite = grpc_error_add_child(*composite, error);
}
@@ -1421,7 +1423,7 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
g_cvfds.pollcount++;
opt = gpr_thd_options_default();
gpr_thd_options_set_detached(&opt);
- gpr_thd_new(&t_id, &run_poll, pargs, &opt);
+ GPR_ASSERT(gpr_thd_new(&t_id, &run_poll, pargs, &opt));
// We want the poll() thread to trigger the deadline, so wait forever here
gpr_cv_wait(pollcv, &g_cvfds.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
if (gpr_atm_no_barrier_load(&pargs->status) == COMPLETED) {
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index a5b62aa888..ae3e2eabc3 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -115,8 +115,8 @@ static void maybe_spawn_locked() {
/* All previous instances of the thread should have been joined at this point.
* Spawn time! */
g_executor.busy = 1;
- gpr_thd_new(&g_executor.tid, closure_exec_thread_func, NULL,
- &g_executor.options);
+ GPR_ASSERT(gpr_thd_new(&g_executor.tid, closure_exec_thread_func, NULL,
+ &g_executor.options));
g_executor.pending_join = 1;
}
diff --git a/src/core/lib/iomgr/load_file.c b/src/core/lib/iomgr/load_file.c
index f40c8b28cc..208f74e20c 100644
--- a/src/core/lib/iomgr/load_file.c
+++ b/src/core/lib/iomgr/load_file.c
@@ -78,9 +78,12 @@ end:
*output = result;
if (file != NULL) fclose(file);
if (error != GRPC_ERROR_NONE) {
- grpc_error *error_out = grpc_error_set_str(
- GRPC_ERROR_CREATE_REFERENCING("Failed to load file", &error, 1),
- GRPC_ERROR_STR_FILENAME, filename);
+ grpc_error *error_out =
+ grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to load file", &error, 1),
+ GRPC_ERROR_STR_FILENAME,
+ grpc_slice_from_copied_string(
+ filename)); // TODO(ncteisen), always static?
GRPC_ERROR_UNREF(error);
error = error_out;
}
diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h
index f1897bb91f..94a454c0b7 100644
--- a/src/core/lib/iomgr/port.h
+++ b/src/core/lib/iomgr/port.h
@@ -39,6 +39,7 @@
#if defined(GRPC_UV)
// Do nothing
#elif defined(GPR_MANYLINUX1)
+#define GRPC_HAVE_IFADDRS 1
#define GRPC_HAVE_IPV6_RECVPKTINFO 1
#define GRPC_HAVE_IP_PKTINFO 1
#define GRPC_HAVE_MSG_NOSIGNAL 1
@@ -65,6 +66,7 @@
#define GRPC_POSIX_WAKEUP_FD 1
#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_LINUX)
+#define GRPC_HAVE_IFADDRS 1
#define GRPC_HAVE_IPV6_RECVPKTINFO 1
#define GRPC_HAVE_IP_PKTINFO 1
#define GRPC_HAVE_MSG_NOSIGNAL 1
@@ -90,6 +92,7 @@
#define GRPC_POSIX_SOCKETUTILS
#endif
#elif defined(GPR_APPLE)
+#define GRPC_HAVE_IFADDRS 1
#define GRPC_HAVE_SO_NOSIGPIPE 1
#define GRPC_HAVE_UNIX_SOCKET 1
#define GRPC_MSG_IOVLEN_TYPE int
@@ -100,6 +103,7 @@
#define GRPC_POSIX_WAKEUP_FD 1
#define GRPC_TIMER_USE_GENERIC 1
#elif defined(GPR_FREEBSD)
+#define GRPC_HAVE_IFADDRS 1
#define GRPC_HAVE_IPV6_RECVPKTINFO 1
#define GRPC_HAVE_SO_NOSIGPIPE 1
#define GRPC_HAVE_UNIX_SOCKET 1
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 50e470d149..d0ede0f2d5 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -73,14 +73,16 @@ static grpc_error *blocking_resolve_address_impl(
/* parse name, splitting it into host and port parts */
gpr_split_host_port(name, &host, &port);
if (host == NULL) {
- err = grpc_error_set_str(GRPC_ERROR_CREATE("unparseable host:port"),
- GRPC_ERROR_STR_TARGET_ADDRESS, name);
+ err = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("unparseable host:port"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
goto done;
}
if (port == NULL) {
if (default_port == NULL) {
- err = grpc_error_set_str(GRPC_ERROR_CREATE("no port in name"),
- GRPC_ERROR_STR_TARGET_ADDRESS, name);
+ err = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("no port in name"),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
goto done;
}
port = gpr_strdup(default_port);
@@ -112,11 +114,15 @@ static grpc_error *blocking_resolve_address_impl(
if (s != 0) {
err = grpc_error_set_str(
grpc_error_set_str(
- grpc_error_set_str(grpc_error_set_int(GRPC_ERROR_CREATE("OS Error"),
- GRPC_ERROR_INT_ERRNO, s),
- GRPC_ERROR_STR_OS_ERROR, gai_strerror(s)),
- GRPC_ERROR_STR_SYSCALL, "getaddrinfo"),
- GRPC_ERROR_STR_TARGET_ADDRESS, name);
+ grpc_error_set_str(
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("OS Error"),
+ GRPC_ERROR_INT_ERRNO, s),
+ GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(gai_strerror(s))),
+ GRPC_ERROR_STR_SYSCALL,
+ grpc_slice_from_static_string("getaddrinfo")),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name));
goto done;
}
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index 4d715be94c..102d1aa290 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -92,9 +92,10 @@ static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result,
if (status != 0) {
grpc_error *error;
*addresses = NULL;
- error = GRPC_ERROR_CREATE("getaddrinfo failed");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
return error;
}
(*addresses) = gpr_malloc(sizeof(grpc_resolved_addresses));
@@ -153,7 +154,7 @@ static grpc_error *try_split_host_port(const char *name,
if (*host == NULL) {
char *msg;
gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
@@ -162,7 +163,7 @@ static grpc_error *try_split_host_port(const char *name,
if (default_port == NULL) {
char *msg;
gpr_asprintf(&msg, "no port in name '%s'", name);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
@@ -262,8 +263,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
if (s != 0) {
*addrs = NULL;
- err = GRPC_ERROR_CREATE("getaddrinfo failed");
- err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s));
+ err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
+ err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(s)));
grpc_closure_sched(exec_ctx, on_done, err);
gpr_free(r);
gpr_free(req);
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index 2439ce3cb7..22eca1fd3b 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -78,7 +78,7 @@ static grpc_error *blocking_resolve_address_impl(
if (host == NULL) {
char *msg;
gpr_asprintf(&msg, "unparseable host:port: '%s'", name);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
goto done;
}
@@ -86,7 +86,7 @@ static grpc_error *blocking_resolve_address_impl(
if (default_port == NULL) {
char *msg;
gpr_asprintf(&msg, "no port in name '%s'", name);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
goto done;
}
diff --git a/src/core/lib/iomgr/socket_factory_posix.c b/src/core/lib/iomgr/socket_factory_posix.c
new file mode 100644
index 0000000000..1050a14c46
--- /dev/null
+++ b/src/core/lib/iomgr/socket_factory_posix.c
@@ -0,0 +1,110 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_POSIX_SOCKET
+
+#include "src/core/lib/iomgr/socket_factory_posix.h"
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/useful.h>
+
+void grpc_socket_factory_init(grpc_socket_factory *factory,
+ const grpc_socket_factory_vtable *vtable) {
+ factory->vtable = vtable;
+ gpr_ref_init(&factory->refcount, 1);
+}
+
+int grpc_socket_factory_socket(grpc_socket_factory *factory, int domain,
+ int type, int protocol) {
+ return factory->vtable->socket(factory, domain, type, protocol);
+}
+
+int grpc_socket_factory_bind(grpc_socket_factory *factory, int sockfd,
+ const grpc_resolved_address *addr) {
+ return factory->vtable->bind(factory, sockfd, addr);
+}
+
+int grpc_socket_factory_compare(grpc_socket_factory *a,
+ grpc_socket_factory *b) {
+ int c = GPR_ICMP(a, b);
+ if (c != 0) {
+ grpc_socket_factory *sma = a;
+ grpc_socket_factory *smb = b;
+ c = GPR_ICMP(sma->vtable, smb->vtable);
+ if (c == 0) {
+ c = sma->vtable->compare(sma, smb);
+ }
+ }
+ return c;
+}
+
+grpc_socket_factory *grpc_socket_factory_ref(grpc_socket_factory *factory) {
+ gpr_ref(&factory->refcount);
+ return factory;
+}
+
+void grpc_socket_factory_unref(grpc_socket_factory *factory) {
+ if (gpr_unref(&factory->refcount)) {
+ factory->vtable->destroy(factory);
+ }
+}
+
+static void *socket_factory_arg_copy(void *p) {
+ return grpc_socket_factory_ref(p);
+}
+
+static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) {
+ grpc_socket_factory_unref(p);
+}
+
+static int socket_factory_cmp(void *a, void *b) {
+ return grpc_socket_factory_compare((grpc_socket_factory *)a,
+ (grpc_socket_factory *)b);
+}
+
+static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
+ socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
+
+grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) {
+ grpc_arg arg;
+ arg.type = GRPC_ARG_POINTER;
+ arg.key = GRPC_ARG_SOCKET_FACTORY;
+ arg.value.pointer.vtable = &socket_factory_arg_vtable;
+ arg.value.pointer.p = factory;
+ return arg;
+}
+
+#endif
diff --git a/src/core/lib/iomgr/socket_factory_posix.h b/src/core/lib/iomgr/socket_factory_posix.h
new file mode 100644
index 0000000000..2c63299030
--- /dev/null
+++ b/src/core/lib/iomgr/socket_factory_posix.h
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_SOCKET_FACTORY_POSIX_H
+#define GRPC_CORE_LIB_IOMGR_SOCKET_FACTORY_POSIX_H
+
+#include <grpc/impl/codegen/grpc_types.h>
+#include <grpc/support/sync.h>
+#include "src/core/lib/iomgr/resolve_address.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The virtual table of grpc_socket_factory */
+typedef struct {
+ /** Replacement for socket(2) */
+ int (*socket)(grpc_socket_factory *factory, int domain, int type,
+ int protocol);
+ /** Replacement for bind(2) */
+ int (*bind)(grpc_socket_factory *factory, int sockfd,
+ const grpc_resolved_address *addr);
+ /** Compare socket factory \a a and \a b */
+ int (*compare)(grpc_socket_factory *a, grpc_socket_factory *b);
+ /** Destroys the socket factory instance */
+ void (*destroy)(grpc_socket_factory *factory);
+} grpc_socket_factory_vtable;
+
+/** The Socket Factory interface allows changes to socket options */
+struct grpc_socket_factory {
+ const grpc_socket_factory_vtable *vtable;
+ gpr_refcount refcount;
+};
+
+/** called by concrete implementations to initialize the base struct */
+void grpc_socket_factory_init(grpc_socket_factory *factory,
+ const grpc_socket_factory_vtable *vtable);
+
+/** Wrap \a factory as a grpc_arg */
+grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory);
+
+/** Perform the equivalent of a socket(2) operation using \a factory */
+int grpc_socket_factory_socket(grpc_socket_factory *factory, int domain,
+ int type, int protocol);
+
+/** Perform the equivalent of a bind(2) operation using \a factory */
+int grpc_socket_factory_bind(grpc_socket_factory *factory, int sockfd,
+ const grpc_resolved_address *addr);
+
+/** Compare if \a a and \a b are the same factory or have same settings */
+int grpc_socket_factory_compare(grpc_socket_factory *a, grpc_socket_factory *b);
+
+grpc_socket_factory *grpc_socket_factory_ref(grpc_socket_factory *factory);
+void grpc_socket_factory_unref(grpc_socket_factory *factory);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_FACTORY_POSIX_H */
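A minimal sketch of a concrete factory built on the vtable above: a pass-through implementation that simply forwards to the OS calls. All names below are illustrative; the only assumption is the grpc_socket_factory API declared in this new header.

#include <sys/socket.h>

#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
#include "src/core/lib/iomgr/socket_factory_posix.h"

/* Illustrative sketch only; not part of this patch. */
typedef struct {
  grpc_socket_factory base;
} passthrough_socket_factory;

static int pt_socket(grpc_socket_factory *factory, int domain, int type,
                     int protocol) {
  return socket(domain, type, protocol);
}

static int pt_bind(grpc_socket_factory *factory, int sockfd,
                   const grpc_resolved_address *addr) {
  return bind(sockfd, (const struct sockaddr *)addr->addr,
              (socklen_t)addr->len);
}

static int pt_compare(grpc_socket_factory *a, grpc_socket_factory *b) {
  return GPR_ICMP(a, b); /* distinct instances are ordered by address */
}

static void pt_destroy(grpc_socket_factory *factory) { gpr_free(factory); }

static const grpc_socket_factory_vtable passthrough_vtable = {
    pt_socket, pt_bind, pt_compare, pt_destroy};

static grpc_socket_factory *passthrough_socket_factory_create(void) {
  passthrough_socket_factory *f = gpr_malloc(sizeof(*f));
  grpc_socket_factory_init(&f->base, &passthrough_vtable);
  return &f->base; /* release with grpc_socket_factory_unref() when done */
}

Such a factory would typically be handed to a transport through channel args via grpc_socket_factory_to_arg().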
diff --git a/src/core/lib/iomgr/socket_utils_common_posix.c b/src/core/lib/iomgr/socket_utils_common_posix.c
index 88e9ade253..bbe642d0fb 100644
--- a/src/core/lib/iomgr/socket_utils_common_posix.c
+++ b/src/core/lib/iomgr/socket_utils_common_posix.c
@@ -90,7 +90,7 @@ grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) {
return GRPC_OS_ERROR(errno, "getsockopt(SO_NOSIGPIPE)");
}
if ((newval != 0) != (val != 0)) {
- return GRPC_ERROR_CREATE("Failed to set SO_NOSIGPIPE");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to set SO_NOSIGPIPE");
}
#endif
return GRPC_ERROR_NONE;
@@ -164,7 +164,7 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) {
return GRPC_OS_ERROR(errno, "getsockopt(SO_REUSEADDR)");
}
if ((newval != 0) != val) {
- return GRPC_ERROR_CREATE("Failed to set SO_REUSEADDR");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to set SO_REUSEADDR");
}
return GRPC_ERROR_NONE;
@@ -173,7 +173,8 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) {
/* set a socket to reuse old addresses */
grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) {
#ifndef SO_REUSEPORT
- return GRPC_ERROR_CREATE("SO_REUSEPORT unavailable on compiling system");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "SO_REUSEPORT unavailable on compiling system");
#else
int val = (reuse != 0);
int newval;
@@ -185,7 +186,7 @@ grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) {
return GRPC_OS_ERROR(errno, "getsockopt(SO_REUSEPORT)");
}
if ((newval != 0) != val) {
- return GRPC_ERROR_CREATE("Failed to set SO_REUSEPORT");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to set SO_REUSEPORT");
}
return GRPC_ERROR_NONE;
@@ -204,7 +205,7 @@ grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
return GRPC_OS_ERROR(errno, "getsockopt(TCP_NODELAY)");
}
if ((newval != 0) != val) {
- return GRPC_ERROR_CREATE("Failed to set TCP_NODELAY");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to set TCP_NODELAY");
}
return GRPC_ERROR_NONE;
}
@@ -213,7 +214,7 @@ grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) {
grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator) {
GPR_ASSERT(mutator);
if (!grpc_socket_mutator_mutate_fd(mutator, fd)) {
- return GRPC_ERROR_CREATE("grpc_socket_mutator failed.");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_socket_mutator failed.");
}
return GRPC_ERROR_NONE;
}
@@ -268,7 +269,8 @@ static grpc_error *error_for_fd(int fd, const grpc_resolved_address *addr) {
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
grpc_error *err = grpc_error_set_str(GRPC_OS_ERROR(errno, "socket"),
- GRPC_ERROR_STR_TARGET_ADDRESS, addr_str);
+ GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(addr_str));
gpr_free(addr_str);
return err;
}
@@ -276,11 +278,25 @@ static grpc_error *error_for_fd(int fd, const grpc_resolved_address *addr) {
grpc_error *grpc_create_dualstack_socket(
const grpc_resolved_address *resolved_addr, int type, int protocol,
grpc_dualstack_mode *dsmode, int *newfd) {
+ return grpc_create_dualstack_socket_using_factory(NULL, resolved_addr, type,
+ protocol, dsmode, newfd);
+}
+
+static int create_socket(grpc_socket_factory *factory, int domain, int type,
+ int protocol) {
+ return (factory != NULL)
+ ? grpc_socket_factory_socket(factory, domain, type, protocol)
+ : socket(domain, type, protocol);
+}
+
+grpc_error *grpc_create_dualstack_socket_using_factory(
+ grpc_socket_factory *factory, const grpc_resolved_address *resolved_addr,
+ int type, int protocol, grpc_dualstack_mode *dsmode, int *newfd) {
const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
int family = addr->sa_family;
if (family == AF_INET6) {
if (grpc_ipv6_loopback_available()) {
- *newfd = socket(family, type, protocol);
+ *newfd = create_socket(factory, family, type, protocol);
} else {
*newfd = -1;
errno = EAFNOSUPPORT;
@@ -302,7 +318,7 @@ grpc_error *grpc_create_dualstack_socket(
family = AF_INET;
}
*dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE;
- *newfd = socket(family, type, protocol);
+ *newfd = create_socket(factory, family, type, protocol);
return error_for_fd(*newfd, resolved_addr);
}
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index e84d3781a1..2c2fc95ff9 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -41,6 +41,7 @@
#include <grpc/impl/codegen/grpc_types.h>
#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/socket_factory_posix.h"
#include "src/core/lib/iomgr/socket_mutator.h"
/* a wrapper for accept or accept4 */
@@ -137,4 +138,10 @@ grpc_error *grpc_create_dualstack_socket(const grpc_resolved_address *addr,
grpc_dualstack_mode *dsmode,
int *newfd);
+/* Same as grpc_create_dualstack_socket(), but use the given socket factory (if
+ non-null) to create the socket, rather than calling socket() directly. */
+grpc_error *grpc_create_dualstack_socket_using_factory(
+ grpc_socket_factory *factory, const grpc_resolved_address *addr, int type,
+ int protocol, grpc_dualstack_mode *dsmode, int *newfd);
+
#endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H */
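And a short sketch of the new factory-aware entry point declared above; the wrapper name is illustrative, and passing NULL for the factory preserves the old behaviour of calling socket() directly, as the socket_utils_common_posix.c change shows.

#include <sys/socket.h>

#include "src/core/lib/iomgr/socket_utils_posix.h"

/* Illustrative sketch only; not part of this patch. */
static grpc_error *open_stream_socket(grpc_socket_factory *factory,
                                      const grpc_resolved_address *addr,
                                      grpc_dualstack_mode *dsmode, int *fd) {
  /* factory may be NULL, in which case a plain socket() call is made. */
  return grpc_create_dualstack_socket_using_factory(factory, addr, SOCK_STREAM,
                                                    0, dsmode, fd);
}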
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index 0144192b71..a108b10da6 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -121,8 +121,8 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
}
gpr_mu_lock(&ac->mu);
if (ac->fd != NULL) {
- grpc_fd_shutdown(exec_ctx, ac->fd,
- GRPC_ERROR_CREATE("connect() timed out"));
+ grpc_fd_shutdown(exec_ctx, ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "connect() timed out"));
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
@@ -191,7 +191,8 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
gpr_mu_lock(&ac->mu);
if (error != GRPC_ERROR_NONE) {
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, "Timeout occurred");
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string("Timeout occurred"));
goto finish;
}
@@ -252,12 +253,17 @@ finish:
gpr_mu_unlock(&ac->mu);
if (error != GRPC_ERROR_NONE) {
char *error_descr;
- gpr_asprintf(&error_descr, "Failed to connect to remote host: %s",
- grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION));
- error = grpc_error_set_str(error, GRPC_ERROR_STR_DESCRIPTION, error_descr);
+ grpc_slice str;
+ bool ret = grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION, &str);
+ GPR_ASSERT(ret);
+ char *desc = grpc_slice_to_c_string(str);
+ gpr_asprintf(&error_descr, "Failed to connect to remote host: %s", desc);
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_DESCRIPTION,
+ grpc_slice_from_copied_string(error_descr));
gpr_free(error_descr);
- error =
- grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, ac->addr_str);
+ gpr_free(desc);
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(ac->addr_str));
}
if (done) {
gpr_mu_destroy(&ac->mu);
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index 618483d9cb..682c24ed56 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -100,17 +100,21 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
*connect->endpoint = grpc_tcp_create(
connect->tcp_handle, connect->resource_quota, connect->addr_name);
} else {
- error = GRPC_ERROR_CREATE("Failed to connect to remote host");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to connect to remote host");
error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
if (status == UV_ECANCELED) {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- "Timeout occurred");
+ error =
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string("Timeout occurred"));
// This should only happen if the handle is already closed
} else {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
- uv_strerror(status));
+ error = grpc_error_set_str(
+ error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
}
}
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index c8dc9e64bd..a356564766 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -123,7 +123,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
socket = NULL;
}
} else {
- error = GRPC_ERROR_CREATE("socket is null");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("socket is null");
}
}
@@ -238,8 +238,9 @@ failure:
GPR_ASSERT(error != GRPC_ERROR_NONE);
char *target_uri = grpc_sockaddr_to_uri(addr);
grpc_error *final_error = grpc_error_set_str(
- GRPC_ERROR_CREATE_REFERENCING("Failed to connect", &error, 1),
- GRPC_ERROR_STR_TARGET_ADDRESS, target_uri);
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Failed to connect",
+ &error, 1),
+ GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(target_uri));
GRPC_ERROR_UNREF(error);
if (socket != NULL) {
grpc_winsocket_destroy(socket);
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index a4381f8fc9..4d7cf3ff51 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -111,7 +111,8 @@ typedef struct {
static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) {
return grpc_error_set_str(
grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
- GRPC_ERROR_STR_TARGET_ADDRESS, tcp->peer_string);
+ GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(tcp->peer_string));
}
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
@@ -246,8 +247,10 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
} else if (read_bytes == 0) {
/* 0 read size ==> end of stream */
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
- call_read_cb(exec_ctx, tcp,
- tcp_annotate_error(GRPC_ERROR_CREATE("Socket closed"), tcp));
+ call_read_cb(
+ exec_ctx, tcp,
+ tcp_annotate_error(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
TCP_UNREF(exec_ctx, tcp, "read");
} else {
GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
@@ -464,10 +467,12 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (buf->length == 0) {
GPR_TIMER_END("tcp_write", 0);
- grpc_closure_sched(exec_ctx, cb,
- grpc_fd_is_shutdown(tcp->em_fd)
- ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
- : GRPC_ERROR_NONE);
+ grpc_closure_sched(
+ exec_ctx, cb,
+ grpc_fd_is_shutdown(tcp->em_fd)
+ ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"),
+ tcp)
+ : GRPC_ERROR_NONE);
return;
}
tcp->outgoing_buffer = buf;
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 5f286a6723..d6a017cf7f 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -44,11 +44,8 @@
#include <errno.h>
#include <fcntl.h>
-#include <ifaddrs.h>
-#include <limits.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
-#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/stat.h>
@@ -67,82 +64,10 @@
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
+#include "src/core/lib/iomgr/tcp_server_utils_posix.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
-#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
-
-static gpr_once s_init_max_accept_queue_size;
-static int s_max_accept_queue_size;
-
-/* one listening port */
-typedef struct grpc_tcp_listener grpc_tcp_listener;
-struct grpc_tcp_listener {
- int fd;
- grpc_fd *emfd;
- grpc_tcp_server *server;
- grpc_resolved_address addr;
- int port;
- unsigned port_index;
- unsigned fd_index;
- grpc_closure read_closure;
- grpc_closure destroyed_closure;
- struct grpc_tcp_listener *next;
- /* sibling is a linked list of all listeners for a given port. add_port and
- clone_port place all new listeners in the same sibling list. A member of
- the 'sibling' list is also a member of the 'next' list. The head of each
- sibling list has is_sibling==0, and subsequent members of sibling lists
- have is_sibling==1. is_sibling allows separate sibling lists to be
- identified while iterating through 'next'. */
- struct grpc_tcp_listener *sibling;
- int is_sibling;
-};
-
-/* the overall server */
-struct grpc_tcp_server {
- gpr_refcount refs;
- /* Called whenever accept() succeeds on a server port. */
- grpc_tcp_server_cb on_accept_cb;
- void *on_accept_cb_arg;
-
- gpr_mu mu;
-
- /* active port count: how many ports are actually still listening */
- size_t active_ports;
- /* destroyed port count: how many ports are completely destroyed */
- size_t destroyed_ports;
-
- /* is this server shutting down? */
- bool shutdown;
- /* have listeners been shutdown? */
- bool shutdown_listeners;
- /* use SO_REUSEPORT */
- bool so_reuseport;
- /* expand wildcard addresses to a list of all local addresses */
- bool expand_wildcard_addrs;
-
- /* linked list of server ports */
- grpc_tcp_listener *head;
- grpc_tcp_listener *tail;
- unsigned nports;
-
- /* List of closures passed to shutdown_starting_add(). */
- grpc_closure_list shutdown_starting;
-
- /* shutdown callback */
- grpc_closure *shutdown_complete;
-
- /* all pollsets interested in new connections */
- grpc_pollset **pollsets;
- /* number of pollsets in the pollsets array */
- size_t pollset_count;
-
- /* next pollset to assign a channel to */
- gpr_atm next_pollset_to_assign;
-
- grpc_resource_quota *resource_quota;
-};
-
static gpr_once check_init = GPR_ONCE_INIT;
static bool has_so_reuseport = false;
@@ -175,8 +100,8 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
} else {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
- return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
- " must be an integer");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(GRPC_ARG_ALLOW_REUSEPORT
+ " must be an integer");
}
} else if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_POINTER) {
@@ -186,8 +111,8 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
} else {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
- return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
- " must be a pointer to a buffer pool");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
}
} else if (0 == strcmp(GRPC_ARG_EXPAND_WILDCARD_ADDRS, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_INTEGER) {
@@ -195,8 +120,8 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
} else {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
- return GRPC_ERROR_CREATE(GRPC_ARG_EXPAND_WILDCARD_ADDRS
- " must be an integer");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ GRPC_ARG_EXPAND_WILDCARD_ADDRS " must be an integer");
}
}
}
@@ -260,10 +185,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* delete ALL the things */
gpr_mu_lock(&s->mu);
- if (!s->shutdown) {
- gpr_mu_unlock(&s->mu);
- return;
- }
+ GPR_ASSERT(s->shutdown);
if (s->head) {
grpc_tcp_listener *sp;
@@ -291,8 +213,8 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->active_ports) {
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
- grpc_fd_shutdown(exec_ctx, sp->emfd,
- GRPC_ERROR_CREATE("Server destroyed"));
+ grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Server destroyed"));
}
gpr_mu_unlock(&s->mu);
} else {
@@ -301,99 +223,6 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
}
-/* get max listen queue size on linux */
-static void init_max_accept_queue_size(void) {
- int n = SOMAXCONN;
- char buf[64];
- FILE *fp = fopen("/proc/sys/net/core/somaxconn", "r");
- if (fp == NULL) {
- /* 2.4 kernel. */
- s_max_accept_queue_size = SOMAXCONN;
- return;
- }
- if (fgets(buf, sizeof buf, fp)) {
- char *end;
- long i = strtol(buf, &end, 10);
- if (i > 0 && i <= INT_MAX && end && *end == 0) {
- n = (int)i;
- }
- }
- fclose(fp);
- s_max_accept_queue_size = n;
-
- if (s_max_accept_queue_size < MIN_SAFE_ACCEPT_QUEUE_SIZE) {
- gpr_log(GPR_INFO,
- "Suspiciously small accept queue (%d) will probably lead to "
- "connection drops",
- s_max_accept_queue_size);
- }
-}
-
-static int get_max_accept_queue_size(void) {
- gpr_once_init(&s_init_max_accept_queue_size, init_max_accept_queue_size);
- return s_max_accept_queue_size;
-}
-
-/* Prepare a recently-created socket for listening. */
-static grpc_error *prepare_socket(int fd, const grpc_resolved_address *addr,
- bool so_reuseport, int *port) {
- grpc_resolved_address sockname_temp;
- grpc_error *err = GRPC_ERROR_NONE;
-
- GPR_ASSERT(fd >= 0);
-
- if (so_reuseport && !grpc_is_unix_socket(addr)) {
- err = grpc_set_socket_reuse_port(fd, 1);
- if (err != GRPC_ERROR_NONE) goto error;
- }
-
- err = grpc_set_socket_nonblocking(fd, 1);
- if (err != GRPC_ERROR_NONE) goto error;
- err = grpc_set_socket_cloexec(fd, 1);
- if (err != GRPC_ERROR_NONE) goto error;
- if (!grpc_is_unix_socket(addr)) {
- err = grpc_set_socket_low_latency(fd, 1);
- if (err != GRPC_ERROR_NONE) goto error;
- err = grpc_set_socket_reuse_addr(fd, 1);
- if (err != GRPC_ERROR_NONE) goto error;
- }
- err = grpc_set_socket_no_sigpipe_if_possible(fd);
- if (err != GRPC_ERROR_NONE) goto error;
-
- GPR_ASSERT(addr->len < ~(socklen_t)0);
- if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) {
- err = GRPC_OS_ERROR(errno, "bind");
- goto error;
- }
-
- if (listen(fd, get_max_accept_queue_size()) < 0) {
- err = GRPC_OS_ERROR(errno, "listen");
- goto error;
- }
-
- sockname_temp.len = sizeof(struct sockaddr_storage);
-
- if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
- (socklen_t *)&sockname_temp.len) < 0) {
- err = GRPC_OS_ERROR(errno, "getsockname");
- goto error;
- }
-
- *port = grpc_sockaddr_get_port(&sockname_temp);
- return GRPC_ERROR_NONE;
-
-error:
- GPR_ASSERT(err != GRPC_ERROR_NONE);
- if (fd >= 0) {
- close(fd);
- }
- grpc_error *ret = grpc_error_set_int(
- GRPC_ERROR_CREATE_REFERENCING("Unable to configure socket", &err, 1),
- GRPC_ERROR_INT_FD, fd);
- GRPC_ERROR_UNREF(err);
- return ret;
-}
-
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_tcp_listener *sp = arg;
@@ -469,7 +298,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
error:
gpr_mu_lock(&sp->server->mu);
- if (0 == --sp->server->active_ports) {
+ if (0 == --sp->server->active_ports && sp->server->shutdown) {
gpr_mu_unlock(&sp->server->mu);
deactivated_all_ports(exec_ctx, sp->server);
} else {
@@ -477,216 +306,6 @@ error:
}
}
-static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
- const grpc_resolved_address *addr,
- unsigned port_index, unsigned fd_index,
- grpc_tcp_listener **listener) {
- grpc_tcp_listener *sp = NULL;
- int port = -1;
- char *addr_str;
- char *name;
-
- grpc_error *err = prepare_socket(fd, addr, s->so_reuseport, &port);
- if (err == GRPC_ERROR_NONE) {
- GPR_ASSERT(port > 0);
- grpc_sockaddr_to_string(&addr_str, addr, 1);
- gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
- gpr_mu_lock(&s->mu);
- s->nports++;
- GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
- sp = gpr_malloc(sizeof(grpc_tcp_listener));
- sp->next = NULL;
- if (s->head == NULL) {
- s->head = sp;
- } else {
- s->tail->next = sp;
- }
- s->tail = sp;
- sp->server = s;
- sp->fd = fd;
- sp->emfd = grpc_fd_create(fd, name);
- memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
- sp->port = port;
- sp->port_index = port_index;
- sp->fd_index = fd_index;
- sp->is_sibling = 0;
- sp->sibling = NULL;
- GPR_ASSERT(sp->emfd);
- gpr_mu_unlock(&s->mu);
- gpr_free(addr_str);
- gpr_free(name);
- }
-
- *listener = sp;
- return err;
-}
-
-/* If successful, add a listener to s for addr, set *dsmode for the socket, and
- return the *listener. */
-static grpc_error *add_addr_to_server(grpc_tcp_server *s,
- const grpc_resolved_address *addr,
- unsigned port_index, unsigned fd_index,
- grpc_dualstack_mode *dsmode,
- grpc_tcp_listener **listener) {
- grpc_resolved_address addr4_copy;
- int fd;
- grpc_error *err =
- grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd);
- if (err != GRPC_ERROR_NONE) {
- return err;
- }
- if (*dsmode == GRPC_DSMODE_IPV4 &&
- grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
- addr = &addr4_copy;
- }
- return add_socket_to_server(s, fd, addr, port_index, fd_index, listener);
-}
-
-/* Bind to "::" to get a port number not used by any address. */
-static grpc_error *get_unused_port(int *port) {
- grpc_resolved_address wild;
- grpc_sockaddr_make_wildcard6(0, &wild);
- grpc_dualstack_mode dsmode;
- int fd;
- grpc_error *err =
- grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd);
- if (err != GRPC_ERROR_NONE) {
- return err;
- }
- if (dsmode == GRPC_DSMODE_IPV4) {
- grpc_sockaddr_make_wildcard4(0, &wild);
- }
- if (bind(fd, (const struct sockaddr *)wild.addr, (socklen_t)wild.len) != 0) {
- err = GRPC_OS_ERROR(errno, "bind");
- close(fd);
- return err;
- }
- if (getsockname(fd, (struct sockaddr *)wild.addr, (socklen_t *)&wild.len) !=
- 0) {
- err = GRPC_OS_ERROR(errno, "getsockname");
- close(fd);
- return err;
- }
- close(fd);
- *port = grpc_sockaddr_get_port(&wild);
- return *port <= 0 ? GRPC_ERROR_CREATE("Bad port") : GRPC_ERROR_NONE;
-}
-
-/* Return the listener in s with address addr or NULL. */
-static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s,
- grpc_resolved_address *addr) {
- grpc_tcp_listener *l;
- gpr_mu_lock(&s->mu);
- for (l = s->head; l != NULL; l = l->next) {
- if (l->addr.len != addr->len) {
- continue;
- }
- if (memcmp(l->addr.addr, addr->addr, addr->len) == 0) {
- break;
- }
- }
- gpr_mu_unlock(&s->mu);
- return l;
-}
-
-/* Get all addresses assigned to network interfaces on the machine and create a
- listener for each. requested_port is the port to use for every listener, or 0
- to select one random port that will be used for every listener. Set *out_port
- to the port selected. Return GRPC_ERROR_NONE only if all listeners were
- added. */
-static grpc_error *add_all_local_addrs_to_server(grpc_tcp_server *s,
- unsigned port_index,
- int requested_port,
- int *out_port) {
- struct ifaddrs *ifa = NULL;
- struct ifaddrs *ifa_it;
- unsigned fd_index = 0;
- grpc_tcp_listener *sp = NULL;
- grpc_error *err = GRPC_ERROR_NONE;
- if (requested_port == 0) {
- /* Note: There could be a race where some local addrs can listen on the
- selected port and some can't. The sane way to handle this would be to
- retry by recreating the whole grpc_tcp_server. Backing out individual
- listeners and orphaning the FDs looks like too much trouble. */
- if ((err = get_unused_port(&requested_port)) != GRPC_ERROR_NONE) {
- return err;
- } else if (requested_port <= 0) {
- return GRPC_ERROR_CREATE("Bad get_unused_port()");
- }
- gpr_log(GPR_DEBUG, "Picked unused port %d", requested_port);
- }
- if (getifaddrs(&ifa) != 0 || ifa == NULL) {
- return GRPC_OS_ERROR(errno, "getifaddrs");
- }
- for (ifa_it = ifa; ifa_it != NULL; ifa_it = ifa_it->ifa_next) {
- grpc_resolved_address addr;
- char *addr_str = NULL;
- grpc_dualstack_mode dsmode;
- grpc_tcp_listener *new_sp = NULL;
- const char *ifa_name = (ifa_it->ifa_name ? ifa_it->ifa_name : "<unknown>");
- if (ifa_it->ifa_addr == NULL) {
- continue;
- } else if (ifa_it->ifa_addr->sa_family == AF_INET) {
- addr.len = sizeof(struct sockaddr_in);
- } else if (ifa_it->ifa_addr->sa_family == AF_INET6) {
- addr.len = sizeof(struct sockaddr_in6);
- } else {
- continue;
- }
- memcpy(addr.addr, ifa_it->ifa_addr, addr.len);
- if (!grpc_sockaddr_set_port(&addr, requested_port)) {
- /* Should never happen, because we check sa_family above. */
- err = GRPC_ERROR_CREATE("Failed to set port");
- break;
- }
- if (grpc_sockaddr_to_string(&addr_str, &addr, 0) < 0) {
- addr_str = gpr_strdup("<error>");
- }
- gpr_log(GPR_DEBUG,
- "Adding local addr from interface %s flags 0x%x to server: %s",
- ifa_name, ifa_it->ifa_flags, addr_str);
- /* We could have multiple interfaces with the same address (e.g., bonding),
- so look for duplicates. */
- if (find_listener_with_addr(s, &addr) != NULL) {
- gpr_log(GPR_DEBUG, "Skipping duplicate addr %s on interface %s", addr_str,
- ifa_name);
- gpr_free(addr_str);
- continue;
- }
- if ((err = add_addr_to_server(s, &addr, port_index, fd_index, &dsmode,
- &new_sp)) != GRPC_ERROR_NONE) {
- char *err_str = NULL;
- grpc_error *root_err;
- if (gpr_asprintf(&err_str, "Failed to add listener: %s", addr_str) < 0) {
- err_str = gpr_strdup("Failed to add listener");
- }
- root_err = GRPC_ERROR_CREATE(err_str);
- gpr_free(err_str);
- gpr_free(addr_str);
- err = grpc_error_add_child(root_err, err);
- break;
- } else {
- GPR_ASSERT(requested_port == new_sp->port);
- ++fd_index;
- if (sp != NULL) {
- new_sp->is_sibling = 1;
- sp->sibling = new_sp;
- }
- sp = new_sp;
- }
- gpr_free(addr_str);
- }
- freeifaddrs(ifa);
- if (err != GRPC_ERROR_NONE) {
- return err;
- } else if (sp == NULL) {
- return GRPC_ERROR_CREATE("No local addresses");
- } else {
- *out_port = sp->port;
- return GRPC_ERROR_NONE;
- }
-}
-
/* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */
static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s,
unsigned port_index,
@@ -701,14 +320,16 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s,
grpc_error *v6_err = GRPC_ERROR_NONE;
grpc_error *v4_err = GRPC_ERROR_NONE;
*out_port = -1;
- if (s->expand_wildcard_addrs) {
- return add_all_local_addrs_to_server(s, port_index, requested_port,
- out_port);
+
+ if (grpc_tcp_server_have_ifaddrs() && s->expand_wildcard_addrs) {
+ return grpc_tcp_server_add_all_local_addrs(s, port_index, requested_port,
+ out_port);
}
+
grpc_sockaddr_make_wildcards(requested_port, &wild4, &wild6);
/* Try listening on IPv6 first. */
- if ((v6_err = add_addr_to_server(s, &wild6, port_index, fd_index, &dsmode,
- &sp)) == GRPC_ERROR_NONE) {
+ if ((v6_err = grpc_tcp_server_add_addr(s, &wild6, port_index, fd_index,
+ &dsmode, &sp)) == GRPC_ERROR_NONE) {
++fd_index;
requested_port = *out_port = sp->port;
if (dsmode == GRPC_DSMODE_DUALSTACK || dsmode == GRPC_DSMODE_IPV4) {
@@ -717,8 +338,8 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s,
}
/* If we got a v6-only socket or nothing, try adding 0.0.0.0. */
grpc_sockaddr_set_port(&wild4, requested_port);
- if ((v4_err = add_addr_to_server(s, &wild4, port_index, fd_index, &dsmode,
- &sp2)) == GRPC_ERROR_NONE) {
+ if ((v4_err = grpc_tcp_server_add_addr(s, &wild4, port_index, fd_index,
+ &dsmode, &sp2)) == GRPC_ERROR_NONE) {
*out_port = sp2->port;
if (sp != NULL) {
sp2->is_sibling = 1;
@@ -726,12 +347,24 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s,
}
}
if (*out_port > 0) {
- GRPC_LOG_IF_ERROR("Failed to add :: listener", v6_err);
- GRPC_LOG_IF_ERROR("Failed to add 0.0.0.0 listener", v4_err);
+ if (v6_err != GRPC_ERROR_NONE) {
+ gpr_log(GPR_INFO,
+ "Failed to add :: listener, "
+ "the environment may not support IPv6: %s",
+ grpc_error_string(v6_err));
+ GRPC_ERROR_UNREF(v6_err);
+ }
+ if (v4_err != GRPC_ERROR_NONE) {
+ gpr_log(GPR_INFO,
+ "Failed to add 0.0.0.0 listener, "
+ "the environment may not support IPv4: %s",
+ grpc_error_string(v4_err));
+ GRPC_ERROR_UNREF(v4_err);
+ }
return GRPC_ERROR_NONE;
} else {
- grpc_error *root_err =
- GRPC_ERROR_CREATE("Failed to add any wildcard listeners");
+ grpc_error *root_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to add any wildcard listeners");
GPR_ASSERT(v6_err != GRPC_ERROR_NONE && v4_err != GRPC_ERROR_NONE);
root_err = grpc_error_add_child(root_err, v6_err);
root_err = grpc_error_add_child(root_err, v4_err);
@@ -756,7 +389,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
err = grpc_create_dualstack_socket(&listener->addr, SOCK_STREAM, 0, &dsmode,
&fd);
if (err != GRPC_ERROR_NONE) return err;
- err = prepare_socket(fd, &listener->addr, true, &port);
+ err = grpc_tcp_server_prepare_socket(fd, &listener->addr, true, &port);
if (err != GRPC_ERROR_NONE) return err;
listener->server->nports++;
grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
@@ -828,7 +461,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
addr = &addr6_v4mapped;
}
- if ((err = add_addr_to_server(s, addr, port_index, 0, &dsmode, &sp)) ==
+ if ((err = grpc_tcp_server_add_addr(s, addr, port_index, 0, &dsmode, &sp)) ==
GRPC_ERROR_NONE) {
*out_port = sp->port;
}
@@ -951,7 +584,7 @@ void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx,
grpc_tcp_listener *sp;
for (sp = s->head; sp; sp = sp->next) {
grpc_fd_shutdown(exec_ctx, sp->emfd,
- GRPC_ERROR_CREATE("Server shutdown"));
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"));
}
}
gpr_mu_unlock(&s->mu);
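
The wildcard path above tolerates one address family failing as long as at least one listener was bound: it tries a dual-stack [::] socket first and only gives up when both :: and 0.0.0.0 fail. A minimal, self-contained sketch of that strategy with plain POSIX sockets (bind_wildcards and its fixed-size output array are illustrative only, not part of this patch):

#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Try [::] first; add 0.0.0.0 separately if the v6 socket is missing or
   v6-only. Returns the number of listening fds stored in out_fds. */
static int bind_wildcards(uint16_t port, int out_fds[2]) {
  int n = 0;
  int v6 = socket(AF_INET6, SOCK_STREAM, 0);
  if (v6 >= 0) {
    int off = 0;
    /* Request dual-stack; if this fails we simply rely on the v4 socket. */
    setsockopt(v6, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));
    struct sockaddr_in6 a6;
    memset(&a6, 0, sizeof(a6));
    a6.sin6_family = AF_INET6;
    a6.sin6_addr = in6addr_any;
    a6.sin6_port = htons(port);
    if (bind(v6, (struct sockaddr *)&a6, sizeof(a6)) == 0 &&
        listen(v6, SOMAXCONN) == 0) {
      out_fds[n++] = v6;
    } else {
      close(v6);
    }
  }
  int v4 = socket(AF_INET, SOCK_STREAM, 0);
  if (v4 >= 0) {
    struct sockaddr_in a4;
    memset(&a4, 0, sizeof(a4));
    a4.sin_family = AF_INET;
    a4.sin_addr.s_addr = htonl(INADDR_ANY);
    a4.sin_port = htons(port);
    if (bind(v4, (struct sockaddr *)&a4, sizeof(a4)) == 0 &&
        listen(v4, SOMAXCONN) == 0) {
      out_fds[n++] = v4;
    } else {
      /* EADDRINUSE here is expected when the v6 socket is dual-stack. */
      close(v4);
    }
  }
  return n; /* n == 0 corresponds to "Failed to add any wildcard listeners" */
}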
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix.h b/src/core/lib/iomgr/tcp_server_utils_posix.h
new file mode 100644
index 0000000000..f5dc8532f9
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_server_utils_posix.h
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H
+#define GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H
+
+#include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/socket_utils_posix.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+
+/* one listening port */
+typedef struct grpc_tcp_listener {
+ int fd;
+ grpc_fd *emfd;
+ grpc_tcp_server *server;
+ grpc_resolved_address addr;
+ int port;
+ unsigned port_index;
+ unsigned fd_index;
+ grpc_closure read_closure;
+ grpc_closure destroyed_closure;
+ struct grpc_tcp_listener *next;
+ /* sibling is a linked list of all listeners for a given port. add_port and
+ clone_port place all new listeners in the same sibling list. A member of
+ the 'sibling' list is also a member of the 'next' list. The head of each
+ sibling list has is_sibling==0, and subsequent members of sibling lists
+ have is_sibling==1. is_sibling allows separate sibling lists to be
+ identified while iterating through 'next'. */
+ struct grpc_tcp_listener *sibling;
+ int is_sibling;
+} grpc_tcp_listener;
+
+/* the overall server */
+struct grpc_tcp_server {
+ gpr_refcount refs;
+ /* Called whenever accept() succeeds on a server port. */
+ grpc_tcp_server_cb on_accept_cb;
+ void *on_accept_cb_arg;
+
+ gpr_mu mu;
+
+ /* active port count: how many ports are actually still listening */
+ size_t active_ports;
+ /* destroyed port count: how many ports are completely destroyed */
+ size_t destroyed_ports;
+
+ /* is this server shutting down? */
+ bool shutdown;
+ /* have listeners been shut down? */
+ bool shutdown_listeners;
+ /* use SO_REUSEPORT */
+ bool so_reuseport;
+ /* expand wildcard addresses to a list of all local addresses */
+ bool expand_wildcard_addrs;
+
+ /* linked list of server ports */
+ grpc_tcp_listener *head;
+ grpc_tcp_listener *tail;
+ unsigned nports;
+
+ /* List of closures passed to shutdown_starting_add(). */
+ grpc_closure_list shutdown_starting;
+
+ /* shutdown callback */
+ grpc_closure *shutdown_complete;
+
+ /* all pollsets interested in new connections */
+ grpc_pollset **pollsets;
+ /* number of pollsets in the pollsets array */
+ size_t pollset_count;
+
+ /* next pollset to assign a channel to */
+ gpr_atm next_pollset_to_assign;
+
+ grpc_resource_quota *resource_quota;
+};
+
+/* If successful, add a listener to \a s for \a addr, set \a dsmode for the
+ socket, and return the \a listener. */
+grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ unsigned port_index, unsigned fd_index,
+ grpc_dualstack_mode *dsmode,
+ grpc_tcp_listener **listener);
+
+/* Get all addresses assigned to network interfaces on the machine and create a
+ listener for each. requested_port is the port to use for every listener, or 0
+ to select one random port that will be used for every listener. Set *out_port
+ to the port selected. Return GRPC_ERROR_NONE only if all listeners were
+ added. */
+grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
+ unsigned port_index,
+ int requested_port,
+ int *out_port);
+
+/* Prepare a recently-created socket for listening. */
+grpc_error *grpc_tcp_server_prepare_socket(int fd,
+ const grpc_resolved_address *addr,
+ bool so_reuseport, int *port);
+/* Return true if the platform supports ifaddrs */
+bool grpc_tcp_server_have_ifaddrs(void);
+
+#endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H */
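
As the comment on grpc_tcp_listener explains, the siblings for one logical port are contiguous in the 'next' list and only the group head has is_sibling == 0. A small walk over that structure makes the invariant concrete (log_port_groups is a hypothetical helper, assuming <grpc/support/log.h> is available):

static void log_port_groups(grpc_tcp_server *s) {
  grpc_tcp_listener *l;
  unsigned fds_in_group = 0;
  int group_port = -1;
  for (l = s->head; l != NULL; l = l->next) {
    /* A listener with is_sibling == 0 starts a new port group. */
    if (!l->is_sibling && fds_in_group > 0) {
      gpr_log(GPR_DEBUG, "port %d served by %u fds", group_port, fds_in_group);
      fds_in_group = 0;
    }
    if (!l->is_sibling) group_port = l->port;
    fds_in_group++;
  }
  if (fds_in_group > 0) {
    gpr_log(GPR_DEBUG, "port %d served by %u fds", group_port, fds_in_group);
  }
}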
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.c b/src/core/lib/iomgr/tcp_server_utils_posix_common.c
new file mode 100644
index 0000000000..af2b00b4b5
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.c
@@ -0,0 +1,221 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_POSIX_SOCKET
+
+#include "src/core/lib/iomgr/tcp_server_utils_posix.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/unix_sockets_posix.h"
+
+#define MIN_SAFE_ACCEPT_QUEUE_SIZE 100
+
+static gpr_once s_init_max_accept_queue_size;
+static int s_max_accept_queue_size;
+
+/* get max listen queue size on Linux */
+static void init_max_accept_queue_size(void) {
+ int n = SOMAXCONN;
+ char buf[64];
+ FILE *fp = fopen("/proc/sys/net/core/somaxconn", "r");
+ if (fp == NULL) {
+ /* 2.4 kernel. */
+ s_max_accept_queue_size = SOMAXCONN;
+ return;
+ }
+ if (fgets(buf, sizeof buf, fp)) {
+ char *end;
+ long i = strtol(buf, &end, 10);
+ if (i > 0 && i <= INT_MAX && end && *end == 0) {
+ n = (int)i;
+ }
+ }
+ fclose(fp);
+ s_max_accept_queue_size = n;
+
+ if (s_max_accept_queue_size < MIN_SAFE_ACCEPT_QUEUE_SIZE) {
+ gpr_log(GPR_INFO,
+ "Suspiciously small accept queue (%d) will probably lead to "
+ "connection drops",
+ s_max_accept_queue_size);
+ }
+}
+
+static int get_max_accept_queue_size(void) {
+ gpr_once_init(&s_init_max_accept_queue_size, init_max_accept_queue_size);
+ return s_max_accept_queue_size;
+}
+
+static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
+ const grpc_resolved_address *addr,
+ unsigned port_index, unsigned fd_index,
+ grpc_tcp_listener **listener) {
+ grpc_tcp_listener *sp = NULL;
+ int port = -1;
+ char *addr_str;
+ char *name;
+
+ grpc_error *err =
+ grpc_tcp_server_prepare_socket(fd, addr, s->so_reuseport, &port);
+ if (err == GRPC_ERROR_NONE) {
+ GPR_ASSERT(port > 0);
+ grpc_sockaddr_to_string(&addr_str, addr, 1);
+ gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
+ gpr_mu_lock(&s->mu);
+ s->nports++;
+ GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
+ sp = gpr_malloc(sizeof(grpc_tcp_listener));
+ sp->next = NULL;
+ if (s->head == NULL) {
+ s->head = sp;
+ } else {
+ s->tail->next = sp;
+ }
+ s->tail = sp;
+ sp->server = s;
+ sp->fd = fd;
+ sp->emfd = grpc_fd_create(fd, name);
+ memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
+ sp->port = port;
+ sp->port_index = port_index;
+ sp->fd_index = fd_index;
+ sp->is_sibling = 0;
+ sp->sibling = NULL;
+ GPR_ASSERT(sp->emfd);
+ gpr_mu_unlock(&s->mu);
+ gpr_free(addr_str);
+ gpr_free(name);
+ }
+
+ *listener = sp;
+ return err;
+}
+
+/* If successful, add a listener to s for addr, set *dsmode for the socket, and
+ return the *listener. */
+grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s,
+ const grpc_resolved_address *addr,
+ unsigned port_index, unsigned fd_index,
+ grpc_dualstack_mode *dsmode,
+ grpc_tcp_listener **listener) {
+ grpc_resolved_address addr4_copy;
+ int fd;
+ grpc_error *err =
+ grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd);
+ if (err != GRPC_ERROR_NONE) {
+ return err;
+ }
+ if (*dsmode == GRPC_DSMODE_IPV4 &&
+ grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) {
+ addr = &addr4_copy;
+ }
+ return add_socket_to_server(s, fd, addr, port_index, fd_index, listener);
+}
+
+/* Prepare a recently-created socket for listening. */
+grpc_error *grpc_tcp_server_prepare_socket(int fd,
+ const grpc_resolved_address *addr,
+ bool so_reuseport, int *port) {
+ grpc_resolved_address sockname_temp;
+ grpc_error *err = GRPC_ERROR_NONE;
+
+ GPR_ASSERT(fd >= 0);
+
+ if (so_reuseport && !grpc_is_unix_socket(addr)) {
+ err = grpc_set_socket_reuse_port(fd, 1);
+ if (err != GRPC_ERROR_NONE) goto error;
+ }
+
+ err = grpc_set_socket_nonblocking(fd, 1);
+ if (err != GRPC_ERROR_NONE) goto error;
+ err = grpc_set_socket_cloexec(fd, 1);
+ if (err != GRPC_ERROR_NONE) goto error;
+ if (!grpc_is_unix_socket(addr)) {
+ err = grpc_set_socket_low_latency(fd, 1);
+ if (err != GRPC_ERROR_NONE) goto error;
+ err = grpc_set_socket_reuse_addr(fd, 1);
+ if (err != GRPC_ERROR_NONE) goto error;
+ }
+ err = grpc_set_socket_no_sigpipe_if_possible(fd);
+ if (err != GRPC_ERROR_NONE) goto error;
+
+ GPR_ASSERT(addr->len < ~(socklen_t)0);
+ if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) {
+ err = GRPC_OS_ERROR(errno, "bind");
+ goto error;
+ }
+
+ if (listen(fd, get_max_accept_queue_size()) < 0) {
+ err = GRPC_OS_ERROR(errno, "listen");
+ goto error;
+ }
+
+ sockname_temp.len = sizeof(struct sockaddr_storage);
+
+ if (getsockname(fd, (struct sockaddr *)sockname_temp.addr,
+ (socklen_t *)&sockname_temp.len) < 0) {
+ err = GRPC_OS_ERROR(errno, "getsockname");
+ goto error;
+ }
+
+ *port = grpc_sockaddr_get_port(&sockname_temp);
+ return GRPC_ERROR_NONE;
+
+error:
+ GPR_ASSERT(err != GRPC_ERROR_NONE);
+ if (fd >= 0) {
+ close(fd);
+ }
+ grpc_error *ret =
+ grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Unable to configure socket", &err, 1),
+ GRPC_ERROR_INT_FD, fd);
+ GRPC_ERROR_UNREF(err);
+ return ret;
+}
+
+#endif /* GRPC_POSIX_SOCKET */
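
grpc_tcp_server_prepare_socket learns the port it actually bound by calling getsockname() after bind(); the same idiom is what resolves a requested port of 0 ("pick any free port"). In isolation, with plain POSIX calls on an already-created AF_INET6 socket (bind_any_port is illustrative only):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Bind fd to [::]:0 and return the port the kernel assigned, or -1. */
static int bind_any_port(int fd) {
  struct sockaddr_in6 addr;
  socklen_t len = sizeof(addr);
  memset(&addr, 0, sizeof(addr));
  addr.sin6_family = AF_INET6;
  addr.sin6_addr = in6addr_any;
  addr.sin6_port = 0; /* 0 asks the kernel to pick a free port */
  if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) return -1;
  if (getsockname(fd, (struct sockaddr *)&addr, &len) != 0) return -1;
  return ntohs(addr.sin6_port);
}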
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c
new file mode 100644
index 0000000000..2078a68126
--- /dev/null
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c
@@ -0,0 +1,196 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+
+#ifdef GRPC_HAVE_IFADDRS
+
+#include "src/core/lib/iomgr/tcp_server_utils_posix.h"
+
+#include <errno.h>
+#include <ifaddrs.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+
+/* Return the listener in s with address addr or NULL. */
+static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s,
+ grpc_resolved_address *addr) {
+ grpc_tcp_listener *l;
+ gpr_mu_lock(&s->mu);
+ for (l = s->head; l != NULL; l = l->next) {
+ if (l->addr.len != addr->len) {
+ continue;
+ }
+ if (memcmp(l->addr.addr, addr->addr, addr->len) == 0) {
+ break;
+ }
+ }
+ gpr_mu_unlock(&s->mu);
+ return l;
+}
+
+/* Bind to "::" to get a port number not used by any address. */
+static grpc_error *get_unused_port(int *port) {
+ grpc_resolved_address wild;
+ grpc_sockaddr_make_wildcard6(0, &wild);
+ grpc_dualstack_mode dsmode;
+ int fd;
+ grpc_error *err =
+ grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd);
+ if (err != GRPC_ERROR_NONE) {
+ return err;
+ }
+ if (dsmode == GRPC_DSMODE_IPV4) {
+ grpc_sockaddr_make_wildcard4(0, &wild);
+ }
+ if (bind(fd, (const struct sockaddr *)wild.addr, (socklen_t)wild.len) != 0) {
+ err = GRPC_OS_ERROR(errno, "bind");
+ close(fd);
+ return err;
+ }
+ if (getsockname(fd, (struct sockaddr *)wild.addr, (socklen_t *)&wild.len) !=
+ 0) {
+ err = GRPC_OS_ERROR(errno, "getsockname");
+ close(fd);
+ return err;
+ }
+ close(fd);
+ *port = grpc_sockaddr_get_port(&wild);
+ return *port <= 0 ? GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad port")
+ : GRPC_ERROR_NONE;
+}
+
+grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
+ unsigned port_index,
+ int requested_port,
+ int *out_port) {
+ struct ifaddrs *ifa = NULL;
+ struct ifaddrs *ifa_it;
+ unsigned fd_index = 0;
+ grpc_tcp_listener *sp = NULL;
+ grpc_error *err = GRPC_ERROR_NONE;
+ if (requested_port == 0) {
+ /* Note: There could be a race where some local addrs can listen on the
+ selected port and some can't. The sane way to handle this would be to
+ retry by recreating the whole grpc_tcp_server. Backing out individual
+ listeners and orphaning the FDs looks like too much trouble. */
+ if ((err = get_unused_port(&requested_port)) != GRPC_ERROR_NONE) {
+ return err;
+ } else if (requested_port <= 0) {
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad get_unused_port()");
+ }
+ gpr_log(GPR_DEBUG, "Picked unused port %d", requested_port);
+ }
+ if (getifaddrs(&ifa) != 0 || ifa == NULL) {
+ return GRPC_OS_ERROR(errno, "getifaddrs");
+ }
+ for (ifa_it = ifa; ifa_it != NULL; ifa_it = ifa_it->ifa_next) {
+ grpc_resolved_address addr;
+ char *addr_str = NULL;
+ grpc_dualstack_mode dsmode;
+ grpc_tcp_listener *new_sp = NULL;
+ const char *ifa_name = (ifa_it->ifa_name ? ifa_it->ifa_name : "<unknown>");
+ if (ifa_it->ifa_addr == NULL) {
+ continue;
+ } else if (ifa_it->ifa_addr->sa_family == AF_INET) {
+ addr.len = sizeof(struct sockaddr_in);
+ } else if (ifa_it->ifa_addr->sa_family == AF_INET6) {
+ addr.len = sizeof(struct sockaddr_in6);
+ } else {
+ continue;
+ }
+ memcpy(addr.addr, ifa_it->ifa_addr, addr.len);
+ if (!grpc_sockaddr_set_port(&addr, requested_port)) {
+ /* Should never happen, because we check sa_family above. */
+ err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to set port");
+ break;
+ }
+ if (grpc_sockaddr_to_string(&addr_str, &addr, 0) < 0) {
+ addr_str = gpr_strdup("<error>");
+ }
+ gpr_log(GPR_DEBUG,
+ "Adding local addr from interface %s flags 0x%x to server: %s",
+ ifa_name, ifa_it->ifa_flags, addr_str);
+ /* We could have multiple interfaces with the same address (e.g., bonding),
+ so look for duplicates. */
+ if (find_listener_with_addr(s, &addr) != NULL) {
+ gpr_log(GPR_DEBUG, "Skipping duplicate addr %s on interface %s", addr_str,
+ ifa_name);
+ gpr_free(addr_str);
+ continue;
+ }
+ if ((err = grpc_tcp_server_add_addr(s, &addr, port_index, fd_index, &dsmode,
+ &new_sp)) != GRPC_ERROR_NONE) {
+ char *err_str = NULL;
+ grpc_error *root_err;
+ if (gpr_asprintf(&err_str, "Failed to add listener: %s", addr_str) < 0) {
+ err_str = gpr_strdup("Failed to add listener");
+ }
+ root_err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_str);
+ gpr_free(err_str);
+ gpr_free(addr_str);
+ err = grpc_error_add_child(root_err, err);
+ break;
+ } else {
+ GPR_ASSERT(requested_port == new_sp->port);
+ ++fd_index;
+ if (sp != NULL) {
+ new_sp->is_sibling = 1;
+ sp->sibling = new_sp;
+ }
+ sp = new_sp;
+ }
+ gpr_free(addr_str);
+ }
+ freeifaddrs(ifa);
+ if (err != GRPC_ERROR_NONE) {
+ return err;
+ } else if (sp == NULL) {
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("No local addresses");
+ } else {
+ *out_port = sp->port;
+ return GRPC_ERROR_NONE;
+ }
+}
+
+bool grpc_tcp_server_have_ifaddrs(void) { return true; }
+
+#endif /* GRPC_HAVE_IFADDRS */
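
grpc_tcp_server_add_all_local_addrs walks the getifaddrs() list and keeps only AF_INET and AF_INET6 entries, skipping interfaces that carry no address. The enumeration pattern on its own (list_local_addrs is a hypothetical standalone helper):

#include <ifaddrs.h>
#include <stdio.h>
#include <sys/socket.h>

static void list_local_addrs(void) {
  struct ifaddrs *ifa = NULL;
  struct ifaddrs *it;
  if (getifaddrs(&ifa) != 0) return;
  for (it = ifa; it != NULL; it = it->ifa_next) {
    if (it->ifa_addr == NULL) continue; /* interface with no address */
    if (it->ifa_addr->sa_family != AF_INET &&
        it->ifa_addr->sa_family != AF_INET6) {
      continue; /* same family filter as the loop above */
    }
    printf("%s: family %d\n", it->ifa_name ? it->ifa_name : "<unknown>",
           it->ifa_addr->sa_family);
  }
  freeifaddrs(ifa);
}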
diff --git a/src/core/ext/client_channel/initial_connect_string.c b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c
index 8ebd06c458..d6a1a0693e 100644
--- a/src/core/ext/client_channel/initial_connect_string.c
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,22 +31,19 @@
*
*/
-#include "src/core/ext/client_channel/initial_connect_string.h"
+#include "src/core/lib/iomgr/port.h"
-#include <stddef.h>
+#if defined(GRPC_POSIX_SOCKET) && !defined(GRPC_HAVE_IFADDRS)
-extern void grpc_set_default_initial_connect_string(
- grpc_resolved_address **addr, grpc_slice *initial_str);
+#include "src/core/lib/iomgr/tcp_server_utils_posix.h"
-static grpc_set_initial_connect_string_func g_set_initial_connect_string_func =
- grpc_set_default_initial_connect_string;
-
-void grpc_test_set_initial_connect_string_function(
- grpc_set_initial_connect_string_func func) {
- g_set_initial_connect_string_func = func;
+grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s,
+ unsigned port_index,
+ int requested_port,
+ int *out_port) {
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ifaddrs available");
}
-void grpc_set_initial_connect_string(grpc_resolved_address **addr,
- grpc_slice *initial_str) {
- g_set_initial_connect_string_func(addr, initial_str);
-}
+bool grpc_tcp_server_have_ifaddrs(void) { return false; }
+
+#endif /* defined(GRPC_POSIX_SOCKET) && !defined(GRPC_HAVE_IFADDRS) */
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
index eed2773f8a..e9246948f5 100644
--- a/src/core/lib/iomgr/tcp_server_uv.c
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -95,8 +95,8 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
} else {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
- return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
- " must be a pointer to a buffer pool");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
}
}
}
@@ -244,17 +244,19 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
// The last argument to uv_tcp_bind is flags
status = uv_tcp_bind(handle, (struct sockaddr *)addr->addr, 0);
if (status != 0) {
- error = GRPC_ERROR_CREATE("Failed to bind to port");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to bind to port");
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
return error;
}
status = uv_listen((uv_stream_t *)handle, SOMAXCONN, on_connect);
if (status != 0) {
- error = GRPC_ERROR_CREATE("Failed to listen to port");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to listen to port");
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
return error;
}
@@ -262,9 +264,10 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
status = uv_tcp_getsockname(handle, (struct sockaddr *)&sockname_temp.addr,
(int *)&sockname_temp.len);
if (status != 0) {
- error = GRPC_ERROR_CREATE("getsockname failed");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getsockname failed");
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
return error;
}
@@ -346,15 +349,17 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
if (status == 0) {
error = add_socket_to_server(s, handle, addr, port_index, &sp);
} else {
- error = GRPC_ERROR_CREATE("Failed to initialize UV tcp handle");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to initialize UV tcp handle");
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
}
gpr_free(allocated_addr);
if (error != GRPC_ERROR_NONE) {
- grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING(
+ grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to add port to server", &error, 1);
GRPC_ERROR_UNREF(error);
error = error_out;
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index bd4b9b2df1..12ce7d3fdd 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -122,8 +122,8 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
} else {
grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
gpr_free(s);
- return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
- " must be a pointer to a buffer pool");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
}
}
}
@@ -248,9 +248,10 @@ failure:
GPR_ASSERT(error != GRPC_ERROR_NONE);
char *tgtaddr = grpc_sockaddr_to_uri(addr);
grpc_error_set_int(
- grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING(
+ grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to prepare server socket", &error, 1),
- GRPC_ERROR_STR_TARGET_ADDRESS, tgtaddr),
+ GRPC_ERROR_STR_TARGET_ADDRESS,
+ grpc_slice_from_copied_string(tgtaddr)),
GRPC_ERROR_INT_FD, (intptr_t)sock);
gpr_free(tgtaddr);
GRPC_ERROR_UNREF(error);
@@ -533,7 +534,7 @@ done:
gpr_free(allocated_addr);
if (error != GRPC_ERROR_NONE) {
- grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING(
+ grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to add port to server", &error, 1);
GRPC_ERROR_UNREF(error);
error = error_out;
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 5541c62068..8e8db9f7b4 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -152,7 +152,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// TODO(murgatroid99): figure out what the return value here means
uv_read_stop(stream);
if (nread == UV_EOF) {
- error = GRPC_ERROR_CREATE("EOF");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
} else if (nread > 0) {
// Successful read
sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
@@ -173,7 +173,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
}
} else {
// nread < 0: Error
- error = GRPC_ERROR_CREATE("TCP Read failed");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed");
}
grpc_closure_sched(&exec_ctx, cb, error);
grpc_exec_ctx_finish(&exec_ctx);
@@ -193,9 +193,10 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
status =
uv_read_start((uv_stream_t *)tcp->handle, alloc_uv_buf, read_callback);
if (status != 0) {
- error = GRPC_ERROR_CREATE("TCP Read failed at start");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start");
error =
- grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
+ grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
+ grpc_slice_from_static_string(uv_strerror(status)));
grpc_closure_sched(exec_ctx, cb, error);
}
if (grpc_tcp_trace) {
@@ -214,7 +215,7 @@ static void write_callback(uv_write_t *req, int status) {
if (status == 0) {
error = GRPC_ERROR_NONE;
} else {
- error = GRPC_ERROR_CREATE("TCP Write failed");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
}
if (grpc_tcp_trace) {
const char *str = grpc_error_string(error);
@@ -249,8 +250,8 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
if (tcp->shutting_down) {
- grpc_closure_sched(exec_ctx, cb,
- GRPC_ERROR_CREATE("TCP socket is shutting down"));
+ grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "TCP socket is shutting down"));
return;
}
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index 6c413971e3..9134883226 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -175,7 +175,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
if (error == GRPC_ERROR_NONE) {
if (info->wsa_error != 0 && !tcp->shutting_down) {
char *utf8_message = gpr_format_message(info->wsa_error);
- error = GRPC_ERROR_CREATE(utf8_message);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
gpr_free(utf8_message);
grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
} else {
@@ -185,9 +185,9 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
} else {
grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
error = tcp->shutting_down
- ? GRPC_ERROR_CREATE_REFERENCING("TCP stream shutting down",
- &tcp->shutdown_error, 1)
- : GRPC_ERROR_CREATE("End of TCP stream");
+ ? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "TCP stream shutting down", &tcp->shutdown_error, 1)
+ : GRPC_ERROR_CREATE_FROM_STATIC_STRING("End of TCP stream");
}
}
}
@@ -208,9 +208,10 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF buffer;
if (tcp->shutting_down) {
- grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE_REFERENCING(
- "TCP socket is shutting down",
- &tcp->shutdown_error, 1));
+ grpc_closure_sched(
+ exec_ctx, cb,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "TCP socket is shutting down", &tcp->shutdown_error, 1));
return;
}
@@ -297,9 +298,10 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
size_t len;
if (tcp->shutting_down) {
- grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE_REFERENCING(
- "TCP socket is shutting down",
- &tcp->shutdown_error, 1));
+ grpc_closure_sched(
+ exec_ctx, cb,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "TCP socket is shutting down", &tcp->shutdown_error, 1));
return;
}
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index d4df96c214..e53c801929 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -109,8 +109,9 @@ void grpc_timer_list_init(gpr_timespec now) {
void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {
int i;
- run_some_expired_timers(exec_ctx, gpr_inf_future(g_clock_type), NULL,
- GRPC_ERROR_CREATE("Timer list shutdown"));
+ run_some_expired_timers(
+ exec_ctx, gpr_inf_future(g_clock_type), NULL,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < NUM_SHARDS; i++) {
shard_type *shard = &g_shards[i];
gpr_mu_destroy(&shard->mu);
@@ -182,9 +183,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
if (!g_initialized) {
timer->pending = false;
- grpc_closure_sched(
- exec_ctx, timer->closure,
- GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
+ grpc_closure_sched(exec_ctx, timer->closure,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Attempt to create timer before initialization"));
return;
}
@@ -376,7 +377,7 @@ bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
exec_ctx, now, next,
gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0
? GRPC_ERROR_NONE
- : GRPC_ERROR_CREATE("Shutting down timer system"));
+ : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system"));
}
#endif /* GRPC_TIMER_USE_GENERIC */
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index f28a14405d..8e8a07578c 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -78,6 +78,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
uv_timer->data = timer;
timer->uv_timer = uv_timer;
uv_timer_start(uv_timer, run_expired_timer, timeout, 0);
+ /* We assume that gRPC timers are only used alongside other active gRPC
+ objects, and that there will therefore always be something else keeping
+ the uv loop alive whenever there is a timer */
+ uv_unref((uv_handle_t *)uv_timer);
}
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 71e295770a..60579e18ba 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -59,11 +59,13 @@
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
+#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/socket_factory_posix.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
@@ -89,6 +91,9 @@ struct grpc_udp_listener {
struct grpc_udp_server {
gpr_mu mu;
+ /* factory to use for creating and binding sockets, or NULL */
+ grpc_socket_factory *socket_factory;
+
/* active port count: how many ports are actually still listening */
size_t active_ports;
/* destroyed port count: how many ports are completely destroyed */
@@ -113,9 +118,24 @@ struct grpc_udp_server {
void *user_data;
};
-grpc_udp_server *grpc_udp_server_create(void) {
+static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
+ if (args) {
+ const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY);
+ if (arg) {
+ GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
+ return arg->value.pointer.p;
+ }
+ }
+ return NULL;
+}
+
+grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
+ s->socket_factory = get_socket_factory(args);
+ if (s->socket_factory) {
+ grpc_socket_factory_ref(s->socket_factory);
+ }
s->active_ports = 0;
s->destroyed_ports = 0;
s->shutdown = 0;
@@ -139,6 +159,10 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
gpr_free(sp);
}
+ if (s->socket_factory) {
+ grpc_socket_factory_unref(s->socket_factory);
+ }
+
gpr_free(s);
}
@@ -162,10 +186,7 @@ static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
/* delete ALL the things */
gpr_mu_lock(&s->mu);
- if (!s->shutdown) {
- gpr_mu_unlock(&s->mu);
- return;
- }
+ GPR_ASSERT(s->shutdown);
if (s->head) {
grpc_udp_listener *sp;
@@ -205,8 +226,8 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);
- grpc_fd_shutdown(exec_ctx, sp->emfd,
- GRPC_ERROR_CREATE("Server destroyed"));
+ grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Server destroyed"));
}
gpr_mu_unlock(&s->mu);
} else {
@@ -215,8 +236,17 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
}
}
+static int bind_socket(grpc_socket_factory *socket_factory, int sockfd,
+ const grpc_resolved_address *addr) {
+ return (socket_factory != NULL)
+ ? grpc_socket_factory_bind(socket_factory, sockfd, addr)
+ : bind(sockfd, (struct sockaddr *)addr->addr,
+ (socklen_t)addr->len);
+}
+
/* Prepare a recently-created socket for listening. */
-static int prepare_socket(int fd, const grpc_resolved_address *addr) {
+static int prepare_socket(grpc_socket_factory *socket_factory, int fd,
+ const grpc_resolved_address *addr) {
grpc_resolved_address sockname_temp;
struct sockaddr *addr_ptr = (struct sockaddr *)addr->addr;
/* Set send/receive socket buffers to 1 MB */
@@ -246,7 +276,7 @@ static int prepare_socket(int fd, const grpc_resolved_address *addr) {
}
GPR_ASSERT(addr->len < ~(socklen_t)0);
- if (bind(fd, (struct sockaddr *)addr, (socklen_t)addr->len) < 0) {
+ if (bind_socket(socket_factory, fd, addr) < 0) {
char *addr_str;
grpc_sockaddr_to_string(&addr_str, addr, 0);
gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno));
@@ -288,7 +318,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
- if (0 == --sp->server->active_ports) {
+ if (0 == --sp->server->active_ports && sp->server->shutdown) {
gpr_mu_unlock(&sp->server->mu);
deactivated_all_ports(exec_ctx, sp->server);
} else {
@@ -311,7 +341,7 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_mu_lock(&(sp->server->mu));
if (error != GRPC_ERROR_NONE) {
- if (0 == --sp->server->active_ports) {
+ if (0 == --sp->server->active_ports && sp->server->shutdown) {
gpr_mu_unlock(&sp->server->mu);
deactivated_all_ports(exec_ctx, sp->server);
} else {
@@ -339,7 +369,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
char *addr_str;
char *name;
- port = prepare_socket(fd, addr);
+ port = prepare_socket(s->socket_factory, fd, addr);
if (port >= 0) {
grpc_sockaddr_to_string(&addr_str, addr, 1);
gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
@@ -417,8 +447,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
/* Try listening on IPv6 first. */
addr = &wild6;
// TODO(rjshade): Test and propagate the returned grpc_error*:
- GRPC_ERROR_UNREF(grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP,
- &dsmode, &fd));
+ GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory(
+ s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd));
allocated_port1 =
add_socket_to_server(s, fd, addr, read_cb, write_cb, orphan_cb);
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
@@ -433,8 +463,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
}
// TODO(rjshade): Test and propagate the returned grpc_error*:
- GRPC_ERROR_UNREF(grpc_create_dualstack_socket(addr, SOCK_DGRAM, IPPROTO_UDP,
- &dsmode, &fd));
+ GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory(
+ s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd));
if (fd < 0) {
gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
}
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index 90842a47f0..9df3fe4d1f 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -58,7 +58,7 @@ typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx,
grpc_fd *emfd, void *user_data);
/* Create a server, initially not bound to any ports */
-grpc_udp_server *grpc_udp_server_create(void);
+grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args);
/* Start listening to bound ports. user_data is passed to callbacks. */
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server,
diff --git a/src/core/lib/iomgr/unix_sockets_posix.c b/src/core/lib/iomgr/unix_sockets_posix.c
index 1233cec04e..281865aece 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.c
+++ b/src/core/lib/iomgr/unix_sockets_posix.c
@@ -60,7 +60,7 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
gpr_asprintf(&err_msg,
"Path name should not have more than %" PRIuPTR " characters.",
GPR_ARRAY_SIZE(un->sun_path) - 1);
- err = GRPC_ERROR_CREATE(err_msg);
+ err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
gpr_free(err_msg);
return err;
}
diff --git a/src/core/lib/iomgr/unix_sockets_posix_noop.c b/src/core/lib/iomgr/unix_sockets_posix_noop.c
index 1daf5152c1..b9602cbf8b 100644
--- a/src/core/lib/iomgr/unix_sockets_posix_noop.c
+++ b/src/core/lib/iomgr/unix_sockets_posix_noop.c
@@ -47,7 +47,8 @@ void grpc_create_socketpair_if_unix(int sv[2]) {
grpc_error *grpc_resolve_unix_domain_address(
const char *name, grpc_resolved_addresses **addresses) {
*addresses = NULL;
- return GRPC_ERROR_CREATE("Unix domain sockets are not supported on Windows");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Unix domain sockets are not supported on Windows");
}
int grpc_is_unix_socket(const grpc_resolved_address *addr) { return false; }
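
The recurring substitution in this change is GRPC_ERROR_CREATE_FROM_STATIC_STRING for string literals, which can be referenced without a copy, versus GRPC_ERROR_CREATE_FROM_COPIED_STRING for messages built in temporary buffers, as in the gpr_asprintf case just above. Roughly (addr_str stands in for any value formatted into a heap-allocated message):

/* Literal message: lives in static storage, so no copy is needed. */
grpc_error *e1 = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
    "Unix domain sockets are not supported on Windows");

/* Formatted message: freed right after, so the error must keep its own copy. */
char *err_msg;
gpr_asprintf(&err_msg, "Failed to add listener: %s", addr_str);
grpc_error *e2 = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg);
gpr_free(err_msg);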
diff --git a/src/core/lib/profiling/basic_timers.c b/src/core/lib/profiling/basic_timers.c
index 1f1987fb8e..bc8e27714c 100644
--- a/src/core/lib/profiling/basic_timers.c
+++ b/src/core/lib/profiling/basic_timers.c
@@ -218,7 +218,7 @@ void gpr_timers_set_log_filename(const char *filename) {
static void init_output() {
gpr_thd_options options = gpr_thd_options_default();
gpr_thd_options_set_joinable(&options);
- gpr_thd_new(&g_writing_thread, writing_thread, NULL, &options);
+ GPR_ASSERT(gpr_thd_new(&g_writing_thread, writing_thread, NULL, &options));
atexit(finish_writing);
}
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index dd44621347..97501e6788 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -180,7 +180,7 @@ static grpc_error *create_default_creds_from_path(
grpc_slice creds_data = grpc_empty_slice();
grpc_error *error = GRPC_ERROR_NONE;
if (creds_path == NULL) {
- error = GRPC_ERROR_CREATE("creds_path unset");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("creds_path unset");
goto end;
}
error = grpc_load_file(creds_path, 0, &creds_data);
@@ -190,10 +190,9 @@ static grpc_error *create_default_creds_from_path(
json = grpc_json_parse_string_with_len(
(char *)GRPC_SLICE_START_PTR(creds_data), GRPC_SLICE_LENGTH(creds_data));
if (json == NULL) {
- char *dump = grpc_dump_slice(creds_data, GPR_DUMP_HEX | GPR_DUMP_ASCII);
- error = grpc_error_set_str(GRPC_ERROR_CREATE("Failed to parse JSON"),
- GRPC_ERROR_STR_RAW_BYTES, dump);
- gpr_free(dump);
+ error = grpc_error_set_str(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to parse JSON"),
+ GRPC_ERROR_STR_RAW_BYTES, grpc_slice_ref_internal(creds_data));
goto end;
}
@@ -204,7 +203,7 @@ static grpc_error *create_default_creds_from_path(
grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
exec_ctx, key, grpc_max_auth_token_lifetime());
if (result == NULL) {
- error = GRPC_ERROR_CREATE(
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"grpc_service_account_jwt_access_credentials_create_from_auth_json_"
"key failed");
}
@@ -217,7 +216,7 @@ static grpc_error *create_default_creds_from_path(
result =
grpc_refresh_token_credentials_create_from_auth_refresh_token(token);
if (result == NULL) {
- error = GRPC_ERROR_CREATE(
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"grpc_refresh_token_credentials_create_from_auth_refresh_token "
"failed");
}
@@ -236,7 +235,8 @@ end:
grpc_channel_credentials *grpc_google_default_credentials_create(void) {
grpc_channel_credentials *result = NULL;
grpc_call_credentials *call_creds = NULL;
- grpc_error *error = GRPC_ERROR_CREATE("Failed to create Google credentials");
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to create Google credentials");
grpc_error *err;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -274,7 +274,8 @@ grpc_channel_credentials *grpc_google_default_credentials_create(void) {
call_creds = grpc_google_compute_engine_credentials_create(NULL);
if (call_creds == NULL) {
error = grpc_error_add_child(
- error, GRPC_ERROR_CREATE("Failed to get credentials from network"));
+ error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to get credentials from network"));
}
}
}
diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c
index a23082a866..8f321b9911 100644
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
@@ -95,7 +95,8 @@ static void reset_auth_metadata_context(
static void add_error(grpc_error **combined, grpc_error *error) {
if (error == GRPC_ERROR_NONE) return;
if (*combined == GRPC_ERROR_NONE) {
- *combined = GRPC_ERROR_CREATE("Client auth metadata plugin error");
+ *combined = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Client auth metadata plugin error");
}
*combined = grpc_error_add_child(*combined, error);
}
@@ -114,9 +115,10 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error = GRPC_ERROR_NONE;
if (status != GRPC_CREDENTIALS_OK) {
error = grpc_error_set_int(
- GRPC_ERROR_CREATE(error_details != NULL && strlen(error_details) > 0
- ? error_details
- : "Credentials failed to get metadata."),
+ GRPC_ERROR_CREATE_FROM_COPIED_STRING(
+ error_details != NULL && strlen(error_details) > 0
+ ? error_details
+ : "Credentials failed to get metadata."),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED);
} else {
GPR_ASSERT(num_md <= MAX_CREDENTIALS_METADATA_COUNT);
@@ -192,7 +194,7 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op_finish_with_failure(
exec_ctx, op,
grpc_error_set_int(
- GRPC_ERROR_CREATE(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Incompatible credentials set on channel and call."),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
return;
@@ -225,9 +227,10 @@ static void on_host_checked(grpc_exec_ctx *exec_ctx, void *user_data,
host);
gpr_free(host);
grpc_call_element_signal_error(
- exec_ctx, elem, grpc_error_set_int(GRPC_ERROR_CREATE(error_msg),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAUTHENTICATED));
+ exec_ctx, elem,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
+ GRPC_ERROR_INT_GRPC_STATUS,
+ GRPC_STATUS_UNAUTHENTICATED));
gpr_free(error_msg);
}
}
@@ -318,7 +321,7 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
call_data *calld = elem->call_data;
grpc_call_credentials_unref(exec_ctx, calld->creds);
if (calld->have_host) {
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 7d58843d69..568d70fa38 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -162,7 +162,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
- call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING(
+ call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Secure read failed", &error, 1));
return;
}
@@ -220,8 +220,10 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
if (result != TSI_OK) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
- call_read_cb(exec_ctx, ep, grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Unwrap failed"), result));
+ call_read_cb(
+ exec_ctx, ep,
+ grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result));
return;
}
@@ -332,7 +334,8 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);
grpc_closure_sched(
exec_ctx, cb,
- grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result));
+ grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result));
GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
return;
}
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index ad083a730f..b0cbc83639 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -137,9 +137,9 @@ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx,
grpc_auth_context **auth_context,
grpc_closure *on_peer_checked) {
if (sc == NULL) {
- grpc_closure_sched(
- exec_ctx, on_peer_checked,
- GRPC_ERROR_CREATE("cannot check peer -- no security connector"));
+ grpc_closure_sched(exec_ctx, on_peer_checked,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "cannot check peer -- no security connector"));
tsi_peer_destruct(&peer);
} else {
sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
@@ -330,7 +330,8 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx,
grpc_error *error = GRPC_ERROR_NONE;
*auth_context = NULL;
if (peer.property_count != 1) {
- error = GRPC_ERROR_CREATE("Fake peers should only have 1 property.");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Fake peers should only have 1 property.");
goto end;
}
prop_name = peer.properties[0].name;
@@ -339,13 +340,14 @@ static void fake_check_peer(grpc_exec_ctx *exec_ctx,
char *msg;
gpr_asprintf(&msg, "Unexpected property in fake peer: %s.",
prop_name == NULL ? "<EMPTY>" : prop_name);
- error = GRPC_ERROR_CREATE(msg);
+ error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
goto end;
}
if (strncmp(peer.properties[0].value.data, TSI_FAKE_CERTIFICATE_TYPE,
peer.properties[0].value.length)) {
- error = GRPC_ERROR_CREATE("Invalid value for cert type property.");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Invalid value for cert type property.");
goto end;
}
*auth_context = grpc_auth_context_create(NULL);
@@ -586,18 +588,19 @@ static grpc_error *ssl_check_peer(grpc_security_connector *sc,
const tsi_peer_property *p =
tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL);
if (p == NULL) {
- return GRPC_ERROR_CREATE(
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Cannot check peer: missing selected ALPN property.");
}
if (!grpc_chttp2_is_alpn_version_supported(p->value.data, p->value.length)) {
- return GRPC_ERROR_CREATE("Cannot check peer: invalid ALPN value.");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Cannot check peer: invalid ALPN value.");
}
/* Check the peer name if specified. */
if (peer_name != NULL && !ssl_host_matches_name(peer, peer_name)) {
char *msg;
gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name);
- grpc_error *error = GRPC_ERROR_CREATE(msg);
+ grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);
return error;
}
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index 7065d261ba..2f39327670 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -120,7 +120,7 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
if (error == GRPC_ERROR_NONE) {
// If we were shut down after the handshake succeeded but before an
// endpoint callback was invoked, we need to generate our own error.
- error = GRPC_ERROR_CREATE("Handshaker shutdown");
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
}
const char *msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg);
@@ -156,7 +156,8 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
tsi_handshaker_create_frame_protector(h->handshaker, NULL, &protector);
if (result != TSI_OK) {
error = grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Frame protector creation failed"), result);
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Frame protector creation failed"),
+ result);
security_handshake_failed_locked(exec_ctx, h, error);
goto done;
}
@@ -191,7 +192,7 @@ static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx,
tsi_result result = tsi_handshaker_extract_peer(h->handshaker, &peer);
if (result != TSI_OK) {
return grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Peer extraction failed"), result);
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Peer extraction failed"), result);
}
grpc_security_connector_check_peer(exec_ctx, h->connector, peer,
&h->auth_context, &h->on_peer_checked);
@@ -215,8 +216,8 @@ static grpc_error *send_handshake_bytes_to_peer_locked(grpc_exec_ctx *exec_ctx,
}
} while (result == TSI_INCOMPLETE_DATA);
if (result != TSI_OK) {
- return grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Handshake failed"),
- result);
+ return grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result);
}
// Send data.
grpc_slice to_send =
@@ -234,8 +235,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
- exec_ctx, h,
- GRPC_ERROR_CREATE_REFERENCING("Handshake read failed", &error, 1));
+ exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Handshake read failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
return;
@@ -270,8 +271,9 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
}
if (result != TSI_OK) {
security_handshake_failed_locked(
- exec_ctx, h, grpc_set_tsi_error_result(
- GRPC_ERROR_CREATE("Handshake failed"), result));
+ exec_ctx, h,
+ grpc_set_tsi_error_result(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
return;
@@ -314,8 +316,8 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
- exec_ctx, h,
- GRPC_ERROR_CREATE_REFERENCING("Handshake write failed", &error, 1));
+ exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Handshake write failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
return;
@@ -429,7 +431,8 @@ static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_closure *on_handshake_done,
grpc_handshaker_args *args) {
grpc_closure_sched(exec_ctx, on_handshake_done,
- GRPC_ERROR_CREATE("Failed to create security handshaker"));
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Failed to create security handshaker"));
}
static const grpc_handshaker_vtable fail_handshaker_vtable = {
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index 14619d97ca..3cf0632220 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -144,9 +144,10 @@ static void on_md_processing_done(
calld->transport_op->send_message = NULL;
}
calld->transport_op->send_trailing_metadata = NULL;
- grpc_closure_sched(&exec_ctx, calld->on_done_recv,
- grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
- GRPC_ERROR_INT_GRPC_STATUS, status));
+ grpc_closure_sched(
+ &exec_ctx, calld->on_done_recv,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
+ GRPC_ERROR_INT_GRPC_STATUS, status));
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -158,7 +159,7 @@ static void auth_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
if (error == GRPC_ERROR_NONE) {
- if (chand->creds->processor.process != NULL) {
+ if (chand->creds != NULL && chand->creds->processor.process != NULL) {
calld->md = metadata_batch_to_md_array(calld->recv_initial_metadata);
chand->creds->processor.process(
chand->creds->processor.state, calld->auth_context,
@@ -227,7 +228,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {}
+ grpc_closure *ignored) {}
/* Constructor for channel_data */
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
@@ -242,7 +243,6 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(!args->is_last);
GPR_ASSERT(auth_context != NULL);
- GPR_ASSERT(creds != NULL);
/* initialize members */
chand->auth_context =
diff --git a/src/core/lib/security/transport/tsi_error.c b/src/core/lib/security/transport/tsi_error.c
index afc1733567..eae0a676b0 100644
--- a/src/core/lib/security/transport/tsi_error.c
+++ b/src/core/lib/security/transport/tsi_error.c
@@ -34,7 +34,9 @@
#include "src/core/lib/security/transport/tsi_error.h"
grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result) {
- return grpc_error_set_int(grpc_error_set_str(error, GRPC_ERROR_STR_TSI_ERROR,
- tsi_result_to_string(result)),
- GRPC_ERROR_INT_TSI_CODE, result);
+ return grpc_error_set_int(
+ grpc_error_set_str(
+ error, GRPC_ERROR_STR_TSI_ERROR,
+ grpc_slice_from_static_string(tsi_result_to_string(result))),
+ GRPC_ERROR_INT_TSI_CODE, result);
}
diff --git a/src/core/lib/security/util/b64.c b/src/core/lib/security/util/b64.c
index 09c8213131..0d5a917660 100644
--- a/src/core/lib/security/util/b64.c
+++ b/src/core/lib/security/util/b64.c
@@ -71,15 +71,31 @@ static const char base64_url_safe_chars[] =
char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
int multiline) {
- const unsigned char *data = vdata;
- const char *base64_chars =
- url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
+ size_t result_projected_size =
+ grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
+ char *result = gpr_malloc(result_projected_size);
+ grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline);
+ return result;
+}
+
+size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
+ int multiline) {
size_t result_projected_size =
4 * ((data_size + 3) / 3) +
2 * (multiline ? (data_size / (3 * GRPC_BASE64_MULTILINE_NUM_BLOCKS))
: 0) +
1;
- char *result = gpr_malloc(result_projected_size);
+ return result_projected_size;
+}
+
+void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
+ int url_safe, int multiline) {
+ const unsigned char *data = vdata;
+ const char *base64_chars =
+ url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
+ const size_t result_projected_size =
+ grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
+
char *current = result;
size_t num_blocks = 0;
size_t i = 0;
@@ -119,7 +135,6 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
GPR_ASSERT(current >= result);
GPR_ASSERT((uintptr_t)(current - result) < result_projected_size);
result[current - result] = '\0';
- return result;
}
grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64,
diff --git a/src/core/lib/security/util/b64.h b/src/core/lib/security/util/b64.h
index d42a136f61..ef52291c6a 100644
--- a/src/core/lib/security/util/b64.h
+++ b/src/core/lib/security/util/b64.h
@@ -37,10 +37,22 @@
#include <grpc/slice.h>
/* Encodes data using base64. It is the caller's responsibility to free
- the returned char * using gpr_free. Returns NULL on NULL input. */
+ the returned char * using gpr_free. Returns NULL on NULL input.
+   TODO(makdharma): change the flags from int to bool. */
char *grpc_base64_encode(const void *data, size_t data_size, int url_safe,
int multiline);
+/* Estimates an upper bound on the size of base64-encoded data. The actual
+ * size is guaranteed to be less than or equal to the value returned here. */
+size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
+ int multiline);
+
+/* Encodes data using base64 and writes it to the memory pointed to by result.
+ * It is the caller's responsibility to allocate enough memory in |result| to
+ * fit the encoded data. */
+void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
+ int url_safe, int multiline);
+
/* Decodes data according to the base64 specification. Returns an empty
slice in case of failure. */
grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64,
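
Splitting the encoder this way lets a caller size a buffer up front and encode into memory it already owns (an arena, a reused buffer) instead of always going through grpc_base64_encode's internal gpr_malloc. A minimal caller sketch using only the declarations above; data/len are placeholders for whatever the caller wants to encode:

#include <grpc/support/alloc.h>
#include "src/core/lib/security/util/b64.h"

static char *encode_url_safe(const void *data, size_t len) {
  size_t needed = grpc_base64_estimate_encoded_size(len, 1 /* url_safe */,
                                                    0 /* multiline */);
  char *buf = gpr_malloc(needed); /* or carve the bytes out of an arena */
  grpc_base64_encode_core(buf, data, len, 1 /* url_safe */, 0 /* multiline */);
  return buf; /* NUL-terminated; release with gpr_free */
}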
diff --git a/src/core/ext/client_channel/default_initial_connect_string.c b/src/core/lib/support/atm.c
index 6db82d84ef..06e8432caf 100644
--- a/src/core/ext/client_channel/default_initial_connect_string.c
+++ b/src/core/lib/support/atm.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,17 @@
*
*/
-#include <grpc/slice.h>
-#include "src/core/lib/iomgr/resolve_address.h"
+#include <grpc/support/atm.h>
+#include <grpc/support/useful.h>
-void grpc_set_default_initial_connect_string(grpc_resolved_address **addr,
- grpc_slice *initial_str) {}
+gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm *value, gpr_atm delta,
+ gpr_atm min, gpr_atm max) {
+ gpr_atm current;
+ gpr_atm new;
+ do {
+ current = gpr_atm_no_barrier_load(value);
+ new = GPR_CLAMP(current + delta, min, max);
+ if (new == current) break;
+ } while (!gpr_atm_no_barrier_cas(value, current, new));
+ return new;
+}
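
The new helper is a plain CAS loop: reload the current value, clamp current + delta into [min, max], and retry until the store wins (or the clamp leaves the value unchanged, in which case it returns early). A small usage sketch, assuming the declaration lands next to the other gpr_atm helpers in <grpc/support/atm.h>; the counter and bounds are illustrative:

#include <grpc/support/atm.h>

static gpr_atm g_inflight; /* hypothetical counter, kept within [0, 1024] */

static void track_request_start(void) {
  gpr_atm_no_barrier_clamped_add(&g_inflight, 1, 0, 1024);
}

static void track_request_end(void) {
  gpr_atm_no_barrier_clamped_add(&g_inflight, -1, 0, 1024);
}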
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 47f36be5fd..895a8a3b06 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -51,6 +51,7 @@
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/support/arena.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
@@ -138,6 +139,7 @@ typedef struct batch_control {
} batch_control;
struct grpc_call {
+ gpr_arena *arena;
grpc_completion_queue *cq;
grpc_polling_entity pollent;
grpc_channel *channel;
@@ -212,6 +214,8 @@ struct grpc_call {
grpc_closure receiving_initial_metadata_ready;
uint32_t test_only_last_message_flags;
+ grpc_closure release_call;
+
union {
struct {
grpc_status_code *status;
@@ -260,7 +264,7 @@ static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl,
static void add_init_error(grpc_error **composite, grpc_error *new) {
if (new == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE)
- *composite = GRPC_ERROR_CREATE("Call creation failed");
+ *composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed");
*composite = grpc_error_add_child(*composite, new);
}
@@ -273,7 +277,11 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
grpc_channel_get_channel_stack(args->channel);
grpc_call *call;
GPR_TIMER_BEGIN("grpc_call_create", 0);
- call = gpr_zalloc(sizeof(grpc_call) + channel_stack->call_stack_size);
+ gpr_arena *arena =
+ gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
+ call = gpr_arena_alloc(arena,
+ sizeof(grpc_call) + channel_stack->call_stack_size);
+ call->arena = arena;
*out_call = call;
gpr_mu_init(&call->child_list_mu);
call->channel = args->channel;
@@ -327,17 +335,17 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
* call. */
if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
if (0 == (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT)) {
- add_init_error(&error,
- GRPC_ERROR_CREATE("Census tracing propagation requested "
- "without Census context propagation"));
+ add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Census tracing propagation requested "
+ "without Census context propagation"));
}
grpc_call_context_set(
call, GRPC_CONTEXT_TRACING,
args->parent_call->context[GRPC_CONTEXT_TRACING].value, NULL);
} else if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT) {
- add_init_error(&error,
- GRPC_ERROR_CREATE("Census context propagation requested "
- "without Census tracing propagation"));
+ add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Census context propagation requested "
+ "without Census tracing propagation"));
}
if (args->propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
call->cancellation_is_inherited = 1;
@@ -364,11 +372,16 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx,
GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
/* initial refcount dropped by grpc_call_destroy */
+ grpc_call_element_args call_args = {
+ .call_stack = CALL_STACK_FROM_CALL(call),
+ .server_transport_data = args->server_transport_data,
+ .context = call->context,
+ .path = path,
+ .start_time = call->start_time,
+ .deadline = send_deadline,
+ .arena = call->arena};
add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1,
- destroy_call, call, call->context,
- args->server_transport_data, path,
- call->start_time, send_deadline,
- CALL_STACK_FROM_CALL(call)));
+ destroy_call, call, &call_args));
if (error != GRPC_ERROR_NONE) {
cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE,
GRPC_ERROR_REF(error));
@@ -425,6 +438,14 @@ void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
}
+static void release_call(grpc_exec_ctx *exec_ctx, void *call,
+ grpc_error *error) {
+ grpc_call *c = call;
+ grpc_channel *channel = c->channel;
+ grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
+ GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
+}
+
static void set_status_value_directly(grpc_status_code status, void *dest);
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
@@ -451,7 +472,6 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
if (c->cq) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
- grpc_channel *channel = c->channel;
get_final_status(call, set_status_value_directly, &c->final_info.final_status,
NULL);
@@ -463,8 +483,9 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call,
unpack_received_status(gpr_atm_acq_load(&c->status[i])).error);
}
- grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, c);
- GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
+ grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info,
+ grpc_closure_init(&c->release_call, release_call, c,
+ grpc_schedule_on_exec_ctx));
GPR_TIMER_END("destroy_call", 0);
}
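
Together with the grpc_channel changes later in this diff, call memory now has an arena-based lifecycle: grpc_call_create carves the call and its call stack out of a gpr_arena sized by the channel's running estimate, and once the call stack finishes tearing down, release_call destroys the arena, feeds the bytes actually used back into that estimate, and drops the channel ref that destroy_call used to drop. A condensed restatement of the two paths (not a new API; the names below are the ones call.c itself uses):

/* creation path (grpc_call_create) */
gpr_arena *arena =
    gpr_arena_create(grpc_channel_get_call_size_estimate(args->channel));
call = gpr_arena_alloc(arena, sizeof(grpc_call) + channel_stack->call_stack_size);
call->arena = arena;

/* release path (release_call, scheduled once the call stack is destroyed) */
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");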
@@ -582,8 +603,9 @@ static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c,
static grpc_error *error_from_status(grpc_status_code status,
const char *description) {
return grpc_error_set_int(
- grpc_error_set_str(GRPC_ERROR_CREATE(description),
- GRPC_ERROR_STR_GRPC_MESSAGE, description),
+ grpc_error_set_str(GRPC_ERROR_CREATE_FROM_COPIED_STRING(description),
+ GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_slice_from_copied_string(description)),
GRPC_ERROR_INT_GRPC_STATUS, status);
}
@@ -603,16 +625,15 @@ static bool get_final_status_from(
void (*set_value)(grpc_status_code code, void *user_data),
void *set_value_user_data, grpc_slice *details) {
grpc_status_code code;
- const char *msg = NULL;
- grpc_error_get_status(error, call->send_deadline, &code, &msg, NULL);
+ grpc_slice slice;
+ grpc_error_get_status(error, call->send_deadline, &code, &slice, NULL);
if (code == GRPC_STATUS_OK && !allow_ok_status) {
return false;
}
set_value(code, set_value_user_data);
if (details != NULL) {
- *details =
- msg == NULL ? grpc_empty_slice() : grpc_slice_from_copied_string(msg);
+ *details = grpc_slice_ref_internal(slice);
}
return true;
}
@@ -875,18 +896,19 @@ static void recv_common_filter(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_error *error =
status_code == GRPC_STATUS_OK
? GRPC_ERROR_NONE
- : grpc_error_set_int(GRPC_ERROR_CREATE("Error received from peer"),
+ : grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Error received from peer"),
GRPC_ERROR_INT_GRPC_STATUS,
(intptr_t)status_code);
if (b->idx.named.grpc_message != NULL) {
- char *msg =
- grpc_slice_to_c_string(GRPC_MDVALUE(b->idx.named.grpc_message->md));
- error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE, msg);
- gpr_free(msg);
+ error = grpc_error_set_str(
+ error, GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md)));
grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message);
} else if (error != GRPC_ERROR_NONE) {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE, "");
+ error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_empty_slice());
}
set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error);
@@ -1035,8 +1057,8 @@ static grpc_error *consolidate_batch_errors(batch_control *bctl) {
bctl->errors[0] = NULL;
return e;
} else {
- grpc_error *error =
- GRPC_ERROR_CREATE_REFERENCING("Call batch failed", bctl->errors, n);
+ grpc_error *error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Call batch failed", bctl->errors, n);
for (size_t i = 0; i < n; i++) {
GRPC_ERROR_UNREF(bctl->errors[i]);
bctl->errors[i] = NULL;
@@ -1500,7 +1522,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
{
grpc_error *override_error = GRPC_ERROR_NONE;
if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
- override_error = GRPC_ERROR_CREATE("Error from server send status");
+ override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Error from server send status");
}
if (op->data.send_status_from_server.status_details != NULL) {
call->send_extra_metadata[1].md = grpc_mdelem_from_slices(
@@ -1510,8 +1533,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
call->send_extra_metadata_count++;
char *msg = grpc_slice_to_c_string(
GRPC_MDVALUE(call->send_extra_metadata[1].md));
- override_error = grpc_error_set_str(
- override_error, GRPC_ERROR_STR_GRPC_MESSAGE, msg);
+ override_error =
+ grpc_error_set_str(override_error, GRPC_ERROR_STR_GRPC_MESSAGE,
+ grpc_slice_from_copied_string(msg));
gpr_free(msg);
}
set_status_from_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE,
diff --git a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c
index d6acd392c1..b4bfb92042 100644
--- a/src/core/lib/surface/channel.c
+++ b/src/core/lib/surface/channel.c
@@ -68,6 +68,8 @@ struct grpc_channel {
grpc_compression_options compression_options;
grpc_mdelem default_authority;
+ gpr_atm call_size_estimate;
+
gpr_mu registered_call_mu;
registered_call *registered_calls;
@@ -83,19 +85,10 @@ struct grpc_channel {
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
-grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
- const grpc_channel_args *input_args,
- grpc_channel_stack_type channel_stack_type,
- grpc_transport *optional_transport) {
- grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
- grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
- input_args);
- grpc_channel_stack_builder_set_target(builder, target);
- grpc_channel_stack_builder_set_transport(builder, optional_transport);
- if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) {
- grpc_channel_stack_builder_destroy(exec_ctx, builder);
- return NULL;
- }
+grpc_channel *grpc_channel_create_with_builder(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+ grpc_channel_stack_type channel_stack_type) {
+ char *target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
grpc_channel_args *args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
grpc_channel *channel;
@@ -106,15 +99,20 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
gpr_log(GPR_ERROR, "channel stack builder failed: %s",
grpc_error_string(error));
GRPC_ERROR_UNREF(error);
+ gpr_free(target);
goto done;
}
memset(channel, 0, sizeof(*channel));
- channel->target = gpr_strdup(target);
+ channel->target = target;
channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
gpr_mu_init(&channel->registered_call_mu);
channel->registered_calls = NULL;
+ gpr_atm_no_barrier_store(
+ &channel->call_size_estimate,
+ (gpr_atm)CHANNEL_STACK_FROM_CHANNEL(channel)->call_stack_size);
+
grpc_compression_options_init(&channel->compression_options);
for (size_t i = 0; i < args->num_args; i++) {
if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
@@ -177,6 +175,55 @@ done:
return channel;
}
+grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
+ const grpc_channel_args *input_args,
+ grpc_channel_stack_type channel_stack_type,
+ grpc_transport *optional_transport) {
+ grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
+ grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
+ input_args);
+ grpc_channel_stack_builder_set_target(builder, target);
+ grpc_channel_stack_builder_set_transport(builder, optional_transport);
+ if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) {
+ grpc_channel_stack_builder_destroy(exec_ctx, builder);
+ return NULL;
+ }
+ return grpc_channel_create_with_builder(exec_ctx, builder,
+ channel_stack_type);
+}
+
+size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) {
+#define ROUND_UP_SIZE 256
+  /* We round our current estimate up to the next multiple of ROUND_UP_SIZE and
+     keep at least one extra ROUND_UP_SIZE block of headroom. This ensures:
+     1. a consistent allocation size when our estimate is drifting slowly
+        (which is common) - which tends to help most allocators reuse memory
+     2. a small amount of allowed growth over the estimate without hitting
+        the arena size doubling case, reducing overall memory usage */
+ return ((size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate) +
+ 2 * ROUND_UP_SIZE) &
+ ~(size_t)(ROUND_UP_SIZE - 1);
+}
+
+void grpc_channel_update_call_size_estimate(grpc_channel *channel,
+ size_t size) {
+ size_t cur = (size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate);
+ if (cur < size) {
+ /* size grew: update estimate */
+ gpr_atm_no_barrier_cas(&channel->call_size_estimate, (gpr_atm)cur,
+ (gpr_atm)size);
+ /* if we lose: never mind, something else will likely update soon enough */
+ } else if (cur == size) {
+ /* no change: holding pattern */
+ } else if (cur > 0) {
+ /* size shrank: decrease estimate */
+ gpr_atm_no_barrier_cas(
+ &channel->call_size_estimate, (gpr_atm)cur,
+ (gpr_atm)(GPR_MIN(cur - 1, (255 * cur + size) / 256)));
+ /* if we lose: never mind, something else will likely update soon enough */
+ }
+}
+
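
Two pieces of arithmetic carry this scheme. On the read side, (estimate + 2 * ROUND_UP_SIZE) & ~(ROUND_UP_SIZE - 1) hands out a multiple of 256 with at least 256 bytes of headroom: an estimate of 1000 yields (1000 + 512) & ~255 = 1280, and keeps yielding 1280 while the estimate drifts inside that band. On the shrink side, (255 * cur + size) / 256 is an exponentially weighted moving average, additionally forced down by at least one byte so repeated small samples keep converging. A worked example restating the code above (values illustrative):

#include <grpc/support/useful.h>

/* one shrink step: cur = 8192, size = 4096  ->  min(8191, 8176) = 8176 */
static size_t shrink_step(size_t cur, size_t size) {
  return GPR_MIN(cur - 1, (255 * cur + size) / 256);
}
/* a later grpc_channel_get_call_size_estimate() then hands out
   (8176 + 2 * 256) & ~(size_t)(256 - 1) = 8448 bytes for the next call arena */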
char *grpc_channel_get_target(grpc_channel *channel) {
GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel));
return gpr_strdup(channel->target);
@@ -348,7 +395,8 @@ void grpc_channel_destroy(grpc_channel *channel) {
grpc_channel_element *elem;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel));
- op->disconnect_with_error = GRPC_ERROR_CREATE("Channel Destroyed");
+ op->disconnect_with_error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Destroyed");
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
elem->filter->start_transport_op(&exec_ctx, elem, op);
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 3a441d7add..0f203a3e59 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -35,6 +35,7 @@
#define GRPC_CORE_LIB_SURFACE_CHANNEL_H
#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/channel/channel_stack_builder.h"
#include "src/core/lib/surface/channel_stack_type.h"
grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
@@ -42,6 +43,10 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
grpc_channel_stack_type channel_stack_type,
grpc_transport *optional_transport);
+grpc_channel *grpc_channel_create_with_builder(
+ grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder,
+ grpc_channel_stack_type channel_stack_type);
+
/** Create a call given a grpc_channel, in order to call \a method.
Progress is tied to activity on \a pollset_set. The returned call object is
meant to be used with \a grpc_call_start_batch_and_execute, which relies on
@@ -66,6 +71,9 @@ grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx,
grpc_channel *channel,
int status_code);
+size_t grpc_channel_get_call_size_estimate(grpc_channel *channel);
+void grpc_channel_update_call_size_estimate(grpc_channel *channel, size_t size);
+
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
diff --git a/src/core/lib/surface/completion_queue_factory.c b/src/core/lib/surface/completion_queue_factory.c
new file mode 100644
index 0000000000..db67a5192b
--- /dev/null
+++ b/src/core/lib/surface/completion_queue_factory.c
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/lib/surface/completion_queue_factory.h"
+#include "src/core/lib/surface/completion_queue.h"
+
+#include <grpc/support/log.h>
+
+/* TODO(sreek): Currently this does not use the attributes arg; support will be
+   added in a future PR */
+static grpc_completion_queue* default_create(
+ const grpc_completion_queue_factory* factory,
+ const grpc_completion_queue_attributes* attributes) {
+ return grpc_completion_queue_create(NULL);
+}
+
+static grpc_completion_queue_factory_vtable default_vtable = {default_create};
+
+static const grpc_completion_queue_factory g_default_cq_factory = {
+ "Default Factory", NULL, &default_vtable};
+
+const grpc_completion_queue_factory* grpc_completion_queue_factory_lookup(
+ const grpc_completion_queue_attributes* attributes) {
+ /* As we add more fields to grpc_completion_queue_attributes, we may have to
+ change this assert to:
+ GPR_ASSERT (attributes->version >= 1 &&
+ attributes->version <= GRPC_CQ_CURRENT_VERSION) */
+ GPR_ASSERT(attributes->version == 1);
+
+ /* The default factory can handle version 1 of the attributes structure. We
+ may have to change this as more fields are added to the structure */
+ return &g_default_cq_factory;
+}
+
+grpc_completion_queue* grpc_completion_queue_create_for_next(void* reserved) {
+ GPR_ASSERT(!reserved);
+ grpc_completion_queue_attributes attr = {1, GRPC_CQ_NEXT,
+ GRPC_CQ_DEFAULT_POLLING};
+ return g_default_cq_factory.vtable->create(&g_default_cq_factory, &attr);
+}
+
+grpc_completion_queue* grpc_completion_queue_create_for_pluck(void* reserved) {
+ GPR_ASSERT(!reserved);
+ grpc_completion_queue_attributes attr = {1, GRPC_CQ_PLUCK,
+ GRPC_CQ_DEFAULT_POLLING};
+ return g_default_cq_factory.vtable->create(&g_default_cq_factory, &attr);
+}
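
A minimal sketch of how the factory indirection is exercised end to end, using only symbols introduced or referenced in this file; the attribute values mirror the helpers above:

static grpc_completion_queue *make_next_cq(void) {
  grpc_completion_queue_attributes attr = {1 /* version */, GRPC_CQ_NEXT,
                                           GRPC_CQ_DEFAULT_POLLING};
  const grpc_completion_queue_factory *factory =
      grpc_completion_queue_factory_lookup(&attr);
  return factory->vtable->create(factory, &attr);
}
/* ...drive with grpc_completion_queue_next(), then shut down and destroy as
   with any other completion queue. */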
diff --git a/src/core/ext/client_channel/initial_connect_string.h b/src/core/lib/surface/completion_queue_factory.h
index 876abea40e..57e90b5090 100644
--- a/src/core/ext/client_channel/initial_connect_string.h
+++ b/src/core/lib/surface/completion_queue_factory.h
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2015, Google Inc.
+ * Copyright 2017, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,20 +31,21 @@
*
*/
-#ifndef GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
-#define GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H
+#ifndef GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_FACTORY_H
+#define GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_FACTORY_H
-#include <grpc/slice.h>
-#include "src/core/lib/iomgr/resolve_address.h"
+#include <grpc/grpc.h>
+#include "src/core/lib/surface/completion_queue.h"
-typedef void (*grpc_set_initial_connect_string_func)(
- grpc_resolved_address **addr, grpc_slice *initial_str);
+typedef struct grpc_completion_queue_factory_vtable {
+ grpc_completion_queue* (*create)(const grpc_completion_queue_factory*,
+ const grpc_completion_queue_attributes*);
+} grpc_completion_queue_factory_vtable;
-void grpc_test_set_initial_connect_string_function(
- grpc_set_initial_connect_string_func func);
+struct grpc_completion_queue_factory {
+ const char* name;
+ void* data; /* Factory specific data */
+ grpc_completion_queue_factory_vtable* vtable;
+};
-/** Set a string to be sent once connected. Optionally reset addr. */
-void grpc_set_initial_connect_string(grpc_resolved_address **addr,
- grpc_slice *connect_string);
-
-#endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_INITIAL_CONNECT_STRING_H */
+#endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_FACTORY_H */
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 49bc4c114b..0c408aa288 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -90,7 +90,8 @@ static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
fill_metadata(exec_ctx, elem, op->recv_trailing_metadata);
}
grpc_transport_stream_op_finish_with_failure(
- exec_ctx, op, GRPC_ERROR_CREATE("lame client channel"));
+ exec_ctx, op,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
@@ -111,8 +112,9 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_NONE);
}
if (op->send_ping != NULL) {
- grpc_closure_sched(exec_ctx, op->send_ping,
- GRPC_ERROR_CREATE("lame client channel"));
+ grpc_closure_sched(
+ exec_ctx, op->send_ping,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
if (op->on_consumed != NULL) {
@@ -130,8 +132,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *and_free_memory) {
- gpr_free(and_free_memory);
+ grpc_closure *then_schedule_closure) {
+ grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}
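
This destroy_call_elem signature change (repeated in server.c, server_auth_filter.c and channel_filter.h elsewhere in this diff) replaces the old and_free_memory contract: a filter no longer frees a block handed to it, it signals that its per-call cleanup is finished by scheduling then_schedule_closure, which is what allows call memory to live in the arena introduced in call.c. A filter with nothing asynchronous to do follows the lame_client pattern above exactly; sketched under an assumed filter name:

static void my_filter_destroy_call_elem(grpc_exec_ctx *exec_ctx,
                                        grpc_call_element *elem,
                                        const grpc_call_final_info *final_info,
                                        grpc_closure *then_schedule_closure) {
  /* tear down whatever this filter stashed in elem->call_data ... */
  grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
}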
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index b360579553..a123c9ca43 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -288,10 +288,10 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
grpc_channel_element *elem;
op->goaway_error =
- send_goaway
- ? grpc_error_set_int(GRPC_ERROR_CREATE("Server shutdown"),
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK)
- : GRPC_ERROR_NONE;
+ send_goaway ? grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"),
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK)
+ : GRPC_ERROR_NONE;
op->set_accept_stream = true;
sc->slice = grpc_slice_from_copied_string("Server shutdown");
op->disconnect_with_error = send_disconnect;
@@ -712,8 +712,9 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
return;
}
- kill_pending_work_locked(exec_ctx, server,
- GRPC_ERROR_CREATE("Server Shutdown"));
+ kill_pending_work_locked(
+ exec_ctx, server,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
if (server->root_channel_data.next != &server->root_channel_data ||
server->listeners_destroyed < num_listeners(server)) {
@@ -771,8 +772,8 @@ static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr,
/* do nothing */
} else {
grpc_error *src_error = error;
- error =
- GRPC_ERROR_CREATE_REFERENCING("Missing :authority or :path", &error, 1);
+ error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Missing :authority or :path", &error, 1);
GRPC_ERROR_UNREF(src_error);
}
@@ -898,7 +899,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *ignored) {
+ grpc_closure *ignored) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
@@ -1219,7 +1220,8 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s,
op->on_connectivity_state_change = &chand->channel_connectivity_changed;
op->connectivity_state = &chand->connectivity_state;
if (gpr_atm_acq_load(&s->shutdown_flag) != 0) {
- op->disconnect_with_error = GRPC_ERROR_CREATE("Server shutdown");
+ op->disconnect_with_error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown");
}
grpc_transport_perform_op(exec_ctx, transport, op);
}
@@ -1277,8 +1279,9 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* collect all unregistered then registered calls */
gpr_mu_lock(&server->mu_call);
- kill_pending_work_locked(&exec_ctx, server,
- GRPC_ERROR_CREATE("Server Shutdown"));
+ kill_pending_work_locked(
+ &exec_ctx, server,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
gpr_mu_unlock(&server->mu_call);
maybe_finish_shutdown(&exec_ctx, server);
@@ -1308,8 +1311,9 @@ void grpc_server_cancel_all_calls(grpc_server *server) {
channel_broadcaster_init(server, &broadcaster);
gpr_mu_unlock(&server->mu_global);
- channel_broadcaster_shutdown(&exec_ctx, &broadcaster, false /* send_goaway */,
- GRPC_ERROR_CREATE("Cancelling all calls"));
+ channel_broadcaster_shutdown(
+ &exec_ctx, &broadcaster, false /* send_goaway */,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelling all calls"));
grpc_exec_ctx_finish(&exec_ctx);
}
@@ -1357,16 +1361,16 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
int request_id;
if (gpr_atm_acq_load(&server->shutdown_flag)) {
fail_call(exec_ctx, server, cq_idx, rc,
- GRPC_ERROR_CREATE("Server Shutdown"));
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
return GRPC_CALL_OK;
}
request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]);
if (request_id == -1) {
/* out of request ids: just fail this one */
fail_call(exec_ctx, server, cq_idx, rc,
- grpc_error_set_int(GRPC_ERROR_CREATE("Out of request ids"),
- GRPC_ERROR_INT_LIMIT,
- server->max_requested_calls_per_cq));
+ grpc_error_set_int(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Out of request ids"),
+ GRPC_ERROR_INT_LIMIT, server->max_requested_calls_per_cq));
return GRPC_CALL_OK;
}
switch (rc->type) {
diff --git a/src/core/lib/surface/validate_metadata.c b/src/core/lib/surface/validate_metadata.c
index 7ec9137265..6e76c4efe7 100644
--- a/src/core/lib/surface/validate_metadata.c
+++ b/src/core/lib/surface/validate_metadata.c
@@ -39,6 +39,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits,
@@ -52,9 +53,10 @@ static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits,
if ((legal_bits[byte] & (1 << bit)) == 0) {
char *dump = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
grpc_error *error = grpc_error_set_str(
- grpc_error_set_int(GRPC_ERROR_CREATE(err_desc), GRPC_ERROR_INT_OFFSET,
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_desc),
+ GRPC_ERROR_INT_OFFSET,
p - GRPC_SLICE_START_PTR(slice)),
- GRPC_ERROR_STR_RAW_BYTES, dump);
+ GRPC_ERROR_STR_RAW_BYTES, grpc_slice_from_copied_string(dump));
gpr_free(dump);
return error;
}
@@ -74,10 +76,12 @@ grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice) {
0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
if (GRPC_SLICE_LENGTH(slice) == 0) {
- return GRPC_ERROR_CREATE("Metadata keys cannot be zero length");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Metadata keys cannot be zero length");
}
if (GRPC_SLICE_START_PTR(slice)[0] == ':') {
- return GRPC_ERROR_CREATE("Metadata keys cannot start with :");
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "Metadata keys cannot start with :");
}
return conforms_to(slice, legal_header_bits, "Illegal header key");
}
diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c
index 1143a9e044..ba80bd801e 100644
--- a/src/core/lib/surface/version.c
+++ b/src/core/lib/surface/version.c
@@ -38,4 +38,4 @@
const char *grpc_version_string(void) { return "3.0.0-dev"; }
-const char *grpc_g_stands_for(void) { return "green"; }
+const char *grpc_g_stands_for(void) { return "gentle"; }
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index afe1f6164d..3757b25267 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -79,7 +79,8 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx,
*w->current = GRPC_CHANNEL_SHUTDOWN;
error = GRPC_ERROR_NONE;
} else {
- error = GRPC_ERROR_CREATE("Shutdown connectivity owner");
+ error =
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutdown connectivity owner");
}
grpc_closure_sched(exec_ctx, w->notify, error);
gpr_free(w);
diff --git a/src/core/lib/transport/error_utils.c b/src/core/lib/transport/error_utils.c
index ef55e561fb..4e70f8749d 100644
--- a/src/core/lib/transport/error_utils.c
+++ b/src/core/lib/transport/error_utils.c
@@ -55,7 +55,7 @@ static grpc_error *recursively_find_error_with_field(grpc_error *error,
}
void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
- grpc_status_code *code, const char **msg,
+ grpc_status_code *code, grpc_slice *slice,
grpc_http2_error_code *http_error) {
// Start with the parent error and recurse through the tree of children
// until we find the first one that has a status code.
@@ -97,11 +97,11 @@ void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
// If the error has a status message, use it. Otherwise, fall back to
// the error description.
- if (msg != NULL) {
- *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_GRPC_MESSAGE);
- if (*msg == NULL && error != GRPC_ERROR_NONE) {
- *msg = grpc_error_get_str(found_error, GRPC_ERROR_STR_DESCRIPTION);
- if (*msg == NULL) *msg = "unknown error"; // Just in case.
+ if (slice != NULL) {
+ if (!grpc_error_get_str(found_error, GRPC_ERROR_STR_GRPC_MESSAGE, slice)) {
+ if (!grpc_error_get_str(found_error, GRPC_ERROR_STR_DESCRIPTION, slice)) {
+ *slice = grpc_slice_from_static_string("unknown error");
+ }
}
}
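
Callers that previously received a const char * now get a grpc_slice taken from the error (or the static "unknown error" fallback). The slice is not an extra reference: call.c's get_final_status_from takes its own ref via grpc_slice_ref_internal when it keeps the value, while a caller that only needs a transient C string can convert and free a copy. A minimal logging sketch; error and deadline are placeholders:

static void log_call_status(grpc_error *error, gpr_timespec deadline) {
  grpc_status_code code;
  grpc_slice msg;
  grpc_error_get_status(error, deadline, &code, &msg, NULL);
  char *msg_str = grpc_slice_to_c_string(msg); /* transient copy for logging */
  gpr_log(GPR_INFO, "call finished with status %d: %s", code, msg_str);
  gpr_free(msg_str);
}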
diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h
index 105338880a..3b44466ab8 100644
--- a/src/core/lib/transport/error_utils.h
+++ b/src/core/lib/transport/error_utils.h
@@ -44,7 +44,7 @@
/// attributes (code, msg, http_status) are unneeded, they can be passed as
/// NULL.
void grpc_error_get_status(grpc_error *error, gpr_timespec deadline,
- grpc_status_code *code, const char **msg,
+ grpc_status_code *code, grpc_slice *slice,
grpc_http2_error_code *http_status);
/// A utility function to check whether there is a clear status code that
diff --git a/src/core/lib/transport/metadata_batch.c b/src/core/lib/transport/metadata_batch.c
index fc2c52bd8a..fa73244aa4 100644
--- a/src/core/lib/transport/metadata_batch.c
+++ b/src/core/lib/transport/metadata_batch.c
@@ -101,12 +101,10 @@ void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx,
}
grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md) {
- char *k = grpc_slice_to_c_string(GRPC_MDKEY(md));
- char *v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
grpc_error *out = grpc_error_set_str(
- grpc_error_set_str(src, GRPC_ERROR_STR_KEY, k), GRPC_ERROR_STR_VALUE, v);
- gpr_free(k);
- gpr_free(v);
+ grpc_error_set_str(src, GRPC_ERROR_STR_KEY,
+ grpc_slice_ref_internal(GRPC_MDKEY(md))),
+ GRPC_ERROR_STR_VALUE, grpc_slice_ref_internal(GRPC_MDVALUE(md)));
return out;
}
@@ -126,7 +124,8 @@ static grpc_error *maybe_link_callout(grpc_metadata_batch *batch,
return GRPC_ERROR_NONE;
}
return grpc_attach_md_to_error(
- GRPC_ERROR_CREATE("Unallowed duplicate metadata"), storage->md);
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unallowed duplicate metadata"),
+ storage->md);
}
static void maybe_unlink_callout(grpc_metadata_batch *batch,
@@ -302,7 +301,7 @@ static void add_error(grpc_error **composite, grpc_error *error,
const char *composite_error_string) {
if (error == GRPC_ERROR_NONE) return;
if (*composite == GRPC_ERROR_NONE) {
- *composite = GRPC_ERROR_CREATE(composite_error_string);
+ *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(composite_error_string);
}
*composite = grpc_error_add_child(*composite, error);
}
diff --git a/src/core/lib/transport/service_config.c b/src/core/lib/transport/service_config.c
index 12da2a88fe..1195f75044 100644
--- a/src/core/lib/transport/service_config.c
+++ b/src/core/lib/transport/service_config.c
@@ -93,6 +93,18 @@ void grpc_service_config_destroy(grpc_service_config* service_config) {
gpr_free(service_config);
}
+void grpc_service_config_parse_global_params(
+ const grpc_service_config* service_config,
+ void (*process_json)(const grpc_json* json, void* arg), void* arg) {
+ const grpc_json* json = service_config->json_tree;
+ if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return;
+ for (grpc_json* field = json->child; field != NULL; field = field->next) {
+ if (field->key == NULL) return;
+ if (strcmp(field->key, "methodConfig") == 0) continue;
+ process_json(field, arg);
+ }
+}
+
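
The iteration hands every top-level field except the "methodConfig" array to the callback, so consumers pick out the global keys they care about. A sketch with an illustrative callback; the field name checked below is an assumption, not something defined by this diff:

#include <stdbool.h>
#include <string.h>

typedef struct { bool saw_lb_policy; } global_params;

static void process_global_field(const grpc_json *field, void *arg) {
  global_params *out = arg;
  /* field->key is non-NULL here: the loop above bails out on NULL keys */
  if (field->type == GRPC_JSON_STRING &&
      strcmp(field->key, "loadBalancingPolicy") == 0) {
    out->saw_lb_policy = true;
  }
}

/* usage:
     global_params params = {false};
     grpc_service_config_parse_global_params(config, process_global_field,
                                             &params);                        */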
const char* grpc_service_config_get_lb_policy_name(
const grpc_service_config* service_config) {
const grpc_json* json = service_config->json_tree;
diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h
index cd739a593c..ebfc59b534 100644
--- a/src/core/lib/transport/service_config.h
+++ b/src/core/lib/transport/service_config.h
@@ -42,6 +42,12 @@ typedef struct grpc_service_config grpc_service_config;
grpc_service_config* grpc_service_config_create(const char* json_string);
void grpc_service_config_destroy(grpc_service_config* service_config);
+/// Invokes \a process_json() for each global parameter in the service
+/// config. \a arg is passed as the second argument to \a process_json().
+void grpc_service_config_parse_global_params(
+ const grpc_service_config* service_config,
+ void (*process_json)(const grpc_json* json, void* arg), void* arg);
+
/// Gets the LB policy name from \a service_config.
/// Returns NULL if no LB policy name was specified.
/// Caller does NOT take ownership.
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index 3024f2ae78..d56cb31ee0 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -162,9 +162,9 @@ void grpc_transport_destroy(grpc_exec_ctx *exec_ctx,
int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
grpc_stream_refcount *refcount,
- const void *server_data) {
+ const void *server_data, gpr_arena *arena) {
return transport->vtable->init_stream(exec_ctx, transport, stream, refcount,
- server_data);
+ server_data, arena);
}
void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx,
@@ -197,9 +197,10 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
- grpc_stream *stream, void *and_free_memory) {
+ grpc_stream *stream,
+ grpc_closure *then_schedule_closure) {
transport->vtable->destroy_stream(exec_ctx, transport, stream,
- and_free_memory);
+ then_schedule_closure);
}
char *grpc_transport_get_peer(grpc_exec_ctx *exec_ctx,
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index cc1c277b35..950b18aeda 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -41,6 +41,7 @@
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/support/arena.h"
#include "src/core/lib/transport/byte_stream.h"
#include "src/core/lib/transport/metadata_batch.h"
@@ -229,7 +230,7 @@ size_t grpc_transport_stream_size(grpc_transport *transport);
int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport, grpc_stream *stream,
grpc_stream_refcount *refcount,
- const void *server_data);
+ const void *server_data, gpr_arena *arena);
void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
grpc_stream *stream, grpc_polling_entity *pollent);
@@ -246,7 +247,8 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport,
caller, but any child memory must be cleaned up) */
void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx,
grpc_transport *transport,
- grpc_stream *stream, void *and_free_memory);
+ grpc_stream *stream,
+ grpc_closure *then_schedule_closure);
void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
grpc_transport_stream_op *op,
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index 8553148c35..6f688bf8d2 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -47,7 +47,7 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_init_stream */
int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
grpc_stream *stream, grpc_stream_refcount *refcount,
- const void *server_data);
+ const void *server_data, gpr_arena *arena);
/* implementation of grpc_transport_set_pollset */
void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
@@ -67,7 +67,8 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_destroy_stream */
void (*destroy_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self,
- grpc_stream *stream, void *and_free_memory);
+ grpc_stream *stream,
+ grpc_closure *then_schedule_closure);
/* implementation of grpc_transport_destroy */
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self);
diff --git a/src/core/plugin_registry/grpc_plugin_registry.c b/src/core/plugin_registry/grpc_plugin_registry.c
index 2efd9cd1ad..596e3b7114 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_plugin_registry.c
@@ -43,6 +43,8 @@ extern void grpc_lb_policy_pick_first_init(void);
extern void grpc_lb_policy_pick_first_shutdown(void);
extern void grpc_lb_policy_round_robin_init(void);
extern void grpc_lb_policy_round_robin_shutdown(void);
+extern void grpc_resolver_dns_ares_init(void);
+extern void grpc_resolver_dns_ares_shutdown(void);
extern void grpc_resolver_dns_native_init(void);
extern void grpc_resolver_dns_native_shutdown(void);
extern void grpc_resolver_sockaddr_init(void);
@@ -63,6 +65,8 @@ void grpc_register_built_in_plugins(void) {
grpc_lb_policy_pick_first_shutdown);
grpc_register_plugin(grpc_lb_policy_round_robin_init,
grpc_lb_policy_round_robin_shutdown);
+ grpc_register_plugin(grpc_resolver_dns_ares_init,
+ grpc_resolver_dns_ares_shutdown);
grpc_register_plugin(grpc_resolver_dns_native_init,
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,
diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
index 8b18af699d..a05ebcb3af 100644
--- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
+++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.c
@@ -37,6 +37,8 @@ extern void grpc_chttp2_plugin_init(void);
extern void grpc_chttp2_plugin_shutdown(void);
extern void grpc_client_channel_init(void);
extern void grpc_client_channel_shutdown(void);
+extern void grpc_resolver_dns_ares_init(void);
+extern void grpc_resolver_dns_ares_shutdown(void);
extern void grpc_resolver_dns_native_init(void);
extern void grpc_resolver_dns_native_shutdown(void);
extern void grpc_resolver_sockaddr_init(void);
@@ -57,6 +59,8 @@ void grpc_register_built_in_plugins(void) {
grpc_chttp2_plugin_shutdown);
grpc_register_plugin(grpc_client_channel_init,
grpc_client_channel_shutdown);
+ grpc_register_plugin(grpc_resolver_dns_ares_init,
+ grpc_resolver_dns_ares_shutdown);
grpc_register_plugin(grpc_resolver_dns_native_init,
grpc_resolver_dns_native_shutdown);
grpc_register_plugin(grpc_resolver_sockaddr_init,
diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc
index 65f3277499..53e4a9c39c 100644
--- a/src/cpp/common/channel_arguments.cc
+++ b/src/cpp/common/channel_arguments.cc
@@ -81,6 +81,16 @@ ChannelArguments::ChannelArguments(const ChannelArguments& other)
}
}
+ChannelArguments::~ChannelArguments() {
+ grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+ for (auto it = args_.begin(); it != args_.end(); ++it) {
+ if (it->type == GRPC_ARG_POINTER) {
+ it->value.pointer.vtable->destroy(&exec_ctx, it->value.pointer.p);
+ }
+ }
+ grpc_exec_ctx_finish(&exec_ctx);
+}
+
void ChannelArguments::Swap(ChannelArguments& other) {
args_.swap(other.args_);
strings_.swap(other.strings_);
@@ -101,8 +111,10 @@ void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) {
for (auto it = args_.begin(); it != args_.end(); ++it) {
if (it->type == mutator_arg.type &&
grpc::string(it->key) == grpc::string(mutator_arg.key)) {
+ GPR_ASSERT(!replaced);
it->value.pointer.vtable->destroy(&exec_ctx, it->value.pointer.p);
it->value.pointer = mutator_arg.value.pointer;
+ replaced = true;
}
}
grpc_exec_ctx_finish(&exec_ctx);
@@ -121,14 +133,19 @@ void ChannelArguments::SetUserAgentPrefix(
return;
}
bool replaced = false;
+ auto strings_it = strings_.begin();
for (auto it = args_.begin(); it != args_.end(); ++it) {
const grpc_arg& arg = *it;
- if (arg.type == GRPC_ARG_STRING &&
- grpc::string(arg.key) == GRPC_ARG_PRIMARY_USER_AGENT_STRING) {
- strings_.push_back(user_agent_prefix + " " + arg.value.string);
- it->value.string = const_cast<char*>(strings_.back().c_str());
- replaced = true;
- break;
+ ++strings_it;
+ if (arg.type == GRPC_ARG_STRING) {
+ if (grpc::string(arg.key) == GRPC_ARG_PRIMARY_USER_AGENT_STRING) {
+ GPR_ASSERT(arg.value.string == strings_it->c_str());
+ *(strings_it) = user_agent_prefix + " " + arg.value.string;
+ it->value.string = const_cast<char*>(strings_it->c_str());
+ replaced = true;
+ break;
+ }
+ ++strings_it;
}
}
if (!replaced) {
@@ -185,7 +202,7 @@ void ChannelArguments::SetPointerWithVtable(
arg.type = GRPC_ARG_POINTER;
strings_.push_back(key);
arg.key = const_cast<char*>(strings_.back().c_str());
- arg.value.pointer.p = value;
+ arg.value.pointer.p = vtable->copy(value);
arg.value.pointer.vtable = vtable;
args_.push_back(arg);
}
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index 79c4bab985..494d5d64d7 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -318,7 +318,8 @@ class ChannelFilter final {
static void DestroyCallElement(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
- void *and_free_memory) {
+ grpc_closure *then_call_closure) {
+ GPR_ASSERT(then_call_closure == NULL);
reinterpret_cast<CallDataType *>(elem->call_data)->~CallDataType();
}
diff --git a/src/csharp/Grpc.Core/ChannelOptions.cs b/src/csharp/Grpc.Core/ChannelOptions.cs
index b6eeceabc4..46a2c6695f 100644
--- a/src/csharp/Grpc.Core/ChannelOptions.cs
+++ b/src/csharp/Grpc.Core/ChannelOptions.cs
@@ -151,7 +151,14 @@ namespace Grpc.Core
public const string MaxConcurrentStreams = "grpc.max_concurrent_streams";
/// <summary>Maximum message length that the channel can receive</summary>
- public const string MaxMessageLength = "grpc.max_message_length";
+ public const string MaxReceiveMessageLength = "grpc.max_receive_message_length";
+
+ /// <summary>Maximum message length that the channel can send</summary>
+ public const string MaxSendMessageLength = "grpc.max_send_message_length";
+
+ /// <summary>Obsolete, for backward compatibility only.</summary>
+ [Obsolete("Use MaxReceiveMessageLength instead.")]
+ public const string MaxMessageLength = MaxReceiveMessageLength;
/// <summary>Initial sequence number for http2 transports</summary>
public const string Http2InitialSequenceNumber = "grpc.http2.initial_sequence_number";
diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs
index 3364b8ce8e..1f2e67d916 100644
--- a/src/csharp/Grpc.Examples/MathGrpc.cs
+++ b/src/csharp/Grpc.Examples/MathGrpc.cs
@@ -35,41 +35,41 @@
using System;
using System.Threading;
using System.Threading.Tasks;
-using Grpc.Core;
+using grpc = global::Grpc.Core;
namespace Math {
public static partial class Math
{
static readonly string __ServiceName = "math.Math";
- static readonly Marshaller<global::Math.DivArgs> __Marshaller_DivArgs = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.DivArgs.Parser.ParseFrom);
- static readonly Marshaller<global::Math.DivReply> __Marshaller_DivReply = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.DivReply.Parser.ParseFrom);
- static readonly Marshaller<global::Math.FibArgs> __Marshaller_FibArgs = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.FibArgs.Parser.ParseFrom);
- static readonly Marshaller<global::Math.Num> __Marshaller_Num = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.Num.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Math.DivArgs> __Marshaller_DivArgs = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.DivArgs.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Math.DivReply> __Marshaller_DivReply = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.DivReply.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Math.FibArgs> __Marshaller_FibArgs = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.FibArgs.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Math.Num> __Marshaller_Num = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Math.Num.Parser.ParseFrom);
- static readonly Method<global::Math.DivArgs, global::Math.DivReply> __Method_Div = new Method<global::Math.DivArgs, global::Math.DivReply>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Math.DivArgs, global::Math.DivReply> __Method_Div = new grpc::Method<global::Math.DivArgs, global::Math.DivReply>(
+ grpc::MethodType.Unary,
__ServiceName,
"Div",
__Marshaller_DivArgs,
__Marshaller_DivReply);
- static readonly Method<global::Math.DivArgs, global::Math.DivReply> __Method_DivMany = new Method<global::Math.DivArgs, global::Math.DivReply>(
- MethodType.DuplexStreaming,
+ static readonly grpc::Method<global::Math.DivArgs, global::Math.DivReply> __Method_DivMany = new grpc::Method<global::Math.DivArgs, global::Math.DivReply>(
+ grpc::MethodType.DuplexStreaming,
__ServiceName,
"DivMany",
__Marshaller_DivArgs,
__Marshaller_DivReply);
- static readonly Method<global::Math.FibArgs, global::Math.Num> __Method_Fib = new Method<global::Math.FibArgs, global::Math.Num>(
- MethodType.ServerStreaming,
+ static readonly grpc::Method<global::Math.FibArgs, global::Math.Num> __Method_Fib = new grpc::Method<global::Math.FibArgs, global::Math.Num>(
+ grpc::MethodType.ServerStreaming,
__ServiceName,
"Fib",
__Marshaller_FibArgs,
__Marshaller_Num);
- static readonly Method<global::Math.Num, global::Math.Num> __Method_Sum = new Method<global::Math.Num, global::Math.Num>(
- MethodType.ClientStreaming,
+ static readonly grpc::Method<global::Math.Num, global::Math.Num> __Method_Sum = new grpc::Method<global::Math.Num, global::Math.Num>(
+ grpc::MethodType.ClientStreaming,
__ServiceName,
"Sum",
__Marshaller_Num,
@@ -91,9 +91,9 @@ namespace Math {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Math.DivReply> Div(global::Math.DivArgs request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Math.DivReply> Div(global::Math.DivArgs request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -106,9 +106,9 @@ namespace Math {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task DivMany(IAsyncStreamReader<global::Math.DivArgs> requestStream, IServerStreamWriter<global::Math.DivReply> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task DivMany(grpc::IAsyncStreamReader<global::Math.DivArgs> requestStream, grpc::IServerStreamWriter<global::Math.DivReply> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -120,9 +120,9 @@ namespace Math {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task Fib(global::Math.FibArgs request, IServerStreamWriter<global::Math.Num> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task Fib(global::Math.FibArgs request, grpc::IServerStreamWriter<global::Math.Num> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -132,24 +132,24 @@ namespace Math {
/// <param name="requestStream">Used for reading requests from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Math.Num> Sum(IAsyncStreamReader<global::Math.Num> requestStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Math.Num> Sum(grpc::IAsyncStreamReader<global::Math.Num> requestStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for Math</summary>
- public partial class MathClient : ClientBase<MathClient>
+ public partial class MathClient : grpc::ClientBase<MathClient>
{
/// <summary>Creates a new client for Math</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public MathClient(Channel channel) : base(channel)
+ public MathClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for Math that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public MathClient(CallInvoker callInvoker) : base(callInvoker)
+ public MathClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -171,9 +171,9 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Math.DivReply Div(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Math.DivReply Div(global::Math.DivArgs request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return Div(request, new CallOptions(headers, deadline, cancellationToken));
+ return Div(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
@@ -182,7 +182,7 @@ namespace Math {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Math.DivReply Div(global::Math.DivArgs request, CallOptions options)
+ public virtual global::Math.DivReply Div(global::Math.DivArgs request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_Div, null, options, request);
}
@@ -195,9 +195,9 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return DivAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return DivAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient
@@ -206,7 +206,7 @@ namespace Math {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_Div, null, options, request);
}
@@ -220,9 +220,9 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return DivMany(new CallOptions(headers, deadline, cancellationToken));
+ return DivMany(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// DivMany accepts an arbitrary number of division args from the client stream
@@ -232,7 +232,7 @@ namespace Math {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(CallOptions options)
+ public virtual grpc::AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(grpc::CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_DivMany, null, options);
}
@@ -246,9 +246,9 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return Fib(request, new CallOptions(headers, deadline, cancellationToken));
+ return Fib(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib
@@ -258,7 +258,7 @@ namespace Math {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, CallOptions options)
+ public virtual grpc::AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, grpc::CallOptions options)
{
return CallInvoker.AsyncServerStreamingCall(__Method_Fib, null, options, request);
}
@@ -270,9 +270,9 @@ namespace Math {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return Sum(new CallOptions(headers, deadline, cancellationToken));
+ return Sum(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Sum sums a stream of numbers, returning the final result once the stream
@@ -280,7 +280,7 @@ namespace Math {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(CallOptions options)
+ public virtual grpc::AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(grpc::CallOptions options)
{
return CallInvoker.AsyncClientStreamingCall(__Method_Sum, null, options);
}
@@ -293,9 +293,9 @@ namespace Math {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(MathBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(MathBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_Div, serviceImpl.Div)
.AddMethod(__Method_DivMany, serviceImpl.DivMany)
.AddMethod(__Method_Fib, serviceImpl.Fib)
diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
index 020c2df565..d3115f3da1 100644
--- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
+++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
@@ -35,18 +35,18 @@
using System;
using System.Threading;
using System.Threading.Tasks;
-using Grpc.Core;
+using grpc = global::Grpc.Core;
namespace Grpc.Health.V1 {
public static partial class Health
{
static readonly string __ServiceName = "grpc.health.v1.Health";
- static readonly Marshaller<global::Grpc.Health.V1.HealthCheckRequest> __Marshaller_HealthCheckRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Health.V1.HealthCheckRequest.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Health.V1.HealthCheckResponse> __Marshaller_HealthCheckResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Health.V1.HealthCheckResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Health.V1.HealthCheckRequest> __Marshaller_HealthCheckRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Health.V1.HealthCheckRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Health.V1.HealthCheckResponse> __Marshaller_HealthCheckResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Health.V1.HealthCheckResponse.Parser.ParseFrom);
- static readonly Method<global::Grpc.Health.V1.HealthCheckRequest, global::Grpc.Health.V1.HealthCheckResponse> __Method_Check = new Method<global::Grpc.Health.V1.HealthCheckRequest, global::Grpc.Health.V1.HealthCheckResponse>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Health.V1.HealthCheckRequest, global::Grpc.Health.V1.HealthCheckResponse> __Method_Check = new grpc::Method<global::Grpc.Health.V1.HealthCheckRequest, global::Grpc.Health.V1.HealthCheckResponse>(
+ grpc::MethodType.Unary,
__ServiceName,
"Check",
__Marshaller_HealthCheckRequest,
@@ -61,24 +61,24 @@ namespace Grpc.Health.V1 {
/// <summary>Base class for server-side implementations of Health</summary>
public abstract partial class HealthBase
{
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Health.V1.HealthCheckResponse> Check(global::Grpc.Health.V1.HealthCheckRequest request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Health.V1.HealthCheckResponse> Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for Health</summary>
- public partial class HealthClient : ClientBase<HealthClient>
+ public partial class HealthClient : grpc::ClientBase<HealthClient>
{
/// <summary>Creates a new client for Health</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public HealthClient(Channel channel) : base(channel)
+ public HealthClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for Health that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public HealthClient(CallInvoker callInvoker) : base(callInvoker)
+ public HealthClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -91,19 +91,19 @@ namespace Grpc.Health.V1 {
{
}
- public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return Check(request, new CallOptions(headers, deadline, cancellationToken));
+ return Check(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
- public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, CallOptions options)
+ public virtual global::Grpc.Health.V1.HealthCheckResponse Check(global::Grpc.Health.V1.HealthCheckRequest request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_Check, null, options, request);
}
- public virtual AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return CheckAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return CheckAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
- public virtual AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Health.V1.HealthCheckResponse> CheckAsync(global::Grpc.Health.V1.HealthCheckRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_Check, null, options, request);
}
@@ -116,9 +116,9 @@ namespace Grpc.Health.V1 {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(HealthBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(HealthBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_Check, serviceImpl.Check).Build();
}
diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
index 8b58622d53..c80ffa8cf6 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
@@ -41,26 +41,26 @@
using System;
using System.Threading;
using System.Threading.Tasks;
-using Grpc.Core;
+using grpc = global::Grpc.Core;
namespace Grpc.Testing {
public static partial class MetricsService
{
static readonly string __ServiceName = "grpc.testing.MetricsService";
- static readonly Marshaller<global::Grpc.Testing.EmptyMessage> __Marshaller_EmptyMessage = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.EmptyMessage.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.GaugeResponse> __Marshaller_GaugeResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.GaugeResponse.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.GaugeRequest> __Marshaller_GaugeRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.GaugeRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.EmptyMessage> __Marshaller_EmptyMessage = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.EmptyMessage.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.GaugeResponse> __Marshaller_GaugeResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.GaugeResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.GaugeRequest> __Marshaller_GaugeRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.GaugeRequest.Parser.ParseFrom);
- static readonly Method<global::Grpc.Testing.EmptyMessage, global::Grpc.Testing.GaugeResponse> __Method_GetAllGauges = new Method<global::Grpc.Testing.EmptyMessage, global::Grpc.Testing.GaugeResponse>(
- MethodType.ServerStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.EmptyMessage, global::Grpc.Testing.GaugeResponse> __Method_GetAllGauges = new grpc::Method<global::Grpc.Testing.EmptyMessage, global::Grpc.Testing.GaugeResponse>(
+ grpc::MethodType.ServerStreaming,
__ServiceName,
"GetAllGauges",
__Marshaller_EmptyMessage,
__Marshaller_GaugeResponse);
- static readonly Method<global::Grpc.Testing.GaugeRequest, global::Grpc.Testing.GaugeResponse> __Method_GetGauge = new Method<global::Grpc.Testing.GaugeRequest, global::Grpc.Testing.GaugeResponse>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.GaugeRequest, global::Grpc.Testing.GaugeResponse> __Method_GetGauge = new grpc::Method<global::Grpc.Testing.GaugeRequest, global::Grpc.Testing.GaugeResponse>(
+ grpc::MethodType.Unary,
__ServiceName,
"GetGauge",
__Marshaller_GaugeRequest,
@@ -83,9 +83,9 @@ namespace Grpc.Testing {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task GetAllGauges(global::Grpc.Testing.EmptyMessage request, IServerStreamWriter<global::Grpc.Testing.GaugeResponse> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task GetAllGauges(global::Grpc.Testing.EmptyMessage request, grpc::IServerStreamWriter<global::Grpc.Testing.GaugeResponse> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -94,24 +94,24 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.GaugeResponse> GetGauge(global::Grpc.Testing.GaugeRequest request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.GaugeResponse> GetGauge(global::Grpc.Testing.GaugeRequest request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for MetricsService</summary>
- public partial class MetricsServiceClient : ClientBase<MetricsServiceClient>
+ public partial class MetricsServiceClient : grpc::ClientBase<MetricsServiceClient>
{
/// <summary>Creates a new client for MetricsService</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public MetricsServiceClient(Channel channel) : base(channel)
+ public MetricsServiceClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for MetricsService that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public MetricsServiceClient(CallInvoker callInvoker) : base(callInvoker)
+ public MetricsServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -133,9 +133,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return GetAllGauges(request, new CallOptions(headers, deadline, cancellationToken));
+ return GetAllGauges(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Returns the values of all the gauges that are currently being maintained by
@@ -144,7 +144,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, CallOptions options)
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, grpc::CallOptions options)
{
return CallInvoker.AsyncServerStreamingCall(__Method_GetAllGauges, null, options, request);
}
@@ -156,9 +156,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return GetGauge(request, new CallOptions(headers, deadline, cancellationToken));
+ return GetGauge(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Returns the value of one gauge
@@ -166,7 +166,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, CallOptions options)
+ public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_GetGauge, null, options, request);
}
@@ -178,9 +178,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return GetGaugeAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return GetGaugeAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Returns the value of one gauge
@@ -188,7 +188,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_GetGauge, null, options, request);
}
@@ -201,9 +201,9 @@ namespace Grpc.Testing {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(MetricsServiceBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(MetricsServiceBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_GetAllGauges, serviceImpl.GetAllGauges)
.AddMethod(__Method_GetGauge, serviceImpl.GetGauge).Build();
}
diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
index 5135d9ab66..bb95c8a549 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
@@ -37,25 +37,25 @@
using System;
using System.Threading;
using System.Threading.Tasks;
-using Grpc.Core;
+using grpc = global::Grpc.Core;
namespace Grpc.Testing {
public static partial class BenchmarkService
{
static readonly string __ServiceName = "grpc.testing.BenchmarkService";
- static readonly Marshaller<global::Grpc.Testing.SimpleRequest> __Marshaller_SimpleRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleRequest.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.SimpleResponse> __Marshaller_SimpleResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.SimpleRequest> __Marshaller_SimpleRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.SimpleResponse> __Marshaller_SimpleResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleResponse.Parser.ParseFrom);
- static readonly Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_UnaryCall = new Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_UnaryCall = new grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ grpc::MethodType.Unary,
__ServiceName,
"UnaryCall",
__Marshaller_SimpleRequest,
__Marshaller_SimpleResponse);
- static readonly Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_StreamingCall = new Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
- MethodType.DuplexStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_StreamingCall = new grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ grpc::MethodType.DuplexStreaming,
__ServiceName,
"StreamingCall",
__Marshaller_SimpleRequest,
@@ -77,9 +77,9 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -90,24 +90,24 @@ namespace Grpc.Testing {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task StreamingCall(IAsyncStreamReader<global::Grpc.Testing.SimpleRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.SimpleResponse> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task StreamingCall(grpc::IAsyncStreamReader<global::Grpc.Testing.SimpleRequest> requestStream, grpc::IServerStreamWriter<global::Grpc.Testing.SimpleResponse> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for BenchmarkService</summary>
- public partial class BenchmarkServiceClient : ClientBase<BenchmarkServiceClient>
+ public partial class BenchmarkServiceClient : grpc::ClientBase<BenchmarkServiceClient>
{
/// <summary>Creates a new client for BenchmarkService</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public BenchmarkServiceClient(Channel channel) : base(channel)
+ public BenchmarkServiceClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for BenchmarkService that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public BenchmarkServiceClient(CallInvoker callInvoker) : base(callInvoker)
+ public BenchmarkServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -129,9 +129,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnaryCall(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnaryCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by one response.
@@ -140,7 +140,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_UnaryCall, null, options, request);
}
@@ -153,9 +153,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnaryCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by one response.
@@ -164,7 +164,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request);
}
@@ -176,9 +176,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return StreamingCall(new CallOptions(headers, deadline, cancellationToken));
+ return StreamingCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by one response.
@@ -186,7 +186,7 @@ namespace Grpc.Testing {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(CallOptions options)
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(grpc::CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_StreamingCall, null, options);
}
@@ -199,9 +199,9 @@ namespace Grpc.Testing {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(BenchmarkServiceBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(BenchmarkServiceBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
.AddMethod(__Method_StreamingCall, serviceImpl.StreamingCall).Build();
}
@@ -211,37 +211,37 @@ namespace Grpc.Testing {
{
static readonly string __ServiceName = "grpc.testing.WorkerService";
- static readonly Marshaller<global::Grpc.Testing.ServerArgs> __Marshaller_ServerArgs = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ServerArgs.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.ServerStatus> __Marshaller_ServerStatus = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ServerStatus.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.ClientArgs> __Marshaller_ClientArgs = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ClientArgs.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.ClientStatus> __Marshaller_ClientStatus = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ClientStatus.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.CoreRequest> __Marshaller_CoreRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.CoreRequest.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.CoreResponse> __Marshaller_CoreResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.CoreResponse.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.Void> __Marshaller_Void = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Void.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.ServerArgs> __Marshaller_ServerArgs = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ServerArgs.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.ServerStatus> __Marshaller_ServerStatus = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ServerStatus.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.ClientArgs> __Marshaller_ClientArgs = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ClientArgs.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.ClientStatus> __Marshaller_ClientStatus = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ClientStatus.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.CoreRequest> __Marshaller_CoreRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.CoreRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.CoreResponse> __Marshaller_CoreResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.CoreResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.Void> __Marshaller_Void = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Void.Parser.ParseFrom);
- static readonly Method<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> __Method_RunServer = new Method<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus>(
- MethodType.DuplexStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> __Method_RunServer = new grpc::Method<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus>(
+ grpc::MethodType.DuplexStreaming,
__ServiceName,
"RunServer",
__Marshaller_ServerArgs,
__Marshaller_ServerStatus);
- static readonly Method<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> __Method_RunClient = new Method<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus>(
- MethodType.DuplexStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> __Method_RunClient = new grpc::Method<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus>(
+ grpc::MethodType.DuplexStreaming,
__ServiceName,
"RunClient",
__Marshaller_ClientArgs,
__Marshaller_ClientStatus);
- static readonly Method<global::Grpc.Testing.CoreRequest, global::Grpc.Testing.CoreResponse> __Method_CoreCount = new Method<global::Grpc.Testing.CoreRequest, global::Grpc.Testing.CoreResponse>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.CoreRequest, global::Grpc.Testing.CoreResponse> __Method_CoreCount = new grpc::Method<global::Grpc.Testing.CoreRequest, global::Grpc.Testing.CoreResponse>(
+ grpc::MethodType.Unary,
__ServiceName,
"CoreCount",
__Marshaller_CoreRequest,
__Marshaller_CoreResponse);
- static readonly Method<global::Grpc.Testing.Void, global::Grpc.Testing.Void> __Method_QuitWorker = new Method<global::Grpc.Testing.Void, global::Grpc.Testing.Void>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.Void, global::Grpc.Testing.Void> __Method_QuitWorker = new grpc::Method<global::Grpc.Testing.Void, global::Grpc.Testing.Void>(
+ grpc::MethodType.Unary,
__ServiceName,
"QuitWorker",
__Marshaller_Void,
@@ -268,9 +268,9 @@ namespace Grpc.Testing {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task RunServer(IAsyncStreamReader<global::Grpc.Testing.ServerArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ServerStatus> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task RunServer(grpc::IAsyncStreamReader<global::Grpc.Testing.ServerArgs> requestStream, grpc::IServerStreamWriter<global::Grpc.Testing.ServerStatus> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -285,9 +285,9 @@ namespace Grpc.Testing {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task RunClient(IAsyncStreamReader<global::Grpc.Testing.ClientArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ClientStatus> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task RunClient(grpc::IAsyncStreamReader<global::Grpc.Testing.ClientArgs> requestStream, grpc::IServerStreamWriter<global::Grpc.Testing.ClientStatus> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -296,9 +296,9 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.CoreResponse> CoreCount(global::Grpc.Testing.CoreRequest request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.CoreResponse> CoreCount(global::Grpc.Testing.CoreRequest request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -307,24 +307,24 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Void> QuitWorker(global::Grpc.Testing.Void request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Void> QuitWorker(global::Grpc.Testing.Void request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for WorkerService</summary>
- public partial class WorkerServiceClient : ClientBase<WorkerServiceClient>
+ public partial class WorkerServiceClient : grpc::ClientBase<WorkerServiceClient>
{
/// <summary>Creates a new client for WorkerService</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public WorkerServiceClient(Channel channel) : base(channel)
+ public WorkerServiceClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for WorkerService that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public WorkerServiceClient(CallInvoker callInvoker) : base(callInvoker)
+ public WorkerServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -349,9 +349,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return RunServer(new CallOptions(headers, deadline, cancellationToken));
+ return RunServer(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Start server with specified workload.
@@ -363,7 +363,7 @@ namespace Grpc.Testing {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(CallOptions options)
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(grpc::CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_RunServer, null, options);
}
@@ -379,9 +379,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return RunClient(new CallOptions(headers, deadline, cancellationToken));
+ return RunClient(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Start client with specified workload.
@@ -393,7 +393,7 @@ namespace Grpc.Testing {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(CallOptions options)
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(grpc::CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_RunClient, null, options);
}
@@ -405,9 +405,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return CoreCount(request, new CallOptions(headers, deadline, cancellationToken));
+ return CoreCount(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Just return the core count - unary call
@@ -415,7 +415,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, CallOptions options)
+ public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_CoreCount, null, options, request);
}
@@ -427,9 +427,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return CoreCountAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return CoreCountAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Just return the core count - unary call
@@ -437,7 +437,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_CoreCount, null, options, request);
}
@@ -449,9 +449,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return QuitWorker(request, new CallOptions(headers, deadline, cancellationToken));
+ return QuitWorker(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Quit this worker
@@ -459,7 +459,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, CallOptions options)
+ public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_QuitWorker, null, options, request);
}
@@ -471,9 +471,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return QuitWorkerAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return QuitWorkerAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// Quit this worker
@@ -481,7 +481,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_QuitWorker, null, options, request);
}
@@ -494,9 +494,9 @@ namespace Grpc.Testing {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(WorkerServiceBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(WorkerServiceBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_RunServer, serviceImpl.RunServer)
.AddMethod(__Method_RunClient, serviceImpl.RunClient)
.AddMethod(__Method_CoreCount, serviceImpl.CoreCount)
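
The hunks above and below all make one mechanical change: the generated C# refers to Grpc.Core types through the file-local alias `grpc::` (declared with `using grpc = global::Grpc.Core;`) instead of relying on a plain `using Grpc.Core;` directive. The likely motivation is that a plain using directive lets a user-defined message type with the same short name (for example a proto message that generates a class called Channel or Metadata) shadow the Grpc.Core type inside the generated namespace, while an alias rooted at `global::` always resolves to Grpc.Core. A minimal sketch of that failure mode and the alias fix; the `Channel` message and `AliasDemo` class below are hypothetical and not taken from the diff:

using grpc = global::Grpc.Core;

namespace Grpc.Testing
{
    public sealed class Channel { }    // hypothetical user message type, for illustration only

    public static class AliasDemo      // hypothetical helper, not generated code
    {
        // grpc::Channel unambiguously names Grpc.Core.Channel even though a type
        // called Channel exists in this namespace; with only `using Grpc.Core;`,
        // an unqualified "Channel" here would bind to the local type instead.
        public static grpc::CallInvoker NewInvoker(grpc::Channel coreChannel)
        {
            return new grpc::DefaultCallInvoker(coreChannel);
        }
    }
}

The `name::member` form works with C# using-aliases as well as extern aliases, which is why the generated code can write grpc::CallOptions, grpc::Method, and so on throughout these hunks.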
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index 0265f8e821..77f76ebbe9 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -38,7 +38,7 @@
using System;
using System.Threading;
using System.Threading.Tasks;
-using Grpc.Core;
+using grpc = global::Grpc.Core;
namespace Grpc.Testing {
/// <summary>
@@ -49,65 +49,65 @@ namespace Grpc.Testing {
{
static readonly string __ServiceName = "grpc.testing.TestService";
- static readonly Marshaller<global::Grpc.Testing.Empty> __Marshaller_Empty = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Empty.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.SimpleRequest> __Marshaller_SimpleRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleRequest.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.SimpleResponse> __Marshaller_SimpleResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleResponse.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.StreamingOutputCallRequest> __Marshaller_StreamingOutputCallRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingOutputCallRequest.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.StreamingOutputCallResponse> __Marshaller_StreamingOutputCallResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingOutputCallResponse.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.StreamingInputCallRequest> __Marshaller_StreamingInputCallRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingInputCallRequest.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.StreamingInputCallResponse> __Marshaller_StreamingInputCallResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingInputCallResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.Empty> __Marshaller_Empty = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Empty.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.SimpleRequest> __Marshaller_SimpleRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.SimpleResponse> __Marshaller_SimpleResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.SimpleResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.StreamingOutputCallRequest> __Marshaller_StreamingOutputCallRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingOutputCallRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.StreamingOutputCallResponse> __Marshaller_StreamingOutputCallResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingOutputCallResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.StreamingInputCallRequest> __Marshaller_StreamingInputCallRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingInputCallRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.StreamingInputCallResponse> __Marshaller_StreamingInputCallResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.StreamingInputCallResponse.Parser.ParseFrom);
- static readonly Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty> __Method_EmptyCall = new Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty> __Method_EmptyCall = new grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty>(
+ grpc::MethodType.Unary,
__ServiceName,
"EmptyCall",
__Marshaller_Empty,
__Marshaller_Empty);
- static readonly Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_UnaryCall = new Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_UnaryCall = new grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ grpc::MethodType.Unary,
__ServiceName,
"UnaryCall",
__Marshaller_SimpleRequest,
__Marshaller_SimpleResponse);
- static readonly Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_CacheableUnaryCall = new Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> __Method_CacheableUnaryCall = new grpc::Method<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse>(
+ grpc::MethodType.Unary,
__ServiceName,
"CacheableUnaryCall",
__Marshaller_SimpleRequest,
__Marshaller_SimpleResponse);
- static readonly Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> __Method_StreamingOutputCall = new Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse>(
- MethodType.ServerStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> __Method_StreamingOutputCall = new grpc::Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse>(
+ grpc::MethodType.ServerStreaming,
__ServiceName,
"StreamingOutputCall",
__Marshaller_StreamingOutputCallRequest,
__Marshaller_StreamingOutputCallResponse);
- static readonly Method<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> __Method_StreamingInputCall = new Method<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse>(
- MethodType.ClientStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> __Method_StreamingInputCall = new grpc::Method<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse>(
+ grpc::MethodType.ClientStreaming,
__ServiceName,
"StreamingInputCall",
__Marshaller_StreamingInputCallRequest,
__Marshaller_StreamingInputCallResponse);
- static readonly Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> __Method_FullDuplexCall = new Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse>(
- MethodType.DuplexStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> __Method_FullDuplexCall = new grpc::Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse>(
+ grpc::MethodType.DuplexStreaming,
__ServiceName,
"FullDuplexCall",
__Marshaller_StreamingOutputCallRequest,
__Marshaller_StreamingOutputCallResponse);
- static readonly Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> __Method_HalfDuplexCall = new Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse>(
- MethodType.DuplexStreaming,
+ static readonly grpc::Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> __Method_HalfDuplexCall = new grpc::Method<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse>(
+ grpc::MethodType.DuplexStreaming,
__ServiceName,
"HalfDuplexCall",
__Marshaller_StreamingOutputCallRequest,
__Marshaller_StreamingOutputCallResponse);
- static readonly Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty> __Method_UnimplementedCall = new Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty> __Method_UnimplementedCall = new grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty>(
+ grpc::MethodType.Unary,
__ServiceName,
"UnimplementedCall",
__Marshaller_Empty,
@@ -128,9 +128,9 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> EmptyCall(global::Grpc.Testing.Empty request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> EmptyCall(global::Grpc.Testing.Empty request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -139,9 +139,9 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -152,9 +152,9 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -165,9 +165,9 @@ namespace Grpc.Testing {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, grpc::IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -177,9 +177,9 @@ namespace Grpc.Testing {
/// <param name="requestStream">Used for reading requests from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(IAsyncStreamReader<global::Grpc.Testing.StreamingInputCallRequest> requestStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(grpc::IAsyncStreamReader<global::Grpc.Testing.StreamingInputCallRequest> requestStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -191,9 +191,9 @@ namespace Grpc.Testing {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task FullDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task FullDuplexCall(grpc::IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, grpc::IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -206,9 +206,9 @@ namespace Grpc.Testing {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task HalfDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task HalfDuplexCall(grpc::IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, grpc::IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
/// <summary>
@@ -218,24 +218,24 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for TestService</summary>
- public partial class TestServiceClient : ClientBase<TestServiceClient>
+ public partial class TestServiceClient : grpc::ClientBase<TestServiceClient>
{
/// <summary>Creates a new client for TestService</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public TestServiceClient(Channel channel) : base(channel)
+ public TestServiceClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for TestService that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public TestServiceClient(CallInvoker callInvoker) : base(callInvoker)
+ public TestServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -256,9 +256,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return EmptyCall(request, new CallOptions(headers, deadline, cancellationToken));
+ return EmptyCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One empty request followed by one empty response.
@@ -266,7 +266,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_EmptyCall, null, options, request);
}
@@ -278,9 +278,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return EmptyCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return EmptyCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One empty request followed by one empty response.
@@ -288,7 +288,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_EmptyCall, null, options, request);
}
@@ -300,9 +300,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnaryCall(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnaryCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by one response.
@@ -310,7 +310,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_UnaryCall, null, options, request);
}
@@ -322,9 +322,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnaryCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by one response.
@@ -332,7 +332,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request);
}
@@ -346,9 +346,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return CacheableUnaryCall(request, new CallOptions(headers, deadline, cancellationToken));
+ return CacheableUnaryCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by one response. Response has cache control
@@ -358,7 +358,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_CacheableUnaryCall, null, options, request);
}
@@ -372,9 +372,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return CacheableUnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return CacheableUnaryCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by one response. Response has cache control
@@ -384,7 +384,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_CacheableUnaryCall, null, options, request);
}
@@ -397,9 +397,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return StreamingOutputCall(request, new CallOptions(headers, deadline, cancellationToken));
+ return StreamingOutputCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// One request followed by a sequence of responses (streamed download).
@@ -408,7 +408,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, CallOptions options)
+ public virtual grpc::AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, grpc::CallOptions options)
{
return CallInvoker.AsyncServerStreamingCall(__Method_StreamingOutputCall, null, options, request);
}
@@ -420,9 +420,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return StreamingInputCall(new CallOptions(headers, deadline, cancellationToken));
+ return StreamingInputCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// A sequence of requests followed by one response (streamed upload).
@@ -430,7 +430,7 @@ namespace Grpc.Testing {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(CallOptions options)
+ public virtual grpc::AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(grpc::CallOptions options)
{
return CallInvoker.AsyncClientStreamingCall(__Method_StreamingInputCall, null, options);
}
@@ -443,9 +443,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return FullDuplexCall(new CallOptions(headers, deadline, cancellationToken));
+ return FullDuplexCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// A sequence of requests with each request served by the server immediately.
@@ -454,7 +454,7 @@ namespace Grpc.Testing {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(CallOptions options)
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(grpc::CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_FullDuplexCall, null, options);
}
@@ -468,9 +468,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return HalfDuplexCall(new CallOptions(headers, deadline, cancellationToken));
+ return HalfDuplexCall(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// A sequence of requests followed by a sequence of responses.
@@ -480,7 +480,7 @@ namespace Grpc.Testing {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(CallOptions options)
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(grpc::CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_HalfDuplexCall, null, options);
}
@@ -493,9 +493,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnimplementedCall(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnimplementedCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// The test server will not implement this method. It will be used
@@ -504,7 +504,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_UnimplementedCall, null, options, request);
}
@@ -517,9 +517,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnimplementedCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnimplementedCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// The test server will not implement this method. It will be used
@@ -528,7 +528,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_UnimplementedCall, null, options, request);
}
@@ -541,9 +541,9 @@ namespace Grpc.Testing {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(TestServiceBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(TestServiceBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_EmptyCall, serviceImpl.EmptyCall)
.AddMethod(__Method_UnaryCall, serviceImpl.UnaryCall)
.AddMethod(__Method_CacheableUnaryCall, serviceImpl.CacheableUnaryCall)
@@ -563,10 +563,10 @@ namespace Grpc.Testing {
{
static readonly string __ServiceName = "grpc.testing.UnimplementedService";
- static readonly Marshaller<global::Grpc.Testing.Empty> __Marshaller_Empty = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Empty.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.Empty> __Marshaller_Empty = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Empty.Parser.ParseFrom);
- static readonly Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty> __Method_UnimplementedCall = new Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty> __Method_UnimplementedCall = new grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.Empty>(
+ grpc::MethodType.Unary,
__ServiceName,
"UnimplementedCall",
__Marshaller_Empty,
@@ -587,24 +587,24 @@ namespace Grpc.Testing {
/// <param name="request">The request received from the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>The response to send back to the client (wrapped by a task).</returns>
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for UnimplementedService</summary>
- public partial class UnimplementedServiceClient : ClientBase<UnimplementedServiceClient>
+ public partial class UnimplementedServiceClient : grpc::ClientBase<UnimplementedServiceClient>
{
/// <summary>Creates a new client for UnimplementedService</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public UnimplementedServiceClient(Channel channel) : base(channel)
+ public UnimplementedServiceClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for UnimplementedService that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public UnimplementedServiceClient(CallInvoker callInvoker) : base(callInvoker)
+ public UnimplementedServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -625,9 +625,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnimplementedCall(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnimplementedCall(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// A call that no server should implement
@@ -635,7 +635,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The response received from the server.</returns>
- public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_UnimplementedCall, null, options, request);
}
@@ -647,9 +647,9 @@ namespace Grpc.Testing {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return UnimplementedCallAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return UnimplementedCallAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// A call that no server should implement
@@ -657,7 +657,7 @@ namespace Grpc.Testing {
/// <param name="request">The request to send to the server.</param>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_UnimplementedCall, null, options, request);
}
@@ -670,9 +670,9 @@ namespace Grpc.Testing {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(UnimplementedServiceBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(UnimplementedServiceBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_UnimplementedCall, serviceImpl.UnimplementedCall).Build();
}
@@ -684,19 +684,19 @@ namespace Grpc.Testing {
{
static readonly string __ServiceName = "grpc.testing.ReconnectService";
- static readonly Marshaller<global::Grpc.Testing.ReconnectParams> __Marshaller_ReconnectParams = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ReconnectParams.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.Empty> __Marshaller_Empty = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Empty.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Testing.ReconnectInfo> __Marshaller_ReconnectInfo = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ReconnectInfo.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.ReconnectParams> __Marshaller_ReconnectParams = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ReconnectParams.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.Empty> __Marshaller_Empty = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.Empty.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Testing.ReconnectInfo> __Marshaller_ReconnectInfo = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Testing.ReconnectInfo.Parser.ParseFrom);
- static readonly Method<global::Grpc.Testing.ReconnectParams, global::Grpc.Testing.Empty> __Method_Start = new Method<global::Grpc.Testing.ReconnectParams, global::Grpc.Testing.Empty>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.ReconnectParams, global::Grpc.Testing.Empty> __Method_Start = new grpc::Method<global::Grpc.Testing.ReconnectParams, global::Grpc.Testing.Empty>(
+ grpc::MethodType.Unary,
__ServiceName,
"Start",
__Marshaller_ReconnectParams,
__Marshaller_Empty);
- static readonly Method<global::Grpc.Testing.Empty, global::Grpc.Testing.ReconnectInfo> __Method_Stop = new Method<global::Grpc.Testing.Empty, global::Grpc.Testing.ReconnectInfo>(
- MethodType.Unary,
+ static readonly grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.ReconnectInfo> __Method_Stop = new grpc::Method<global::Grpc.Testing.Empty, global::Grpc.Testing.ReconnectInfo>(
+ grpc::MethodType.Unary,
__ServiceName,
"Stop",
__Marshaller_Empty,
@@ -711,29 +711,29 @@ namespace Grpc.Testing {
/// <summary>Base class for server-side implementations of ReconnectService</summary>
public abstract partial class ReconnectServiceBase
{
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> Start(global::Grpc.Testing.ReconnectParams request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> Start(global::Grpc.Testing.ReconnectParams request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
- public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.ReconnectInfo> Stop(global::Grpc.Testing.Empty request, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.ReconnectInfo> Stop(global::Grpc.Testing.Empty request, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for ReconnectService</summary>
- public partial class ReconnectServiceClient : ClientBase<ReconnectServiceClient>
+ public partial class ReconnectServiceClient : grpc::ClientBase<ReconnectServiceClient>
{
/// <summary>Creates a new client for ReconnectService</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public ReconnectServiceClient(Channel channel) : base(channel)
+ public ReconnectServiceClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for ReconnectService that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public ReconnectServiceClient(CallInvoker callInvoker) : base(callInvoker)
+ public ReconnectServiceClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -746,35 +746,35 @@ namespace Grpc.Testing {
{
}
- public virtual global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return Start(request, new CallOptions(headers, deadline, cancellationToken));
+ return Start(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
- public virtual global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, CallOptions options)
+ public virtual global::Grpc.Testing.Empty Start(global::Grpc.Testing.ReconnectParams request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_Start, null, options, request);
}
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return StartAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return StartAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
- public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.Empty> StartAsync(global::Grpc.Testing.ReconnectParams request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_Start, null, options, request);
}
- public virtual global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return Stop(request, new CallOptions(headers, deadline, cancellationToken));
+ return Stop(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
- public virtual global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual global::Grpc.Testing.ReconnectInfo Stop(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.BlockingUnaryCall(__Method_Stop, null, options, request);
}
- public virtual AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return StopAsync(request, new CallOptions(headers, deadline, cancellationToken));
+ return StopAsync(request, new grpc::CallOptions(headers, deadline, cancellationToken));
}
- public virtual AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, CallOptions options)
+ public virtual grpc::AsyncUnaryCall<global::Grpc.Testing.ReconnectInfo> StopAsync(global::Grpc.Testing.Empty request, grpc::CallOptions options)
{
return CallInvoker.AsyncUnaryCall(__Method_Stop, null, options, request);
}
@@ -787,9 +787,9 @@ namespace Grpc.Testing {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(ReconnectServiceBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(ReconnectServiceBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_Start, serviceImpl.Start)
.AddMethod(__Method_Stop, serviceImpl.Stop).Build();
}
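
The TestGrpc.cs hunks above apply the same aliasing pass and leave the public surface unchanged, so hand-written callers do not need the alias themselves: they can keep `using Grpc.Core;` and pass the usual Metadata/CallOptions values. A minimal caller sketch under that assumption; the driver class and the "localhost:50051" endpoint are placeholders, not anything from the diff:

using System.Threading.Tasks;
using Grpc.Core;        // hand-written callers can still import Grpc.Core directly
using Grpc.Testing;

// Hypothetical driver, for illustration only.
public static class TestServiceCaller
{
    public static async Task RunAsync()
    {
        var channel = new Channel("localhost:50051", ChannelCredentials.Insecure);
        var client = new TestService.TestServiceClient(channel);

        // The generated signatures still take Grpc.Core.Metadata / CallOptions;
        // the grpc:: alias is only how TestGrpc.cs spells those types internally.
        Empty reply = client.EmptyCall(new Empty(), new Metadata());
        SimpleResponse response = await client.UnaryCallAsync(new SimpleRequest());

        await channel.ShutdownAsync();
    }
}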
diff --git a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
index 5bd7558be5..45321587f5 100644
--- a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
+++ b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
@@ -37,18 +37,18 @@
using System;
using System.Threading;
using System.Threading.Tasks;
-using Grpc.Core;
+using grpc = global::Grpc.Core;
namespace Grpc.Reflection.V1Alpha {
public static partial class ServerReflection
{
static readonly string __ServiceName = "grpc.reflection.v1alpha.ServerReflection";
- static readonly Marshaller<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest> __Marshaller_ServerReflectionRequest = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Reflection.V1Alpha.ServerReflectionRequest.Parser.ParseFrom);
- static readonly Marshaller<global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> __Marshaller_ServerReflectionResponse = Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Reflection.V1Alpha.ServerReflectionResponse.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest> __Marshaller_ServerReflectionRequest = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Reflection.V1Alpha.ServerReflectionRequest.Parser.ParseFrom);
+ static readonly grpc::Marshaller<global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> __Marshaller_ServerReflectionResponse = grpc::Marshallers.Create((arg) => global::Google.Protobuf.MessageExtensions.ToByteArray(arg), global::Grpc.Reflection.V1Alpha.ServerReflectionResponse.Parser.ParseFrom);
- static readonly Method<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> __Method_ServerReflectionInfo = new Method<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse>(
- MethodType.DuplexStreaming,
+ static readonly grpc::Method<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> __Method_ServerReflectionInfo = new grpc::Method<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse>(
+ grpc::MethodType.DuplexStreaming,
__ServiceName,
"ServerReflectionInfo",
__Marshaller_ServerReflectionRequest,
@@ -71,24 +71,24 @@ namespace Grpc.Reflection.V1Alpha {
/// <param name="responseStream">Used for sending responses back to the client.</param>
/// <param name="context">The context of the server-side call handler being invoked.</param>
/// <returns>A task indicating completion of the handler.</returns>
- public virtual global::System.Threading.Tasks.Task ServerReflectionInfo(IAsyncStreamReader<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest> requestStream, IServerStreamWriter<global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> responseStream, ServerCallContext context)
+ public virtual global::System.Threading.Tasks.Task ServerReflectionInfo(grpc::IAsyncStreamReader<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest> requestStream, grpc::IServerStreamWriter<global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> responseStream, grpc::ServerCallContext context)
{
- throw new RpcException(new Status(StatusCode.Unimplemented, ""));
+ throw new grpc::RpcException(new grpc::Status(grpc::StatusCode.Unimplemented, ""));
}
}
/// <summary>Client for ServerReflection</summary>
- public partial class ServerReflectionClient : ClientBase<ServerReflectionClient>
+ public partial class ServerReflectionClient : grpc::ClientBase<ServerReflectionClient>
{
/// <summary>Creates a new client for ServerReflection</summary>
/// <param name="channel">The channel to use to make remote calls.</param>
- public ServerReflectionClient(Channel channel) : base(channel)
+ public ServerReflectionClient(grpc::Channel channel) : base(channel)
{
}
/// <summary>Creates a new client for ServerReflection that uses a custom <c>CallInvoker</c>.</summary>
/// <param name="callInvoker">The callInvoker to use to make remote calls.</param>
- public ServerReflectionClient(CallInvoker callInvoker) : base(callInvoker)
+ public ServerReflectionClient(grpc::CallInvoker callInvoker) : base(callInvoker)
{
}
/// <summary>Protected parameterless constructor to allow creation of test doubles.</summary>
@@ -109,9 +109,9 @@ namespace Grpc.Reflection.V1Alpha {
/// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
/// <param name="cancellationToken">An optional token for canceling the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(grpc::Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
- return ServerReflectionInfo(new CallOptions(headers, deadline, cancellationToken));
+ return ServerReflectionInfo(new grpc::CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
/// The reflection service is structured as a bidirectional stream, ensuring
@@ -119,7 +119,7 @@ namespace Grpc.Reflection.V1Alpha {
/// </summary>
/// <param name="options">The options for the call.</param>
/// <returns>The call object.</returns>
- public virtual AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(CallOptions options)
+ public virtual grpc::AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(grpc::CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_ServerReflectionInfo, null, options);
}
@@ -132,9 +132,9 @@ namespace Grpc.Reflection.V1Alpha {
/// <summary>Creates service definition that can be registered with a server</summary>
/// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
- public static ServerServiceDefinition BindService(ServerReflectionBase serviceImpl)
+ public static grpc::ServerServiceDefinition BindService(ServerReflectionBase serviceImpl)
{
- return ServerServiceDefinition.CreateBuilder()
+ return grpc::ServerServiceDefinition.CreateBuilder()
.AddMethod(__Method_ServerReflectionInfo, serviceImpl.ServerReflectionInfo).Build();
}
diff --git a/src/node/ext/server_uv.cc b/src/node/ext/server_uv.cc
index bf8b609a63..c5e5ca9f42 100644
--- a/src/node/ext/server_uv.cc
+++ b/src/node/ext/server_uv.cc
@@ -47,12 +47,12 @@ namespace grpc {
namespace node {
using Nan::Callback;
+using Nan::MaybeLocal;
using v8::External;
using v8::Function;
using v8::FunctionTemplate;
using v8::Local;
-using v8::MaybeLocal;
using v8::Object;
using v8::Value;
diff --git a/src/node/src/common.js b/src/node/src/common.js
index 98eabf5c0b..a0fe4480ea 100644
--- a/src/node/src/common.js
+++ b/src/node/src/common.js
@@ -149,6 +149,7 @@ exports.getProtobufServiceAttrs = function getProtobufServiceAttrs(service,
return _.camelCase(method.name);
}), _.map(service.children, function(method) {
return {
+ originalName: method.name,
path: prefix + method.name,
requestStream: method.requestStream,
responseStream: method.responseStream,
diff --git a/src/node/src/server.js b/src/node/src/server.js
index a5a0ea2642..bdb4a56203 100644
--- a/src/node/src/server.js
+++ b/src/node/src/server.js
@@ -755,9 +755,16 @@ Server.prototype.addService = function(service, implementation) {
}
var impl;
if (implementation[name] === undefined) {
- common.log(grpc.logVerbosity.ERROR, 'Method handler for ' +
- attrs.path + ' expected but not provided');
- impl = defaultHandler[method_type];
+ /* Handle the case where the method is passed with the name exactly as
+ written in the proto file, instead of using JavaScript function
+ naming style */
+ if (implementation[attrs.originalName] === undefined) {
+ common.log(grpc.logVerbosity.ERROR, 'Method handler ' + name + ' for ' +
+ attrs.path + ' expected but not provided');
+ impl = defaultHandler[method_type];
+ } else {
+ impl = _.bind(implementation[attrs.originalName], implementation);
+ }
} else {
impl = _.bind(implementation[name], implementation);
}
diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js
index 2636ea85ac..1d739562a6 100644
--- a/src/node/test/surface_test.js
+++ b/src/node/test/surface_test.js
@@ -143,6 +143,32 @@ describe('Server.prototype.addProtoService', function() {
server.addProtoService(mathService, dummyImpls);
});
});
+ it('Should allow method names as originally written', function() {
+ var altDummyImpls = {
+ 'Div': function() {},
+ 'DivMany': function() {},
+ 'Fib': function() {},
+ 'Sum': function() {}
+ };
+ assert.doesNotThrow(function() {
+ server.addProtoService(mathService, altDummyImpls);
+ });
+ });
+ it('Should have a conflict between name variations', function() {
+    /* This is really testing that both name variations are actually used,
+       by checking that the method actually gets registered for the
+       corresponding function in both cases */
+ var altDummyImpls = {
+ 'Div': function() {},
+ 'DivMany': function() {},
+ 'Fib': function() {},
+ 'Sum': function() {}
+ };
+ server.addProtoService(mathService, altDummyImpls);
+ assert.throws(function() {
+ server.addProtoService(mathService, dummyImpls);
+ });
+ });
it('Should fail if the server has been started', function() {
server.start();
assert.throws(function() {
diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
index ab8f82a39e..2f41ad196a 100644
--- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
+++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec
@@ -101,9 +101,9 @@ Pod::Spec.new do |s|
s.preserve_paths = plugin
# Restrict the protoc version to the one supported by this plugin.
- s.dependency '!ProtoCompiler', '3.1.0'
+ s.dependency '!ProtoCompiler', '3.2.0'
# For the Protobuf dependency not to complain:
- s.ios.deployment_target = '7.1'
+ s.ios.deployment_target = '7.0'
s.osx.deployment_target = '10.9'
# Restrict the gRPC runtime version to the one supported by this plugin.
s.dependency 'gRPC-ProtoRPC', v
diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec
index dc4d8e964e..2e9b944f33 100644
--- a/src/objective-c/!ProtoCompiler.podspec
+++ b/src/objective-c/!ProtoCompiler.podspec
@@ -36,7 +36,7 @@ Pod::Spec.new do |s|
# exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
# before them.
s.name = '!ProtoCompiler'
- v = '3.1.0'
+ v = '3.2.0'
s.version = v
s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files'
s.description = <<-DESC
@@ -110,7 +110,7 @@ Pod::Spec.new do |s|
# Restrict the protobuf runtime version to the one supported by this version of protoc.
s.dependency 'Protobuf', '~> 3.0'
# For the Protobuf dependency not to complain:
- s.ios.deployment_target = '7.1'
+ s.ios.deployment_target = '7.0'
s.osx.deployment_target = '10.9'
# This is only for local development of protoc: If the Podfile brings this pod from a local
diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
index 1e0c8024ca..3b442645e8 100644
--- a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
+++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
@@ -273,8 +273,7 @@ static char *roots_filename;
}
- (void)testCompressedPayload {
- // NOT SUPPORTED
- // [self testIndividualCase:"compressed_payload"];
+ [self testIndividualCase:"compressed_payload"];
}
- (void)testConnectivity {
diff --git a/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m b/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m
index e97f3d2d1a..a76e45416b 100644
--- a/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m
+++ b/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m
@@ -187,6 +187,18 @@ unsigned int parse_h2_length(const char *field) {
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
+ int sl = socket(AF_INET, SOCK_STREAM, 0);
+ GPR_ASSERT(sl >= 0);
+
+  // Make a TCP endpoint to accept the connection
+ struct sockaddr_in s_addr;
+ memset(&s_addr, 0, sizeof(s_addr));
+ s_addr.sin_family = AF_INET;
+ s_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ s_addr.sin_port = htons(port);
+ GPR_ASSERT(0 == bind(sl, (struct sockaddr *)&s_addr, sizeof(s_addr)));
+ GPR_ASSERT(0 == listen(sl, 5));
+
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
@@ -226,17 +238,6 @@ unsigned int parse_h2_length(const char *field) {
dispatch_async(
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
- int sl = socket(AF_INET, SOCK_STREAM, 0);
- GPR_ASSERT(sl >= 0);
-
- // Make and TCP endpoint to accept the connection
- struct sockaddr_in s_addr;
- memset(&s_addr, 0, sizeof(s_addr));
- s_addr.sin_family = AF_INET;
- s_addr.sin_addr.s_addr = htonl(INADDR_ANY);
- s_addr.sin_port = htons(port);
- GPR_ASSERT(0 == bind(sl, (struct sockaddr *)&s_addr, sizeof(s_addr)));
- GPR_ASSERT(0 == listen(sl, 5));
int s = accept(sl, NULL, NULL);
GPR_ASSERT(s >= 0);
@@ -324,17 +325,18 @@ unsigned int parse_h2_length(const char *field) {
__weak XCTestExpectation *expectation =
[self expectationWithDescription:@"Coalescing"];
+ int sl = socket(AF_INET, SOCK_STREAM, 0);
+ GPR_ASSERT(sl >= 0);
+ struct sockaddr_in s_addr;
+ memset(&s_addr, 0, sizeof(s_addr));
+ s_addr.sin_family = AF_INET;
+ s_addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ s_addr.sin_port = htons(port);
+ GPR_ASSERT(0 == bind(sl, (struct sockaddr *)&s_addr, sizeof(s_addr)));
+ GPR_ASSERT(0 == listen(sl, 5));
+
dispatch_async(
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
- int sl = socket(AF_INET, SOCK_STREAM, 0);
- GPR_ASSERT(sl >= 0);
- struct sockaddr_in s_addr;
- memset(&s_addr, 0, sizeof(s_addr));
- s_addr.sin_family = AF_INET;
- s_addr.sin_addr.s_addr = htonl(INADDR_ANY);
- s_addr.sin_port = htons(port);
- GPR_ASSERT(0 == bind(sl, (struct sockaddr *)&s_addr, sizeof(s_addr)));
- GPR_ASSERT(0 == listen(sl, 5));
int s = accept(sl, NULL, NULL);
GPR_ASSERT(s >= 0);
struct timeval tv;
@@ -389,9 +391,6 @@ unsigned int parse_h2_length(const char *field) {
[expectation fulfill];
});
- // Guarantees that server is listening to the port before client connects.
- sleep(1);
-
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
diff --git a/src/php/lib/Grpc/AbstractCall.php b/src/php/lib/Grpc/AbstractCall.php
index 40387abdc0..4833fdc7b6 100644
--- a/src/php/lib/Grpc/AbstractCall.php
+++ b/src/php/lib/Grpc/AbstractCall.php
@@ -131,6 +131,8 @@ abstract class AbstractCall
// Proto3 implementation
if (method_exists($data, 'encode')) {
return $data->encode();
+ } else if (method_exists($data, 'serializeToString')) {
+ return $data->serializeToString();
}
// Protobuf-PHP implementation
@@ -154,7 +156,11 @@ abstract class AbstractCall
if (is_array($this->deserialize)) {
list($className, $deserializeFunc) = $this->deserialize;
$obj = new $className();
- $obj->$deserializeFunc($value);
+ if (method_exists($obj, $deserializeFunc)) {
+ $obj->$deserializeFunc($value);
+ } else {
+ $obj->mergeFromString($value);
+ }
return $obj;
}
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index a4481b2ac3..4960df3be9 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -1273,7 +1273,10 @@ def secure_channel(target, credentials, options=None):
credentials._credentials)
-def server(thread_pool, handlers=None, options=None):
+def server(thread_pool,
+ handlers=None,
+ options=None,
+ maximum_concurrent_rpcs=None):
"""Creates a Server with which RPCs can be serviced.
Args:
@@ -1286,13 +1289,17 @@ def server(thread_pool, handlers=None, options=None):
returned Server is started.
options: A sequence of string-value pairs according to which to configure
the created server.
+ maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
+ will service before returning status RESOURCE_EXHAUSTED, or None to
+ indicate no limit.
Returns:
A Server with which RPCs can be serviced.
"""
from grpc import _server # pylint: disable=cyclic-import
return _server.Server(thread_pool, () if handlers is None else handlers, ()
- if options is None else options)
+ if options is None else options,
+ maximum_concurrent_rpcs)
################################### __all__ #################################
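A minimal usage sketch of the new maximum_concurrent_rpcs parameter added above
(the executor size, port, and limit below are illustrative assumptions, not part
of this patch):

    from concurrent import futures
    import grpc

    # Any RPC that arrives while 10 others are still in flight is rejected
    # with status RESOURCE_EXHAUSTED instead of being queued on the pool.
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
        maximum_concurrent_rpcs=10)
    server.add_insecure_port('[::]:50051')  # hypothetical port
    server.start()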
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/security.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/security.pxd.pxi
index 3a952ca309..9915b0ed1a 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/security.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/security.pxd.pxi
@@ -29,4 +29,4 @@
cdef grpc_ssl_roots_override_result ssl_roots_override_callback(
- char **pem_root_certs) with gil
+ char **pem_root_certs) nogil
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi
index 20fc1c5fce..357b0330d5 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/security.pyx.pxi
@@ -33,12 +33,14 @@ import pkg_resources
cdef grpc_ssl_roots_override_result ssl_roots_override_callback(
- char **pem_root_certs) with gil:
- temporary_pem_root_certs = pkg_resources.resource_string(
- __name__.rstrip('.cygrpc'), '_credentials/roots.pem')
- pem_root_certs[0] = <char *>gpr_malloc(len(temporary_pem_root_certs) + 1)
- memcpy(
- pem_root_certs[0], <char *>temporary_pem_root_certs,
- len(temporary_pem_root_certs))
- pem_root_certs[0][len(temporary_pem_root_certs)] = '\0'
+ char **pem_root_certs) nogil:
+ with gil:
+ temporary_pem_root_certs = pkg_resources.resource_string(
+ __name__.rstrip('.cygrpc'), '_credentials/roots.pem')
+ pem_root_certs[0] = <char *>gpr_malloc(len(temporary_pem_root_certs) + 1)
+ memcpy(
+ pem_root_certs[0], <char *>temporary_pem_root_certs,
+ len(temporary_pem_root_certs))
+ pem_root_certs[0][len(temporary_pem_root_certs)] = '\0'
+
return GRPC_SSL_ROOTS_OVERRIDE_OK
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx
index e1bd046a1a..274b5f1b8a 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pyx
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx
@@ -47,14 +47,14 @@ include "_cygrpc/server.pyx.pxi"
#
# initialize gRPC
#
-
-
cdef extern from "Python.h":
- int Py_AtExit(void(*func)())
-
+ int PyEval_InitThreads()
-def _initialize():
+cdef _initialize():
+  # We have Python callbacks called by c-core threads; this ensures the GIL
+  # is initialized.
+ PyEval_InitThreads()
grpc_set_ssl_roots_override_callback(
<grpc_ssl_roots_override_callback>ssl_roots_override_callback)
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
index 84e096d4c0..47838c2c98 100644
--- a/src/python/grpcio/grpc/_server.py
+++ b/src/python/grpcio/grpc/_server.py
@@ -504,37 +504,37 @@ def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
- thread_pool.submit(_unary_response_in_pool, rpc_event, state,
- method_handler.unary_unary, unary_request,
- method_handler.request_deserializer,
- method_handler.response_serializer)
+ return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
+ method_handler.unary_unary, unary_request,
+ method_handler.request_deserializer,
+ method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
- thread_pool.submit(_stream_response_in_pool, rpc_event, state,
- method_handler.unary_stream, unary_request,
- method_handler.request_deserializer,
- method_handler.response_serializer)
+ return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
+ method_handler.unary_stream, unary_request,
+ method_handler.request_deserializer,
+ method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
method_handler.request_deserializer)
- thread_pool.submit(_unary_response_in_pool, rpc_event, state,
- method_handler.stream_unary, lambda: request_iterator,
- method_handler.request_deserializer,
- method_handler.response_serializer)
+ return thread_pool.submit(
+ _unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
+ lambda: request_iterator, method_handler.request_deserializer,
+ method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
method_handler.request_deserializer)
- thread_pool.submit(_stream_response_in_pool, rpc_event, state,
- method_handler.stream_stream, lambda: request_iterator,
- method_handler.request_deserializer,
- method_handler.response_serializer)
+ return thread_pool.submit(
+ _stream_response_in_pool, rpc_event, state,
+ method_handler.stream_stream, lambda: request_iterator,
+ method_handler.request_deserializer, method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
@@ -549,13 +549,12 @@ def _find_method_handler(rpc_event, generic_handlers):
return None
-def _handle_unrecognized_method(rpc_event):
+def _reject_rpc(rpc_event, status, details):
operations = (cygrpc.operation_send_initial_metadata(_common.EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
- _common.EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
- b'Method not found!', _EMPTY_FLAGS),)
+ _common.EMPTY_METADATA, status, details, _EMPTY_FLAGS),)
rpc_state = _RPCState()
rpc_event.operation_call.start_server_batch(
operations, lambda ignored_event: (rpc_state, (),))
@@ -572,33 +571,37 @@ def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
- _handle_stream_stream(rpc_event, state, method_handler,
- thread_pool)
+ return state, _handle_stream_stream(rpc_event, state,
+ method_handler, thread_pool)
else:
- _handle_stream_unary(rpc_event, state, method_handler,
- thread_pool)
+ return state, _handle_stream_unary(rpc_event, state,
+ method_handler, thread_pool)
else:
if method_handler.response_streaming:
- _handle_unary_stream(rpc_event, state, method_handler,
- thread_pool)
+ return state, _handle_unary_stream(rpc_event, state,
+ method_handler, thread_pool)
else:
- _handle_unary_unary(rpc_event, state, method_handler,
- thread_pool)
- return state
+ return state, _handle_unary_unary(rpc_event, state,
+ method_handler, thread_pool)
-def _handle_call(rpc_event, generic_handlers, thread_pool):
+def _handle_call(rpc_event, generic_handlers, thread_pool,
+ concurrency_exceeded):
if not rpc_event.success:
- return None
+ return None, None
if rpc_event.request_call_details.method is not None:
method_handler = _find_method_handler(rpc_event, generic_handlers)
if method_handler is None:
- return _handle_unrecognized_method(rpc_event)
+ return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
+ b'Method not found!'), None
+ elif concurrency_exceeded:
+ return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
+ b'Concurrent RPC limit exceeded!'), None
else:
return _handle_with_method_handler(rpc_event, method_handler,
thread_pool)
else:
- return None
+ return None, None
@enum.unique
@@ -610,7 +613,8 @@ class _ServerStage(enum.Enum):
class _ServerState(object):
- def __init__(self, completion_queue, server, generic_handlers, thread_pool):
+ def __init__(self, completion_queue, server, generic_handlers, thread_pool,
+ maximum_concurrent_rpcs):
self.lock = threading.Lock()
self.completion_queue = completion_queue
self.server = server
@@ -618,6 +622,8 @@ class _ServerState(object):
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
+ self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
+ self.active_rpc_count = 0
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
@@ -657,6 +663,11 @@ def _stop_serving(state):
return False
+def _on_call_completed(state):
+ with state.lock:
+ state.active_rpc_count -= 1
+
+
def _serve(state):
while True:
event = state.completion_queue.poll()
@@ -668,10 +679,18 @@ def _serve(state):
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
- rpc_state = _handle_call(event, state.generic_handlers,
- state.thread_pool)
+ concurrency_exceeded = (
+ state.maximum_concurrent_rpcs is not None and
+ state.active_rpc_count >= state.maximum_concurrent_rpcs)
+ rpc_state, rpc_future = _handle_call(
+ event, state.generic_handlers, state.thread_pool,
+ concurrency_exceeded)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
+ if rpc_future is not None:
+ state.active_rpc_count += 1
+ rpc_future.add_done_callback(
+ lambda unused_future: _on_call_completed(state))
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
@@ -749,12 +768,13 @@ def _start(state):
class Server(grpc.Server):
- def __init__(self, thread_pool, generic_handlers, options):
+ def __init__(self, thread_pool, generic_handlers, options,
+ maximum_concurrent_rpcs):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server(_common.channel_args(options))
server.register_completion_queue(completion_queue)
self._state = _ServerState(completion_queue, server, generic_handlers,
- thread_pool)
+ thread_pool, maximum_concurrent_rpcs)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 52d3ffd2c4..258c65c08e 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -34,6 +34,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/profiling/stap_timers.c',
'src/core/lib/support/alloc.c',
'src/core/lib/support/arena.c',
+ 'src/core/lib/support/atm.c',
'src/core/lib/support/avl.c',
'src/core/lib/support/backoff.c',
'src/core/lib/support/cmdline.c',
@@ -123,6 +124,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
+ 'src/core/lib/iomgr/socket_factory_posix.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
@@ -135,6 +137,9 @@ CORE_SOURCE_FILES = [
'src/core/lib/iomgr/tcp_client_windows.c',
'src/core/lib/iomgr/tcp_posix.c',
'src/core/lib/iomgr/tcp_server_posix.c',
+ 'src/core/lib/iomgr/tcp_server_utils_posix_common.c',
+ 'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c',
+ 'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c',
'src/core/lib/iomgr/tcp_server_uv.c',
'src/core/lib/iomgr/tcp_server_windows.c',
'src/core/lib/iomgr/tcp_uv.c',
@@ -157,6 +162,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
'src/core/lib/json/json_writer.c',
+ 'src/core/lib/security/util/b64.c',
'src/core/lib/slice/percent_encoding.c',
'src/core/lib/slice/slice.c',
'src/core/lib/slice/slice_buffer.c',
@@ -175,6 +181,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/surface/channel_ping.c',
'src/core/lib/surface/channel_stack_type.c',
'src/core/lib/surface/completion_queue.c',
+ 'src/core/lib/surface/completion_queue_factory.c',
'src/core/lib/surface/event_string.c',
'src/core/lib/surface/lame_client.c',
'src/core/lib/surface/metadata_array.c',
@@ -238,7 +245,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/security/transport/security_handshaker.c',
'src/core/lib/security/transport/server_auth_filter.c',
'src/core/lib/security/transport/tsi_error.c',
- 'src/core/lib/security/util/b64.c',
'src/core/lib/security/util/json_util.c',
'src/core/lib/surface/init_secure.c',
'src/core/lib/tsi/fake_transport_security.c',
@@ -251,10 +257,8 @@ CORE_SOURCE_FILES = [
'src/core/ext/client_channel/client_channel_factory.c',
'src/core/ext/client_channel/client_channel_plugin.c',
'src/core/ext/client_channel/connector.c',
- 'src/core/ext/client_channel/default_initial_connect_string.c',
'src/core/ext/client_channel/http_connect_handshaker.c',
'src/core/ext/client_channel/http_proxy.c',
- 'src/core/ext/client_channel/initial_connect_string.c',
'src/core/ext/client_channel/lb_policy.c',
'src/core/ext/client_channel/lb_policy_factory.c',
'src/core/ext/client_channel/lb_policy_registry.c',
@@ -264,6 +268,7 @@ CORE_SOURCE_FILES = [
'src/core/ext/client_channel/resolver.c',
'src/core/ext/client_channel/resolver_factory.c',
'src/core/ext/client_channel/resolver_registry.c',
+ 'src/core/ext/client_channel/retry_throttle.c',
'src/core/ext/client_channel/subchannel.c',
'src/core/ext/client_channel/subchannel_index.c',
'src/core/ext/client_channel/uri_parser.c',
@@ -281,6 +286,9 @@ CORE_SOURCE_FILES = [
'third_party/nanopb/pb_encode.c',
'src/core/ext/lb_policy/pick_first/pick_first.c',
'src/core/ext/lb_policy/round_robin/round_robin.c',
+ 'src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c',
+ 'src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c',
+ 'src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c',
'src/core/ext/resolver/dns/native/dns_resolver.c',
'src/core/ext/resolver/sockaddr/sockaddr_resolver.c',
'src/core/ext/load_reporting/load_reporting.c',
@@ -619,4 +627,53 @@ CORE_SOURCE_FILES = [
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
+ 'third_party/cares/cares/ares__close_sockets.c',
+ 'third_party/cares/cares/ares__get_hostent.c',
+ 'third_party/cares/cares/ares__read_line.c',
+ 'third_party/cares/cares/ares__timeval.c',
+ 'third_party/cares/cares/ares_cancel.c',
+ 'third_party/cares/cares/ares_create_query.c',
+ 'third_party/cares/cares/ares_data.c',
+ 'third_party/cares/cares/ares_destroy.c',
+ 'third_party/cares/cares/ares_expand_name.c',
+ 'third_party/cares/cares/ares_expand_string.c',
+ 'third_party/cares/cares/ares_fds.c',
+ 'third_party/cares/cares/ares_free_hostent.c',
+ 'third_party/cares/cares/ares_free_string.c',
+ 'third_party/cares/cares/ares_getenv.c',
+ 'third_party/cares/cares/ares_gethostbyaddr.c',
+ 'third_party/cares/cares/ares_gethostbyname.c',
+ 'third_party/cares/cares/ares_getnameinfo.c',
+ 'third_party/cares/cares/ares_getopt.c',
+ 'third_party/cares/cares/ares_getsock.c',
+ 'third_party/cares/cares/ares_init.c',
+ 'third_party/cares/cares/ares_library_init.c',
+ 'third_party/cares/cares/ares_llist.c',
+ 'third_party/cares/cares/ares_mkquery.c',
+ 'third_party/cares/cares/ares_nowarn.c',
+ 'third_party/cares/cares/ares_options.c',
+ 'third_party/cares/cares/ares_parse_a_reply.c',
+ 'third_party/cares/cares/ares_parse_aaaa_reply.c',
+ 'third_party/cares/cares/ares_parse_mx_reply.c',
+ 'third_party/cares/cares/ares_parse_naptr_reply.c',
+ 'third_party/cares/cares/ares_parse_ns_reply.c',
+ 'third_party/cares/cares/ares_parse_ptr_reply.c',
+ 'third_party/cares/cares/ares_parse_soa_reply.c',
+ 'third_party/cares/cares/ares_parse_srv_reply.c',
+ 'third_party/cares/cares/ares_parse_txt_reply.c',
+ 'third_party/cares/cares/ares_platform.c',
+ 'third_party/cares/cares/ares_process.c',
+ 'third_party/cares/cares/ares_query.c',
+ 'third_party/cares/cares/ares_search.c',
+ 'third_party/cares/cares/ares_send.c',
+ 'third_party/cares/cares/ares_strcasecmp.c',
+ 'third_party/cares/cares/ares_strdup.c',
+ 'third_party/cares/cares/ares_strerror.c',
+ 'third_party/cares/cares/ares_timeout.c',
+ 'third_party/cares/cares/ares_version.c',
+ 'third_party/cares/cares/ares_writev.c',
+ 'third_party/cares/cares/bitncmp.c',
+ 'third_party/cares/cares/inet_net_pton.c',
+ 'third_party/cares/cares/inet_ntop.c',
+ 'third_party/cares/cares/windows_port.c',
]
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index 52ee98a2d5..17bb3ab616 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -59,6 +59,10 @@ COMMAND_CLASS = {
setuptools.setup(
name='grpcio-health-checking',
version=grpc_version.VERSION,
+ description='Standard Health Checking Service for gRPC',
+ author='The gRPC Authors',
+ author_email='grpc-io@googlegroups.com',
+ url='http://www.grpc.io',
license='3-clause BSD',
package_dir=PACKAGE_DIRECTORIES,
packages=setuptools.find_packages('.'),
diff --git a/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
index 87f28396ce..c8ad9668ac 100644
--- a/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
+++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
@@ -35,6 +35,7 @@ from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from grpc_reflection.v1alpha import reflection_pb2
+from grpc_reflection.v1alpha import reflection_pb2_grpc
_POOL = descriptor_pool.Default()
@@ -64,7 +65,7 @@ class ReflectionServicer(reflection_pb2.ServerReflectionServicer):
Args:
service_names: Iterable of fully-qualified service names available.
"""
- self._service_names = list(service_names)
+ self._service_names = tuple(sorted(service_names))
self._pool = _POOL if pool is None else pool
def _file_by_filename(self, filename):
@@ -84,23 +85,32 @@ class ReflectionServicer(reflection_pb2.ServerReflectionServicer):
else:
return _file_descriptor_response(descriptor)
- def _file_containing_extension(containing_type, extension_number):
- # TODO(atash) Python protobuf currently doesn't support querying extensions.
- # https://github.com/google/protobuf/issues/2248
- return reflection_pb2.ServerReflectionResponse(
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
- error_message=grpc.StatusCode.UNIMPLMENTED.value[1].encode(),))
-
- def _extension_numbers_of_type(fully_qualified_name):
- # TODO(atash) We're allowed to leave this unsupported according to the
- # protocol, but we should still eventually implement it. Hits the same issue
- # as `_file_containing_extension`, however.
- # https://github.com/google/protobuf/issues/2248
- return reflection_pb2.ServerReflectionResponse(
- error_response=reflection_pb2.ErrorResponse(
- error_code=grpc.StatusCode.UNIMPLEMENTED.value[0],
- error_message=grpc.StatusCode.UNIMPLMENTED.value[1].encode(),))
+ def _file_containing_extension(self, containing_type, extension_number):
+ try:
+ message_descriptor = self._pool.FindMessageTypeByName(containing_type)
+ extension_descriptor = self._pool.FindExtensionByNumber(
+ message_descriptor, extension_number)
+ descriptor = self._pool.FindFileContainingSymbol(
+ extension_descriptor.full_name)
+ except KeyError:
+ return _not_found_error()
+ else:
+ return _file_descriptor_response(descriptor)
+
+ def _all_extension_numbers_of_type(self, containing_type):
+ try:
+ message_descriptor = self._pool.FindMessageTypeByName(containing_type)
+ extension_numbers = tuple(sorted(
+ extension.number
+ for extension in self._pool.FindAllExtensions(message_descriptor)))
+ except KeyError:
+ return _not_found_error()
+ else:
+ return reflection_pb2.ServerReflectionResponse(
+ all_extension_numbers_response=reflection_pb2.
+ ExtensionNumberResponse(
+ base_type_name=message_descriptor.full_name,
+ extension_number=extension_numbers))
def _list_services(self):
return reflection_pb2.ServerReflectionResponse(
@@ -121,7 +131,7 @@ class ReflectionServicer(reflection_pb2.ServerReflectionServicer):
request.file_containing_extension.containing_type,
request.file_containing_extension.extension_number)
elif request.HasField('all_extension_numbers_of_type'):
- yield _all_extension_numbers_of_type(
+ yield self._all_extension_numbers_of_type(
request.all_extension_numbers_of_type)
elif request.HasField('list_services'):
yield self._list_services()
@@ -131,3 +141,14 @@ class ReflectionServicer(reflection_pb2.ServerReflectionServicer):
error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1]
.encode(),))
+
+
+def enable_server_reflection(service_names, server):
+ """Enables server reflection on a server.
+
+ Args:
+ service_names: Iterable of fully-qualified service names available.
+ server: grpc.Server to which reflection service will be added.
+ """
+ reflection_pb2_grpc.add_ServerReflectionServicer_to_server(
+ ReflectionServicer(service_names), server)
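A hedged sketch of how the new enable_server_reflection() helper above could be
wired into a server (the executor size, service-name tuple, and port are
illustrative placeholders, not taken from this patch):

    from concurrent import futures
    import grpc
    from grpc_reflection.v1alpha import reflection

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    # Fully-qualified names of the services this server exposes (placeholders).
    service_names = ('grpc.testing.TestService',
                     'grpc.reflection.v1alpha.ServerReflection')
    reflection.enable_server_reflection(service_names, server)
    server.add_insecure_port('[::]:50051')
    server.start()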
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
index e85092db57..e6cf54745e 100644
--- a/src/python/grpcio_reflection/setup.py
+++ b/src/python/grpcio_reflection/setup.py
@@ -60,6 +60,10 @@ setuptools.setup(
name='grpcio-reflection',
version=grpc_version.VERSION,
license='3-clause BSD',
+ description='Standard Protobuf Reflection Service for gRPC',
+ author='The gRPC Authors',
+ author_email='grpc-io@googlegroups.com',
+ url='http://www.grpc.io',
package_dir=PACKAGE_DIRECTORIES,
packages=setuptools.find_packages('.'),
install_requires=INSTALL_REQUIRES,
diff --git a/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
index 58f3b364ba..3325d54375 100644
--- a/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
@@ -32,7 +32,7 @@ from concurrent import futures
import unittest
import grpc
-from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import _intraop_test_case
from tests.interop import methods
@@ -44,11 +44,11 @@ class InsecureIntraopTest(_intraop_test_case.IntraopTestCase,
def setUp(self):
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
- test_pb2.add_TestServiceServicer_to_server(methods.TestService(),
- self.server)
+ test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
+ self.server)
port = self.server.add_insecure_port('[::]:0')
self.server.start()
- self.stub = test_pb2.TestServiceStub(
+ self.stub = test_pb2_grpc.TestServiceStub(
grpc.insecure_channel('localhost:{}'.format(port)))
diff --git a/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
index 5fe929b99e..857e00efb3 100644
--- a/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
@@ -32,7 +32,7 @@ from concurrent import futures
import unittest
import grpc
-from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import _intraop_test_case
from tests.interop import methods
@@ -45,14 +45,14 @@ class SecureIntraopTest(_intraop_test_case.IntraopTestCase, unittest.TestCase):
def setUp(self):
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
- test_pb2.add_TestServiceServicer_to_server(methods.TestService(),
- self.server)
+ test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
+ self.server)
port = self.server.add_secure_port(
'[::]:0',
grpc.ssl_server_credentials(
[(resources.private_key(), resources.certificate_chain())]))
self.server.start()
- self.stub = test_pb2.TestServiceStub(
+ self.stub = test_pb2_grpc.TestServiceStub(
grpc.secure_channel('localhost:{}'.format(port),
grpc.ssl_channel_credentials(
resources.test_root_certificates()), (
diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py
index 662ea9ce57..e1016f7c0d 100644
--- a/src/python/grpcio_tests/tests/interop/methods.py
+++ b/src/python/grpcio_tests/tests/interop/methods.py
@@ -40,7 +40,7 @@ from grpc.beta import implementations
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
-from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import test_pb2_grpc
_INITIAL_METADATA_KEY = "x-grpc-test-echo-initial"
_TRAILING_METADATA_KEY = "x-grpc-test-echo-trailing-bin"
@@ -66,7 +66,7 @@ def _maybe_echo_status_and_message(request, servicer_context):
servicer_context.set_details(request.response_status.message)
-class TestService(test_pb2.TestServiceServicer):
+class TestService(test_pb2_grpc.TestServiceServicer):
def EmptyCall(self, request, context):
_maybe_echo_metadata(context)
diff --git a/src/python/grpcio_tests/tests/interop/server.py b/src/python/grpcio_tests/tests/interop/server.py
index 65f1604eb8..0ae2c97b42 100644
--- a/src/python/grpcio_tests/tests/interop/server.py
+++ b/src/python/grpcio_tests/tests/interop/server.py
@@ -34,7 +34,7 @@ import logging
import time
import grpc
-from src.proto.grpc.testing import test_pb2
+from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import methods
from tests.interop import resources
@@ -53,7 +53,8 @@ def serve():
args = parser.parse_args()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
- test_pb2.add_TestServiceServicer_to_server(methods.TestService(), server)
+ test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
+ server)
if args.use_tls:
private_key = resources.private_key()
certificate_chain = resources.certificate_chain()
diff --git a/src/python/grpcio_tests/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py
index 025dfb9d4a..7cd53e7bd9 100644
--- a/src/python/grpcio_tests/tests/qps/qps_worker.py
+++ b/src/python/grpcio_tests/tests/qps/qps_worker.py
@@ -33,7 +33,7 @@ import time
from concurrent import futures
import grpc
-from src.proto.grpc.testing import services_pb2
+from src.proto.grpc.testing import services_pb2_grpc
from tests.qps import worker_server
@@ -41,7 +41,7 @@ from tests.qps import worker_server
def run_worker_server(port):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
servicer = worker_server.WorkerServer()
- services_pb2.add_WorkerServiceServicer_to_server(servicer, server)
+ services_pb2_grpc.add_WorkerServiceServicer_to_server(servicer, server)
server.add_insecure_port('[::]:{}'.format(port))
server.start()
servicer.wait_for_quit()
diff --git a/src/python/grpcio_tests/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py
index ca1a777611..de9535f46e 100644
--- a/src/python/grpcio_tests/tests/qps/worker_server.py
+++ b/src/python/grpcio_tests/tests/qps/worker_server.py
@@ -35,7 +35,7 @@ import time
from concurrent import futures
import grpc
from src.proto.grpc.testing import control_pb2
-from src.proto.grpc.testing import services_pb2
+from src.proto.grpc.testing import services_pb2_grpc
from src.proto.grpc.testing import stats_pb2
from tests.qps import benchmark_client
@@ -45,7 +45,7 @@ from tests.qps import histogram
from tests.unit import resources
-class WorkerServer(services_pb2.WorkerServiceServicer):
+class WorkerServer(services_pb2_grpc.WorkerServiceServicer):
"""Python Worker Server implementation."""
def __init__(self):
@@ -87,8 +87,8 @@ class WorkerServer(services_pb2.WorkerServiceServicer):
futures.ThreadPoolExecutor(max_workers=server_threads))
if config.server_type == control_pb2.ASYNC_SERVER:
servicer = benchmark_server.BenchmarkServer()
- services_pb2.add_BenchmarkServiceServicer_to_server(servicer,
- server)
+ services_pb2_grpc.add_BenchmarkServiceServicer_to_server(servicer,
+ server)
elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
resp_size = config.payload_config.bytebuf_params.resp_size
servicer = benchmark_server.GenericBenchmarkServer(resp_size)
diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
index d06ff064e2..14e6d64c66 100644
--- a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
+++ b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
@@ -34,18 +34,24 @@ import grpc
from grpc.framework.foundation import logging_pool
from grpc_reflection.v1alpha import reflection
from grpc_reflection.v1alpha import reflection_pb2
+from grpc_reflection.v1alpha import reflection_pb2_grpc
from google.protobuf import descriptor_pool
from google.protobuf import descriptor_pb2
-from src.proto.grpc.testing.proto2 import empty2_extensions_pb2
from src.proto.grpc.testing import empty_pb2
+# empty2_pb2 is imported for its import-time side effects.
+from src.proto.grpc.testing.proto2 import empty2_pb2 # pylint: disable=unused-import
+from src.proto.grpc.testing.proto2 import empty2_extensions_pb2
+
from tests.unit.framework.common import test_constants
_EMPTY_PROTO_FILE_NAME = 'src/proto/grpc/testing/empty.proto'
_EMPTY_PROTO_SYMBOL_NAME = 'grpc.testing.Empty'
_SERVICE_NAMES = ('Angstrom', 'Bohr', 'Curie', 'Dyson', 'Einstein', 'Feynman',
'Galilei')
+_EMPTY_EXTENSIONS_SYMBOL_NAME = 'grpc.testing.proto2.EmptyWithExtensions'
+_EMPTY_EXTENSIONS_NUMBERS = (124, 125, 126, 127, 128,)
def _file_descriptor_to_proto(descriptor):
@@ -61,12 +67,12 @@ class ReflectionServicerTest(unittest.TestCase):
server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(server_pool)
port = self._server.add_insecure_port('[::]:0')
- reflection_pb2.add_ServerReflectionServicer_to_server(servicer,
- self._server)
+ reflection_pb2_grpc.add_ServerReflectionServicer_to_server(servicer,
+ self._server)
self._server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
- self._stub = reflection_pb2.ServerReflectionStub(channel)
+ self._stub = reflection_pb2_grpc.ServerReflectionStub(channel)
def testFileByName(self):
requests = (reflection_pb2.ServerReflectionRequest(
@@ -109,12 +115,12 @@ class ReflectionServicerTest(unittest.TestCase):
self.assertSequenceEqual(expected_responses, responses)
@unittest.skip(
- 'TODO(atash): implement file-containing-extension reflection '
- '(see https://github.com/google/protobuf/issues/2248)')
+        'TODO(mmx): enable when the (pure) Python protobuf issue is fixed '
+ '(see https://github.com/google/protobuf/issues/2882)')
def testFileContainingExtension(self):
requests = (reflection_pb2.ServerReflectionRequest(
file_containing_extension=reflection_pb2.ExtensionRequest(
- containing_type='grpc.testing.proto2.Empty',
+ containing_type=_EMPTY_EXTENSIONS_SYMBOL_NAME,
extension_number=125,),
), reflection_pb2.ServerReflectionRequest(
file_containing_extension=reflection_pb2.ExtensionRequest(
@@ -126,7 +132,28 @@ class ReflectionServicerTest(unittest.TestCase):
valid_host='',
file_descriptor_response=reflection_pb2.FileDescriptorResponse(
file_descriptor_proto=(_file_descriptor_to_proto(
- empty_extensions_pb2.DESCRIPTOR),))),
+ empty2_extensions_pb2.DESCRIPTOR),))),
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ error_response=reflection_pb2.ErrorResponse(
+ error_code=grpc.StatusCode.NOT_FOUND.value[0],
+ error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+ )),)
+ self.assertSequenceEqual(expected_responses, responses)
+
+ def testExtensionNumbersOfType(self):
+ requests = (reflection_pb2.ServerReflectionRequest(
+ all_extension_numbers_of_type=_EMPTY_EXTENSIONS_SYMBOL_NAME
+ ), reflection_pb2.ServerReflectionRequest(
+ all_extension_numbers_of_type='i.donut.exist.co.uk.net.name.foo'),)
+ responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
+ expected_responses = (
+ reflection_pb2.ServerReflectionResponse(
+ valid_host='',
+ all_extension_numbers_response=reflection_pb2.
+ ExtensionNumberResponse(
+ base_type_name=_EMPTY_EXTENSIONS_SYMBOL_NAME,
+ extension_number=_EMPTY_EXTENSIONS_NUMBERS)),
reflection_pb2.ServerReflectionResponse(
valid_host='',
error_response=reflection_pb2.ErrorResponse(
diff --git a/src/python/grpcio_tests/tests/stress/client.py b/src/python/grpcio_tests/tests/stress/client.py
index b9dbe61d44..b7eb12bff8 100644
--- a/src/python/grpcio_tests/tests/stress/client.py
+++ b/src/python/grpcio_tests/tests/stress/client.py
@@ -34,7 +34,7 @@ import threading
import grpc
from six.moves import queue
-from src.proto.grpc.testing import metrics_pb2
+from src.proto.grpc.testing import metrics_pb2_grpc
from src.proto.grpc.testing import test_pb2
from tests.interop import methods
@@ -139,7 +139,7 @@ def run_test(args):
runners = []
server = grpc.server(futures.ThreadPoolExecutor(max_workers=25))
- metrics_pb2.add_MetricsServiceServicer_to_server(
+ metrics_pb2_grpc.add_MetricsServiceServicer_to_server(
metrics_server.MetricsServer(hist), server)
server.add_insecure_port('[::]:{}'.format(args.metrics_port))
server.start()
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index 70d965d3ca..f750b05102 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -31,6 +31,7 @@
"unit._invocation_defects_test.InvocationDefectsTest",
"unit._metadata_code_details_test.MetadataCodeDetailsTest",
"unit._metadata_test.MetadataTest",
+ "unit._resource_exhausted_test.ResourceExhaustedTest",
"unit._rpc_test.RPCTest",
"unit._sanity._sanity_test.Sanity",
"unit._thread_cleanup_test.CleanupThreadTest",
diff --git a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
new file mode 100644
index 0000000000..88c82b5541
--- /dev/null
+++ b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
@@ -0,0 +1,270 @@
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Tests server responding with RESOURCE_EXHAUSTED."""
+
+import threading
+import unittest
+
+import grpc
+from grpc import _channel
+from grpc.framework.foundation import logging_pool
+
+from tests.unit import test_common
+from tests.unit.framework.common import test_constants
+
+_REQUEST = b'\x00\x00\x00'
+_RESPONSE = b'\x00\x00\x00'
+
+_UNARY_UNARY = '/test/UnaryUnary'
+_UNARY_STREAM = '/test/UnaryStream'
+_STREAM_UNARY = '/test/StreamUnary'
+_STREAM_STREAM = '/test/StreamStream'
+
+
+class _TestTrigger(object):
+
+ def __init__(self, total_call_count):
+ self._total_call_count = total_call_count
+ self._pending_calls = 0
+ self._triggered = False
+ self._finish_condition = threading.Condition()
+ self._start_condition = threading.Condition()
+
+    # Wait for all calls to be blocked in their handlers
+ def await_calls(self):
+ with self._start_condition:
+ while self._pending_calls < self._total_call_count:
+ self._start_condition.wait()
+
+ # Block in a response handler and wait for a trigger
+ def await_trigger(self):
+ with self._start_condition:
+ self._pending_calls += 1
+ self._start_condition.notify()
+
+ with self._finish_condition:
+ if not self._triggered:
+ self._finish_condition.wait()
+
+ # Finish all response handlers
+ def trigger(self):
+ with self._finish_condition:
+ self._triggered = True
+ self._finish_condition.notify_all()
+
+
+def handle_unary_unary(trigger, request, servicer_context):
+ trigger.await_trigger()
+ return _RESPONSE
+
+
+def handle_unary_stream(trigger, request, servicer_context):
+ trigger.await_trigger()
+ for _ in range(test_constants.STREAM_LENGTH):
+ yield _RESPONSE
+
+
+def handle_stream_unary(trigger, request_iterator, servicer_context):
+ trigger.await_trigger()
+ # TODO(issue:#6891) We should be able to remove this loop
+ for request in request_iterator:
+ pass
+ return _RESPONSE
+
+
+def handle_stream_stream(trigger, request_iterator, servicer_context):
+ trigger.await_trigger()
+ # TODO(issue:#6891) We should be able to remove this loop,
+ # and replace with return; yield
+ for request in request_iterator:
+ yield _RESPONSE
+
+
+class _MethodHandler(grpc.RpcMethodHandler):
+
+ def __init__(self, trigger, request_streaming, response_streaming):
+ self.request_streaming = request_streaming
+ self.response_streaming = response_streaming
+ self.request_deserializer = None
+ self.response_serializer = None
+ self.unary_unary = None
+ self.unary_stream = None
+ self.stream_unary = None
+ self.stream_stream = None
+ if self.request_streaming and self.response_streaming:
+ self.stream_stream = (
+ lambda x, y: handle_stream_stream(trigger, x, y))
+ elif self.request_streaming:
+ self.stream_unary = lambda x, y: handle_stream_unary(trigger, x, y)
+ elif self.response_streaming:
+ self.unary_stream = lambda x, y: handle_unary_stream(trigger, x, y)
+ else:
+ self.unary_unary = lambda x, y: handle_unary_unary(trigger, x, y)
+
+
+class _GenericHandler(grpc.GenericRpcHandler):
+
+ def __init__(self, trigger):
+ self._trigger = trigger
+
+ def service(self, handler_call_details):
+ if handler_call_details.method == _UNARY_UNARY:
+ return _MethodHandler(self._trigger, False, False)
+ elif handler_call_details.method == _UNARY_STREAM:
+ return _MethodHandler(self._trigger, False, True)
+ elif handler_call_details.method == _STREAM_UNARY:
+ return _MethodHandler(self._trigger, True, False)
+ elif handler_call_details.method == _STREAM_STREAM:
+ return _MethodHandler(self._trigger, True, True)
+ else:
+ return None
+
+
+class ResourceExhaustedTest(unittest.TestCase):
+
+ def setUp(self):
+ self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
+ self._trigger = _TestTrigger(test_constants.THREAD_CONCURRENCY)
+ self._server = grpc.server(
+ self._server_pool,
+ handlers=(_GenericHandler(self._trigger),),
+ maximum_concurrent_rpcs=test_constants.THREAD_CONCURRENCY)
+ port = self._server.add_insecure_port('[::]:0')
+ self._server.start()
+ self._channel = grpc.insecure_channel('localhost:%d' % port)
+
+ def tearDown(self):
+ self._server.stop(0)
+
+ def testUnaryUnary(self):
+ multi_callable = self._channel.unary_unary(_UNARY_UNARY)
+ futures = []
+ for _ in range(test_constants.THREAD_CONCURRENCY):
+ futures.append(multi_callable.future(_REQUEST))
+
+ self._trigger.await_calls()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(_REQUEST)
+
+ self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
+ exception_context.exception.code())
+
+ future_exception = multi_callable.future(_REQUEST)
+ self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
+ future_exception.exception().code())
+
+ self._trigger.trigger()
+ for future in futures:
+ self.assertEqual(_RESPONSE, future.result())
+
+ # Ensure a new request can be handled
+ self.assertEqual(_RESPONSE, multi_callable(_REQUEST))
+
+ def testUnaryStream(self):
+ multi_callable = self._channel.unary_stream(_UNARY_STREAM)
+ calls = []
+ for _ in range(test_constants.THREAD_CONCURRENCY):
+ calls.append(multi_callable(_REQUEST))
+
+ self._trigger.await_calls()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(multi_callable(_REQUEST))
+
+ self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
+ exception_context.exception.code())
+
+ self._trigger.trigger()
+
+ for call in calls:
+ for response in call:
+ self.assertEqual(_RESPONSE, response)
+
+ # Ensure a new request can be handled
+ new_call = multi_callable(_REQUEST)
+ for response in new_call:
+ self.assertEqual(_RESPONSE, response)
+
+ def testStreamUnary(self):
+ multi_callable = self._channel.stream_unary(_STREAM_UNARY)
+ futures = []
+ request = iter([_REQUEST] * test_constants.STREAM_LENGTH)
+ for _ in range(test_constants.THREAD_CONCURRENCY):
+ futures.append(multi_callable.future(request))
+
+ self._trigger.await_calls()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ multi_callable(request)
+
+ self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
+ exception_context.exception.code())
+
+ future_exception = multi_callable.future(request)
+ self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
+ future_exception.exception().code())
+
+ self._trigger.trigger()
+
+ for future in futures:
+ self.assertEqual(_RESPONSE, future.result())
+
+ # Ensure a new request can be handled
+ self.assertEqual(_RESPONSE, multi_callable(request))
+
+ def testStreamStream(self):
+ multi_callable = self._channel.stream_stream(_STREAM_STREAM)
+ calls = []
+ request = iter([_REQUEST] * test_constants.STREAM_LENGTH)
+ for _ in range(test_constants.THREAD_CONCURRENCY):
+ calls.append(multi_callable(request))
+
+ self._trigger.await_calls()
+
+ with self.assertRaises(grpc.RpcError) as exception_context:
+ next(multi_callable(request))
+
+ self.assertEqual(grpc.StatusCode.RESOURCE_EXHAUSTED,
+ exception_context.exception.code())
+
+ self._trigger.trigger()
+
+ for call in calls:
+ for response in call:
+ self.assertEqual(_RESPONSE, response)
+
+ # Ensure a new request can be handled
+ new_call = multi_callable(request)
+ for response in new_call:
+ self.assertEqual(_RESPONSE, response)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/src/ruby/.rubocop.yml b/src/ruby/.rubocop.yml
index 0f61ccfa81..7174f3b15a 100644
--- a/src/ruby/.rubocop.yml
+++ b/src/ruby/.rubocop.yml
@@ -9,6 +9,7 @@ AllCops:
- 'bin/math_services_pb.rb'
- 'pb/grpc/health/v1/*'
- 'pb/test/**/*'
+ - 'end2end/lib/*'
Metrics/CyclomaticComplexity:
Max: 9
diff --git a/src/ruby/end2end/README.md b/src/ruby/end2end/README.md
new file mode 100644
index 0000000000..ea5ab6d4bc
--- /dev/null
+++ b/src/ruby/end2end/README.md
@@ -0,0 +1,18 @@
+This directory contains some grpc-ruby end-to-end tests.
+
+Each test here involves two files: a "driver" and a "client". For example,
+the "channel_closing" test involves channel_closing_driver.rb
+and channel_closing_client.rb.
+
+Typically, the "driver" starts up a simple "echo" server and then spawns a
+client. It gives the client the address of the "echo" server as well as an
+address to listen on for control RPCs. Depending on the test, the client
+usually starts up a "ClientControl" gRPC server for the driver to interact
+with, so that the driver can tell the client process to do unusual things at
+specific points in the test.
+
+So far these tests are mostly useful for exercising process-shutdown-related
+situations, since the clients run in separate processes.
+
+These tests are invoked through the "tools/run_tests/run_tests.py" script (the
+Rakefile doesn't start these).
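+
+As a rough sketch (the client script name below is a placeholder), a driver
+built on the helpers in end2end_common.rb looks something like:
+
+```ruby
+require_relative './end2end_common'
+
+server_runner = ServerRunner.new
+server_port = server_runner.run
+control_stub, client_pid = start_client('some_test_client.rb', server_port)
+# ... drive the client via control_stub ...
+cleanup(control_stub, client_pid, server_runner)
+```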
diff --git a/src/ruby/end2end/channel_closing_client.rb b/src/ruby/end2end/channel_closing_client.rb
new file mode 100755
index 0000000000..8449797360
--- /dev/null
+++ b/src/ruby/end2end/channel_closing_client.rb
@@ -0,0 +1,84 @@
+#!/usr/bin/env ruby
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require_relative './end2end_common'
+
+# Calls '#close' on a Channel when "shutdown" is called. This tries to
+# trigger a hang or crash bug by closing a channel that is actively being
+# watched.
+class ChannelClosingClientController < ClientControl::ClientController::Service
+ def initialize(ch)
+ @ch = ch
+ end
+
+ def shutdown(_, _)
+ @ch.close
+ ClientControl::Void.new
+ end
+end
+
+def main
+ client_control_port = ''
+ server_port = ''
+ OptionParser.new do |opts|
+ opts.on('--client_control_port=P', String) do |p|
+ client_control_port = p
+ end
+ opts.on('--server_port=P', String) do |p|
+ server_port = p
+ end
+ end.parse!
+
+ ch = GRPC::Core::Channel.new("localhost:#{server_port}", {},
+ :this_channel_is_insecure)
+
+ srv = GRPC::RpcServer.new
+ thd = Thread.new do
+ srv.add_http2_port("0.0.0.0:#{client_control_port}", :this_port_is_insecure)
+ srv.handle(ChannelClosingClientController.new(ch))
+ srv.run
+ end
+
+ # this should break out with an exception once the channel is closed
+ loop do
+ begin
+ state = ch.connectivity_state(true)
+ ch.watch_connectivity_state(state, Time.now + 360)
+ rescue RuntimeError => e
+ STDERR.puts "(expected) error occurred: #{e.inspect}"
+ break
+ end
+ end
+
+ srv.stop
+ thd.join
+end
+
+main
diff --git a/src/ruby/end2end/channel_closing_driver.rb b/src/ruby/end2end/channel_closing_driver.rb
new file mode 100755
index 0000000000..43e2fe8cbb
--- /dev/null
+++ b/src/ruby/end2end/channel_closing_driver.rb
@@ -0,0 +1,67 @@
+#!/usr/bin/env ruby
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# make sure that the client doesn't hang when the channel is closed
+# explicitly while it's in use
+
+require_relative './end2end_common'
+
+def main
+ STDERR.puts 'start server'
+ server_runner = ServerRunner.new
+ server_port = server_runner.run
+
+ sleep 1
+
+ STDERR.puts 'start client'
+ control_stub, client_pid = start_client('channel_closing_client.rb',
+ server_port)
+
+ sleep 3
+
+ begin
+ Timeout.timeout(10) do
+ control_stub.shutdown(ClientControl::Void.new)
+ Process.wait(client_pid)
+ end
+ rescue Timeout::Error
+ STDERR.puts "timeout wait for client pid #{client_pid}"
+ Process.kill('SIGKILL', client_pid)
+ Process.wait(client_pid)
+ STDERR.puts 'killed client child'
+ raise 'Timed out waiting for client process. It likely hangs when a ' \
+ 'channel is closed while connectivity is watched'
+ end
+
+ server_runner.stop
+end
+
+main
diff --git a/src/ruby/end2end/channel_state_client.rb b/src/ruby/end2end/channel_state_client.rb
new file mode 100755
index 0000000000..08c21bbb8e
--- /dev/null
+++ b/src/ruby/end2end/channel_state_client.rb
@@ -0,0 +1,54 @@
+#!/usr/bin/env ruby
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require_relative './end2end_common'
+
+def main
+ server_port = ''
+ OptionParser.new do |opts|
+ opts.on('--client_control_port=P', String) do
+ STDERR.puts 'client_control_port ignored'
+ end
+ opts.on('--server_port=P', String) do |p|
+ server_port = p
+ end
+ end.parse!
+
+ ch = GRPC::Core::Channel.new("localhost:#{server_port}", {},
+ :this_channel_is_insecure)
+
+ loop do
+ state = ch.connectivity_state
+ ch.watch_connectivity_state(state, Time.now + 360)
+ end
+end
+
+main
diff --git a/src/ruby/end2end/channel_state_driver.rb b/src/ruby/end2end/channel_state_driver.rb
new file mode 100755
index 0000000000..c3184bf939
--- /dev/null
+++ b/src/ruby/end2end/channel_state_driver.rb
@@ -0,0 +1,64 @@
+#!/usr/bin/env ruby
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# make sure that the client doesn't hang when the process is ended abruptly
+
+require_relative './end2end_common'
+
+def main
+ STDERR.puts 'start server'
+ server_runner = ServerRunner.new
+ server_port = server_runner.run
+
+ sleep 1
+
+ STDERR.puts 'start client'
+ _, client_pid = start_client('channel_state_client.rb', server_port)
+
+ sleep 3
+
+ Process.kill('SIGTERM', client_pid)
+
+ begin
+ Timeout.timeout(10) { Process.wait(client_pid) }
+ rescue Timeout::Error
+ STDERR.puts "timeout wait for client pid #{client_pid}"
+ Process.kill('SIGKILL', client_pid)
+ Process.wait(client_pid)
+ STDERR.puts 'killed client child'
+ raise 'Timed out waiting for client process. ' \
+ 'It likely hangs when ended abruptly'
+ end
+
+ server_runner.stop
+end
+
+main
diff --git a/src/ruby/end2end/end2end_common.rb b/src/ruby/end2end/end2end_common.rb
new file mode 100755
index 0000000000..9534bb2078
--- /dev/null
+++ b/src/ruby/end2end/end2end_common.rb
@@ -0,0 +1,109 @@
+#!/usr/bin/env ruby
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+this_dir = File.expand_path(File.dirname(__FILE__))
+protos_lib_dir = File.join(this_dir, 'lib')
+grpc_lib_dir = File.join(File.dirname(this_dir), 'lib')
+$LOAD_PATH.unshift(grpc_lib_dir) unless $LOAD_PATH.include?(grpc_lib_dir)
+$LOAD_PATH.unshift(protos_lib_dir) unless $LOAD_PATH.include?(protos_lib_dir)
+$LOAD_PATH.unshift(this_dir) unless $LOAD_PATH.include?(this_dir)
+
+require 'grpc'
+require 'echo_services_pb'
+require 'client_control_services_pb'
+require 'socket'
+require 'optparse'
+require 'thread'
+require 'timeout'
+require 'English' # see https://github.com/bbatsov/rubocop/issues/1747
+
+# EchoServerImpl implements the "echo" service that the test clients call.
+class EchoServerImpl < Echo::EchoServer::Service
+  # echo implements the Echo rpc method.
+ def echo(echo_req, _)
+ Echo::EchoReply.new(response: echo_req.request)
+ end
+end
+
+# ServerRunner starts an "echo server" that test clients can make calls to
+class ServerRunner
+ def initialize
+ end
+
+ def run
+ @srv = GRPC::RpcServer.new
+ port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
+ @srv.handle(EchoServerImpl)
+
+ @thd = Thread.new do
+ @srv.run
+ end
+ @srv.wait_till_running
+ port
+ end
+
+ def stop
+ @srv.stop
+ @thd.join
+ fail 'server not stopped' unless @srv.stopped?
+ end
+end
+
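+# Spawns the given client script in a child process, passing it the echo
+# server's port and a freshly picked control port; returns the control stub
+# and the child's pid.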
+def start_client(client_main, server_port)
+ this_dir = File.expand_path(File.dirname(__FILE__))
+
+ tmp_server = TCPServer.new(0)
+ client_control_port = tmp_server.local_address.ip_port
+ tmp_server.close
+
+ client_path = File.join(this_dir, client_main)
+ client_pid = Process.spawn(RbConfig.ruby,
+ client_path,
+ "--client_control_port=#{client_control_port}",
+ "--server_port=#{server_port}")
+ sleep 1
+ control_stub = ClientControl::ClientController::Stub.new(
+ "localhost:#{client_control_port}", :this_channel_is_insecure)
+ [control_stub, client_pid]
+end
+
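+# Tells the client to shut down via its control stub, waits for the child
+# process to exit cleanly, then stops the echo server.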
+def cleanup(control_stub, client_pid, server_runner)
+ control_stub.shutdown(ClientControl::Void.new)
+ Process.wait(client_pid)
+
+ client_exit_code = $CHILD_STATUS
+
+ if client_exit_code != 0
+ fail "term sig test failure: client exit code: #{client_exit_code}"
+ end
+
+ server_runner.stop
+end
diff --git a/src/ruby/end2end/gen_protos.sh b/src/ruby/end2end/gen_protos.sh
new file mode 100644
index 0000000000..f78d9ad394
--- /dev/null
+++ b/src/ruby/end2end/gen_protos.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+grpc_tools_ruby_protoc -I protos --ruby_out=lib --grpc_out=lib protos/echo.proto protos/client_control.proto
diff --git a/src/ruby/end2end/lib/client_control_pb.rb b/src/ruby/end2end/lib/client_control_pb.rb
new file mode 100644
index 0000000000..1a938f1b5a
--- /dev/null
+++ b/src/ruby/end2end/lib/client_control_pb.rb
@@ -0,0 +1,17 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: client_control.proto
+
+require 'google/protobuf'
+
+Google::Protobuf::DescriptorPool.generated_pool.build do
+ add_message "client_control.DoEchoRpcRequest" do
+ optional :request, :string, 1
+ end
+ add_message "client_control.Void" do
+ end
+end
+
+module ClientControl
+ DoEchoRpcRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("client_control.DoEchoRpcRequest").msgclass
+ Void = Google::Protobuf::DescriptorPool.generated_pool.lookup("client_control.Void").msgclass
+end
diff --git a/src/ruby/end2end/lib/client_control_services_pb.rb b/src/ruby/end2end/lib/client_control_services_pb.rb
new file mode 100644
index 0000000000..04b2291bc7
--- /dev/null
+++ b/src/ruby/end2end/lib/client_control_services_pb.rb
@@ -0,0 +1,53 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# Source: client_control.proto for package 'client_control'
+# Original file comments:
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+require 'grpc'
+require 'client_control_pb'
+
+module ClientControl
+ module ClientController
+ class Service
+
+ include GRPC::GenericService
+
+ self.marshal_class_method = :encode
+ self.unmarshal_class_method = :decode
+ self.service_name = 'client_control.ClientController'
+
+ rpc :DoEchoRpc, DoEchoRpcRequest, Void
+ rpc :Shutdown, Void, Void
+ end
+
+ Stub = Service.rpc_stub_class
+ end
+end
diff --git a/src/ruby/end2end/lib/echo_pb.rb b/src/ruby/end2end/lib/echo_pb.rb
new file mode 100644
index 0000000000..c62adc0753
--- /dev/null
+++ b/src/ruby/end2end/lib/echo_pb.rb
@@ -0,0 +1,18 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: echo.proto
+
+require 'google/protobuf'
+
+Google::Protobuf::DescriptorPool.generated_pool.build do
+ add_message "echo.EchoRequest" do
+ optional :request, :string, 1
+ end
+ add_message "echo.EchoReply" do
+ optional :response, :string, 1
+ end
+end
+
+module Echo
+ EchoRequest = Google::Protobuf::DescriptorPool.generated_pool.lookup("echo.EchoRequest").msgclass
+ EchoReply = Google::Protobuf::DescriptorPool.generated_pool.lookup("echo.EchoReply").msgclass
+end
diff --git a/src/ruby/end2end/lib/echo_services_pb.rb b/src/ruby/end2end/lib/echo_services_pb.rb
new file mode 100644
index 0000000000..c66e975bf3
--- /dev/null
+++ b/src/ruby/end2end/lib/echo_services_pb.rb
@@ -0,0 +1,52 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# Source: echo.proto for package 'echo'
+# Original file comments:
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+require 'grpc'
+require 'echo_pb'
+
+module Echo
+ module EchoServer
+ class Service
+
+ include GRPC::GenericService
+
+ self.marshal_class_method = :encode
+ self.unmarshal_class_method = :decode
+ self.service_name = 'echo.EchoServer'
+
+ rpc :Echo, EchoRequest, EchoReply
+ end
+
+ Stub = Service.rpc_stub_class
+ end
+end
diff --git a/src/ruby/end2end/protos/client_control.proto b/src/ruby/end2end/protos/client_control.proto
new file mode 100644
index 0000000000..f985bb486d
--- /dev/null
+++ b/src/ruby/end2end/protos/client_control.proto
@@ -0,0 +1,43 @@
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package client_control;
+
+service ClientController {
+ rpc DoEchoRpc (DoEchoRpcRequest) returns (Void) {}
+ rpc Shutdown(Void) returns (Void) {}
+}
+
+message DoEchoRpcRequest {
+ string request = 1;
+}
+
+message Void{}
diff --git a/src/ruby/end2end/protos/echo.proto b/src/ruby/end2end/protos/echo.proto
new file mode 100644
index 0000000000..d47afef70f
--- /dev/null
+++ b/src/ruby/end2end/protos/echo.proto
@@ -0,0 +1,46 @@
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package echo;
+
+service EchoServer {
+ rpc Echo (EchoRequest) returns (EchoReply) {}
+}
+
+// The request message containing the user's name.
+message EchoRequest {
+ string request = 1;
+}
+
+// The response message containing the greetings
+message EchoReply {
+ string response = 1;
+}
diff --git a/src/ruby/end2end/sig_handling_client.rb b/src/ruby/end2end/sig_handling_client.rb
new file mode 100755
index 0000000000..5b1e54718e
--- /dev/null
+++ b/src/ruby/end2end/sig_handling_client.rb
@@ -0,0 +1,89 @@
+#!/usr/bin/env ruby
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require_relative './end2end_common'
+
+# Test client. Sends RPCs as normal, but the process also has signal handlers.
+class SigHandlingClientController < ClientControl::ClientController::Service
+ def initialize(srv, stub)
+ @srv = srv
+ @stub = stub
+ end
+
+ def do_echo_rpc(req, _)
+ response = @stub.echo(Echo::EchoRequest.new(request: req.request))
+ fail 'bad response' unless response.response == req.request
+ ClientControl::Void.new
+ end
+
+ def shutdown(_, _)
+ Thread.new do
+      # TODO(apolcyn) There is a race between stopping the
+      # server and the "shutdown" rpc completing. See if the server's stop
+      # method can end an active RPC cleanly, to avoid this sleep.
+ sleep 3
+ @srv.stop
+ end
+ ClientControl::Void.new
+ end
+end
+
+def main
+ client_control_port = ''
+ server_port = ''
+ OptionParser.new do |opts|
+ opts.on('--client_control_port=P', String) do |p|
+ client_control_port = p
+ end
+ opts.on('--server_port=P', String) do |p|
+ server_port = p
+ end
+ end.parse!
+
+ Signal.trap('TERM') do
+ STDERR.puts 'SIGTERM received'
+ end
+
+ Signal.trap('INT') do
+ STDERR.puts 'SIGINT received'
+ end
+
+ srv = GRPC::RpcServer.new
+ srv.add_http2_port("0.0.0.0:#{client_control_port}",
+ :this_port_is_insecure)
+ stub = Echo::EchoServer::Stub.new("localhost:#{server_port}",
+ :this_channel_is_insecure)
+ srv.handle(SigHandlingClientController.new(srv, stub))
+ srv.run
+end
+
+main
diff --git a/src/ruby/end2end/sig_handling_driver.rb b/src/ruby/end2end/sig_handling_driver.rb
new file mode 100755
index 0000000000..c5d46e074c
--- /dev/null
+++ b/src/ruby/end2end/sig_handling_driver.rb
@@ -0,0 +1,61 @@
+#!/usr/bin/env ruby
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# smoke test for a grpc-using app that receives and
+# handles process-ending signals
+
+require_relative './end2end_common'
+
+def main
+ STDERR.puts 'start server'
+ server_runner = ServerRunner.new
+ server_port = server_runner.run
+
+ sleep 1
+
+ STDERR.puts 'start client'
+ control_stub, client_pid = start_client('sig_handling_client.rb', server_port)
+
+ sleep 1
+
+ count = 0
+ while count < 5
+ control_stub.do_echo_rpc(
+ ClientControl::DoEchoRpcRequest.new(request: 'hello'))
+ Process.kill('SIGTERM', client_pid)
+ Process.kill('SIGINT', client_pid)
+ count += 1
+ end
+
+ cleanup(control_stub, client_pid, server_runner)
+end
+
+main
diff --git a/src/ruby/end2end/sig_int_during_channel_watch_client.rb b/src/ruby/end2end/sig_int_during_channel_watch_client.rb
new file mode 100755
index 0000000000..389fc5ba33
--- /dev/null
+++ b/src/ruby/end2end/sig_int_during_channel_watch_client.rb
@@ -0,0 +1,70 @@
+#!/usr/bin/env ruby
+
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require_relative './end2end_common'
+
+# Start polling the channel state in both the main thread
+# and a child thread. Try to get the driver to send a process-ending
+# interrupt while both the child thread and the main thread are in the
+# middle of a blocking connectivity_state call.
+def main
+ server_port = ''
+ OptionParser.new do |opts|
+ opts.on('--client_control_port=P', String) do
+ STDERR.puts 'client_control_port not used'
+ end
+ opts.on('--server_port=P', String) do |p|
+ server_port = p
+ end
+ end.parse!
+
+ thd = Thread.new do
+ child_thread_channel = GRPC::Core::Channel.new("localhost:#{server_port}",
+ {},
+ :this_channel_is_insecure)
+ loop do
+ state = child_thread_channel.connectivity_state(false)
+ child_thread_channel.watch_connectivity_state(state, Time.now + 360)
+ end
+ end
+
+ main_channel = GRPC::Core::Channel.new("localhost:#{server_port}",
+ {},
+ :this_channel_is_insecure)
+ loop do
+ state = main_channel.connectivity_state(false)
+ main_channel.watch_connectivity_state(state, Time.now + 360)
+ end
+
+ thd.join
+end
+
+main
diff --git a/src/ruby/end2end/sig_int_during_channel_watch_driver.rb b/src/ruby/end2end/sig_int_during_channel_watch_driver.rb
new file mode 100755
index 0000000000..84d039bf19
--- /dev/null
+++ b/src/ruby/end2end/sig_int_during_channel_watch_driver.rb
@@ -0,0 +1,69 @@
+#!/usr/bin/env ruby
+
+# Copyright 2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# abruptly end a process that has active calls to
+# Channel.watch_connectivity_state
+
+require_relative './end2end_common'
+
+def main
+ STDERR.puts 'start server'
+ server_runner = ServerRunner.new
+ server_port = server_runner.run
+
+ sleep 1
+
+ STDERR.puts 'start client'
+ _, client_pid = start_client('sig_int_during_channel_watch_client.rb',
+ server_port)
+
+ # give time for the client to get into the middle
+ # of a channel state watch call
+ sleep 1
+ Process.kill('SIGINT', client_pid)
+
+ begin
+ Timeout.timeout(10) do
+ Process.wait(client_pid)
+ end
+ rescue Timeout::Error
+ STDERR.puts "timeout wait for client pid #{client_pid}"
+ Process.kill('SIGKILL', client_pid)
+ Process.wait(client_pid)
+ STDERR.puts 'killed client child'
+ raise 'Timed out waiting for client process. It likely hangs when a ' \
+ 'SIGINT is sent while there is an active connectivity_state call'
+ end
+
+ server_runner.stop
+end
+
+main
diff --git a/src/ruby/ext/grpc/extconf.rb b/src/ruby/ext/grpc/extconf.rb
index ecb66239b9..7067933832 100644
--- a/src/ruby/ext/grpc/extconf.rb
+++ b/src/ruby/ext/grpc/extconf.rb
@@ -71,6 +71,7 @@ ENV['AR'] = 'libtool -o' if RUBY_PLATFORM =~ /darwin/
ENV['EMBED_OPENSSL'] = 'true'
ENV['EMBED_ZLIB'] = 'true'
+ENV['EMBED_CARES'] = 'true'
ENV['ARCH_FLAGS'] = RbConfig::CONFIG['ARCH_FLAG']
ENV['ARCH_FLAGS'] = '-arch i386 -arch x86_64' if RUBY_PLATFORM =~ /darwin/
ENV['CFLAGS'] = '-DGPR_BACKWARDS_COMPATIBILITY_MODE'
diff --git a/src/ruby/ext/grpc/rb_channel.c b/src/ruby/ext/grpc/rb_channel.c
index 84e43d3f7b..1c20c8813f 100644
--- a/src/ruby/ext/grpc/rb_channel.c
+++ b/src/ruby/ext/grpc/rb_channel.c
@@ -32,21 +32,22 @@
*/
#include <ruby/ruby.h>
+#include <ruby/thread.h>
#include "rb_grpc_imports.generated.h"
-#include "rb_channel.h"
#include "rb_byte_buffer.h"
+#include "rb_channel.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
-#include "rb_grpc.h"
#include "rb_call.h"
#include "rb_channel_args.h"
#include "rb_channel_credentials.h"
#include "rb_completion_queue.h"
+#include "rb_grpc.h"
#include "rb_server.h"
/* id_channel is the name of the hidden ivar that preserves a reference to the
@@ -73,9 +74,26 @@ typedef struct grpc_rb_channel {
/* The actual channel */
grpc_channel *wrapped;
- grpc_completion_queue *queue;
+ int request_safe_destroy;
+ int safe_to_destroy;
+ grpc_connectivity_state current_connectivity_state;
+
+ int mu_init_done;
+ int abort_watch_connectivity_state;
+ gpr_mu channel_mu;
+ gpr_cv channel_cv;
} grpc_rb_channel;
+/* Forward declarations of functions involved in temporary fix to
+ * https://github.com/grpc/grpc/issues/9941 */
+static void grpc_rb_channel_try_register_connection_polling(
+ grpc_rb_channel *wrapper);
+static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper);
+
+static grpc_completion_queue *channel_polling_cq;
+static gpr_mu global_connection_polling_mu;
+static int abort_channel_polling = 0;
+
/* Destroys Channel instances. */
static void grpc_rb_channel_free(void *p) {
grpc_rb_channel *ch = NULL;
@@ -85,8 +103,13 @@ static void grpc_rb_channel_free(void *p) {
ch = (grpc_rb_channel *)p;
if (ch->wrapped != NULL) {
- grpc_channel_destroy(ch->wrapped);
- grpc_rb_completion_queue_destroy(ch->queue);
+ grpc_rb_channel_safe_destroy(ch);
+ ch->wrapped = NULL;
+ }
+
+ if (ch->mu_init_done) {
+ gpr_mu_destroy(&ch->channel_mu);
+ gpr_cv_destroy(&ch->channel_cv);
}
xfree(p);
@@ -104,13 +127,15 @@ static void grpc_rb_channel_mark(void *p) {
}
}
-static rb_data_type_t grpc_channel_data_type = {
- "grpc_channel",
- {grpc_rb_channel_mark, grpc_rb_channel_free, GRPC_RB_MEMSIZE_UNAVAILABLE,
- {NULL, NULL}},
- NULL, NULL,
+static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
+ {grpc_rb_channel_mark,
+ grpc_rb_channel_free,
+ GRPC_RB_MEMSIZE_UNAVAILABLE,
+ {NULL, NULL}},
+ NULL,
+ NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
- RUBY_TYPED_FREE_IMMEDIATELY
+ RUBY_TYPED_FREE_IMMEDIATELY
#endif
};
@@ -145,6 +170,7 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);
TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
+ wrapper->mu_init_done = 0;
target_chars = StringValueCStr(target);
grpc_rb_hash_convert_to_channel_args(channel_args, &args);
if (TYPE(credentials) == T_SYMBOL) {
@@ -159,6 +185,27 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
creds = grpc_rb_get_wrapped_channel_credentials(credentials);
ch = grpc_secure_channel_create(creds, target_chars, &args, NULL);
}
+
+ GPR_ASSERT(ch);
+
+ wrapper->wrapped = ch;
+
+ gpr_mu_init(&wrapper->channel_mu);
+ gpr_cv_init(&wrapper->channel_cv);
+ wrapper->mu_init_done = 1;
+
+ gpr_mu_lock(&wrapper->channel_mu);
+ wrapper->abort_watch_connectivity_state = 0;
+ wrapper->current_connectivity_state = grpc_channel_check_connectivity_state(wrapper->wrapped, 0);
+ wrapper->safe_to_destroy = 0;
+ wrapper->request_safe_destroy = 0;
+
+ gpr_cv_broadcast(&wrapper->channel_cv);
+ gpr_mu_unlock(&wrapper->channel_mu);
+
+
+ grpc_rb_channel_try_register_connection_polling(wrapper);
+
if (args.args != NULL) {
xfree(args.args); /* Allocated by grpc_rb_hash_convert_to_channel_args */
}
@@ -169,25 +216,28 @@ static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
}
rb_ivar_set(self, id_target, target);
wrapper->wrapped = ch;
- wrapper->queue = grpc_completion_queue_create(NULL);
return self;
}
/*
call-seq:
- insecure_channel = Channel:new("myhost:8080", {'arg1': 'value1'})
- creds = ...
- secure_channel = Channel:new("myhost:443", {'arg1': 'value1'}, creds)
+ ch.connectivity_state -> state
+ ch.connectivity_state(true) -> state
- Creates channel instances. */
+ Indicates the current state of the channel, whose value is one of the
+ constants defined in GRPC::Core::ConnectivityStates.
+
+  In the second form, it also tries to connect if the channel is idle. */
static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE *argv,
VALUE self) {
- VALUE try_to_connect = Qfalse;
+ VALUE try_to_connect_param = Qfalse;
+ int grpc_try_to_connect = 0;
grpc_rb_channel *wrapper = NULL;
grpc_channel *ch = NULL;
/* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
- rb_scan_args(argc, argv, "01", try_to_connect);
+ rb_scan_args(argc, argv, "01", &try_to_connect_param);
+ grpc_try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
ch = wrapper->wrapped;
@@ -195,57 +245,88 @@ static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE *argv,
rb_raise(rb_eRuntimeError, "closed!");
return Qnil;
}
- return NUM2LONG(
- grpc_channel_check_connectivity_state(ch, (int)try_to_connect));
+ return LONG2NUM(grpc_channel_check_connectivity_state(wrapper->wrapped, grpc_try_to_connect));
}
-/* Watch for a change in connectivity state.
+typedef struct watch_state_stack {
+ grpc_rb_channel *wrapper;
+ gpr_timespec deadline;
+ int last_state;
+} watch_state_stack;
+
+static void *watch_channel_state_without_gvl(void *arg) {
+ watch_state_stack *stack = (watch_state_stack*)arg;
+ gpr_timespec deadline = stack->deadline;
+ grpc_rb_channel *wrapper = stack->wrapper;
+ int last_state = stack->last_state;
+ void *return_value = (void*)0;
+
+ gpr_mu_lock(&wrapper->channel_mu);
+ while(wrapper->current_connectivity_state == last_state &&
+ !wrapper->request_safe_destroy &&
+ !wrapper->safe_to_destroy &&
+ !wrapper->abort_watch_connectivity_state &&
+ gpr_time_cmp(deadline, gpr_now(GPR_CLOCK_REALTIME)) > 0) {
+ gpr_cv_wait(&wrapper->channel_cv, &wrapper->channel_mu, deadline);
+ }
+ if (wrapper->current_connectivity_state != last_state) {
+ return_value = (void*)1;
+ }
+ gpr_mu_unlock(&wrapper->channel_mu);
+
+ return return_value;
+}
- Once the channel connectivity state is different from the last observed
- state, tag will be enqueued on cq with success=1
+static void watch_channel_state_unblocking_func(void *arg) {
+ grpc_rb_channel *wrapper = (grpc_rb_channel*)arg;
+ gpr_log(GPR_DEBUG, "GRPC_RUBY: watch channel state unblocking func called");
+ gpr_mu_lock(&wrapper->channel_mu);
+ wrapper->abort_watch_connectivity_state = 1;
+ gpr_cv_broadcast(&wrapper->channel_cv);
+ gpr_mu_unlock(&wrapper->channel_mu);
+}
- If deadline expires BEFORE the state is changed, tag will be enqueued on
- the completion queue with success=0 */
+/* Wait until the channel's connectivity state becomes different from
+ * "last_state", or "deadline" expires.
+ * Returns true if the channel's connectivity state becomes
+ * different from "last_state" within "deadline".
+ * Returns false if "deadline" expires before the channel's connectivity
+ * state changes from "last_state".
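+ *
+ * Typical Ruby-level usage (as in the end2end test clients):
+ *   state = ch.connectivity_state
+ *   ch.watch_connectivity_state(state, Time.now + 360)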
+ * */
static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
VALUE last_state,
VALUE deadline) {
grpc_rb_channel *wrapper = NULL;
- grpc_channel *ch = NULL;
- grpc_completion_queue *cq = NULL;
-
- void *tag = wrapper;
-
- grpc_event event;
+ watch_state_stack stack;
+ void* out;
TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
- ch = wrapper->wrapped;
- cq = wrapper->queue;
- if (ch == NULL) {
+
+ if (wrapper->wrapped == NULL) {
rb_raise(rb_eRuntimeError, "closed!");
return Qnil;
}
- grpc_channel_watch_connectivity_state(
- ch,
- (grpc_connectivity_state)NUM2LONG(last_state),
- grpc_rb_time_timeval(deadline, /* absolute time */ 0),
- cq,
- tag);
- event = rb_completion_queue_pluck(cq, tag,
- gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ if (!FIXNUM_P(last_state)) {
+ rb_raise(rb_eTypeError, "bad type for last_state. want a GRPC::Core::ChannelState constant");
+ return Qnil;
+ }
- if (event.success) {
+ stack.wrapper = wrapper;
+ stack.deadline = grpc_rb_time_timeval(deadline, 0);
+ stack.last_state = NUM2LONG(last_state);
+ out = rb_thread_call_without_gvl(watch_channel_state_without_gvl, &stack, watch_channel_state_unblocking_func, wrapper);
+ if (out) {
return Qtrue;
- } else {
- return Qfalse;
}
+ return Qfalse;
}
/* Create a call given a grpc_channel, in order to call method. The request
is not sent until grpc_call_invoke is called. */
-static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent,
- VALUE mask, VALUE method,
- VALUE host, VALUE deadline) {
+static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
+ VALUE method, VALUE host,
+ VALUE deadline) {
VALUE res = Qnil;
grpc_rb_channel *wrapper = NULL;
grpc_call *call = NULL;
@@ -256,10 +337,11 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent,
grpc_slice method_slice;
grpc_slice host_slice;
grpc_slice *host_slice_ptr = NULL;
- char* tmp_str = NULL;
+ char *tmp_str = NULL;
if (host != Qnil) {
- host_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
+ host_slice =
+ grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
host_slice_ptr = &host_slice;
}
if (mask != Qnil) {
@@ -277,17 +359,18 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent,
return Qnil;
}
- method_slice = grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
+ method_slice =
+ grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
call = grpc_channel_create_call(ch, parent_call, flags, cq, method_slice,
- host_slice_ptr, grpc_rb_time_timeval(
- deadline,
- /* absolute time */ 0), NULL);
+ host_slice_ptr,
+ grpc_rb_time_timeval(deadline,
+ /* absolute time */ 0),
+ NULL);
if (call == NULL) {
tmp_str = grpc_slice_to_c_string(method_slice);
- rb_raise(rb_eRuntimeError, "cannot create call with method %s",
- tmp_str);
+ rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
return Qnil;
}
@@ -304,7 +387,6 @@ static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent,
return res;
}
-
/* Closes the channel, calling it's destroy method */
static VALUE grpc_rb_channel_destroy(VALUE self) {
grpc_rb_channel *wrapper = NULL;
@@ -313,19 +395,18 @@ static VALUE grpc_rb_channel_destroy(VALUE self) {
TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
ch = wrapper->wrapped;
if (ch != NULL) {
- grpc_channel_destroy(ch);
+ grpc_rb_channel_safe_destroy(wrapper);
wrapper->wrapped = NULL;
}
return Qnil;
}
-
/* Called to obtain the target that this channel accesses. */
static VALUE grpc_rb_channel_get_target(VALUE self) {
grpc_rb_channel *wrapper = NULL;
VALUE res = Qnil;
- char* target = NULL;
+ char *target = NULL;
TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
target = grpc_channel_get_target(wrapper->wrapped);
@@ -335,10 +416,122 @@ static VALUE grpc_rb_channel_get_target(VALUE self) {
return res;
}
+// Either start polling channel connection state or signal that it's free to
+// destroy.
+// Not safe to call while a channel's connection state is polled.
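+// Called at channel init and again from the polling loop each time a pending
+// watch completes, so each channel keeps at most one watch registered on
+// channel_polling_cq at a time.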
+static void grpc_rb_channel_try_register_connection_polling(
+ grpc_rb_channel *wrapper) {
+ grpc_connectivity_state conn_state;
+ gpr_timespec sleep_time = gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(20, GPR_TIMESPAN));
+
+ GPR_ASSERT(wrapper);
+ GPR_ASSERT(wrapper->wrapped);
+ gpr_mu_lock(&wrapper->channel_mu);
+ if (wrapper->request_safe_destroy) {
+ wrapper->safe_to_destroy = 1;
+ gpr_cv_broadcast(&wrapper->channel_cv);
+ gpr_mu_unlock(&wrapper->channel_mu);
+ return;
+ }
+ gpr_mu_lock(&global_connection_polling_mu);
+
+ conn_state = grpc_channel_check_connectivity_state(wrapper->wrapped, 0);
+ if (conn_state != wrapper->current_connectivity_state) {
+ wrapper->current_connectivity_state = conn_state;
+ gpr_cv_broadcast(&wrapper->channel_cv);
+ }
+ // avoid posting work to the channel polling cq if it's been shutdown
+ if (!abort_channel_polling && conn_state != GRPC_CHANNEL_SHUTDOWN) {
+ grpc_channel_watch_connectivity_state(
+ wrapper->wrapped, conn_state, sleep_time, channel_polling_cq, wrapper);
+ } else {
+ wrapper->safe_to_destroy = 1;
+ gpr_cv_broadcast(&wrapper->channel_cv);
+ }
+ gpr_mu_unlock(&global_connection_polling_mu);
+ gpr_mu_unlock(&wrapper->channel_mu);
+}
+
+// Note requires wrapper->wrapped, wrapper->channel_mu/cv initialized
+static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper) {
+ gpr_mu_lock(&wrapper->channel_mu);
+ wrapper->request_safe_destroy = 1;
+
+ while (!wrapper->safe_to_destroy) {
+ gpr_cv_wait(&wrapper->channel_cv, &wrapper->channel_mu,
+ gpr_inf_future(GPR_CLOCK_REALTIME));
+ }
+ GPR_ASSERT(wrapper->safe_to_destroy);
+ gpr_mu_unlock(&wrapper->channel_mu);
+
+ grpc_channel_destroy(wrapper->wrapped);
+}
+
+// Note this loop breaks out with a single call of
+// "grpc_rb_event_unblocking_func".
+// This assumes that a Ruby call to the unblocking func
+// indicates process shutdown.
+// In the worst case, this stops polling channel connectivity
+// early and falls back to current behavior.
+static void *run_poll_channels_loop_no_gil(void *arg) {
+ grpc_event event;
+ (void)arg;
+ for (;;) {
+ event = grpc_completion_queue_next(
+ channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
+ if (event.type == GRPC_QUEUE_SHUTDOWN) {
+ break;
+ }
+ if (event.type == GRPC_OP_COMPLETE) {
+ grpc_rb_channel_try_register_connection_polling((grpc_rb_channel *)event.tag);
+ }
+ }
+ grpc_completion_queue_destroy(channel_polling_cq);
+ gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling loop");
+ return NULL;
+}
+
+// Notify the channel polling loop to clean up and shut down.
+static void grpc_rb_event_unblocking_func(void *arg) {
+ (void)arg;
+ gpr_mu_lock(&global_connection_polling_mu);
+ gpr_log(GPR_DEBUG, "GRPC_RUBY: grpc_rb_event_unblocking_func - begin aborting connection polling");
+ abort_channel_polling = 1;
+ grpc_completion_queue_shutdown(channel_polling_cq);
+ gpr_mu_unlock(&global_connection_polling_mu);
+}
+
+// Poll channel connectivity states in background thread without the GIL.
+static VALUE run_poll_channels_loop(VALUE arg) {
+ (void)arg;
+ gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
+ rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
+ grpc_rb_event_unblocking_func, NULL);
+ return Qnil;
+}
+
+/* Temporary fix for
+ * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
+ * Transports in idle channels can get destroyed. Normally c-core re-connects,
+ * but in grpc-ruby, c-core never gets a thread until an RPC is made, because
+ * Ruby only calls c-core's "completion_queue_pluck" API.
+ * This uses a global background thread that calls
+ * "completion_queue_next" on registered "watch_channel_connectivity_state"
+ * calls - so that c-core can reconnect if needed, when there aren't any RPCs.
+ * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
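+ *
+ * Rough lifecycle, as implemented below: channel init registers a watch on
+ * channel_polling_cq via grpc_rb_channel_try_register_connection_polling;
+ * run_poll_channels_loop_no_gil consumes completions and re-registers (or
+ * marks the channel safe_to_destroy); grpc_rb_event_unblocking_func shuts
+ * the cq down so the polling loop exits at Ruby shutdown.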
+ */
+static void start_poll_channels_loop() {
+ channel_polling_cq = grpc_completion_queue_create(NULL);
+ gpr_mu_init(&global_connection_polling_mu);
+ abort_channel_polling = 0;
+ rb_thread_create(run_poll_channels_loop, NULL);
+}
+
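For reference, the Ruby-level behaviour this background thread is meant to preserve is roughly the sketch below. It is a condensed version of the new channel_connection_spec.rb added further down in this patch, and it assumes the EchoStub/EchoMsg and start_server/stop_server helpers defined there:

    require 'grpc'

    # Helpers (EchoStub, EchoMsg, start_server, stop_server) as defined in
    # src/ruby/spec/channel_connection_spec.rb below.
    port = start_server
    stub = EchoStub.new("localhost:#{port}", :this_channel_is_insecure)
    stub.an_rpc(EchoMsg.new)  # the first RPC establishes the transport

    stop_server               # the now-idle channel loses its transport
    start_server(port)

    # With the background polling thread registered at extension init,
    # c-core keeps watching connectivity and can reconnect even though no
    # RPC is in flight, so this call is expected to succeed.
    stub.an_rpc(EchoMsg.new)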
static void Init_grpc_propagate_masks() {
/* Constants representing call propagation masks in grpc.h */
- VALUE grpc_rb_mPropagateMasks = rb_define_module_under(
- grpc_rb_mGrpcCore, "PropagateMasks");
+ VALUE grpc_rb_mPropagateMasks =
+ rb_define_module_under(grpc_rb_mGrpcCore, "PropagateMasks");
rb_define_const(grpc_rb_mPropagateMasks, "DEADLINE",
UINT2NUM(GRPC_PROPAGATE_DEADLINE));
rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_STATS_CONTEXT",
@@ -353,8 +546,8 @@ static void Init_grpc_propagate_masks() {
static void Init_grpc_connectivity_states() {
/* Constants representing call propagation masks in grpc.h */
- VALUE grpc_rb_mConnectivityStates = rb_define_module_under(
- grpc_rb_mGrpcCore, "ConnectivityStates");
+ VALUE grpc_rb_mConnectivityStates =
+ rb_define_module_under(grpc_rb_mGrpcCore, "ConnectivityStates");
rb_define_const(grpc_rb_mConnectivityStates, "IDLE",
LONG2NUM(GRPC_CHANNEL_IDLE));
rb_define_const(grpc_rb_mConnectivityStates, "CONNECTING",
@@ -382,12 +575,11 @@ void Init_grpc_channel() {
/* Add ruby analogues of the Channel methods. */
rb_define_method(grpc_rb_cChannel, "connectivity_state",
- grpc_rb_channel_get_connectivity_state,
- -1);
+ grpc_rb_channel_get_connectivity_state, -1);
rb_define_method(grpc_rb_cChannel, "watch_connectivity_state",
- grpc_rb_channel_watch_connectivity_state, 4);
- rb_define_method(grpc_rb_cChannel, "create_call",
- grpc_rb_channel_create_call, 5);
+ grpc_rb_channel_watch_connectivity_state, 2);
+ rb_define_method(grpc_rb_cChannel, "create_call", grpc_rb_channel_create_call,
+ 5);
rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0);
rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
rb_define_alias(grpc_rb_cChannel, "close", "destroy");
@@ -405,6 +597,7 @@ void Init_grpc_channel() {
id_insecure_channel = rb_intern("this_channel_is_insecure");
Init_grpc_propagate_masks();
Init_grpc_connectivity_states();
+ start_poll_channels_loop();
}
/* Gets the wrapped channel from the ruby wrapper */
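Note the arity change above: the Ruby-level watch_connectivity_state method now takes two arguments (the last observed state and a deadline) instead of four. A minimal usage sketch, mirroring the new channel_connection_spec.rb below; the address is a placeholder:

    ch = GRPC::Core::Channel.new('localhost:50051', {},
                                 :this_channel_is_insecure)
    state = ch.connectivity_state(true)  # true asks the channel to connect
    tries = 0
    while tries < 20 && state != GRPC::Core::ConnectivityStates::READY
      # Returns once the state differs from `state` or the deadline passes.
      ch.watch_connectivity_state(state, Time.now + 60)
      state = ch.connectivity_state(true)
      tries += 1
    end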
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 3ef6f0eb29..063f92114c 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -91,6 +91,9 @@ grpc_init_type grpc_init_import;
grpc_shutdown_type grpc_shutdown_import;
grpc_version_string_type grpc_version_string_import;
grpc_g_stands_for_type grpc_g_stands_for_import;
+grpc_completion_queue_factory_lookup_type grpc_completion_queue_factory_lookup_import;
+grpc_completion_queue_create_for_next_type grpc_completion_queue_create_for_next_import;
+grpc_completion_queue_create_for_pluck_type grpc_completion_queue_create_for_pluck_import;
grpc_completion_queue_create_type grpc_completion_queue_create_import;
grpc_completion_queue_next_type grpc_completion_queue_next_import;
grpc_completion_queue_pluck_type grpc_completion_queue_pluck_import;
@@ -385,6 +388,9 @@ void grpc_rb_load_imports(HMODULE library) {
grpc_shutdown_import = (grpc_shutdown_type) GetProcAddress(library, "grpc_shutdown");
grpc_version_string_import = (grpc_version_string_type) GetProcAddress(library, "grpc_version_string");
grpc_g_stands_for_import = (grpc_g_stands_for_type) GetProcAddress(library, "grpc_g_stands_for");
+ grpc_completion_queue_factory_lookup_import = (grpc_completion_queue_factory_lookup_type) GetProcAddress(library, "grpc_completion_queue_factory_lookup");
+ grpc_completion_queue_create_for_next_import = (grpc_completion_queue_create_for_next_type) GetProcAddress(library, "grpc_completion_queue_create_for_next");
+ grpc_completion_queue_create_for_pluck_import = (grpc_completion_queue_create_for_pluck_type) GetProcAddress(library, "grpc_completion_queue_create_for_pluck");
grpc_completion_queue_create_import = (grpc_completion_queue_create_type) GetProcAddress(library, "grpc_completion_queue_create");
grpc_completion_queue_next_import = (grpc_completion_queue_next_type) GetProcAddress(library, "grpc_completion_queue_next");
grpc_completion_queue_pluck_import = (grpc_completion_queue_pluck_type) GetProcAddress(library, "grpc_completion_queue_pluck");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index ef9845dfe0..f5dcd68a8e 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -224,6 +224,15 @@ extern grpc_version_string_type grpc_version_string_import;
typedef const char *(*grpc_g_stands_for_type)(void);
extern grpc_g_stands_for_type grpc_g_stands_for_import;
#define grpc_g_stands_for grpc_g_stands_for_import
+typedef const grpc_completion_queue_factory *(*grpc_completion_queue_factory_lookup_type)(const grpc_completion_queue_attributes *attributes);
+extern grpc_completion_queue_factory_lookup_type grpc_completion_queue_factory_lookup_import;
+#define grpc_completion_queue_factory_lookup grpc_completion_queue_factory_lookup_import
+typedef grpc_completion_queue *(*grpc_completion_queue_create_for_next_type)(void *reserved);
+extern grpc_completion_queue_create_for_next_type grpc_completion_queue_create_for_next_import;
+#define grpc_completion_queue_create_for_next grpc_completion_queue_create_for_next_import
+typedef grpc_completion_queue *(*grpc_completion_queue_create_for_pluck_type)(void *reserved);
+extern grpc_completion_queue_create_for_pluck_type grpc_completion_queue_create_for_pluck_import;
+#define grpc_completion_queue_create_for_pluck grpc_completion_queue_create_for_pluck_import
typedef grpc_completion_queue *(*grpc_completion_queue_create_type)(void *reserved);
extern grpc_completion_queue_create_type grpc_completion_queue_create_import;
#define grpc_completion_queue_create grpc_completion_queue_create_import
diff --git a/src/ruby/spec/channel_connection_spec.rb b/src/ruby/spec/channel_connection_spec.rb
new file mode 100644
index 0000000000..940d68b9b0
--- /dev/null
+++ b/src/ruby/spec/channel_connection_spec.rb
@@ -0,0 +1,141 @@
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'grpc'
+
+# A test message
+class EchoMsg
+ def self.marshal(_o)
+ ''
+ end
+
+ def self.unmarshal(_o)
+ EchoMsg.new
+ end
+end
+
+# A test service with an echo implementation.
+class EchoService
+ include GRPC::GenericService
+ rpc :an_rpc, EchoMsg, EchoMsg
+ attr_reader :received_md
+
+ def initialize(**kw)
+ @trailing_metadata = kw
+ @received_md = []
+ end
+
+ def an_rpc(req, call)
+ GRPC.logger.info('echo service received a request')
+ call.output_metadata.update(@trailing_metadata)
+ @received_md << call.metadata unless call.metadata.nil?
+ req
+ end
+end
+
+EchoStub = EchoService.rpc_stub_class
+
+def start_server(port = 0)
+ @srv = GRPC::RpcServer.new
+ server_port = @srv.add_http2_port("localhost:#{port}", :this_port_is_insecure)
+ @srv.handle(EchoService)
+ @server_thd = Thread.new { @srv.run }
+ @srv.wait_till_running
+ server_port
+end
+
+def stop_server
+ expect(@srv.stopped?).to be(false)
+ @srv.stop
+ @server_thd.join
+ expect(@srv.stopped?).to be(true)
+end
+
+describe 'channel connection behavior' do
+ it 'the client channel handles temporary loss of a transport' do
+ port = start_server
+ stub = EchoStub.new("localhost:#{port}", :this_channel_is_insecure)
+ req = EchoMsg.new
+ expect(stub.an_rpc(req)).to be_a(EchoMsg)
+ stop_server
+ sleep 1
+    # TODO(apolcyn): grabbing the same port might fail; is this stable enough?
+ start_server(port)
+ expect(stub.an_rpc(req)).to be_a(EchoMsg)
+ stop_server
+ end
+
+  it 'observably connects and reconnects to a transient server' \
+ ' when using the channel state API' do
+ port = start_server
+ ch = GRPC::Core::Channel.new("localhost:#{port}", {},
+ :this_channel_is_insecure)
+
+ expect(ch.connectivity_state).to be(GRPC::Core::ConnectivityStates::IDLE)
+
+ state = ch.connectivity_state(true)
+
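+    # Poll in a bounded loop until the channel reports READY.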
+ count = 0
+ while count < 20 && state != GRPC::Core::ConnectivityStates::READY
+ ch.watch_connectivity_state(state, Time.now + 60)
+ state = ch.connectivity_state(true)
+ count += 1
+ end
+
+ expect(state).to be(GRPC::Core::ConnectivityStates::READY)
+
+ stop_server
+
+ state = ch.connectivity_state
+
+ count = 0
+ while count < 20 && state == GRPC::Core::ConnectivityStates::READY
+ ch.watch_connectivity_state(state, Time.now + 60)
+ state = ch.connectivity_state
+ count += 1
+ end
+
+ expect(state).to_not be(GRPC::Core::ConnectivityStates::READY)
+
+ start_server(port)
+
+ state = ch.connectivity_state(true)
+
+ count = 0
+ while count < 20 && state != GRPC::Core::ConnectivityStates::READY
+ ch.watch_connectivity_state(state, Time.now + 60)
+ state = ch.connectivity_state(true)
+ count += 1
+ end
+
+ expect(state).to be(GRPC::Core::ConnectivityStates::READY)
+
+ stop_server
+ end
+end
diff --git a/src/ruby/spec/channel_spec.rb b/src/ruby/spec/channel_spec.rb
index 740eac631a..a289a00f04 100644
--- a/src/ruby/spec/channel_spec.rb
+++ b/src/ruby/spec/channel_spec.rb
@@ -153,6 +153,35 @@ describe GRPC::Core::Channel do
end
end
+ describe '#connectivity_state' do
+ it 'returns an enum' do
+ ch = GRPC::Core::Channel.new(fake_host, nil, :this_channel_is_insecure)
+ valid_states = [
+ GRPC::Core::ConnectivityStates::IDLE,
+ GRPC::Core::ConnectivityStates::CONNECTING,
+ GRPC::Core::ConnectivityStates::READY,
+ GRPC::Core::ConnectivityStates::TRANSIENT_FAILURE,
+ GRPC::Core::ConnectivityStates::FATAL_FAILURE
+ ]
+
+ expect(valid_states).to include(ch.connectivity_state)
+ end
+
+ it 'returns an enum when trying to connect' do
+ ch = GRPC::Core::Channel.new(fake_host, nil, :this_channel_is_insecure)
+ ch.connectivity_state(true)
+ valid_states = [
+ GRPC::Core::ConnectivityStates::IDLE,
+ GRPC::Core::ConnectivityStates::CONNECTING,
+ GRPC::Core::ConnectivityStates::READY,
+ GRPC::Core::ConnectivityStates::TRANSIENT_FAILURE,
+ GRPC::Core::ConnectivityStates::FATAL_FAILURE
+ ]
+
+ expect(valid_states).to include(ch.connectivity_state)
+ end
+ end
+
describe '::SSL_TARGET' do
it 'is a symbol' do
expect(GRPC::Core::Channel::SSL_TARGET).to be_a(Symbol)