author     Yash Tibrewal <yashkt@google.com>  2017-09-19 10:47:24 -0700
committer  Yash Tibrewal <yashkt@google.com>  2017-09-19 10:47:24 -0700
commit     6712c7d20334c8f2884c19cdfa6d5b059050d4ca (patch)
tree       21aa9851ccbc230106705da653a131246fff1bca
parent     06312bdb43c66150e59958479640913c3726ee10 (diff)
parent     0aedb8136fe935e0361c8bfa5ef8b46b4d191605 (diff)
Merging master into yashykt:ctocc6
-rw-r--r--  BUILD | 8
-rw-r--r--  config.m4 | 2
-rw-r--r--  doc/environment_variables.md | 1
-rw-r--r--  grpc.gemspec | 1
-rw-r--r--  include/grpc/impl/codegen/grpc_types.h | 13
-rw-r--r--  package.xml | 5
-rw-r--r--  setup.py | 2
-rw-r--r--  src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c | 3
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_plugin.c | 1
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.c | 116
-rw-r--r--  src/core/ext/transport/chttp2/transport/chttp2_transport.h | 1
-rw-r--r--  src/core/ext/transport/chttp2/transport/frame_ping.c | 2
-rw-r--r--  src/core/ext/transport/chttp2/transport/internal.h | 4
-rw-r--r--  src/core/ext/transport/chttp2/transport/parsing.c | 7
-rw-r--r--  src/core/ext/transport/chttp2/transport/stream_lists.c | 52
-rw-r--r--  src/core/ext/transport/chttp2/transport/writing.c | 38
-rw-r--r--  src/core/lib/http/httpcli_security_connector.c | 3
-rw-r--r--  src/core/lib/iomgr/closure.c | 18
-rw-r--r--  src/core/lib/security/transport/security_connector.c | 22
-rw-r--r--  src/core/tsi/ssl_transport_security.c | 113
-rw-r--r--  src/core/tsi/ssl_transport_security.h | 37
-rwxr-xr-x  src/csharp/Grpc.Auth/Grpc.Auth.csproj | 1
-rwxr-xr-x  src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj | 1
-rwxr-xr-x  src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj | 3
-rwxr-xr-x  src/csharp/Grpc.Core/Grpc.Core.csproj | 3
-rwxr-xr-x  src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj | 1
-rwxr-xr-x  src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj | 1
-rwxr-xr-x  src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.Examples/Grpc.Examples.csproj | 1
-rwxr-xr-x  src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj | 1
-rwxr-xr-x  src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj | 6
-rw-r--r--  src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj | 2
-rwxr-xr-x  src/csharp/Grpc.Reflection/Grpc.Reflection.csproj | 1
-rw-r--r--  src/csharp/doc/.gitignore | 2
-rw-r--r--  src/csharp/doc/README.md | 9
-rw-r--r--  src/csharp/doc/docfx.json | 37
-rw-r--r--  src/csharp/doc/grpc_csharp_public.shfbproj | 83
-rw-r--r--  src/csharp/doc/toc.yml | 3
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi | 11
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi | 9
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi | 1
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi | 13
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi | 118
-rw-r--r--  src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi | 2
-rw-r--r--  src/python/grpcio_health_checking/setup.py | 2
-rw-r--r--  src/python/grpcio_reflection/setup.py | 2
-rw-r--r--  src/python/grpcio_testing/grpc_version.py | 4
-rw-r--r--  src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py | 341
-rw-r--r--  src/ruby/lib/grpc/google_rpc_status_utils.rb | 28
-rw-r--r--  src/ruby/spec/google_rpc_status_utils_spec.rb | 223
-rw-r--r--  templates/config.m4.template | 2
-rw-r--r--  templates/grpc.gemspec.template | 1
-rw-r--r--  templates/package.xml.template | 5
-rw-r--r--  templates/src/python/grpcio_testing/grpc_version.py.template | 19
-rw-r--r--  test/core/client_channel/lb_policies_test.c | 2
-rw-r--r--  test/core/end2end/tests/bad_ping.c | 21
-rw-r--r--  test/core/end2end/tests/keepalive_timeout.c | 14
-rw-r--r--  test/core/end2end/tests/ping.c | 18
-rw-r--r--  test/core/tsi/ssl_transport_security_test.c | 121
-rw-r--r--  tools/distrib/python/grpcio_tools/setup.py | 2
-rw-r--r--  tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile | 13
-rw-r--r--  tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile | 13
-rw-r--r--  tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile | 11
-rw-r--r--  tools/dockerfile/test/csharp_coreclr_x64/Dockerfile | 114
-rw-r--r--  tools/internal_ci/linux/pull_request/grpc_basictests_c_dbg.cfg | 30
-rw-r--r--  tools/internal_ci/linux/pull_request/grpc_basictests_c_opt.cfg | 30
-rw-r--r--  tools/internal_ci/linux/pull_request/grpc_basictests_cpp_dbg.cfg | 30
-rw-r--r--  tools/internal_ci/linux/pull_request/grpc_basictests_cpp_opt.cfg | 30
-rw-r--r--  tools/run_tests/artifacts/artifact_targets.py | 1
-rw-r--r--  tools/run_tests/artifacts/distribtest_targets.py | 4
-rw-r--r--  tools/run_tests/artifacts/package_targets.py | 2
77 files changed, 1296 insertions, 559 deletions
diff --git a/BUILD b/BUILD
index 281375fc1d..7b55e663cb 100644
--- a/BUILD
+++ b/BUILD
@@ -574,6 +574,8 @@ grpc_cc_library(
"src/core/lib/compression/compression.c",
"src/core/lib/compression/message_compress.c",
"src/core/lib/compression/stream_compression.c",
+ "src/core/lib/debug/stats.c",
+ "src/core/lib/debug/stats_data.c",
"src/core/lib/http/format_request.c",
"src/core/lib/http/httpcli.c",
"src/core/lib/http/parser.c",
@@ -690,8 +692,6 @@ grpc_cc_library(
"src/core/lib/transport/timeout_encoding.c",
"src/core/lib/transport/transport.c",
"src/core/lib/transport/transport_op_string.c",
- "src/core/lib/debug/stats.c",
- "src/core/lib/debug/stats_data.c",
],
hdrs = [
"src/core/lib/channel/channel_args.h",
@@ -705,6 +705,8 @@ grpc_cc_library(
"src/core/lib/compression/algorithm_metadata.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/compression/stream_compression.h",
+ "src/core/lib/debug/stats.h",
+ "src/core/lib/debug/stats_data.h",
"src/core/lib/http/format_request.h",
"src/core/lib/http/httpcli.h",
"src/core/lib/http/parser.h",
@@ -807,8 +809,6 @@ grpc_cc_library(
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",
"src/core/lib/transport/transport_impl.h",
- "src/core/lib/debug/stats.h",
- "src/core/lib/debug/stats_data.h",
],
external_deps = [
"zlib",
diff --git a/config.m4 b/config.m4
index d52e37ca28..656a37e9b4 100644
--- a/config.m4
+++ b/config.m4
@@ -12,7 +12,7 @@ if test "$PHP_GRPC" != "no"; then
LIBS="-lpthread $LIBS"
CFLAGS="-Wall -Werror -Wno-parentheses-equality -Wno-unused-value -std=c11"
- CXXFLAGS="-std=c++11"
+ CXXFLAGS="-std=c++11 -fno-exceptions -fno-rtti"
GRPC_SHARED_LIBADD="-lpthread $GRPC_SHARED_LIBADD"
PHP_REQUIRE_CXX()
PHP_ADD_LIBRARY(pthread)
diff --git a/doc/environment_variables.md b/doc/environment_variables.md
index b79cd97363..f90f1d5b10 100644
--- a/doc/environment_variables.md
+++ b/doc/environment_variables.md
@@ -50,6 +50,7 @@ some configuration as environment variables that can be set.
- channel_stack_builder - traces information about channel stacks being built
- executor - traces grpc's internal thread pool ('the executor')
- http - traces state in the http2 transport engine
+ - http2_stream_state - traces all http2 stream state mutations.
- http1 - traces HTTP/1.x operations performed by gRPC
- inproc - traces the in-process transport
- flowctl - traces http2 flow control
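
As a minimal illustration (not part of the diff above), the new http2_stream_state tracer is enabled the same way as the other GRPC_TRACE entries; the sketch below assumes a POSIX environment and that GRPC_VERBOSITY is raised to DEBUG so the GPR_DEBUG-level stream-state messages actually appear:

    #include <stdlib.h>
    #include <grpc/grpc.h>

    int main(void) {
      /* GRPC_TRACE is read during grpc_init(); several tracers can be
         combined as a comma-separated list, e.g. "http,http2_stream_state". */
      setenv("GRPC_TRACE", "http2_stream_state", 1 /* overwrite */);
      setenv("GRPC_VERBOSITY", "DEBUG", 1);
      grpc_init();
      /* ... create channels/servers; stream list add/remove/pop operations
         are now logged to stderr ... */
      grpc_shutdown();
      return 0;
    }
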
diff --git a/grpc.gemspec b/grpc.gemspec
index 2d0f9fd450..96323aed10 100644
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -29,6 +29,7 @@ Gem::Specification.new do |s|
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '~> 0.5.1'
+ s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
s.add_development_dependency 'bundler', '~> 1.9'
s.add_development_dependency 'facter', '~> 2.4'
diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h
index 748dc717a3..90f03f49a3 100644
--- a/include/grpc/impl/codegen/grpc_types.h
+++ b/include/grpc/impl/codegen/grpc_types.h
@@ -188,9 +188,14 @@ typedef struct {
#define GRPC_ARG_HTTP2_MAX_FRAME_SIZE "grpc.http2.max_frame_size"
/** Should BDP probing be performed? */
#define GRPC_ARG_HTTP2_BDP_PROBE "grpc.http2.bdp_probe"
-/** Minimum time (in milliseconds) between successive ping frames being sent */
-#define GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS \
+/** Minimum time between sending successive ping frames without receiving any
+ data frame, Int valued, milliseconds. */
+#define GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS \
"grpc.http2.min_time_between_pings_ms"
+/** Minimum allowed time between receiving successive ping frames without
+ sending any data frame. Int valued, milliseconds */
+#define GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS \
+ "grpc.http2.min_ping_interval_without_data_ms"
/** Channel arg to override the http2 :scheme header */
#define GRPC_ARG_HTTP2_SCHEME "grpc.http2_scheme"
/** How many pings can we send before needing to send a data frame or header
@@ -202,10 +207,6 @@ typedef struct {
closing the transport? (0 indicates that the server can bear an infinite
number of misbehaving pings) */
#define GRPC_ARG_HTTP2_MAX_PING_STRIKES "grpc.http2.max_ping_strikes"
-/** Minimum allowed time between two pings without sending any data frame. Int
- valued, seconds */
-#define GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS \
- "grpc.http2.min_ping_interval_without_data_ms"
/** How much data are we willing to queue up per stream if
GRPC_WRITE_BUFFER_HINT is set? This is an upper bound */
#define GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE "grpc.http2.write_buffer_size"
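
To show how the renamed ping-interval args are consumed, here is a hedged client-side sketch; the target address and interval values are illustrative only, and the same grpc_channel_args could equally be passed to grpc_server_create for the receive-side throttle:

    #include <grpc/grpc.h>

    /* Sketch: build channel args using the keys defined above. */
    static grpc_channel *create_channel_with_ping_policy(void) {
      grpc_arg args[2];
      args[0].type = GRPC_ARG_INTEGER;
      args[0].key = GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
      args[0].value.integer = 10000; /* wait >= 10s between pings we send
                                        while no data frames are flowing */
      args[1].type = GRPC_ARG_INTEGER;
      args[1].key = GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS;
      args[1].value.integer = 5000;  /* treat peer pings closer than 5s apart
                                        (with no data) as ping strikes */
      grpc_channel_args channel_args = {2, args};
      return grpc_insecure_channel_create("localhost:50051", &channel_args,
                                          NULL /* reserved */);
    }
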
diff --git a/package.xml b/package.xml
index b7c0e679e5..0c201afdae 100644
--- a/package.xml
+++ b/package.xml
@@ -10,7 +10,7 @@
<email>grpc-packages@google.com</email>
<active>yes</active>
</lead>
- <date>2017-05-22</date>
+ <date>2017-08-24</date>
<time>16:06:07</time>
<version>
<release>1.7.0dev</release>
@@ -25,6 +25,9 @@
- Channel are now by default persistent #11878
- Some bug fixes from 1.4 branch #12109, #12123
- Fixed hang bug when fork() was used #11814
+- License changed to Apache 2.0
+- Added support for php_namespace option in codegen plugin #11886
+- Updated gRPC C Core library version 1.6
</notes>
<contents>
<dir baseinstalldir="/" name="/">
diff --git a/setup.py b/setup.py
index d3ea83488c..12882413ce 100644
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,7 @@ CLASSIFIERS = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
-],
+]
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
index c4c4973b2d..85ef7894ea 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -1560,6 +1560,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
+ } else {
+ GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+ "lb_on_response_received_locked_shutdown");
}
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received_locked" weak ref taken in
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
index 78551df9c3..6d09953830 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c
@@ -23,6 +23,7 @@
void grpc_chttp2_plugin_init(void) {
grpc_register_tracer(&grpc_http_trace);
grpc_register_tracer(&grpc_flowctl_trace);
+ grpc_register_tracer(&grpc_trace_http2_stream_state);
#ifndef NDEBUG
grpc_register_tracer(&grpc_trace_chttp2_refcount);
#endif
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index d18df04851..1a6e08fd0b 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -64,6 +64,11 @@
#define DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS false
#define KEEPALIVE_TIME_BACKOFF_MULTIPLIER 2
+#define DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
+#define DEFAULT_MAX_PINGS_BETWEEN_DATA 0 /* unlimited */
+#define DEFAULT_MAX_PING_STRIKES 2
+
static int g_default_client_keepalive_time_ms =
DEFAULT_CLIENT_KEEPALIVE_TIME_MS;
static int g_default_client_keepalive_timeout_ms =
@@ -75,6 +80,13 @@ static int g_default_server_keepalive_timeout_ms =
static bool g_default_keepalive_permit_without_calls =
DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
+static int g_default_min_sent_ping_interval_without_data_ms =
+ DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_min_recv_ping_interval_without_data_ms =
+ DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS;
+static int g_default_max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA;
+static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES;
+
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false, "http");
grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false, "flowctl");
@@ -152,11 +164,6 @@ static void send_ping_locked(
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error);
-#define DEFAULT_MIN_TIME_BETWEEN_PINGS_MS 0
-#define DEFAULT_MAX_PINGS_BETWEEN_DATA 3
-#define DEFAULT_MAX_PING_STRIKES 2
-#define DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */
-
/** keepalive-relevant functions */
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
@@ -362,12 +369,12 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
queue_setting_update(exec_ctx, t,
GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
- t->ping_policy.max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA;
- t->ping_policy.min_time_between_pings =
- gpr_time_from_millis(DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, GPR_TIMESPAN);
- t->ping_policy.max_ping_strikes = DEFAULT_MAX_PING_STRIKES;
- t->ping_policy.min_ping_interval_without_data = gpr_time_from_millis(
- DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, GPR_TIMESPAN);
+ t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
+ t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis(
+ g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN);
+ t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
+ t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis(
+ g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN);
/* Keepalive setting */
if (t->is_client) {
@@ -426,29 +433,37 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_PINGS_BETWEEN_DATA, 0, INT_MAX});
+ (grpc_integer_options){g_default_max_pings_without_data, 0,
+ INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
&channel_args->args[i],
- (grpc_integer_options){DEFAULT_MAX_PING_STRIKES, 0, INT_MAX});
- } else if (0 == strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS)) {
- t->ping_policy.min_time_between_pings = gpr_time_from_millis(
- grpc_channel_arg_get_integer(
- &channel_args->args[i],
- (grpc_integer_options){DEFAULT_MIN_TIME_BETWEEN_PINGS_MS, 0,
- INT_MAX}),
- GPR_TIMESPAN);
+ (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
} else if (0 ==
- strcmp(channel_args->args[i].key,
- GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS)) {
- t->ping_policy.min_ping_interval_without_data = gpr_time_from_millis(
- grpc_channel_arg_get_integer(
- &channel_args->args[i],
- (grpc_integer_options){
- DEFAULT_MIN_PING_INTERVAL_WITHOUT_DATA_MS, 0, INT_MAX}),
- GPR_TIMESPAN);
+ strcmp(
+ channel_args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ t->ping_policy.min_sent_ping_interval_without_data =
+ gpr_time_from_millis(
+ grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ (grpc_integer_options){
+ g_default_min_sent_ping_interval_without_data_ms, 0,
+ INT_MAX}),
+ GPR_TIMESPAN);
+ } else if (0 ==
+ strcmp(
+ channel_args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ t->ping_policy.min_recv_ping_interval_without_data =
+ gpr_time_from_millis(
+ grpc_channel_arg_get_integer(
+ &channel_args->args[i],
+ (grpc_integer_options){
+ g_default_min_recv_ping_interval_without_data_ms, 0,
+ INT_MAX}),
+ GPR_TIMESPAN);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer(
@@ -555,8 +570,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
+ /* No pings allowed before receiving a header or data frame. */
+ t->ping_state.pings_before_data_required = 0;
t->ping_state.is_delayed_ping_timer_set = false;
t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -623,6 +638,9 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx,
connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "close_transport");
grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
+ if (t->ping_state.is_delayed_ping_timer_set) {
+ grpc_timer_cancel(exec_ctx, &t->ping_state.delayed_ping_timer);
+ }
switch (t->keepalive_state) {
case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING:
grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
@@ -1727,8 +1745,10 @@ static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->ping_state.is_delayed_ping_timer_set = false;
- grpc_chttp2_initiate_write(exec_ctx, t,
- GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
+ if (error == GRPC_ERROR_NONE) {
+ grpc_chttp2_initiate_write(exec_ctx, t,
+ GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
+ }
}
void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -2629,6 +2649,36 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
&args->args[i],
(grpc_integer_options){g_default_keepalive_permit_without_calls,
0, 1});
+ } else if (0 ==
+ strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
+ g_default_max_ping_strikes = grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX});
+ } else if (0 == strcmp(args->args[i].key,
+ GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) {
+ g_default_max_pings_without_data = grpc_channel_arg_get_integer(
+ &args->args[i], (grpc_integer_options){
+ g_default_max_pings_without_data, 0, INT_MAX});
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ g_default_min_sent_ping_interval_without_data_ms =
+ grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){
+ g_default_min_sent_ping_interval_without_data_ms, 0,
+ INT_MAX});
+ } else if (0 ==
+ strcmp(
+ args->args[i].key,
+ GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
+ g_default_min_recv_ping_interval_without_data_ms =
+ grpc_channel_arg_get_integer(
+ &args->args[i],
+ (grpc_integer_options){
+ g_default_min_recv_ping_interval_without_data_ms, 0,
+ INT_MAX});
}
}
}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 0c4e2a91c0..55fb1a8343 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -25,6 +25,7 @@
extern grpc_tracer_flag grpc_http_trace;
extern grpc_tracer_flag grpc_flowctl_trace;
+extern grpc_tracer_flag grpc_trace_http2_stream_state;
#ifndef NDEBUG
extern grpc_tracer_flag grpc_trace_chttp2_refcount;
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.c b/src/core/ext/transport/chttp2/transport/frame_ping.c
index 81bd02ae70..d431d6b2df 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.c
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.c
@@ -92,7 +92,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_allowed_ping =
gpr_time_add(t->ping_recv_state.last_ping_recv_time,
- t->ping_policy.min_ping_interval_without_data);
+ t->ping_policy.min_recv_ping_interval_without_data);
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index c2dfce7c9c..1682be28dd 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -112,10 +112,10 @@ typedef struct {
} grpc_chttp2_ping_queue;
typedef struct {
- gpr_timespec min_time_between_pings;
int max_pings_without_data;
int max_ping_strikes;
- gpr_timespec min_ping_interval_without_data;
+ gpr_timespec min_sent_ping_interval_without_data;
+ gpr_timespec min_recv_ping_interval_without_data;
} grpc_chttp2_repeated_ping_policy;
typedef struct {
diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c
index 6c12c91365..3db1ad4123 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.c
+++ b/src/core/ext/transport/chttp2/transport/parsing.c
@@ -383,6 +383,9 @@ error_handler:
/* t->parser = grpc_chttp2_data_parser_parse;*/
t->parser = grpc_chttp2_data_parser_parse;
t->parser_data = &s->data_parser;
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
return GRPC_ERROR_NONE;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) {
/* handle stream errors by closing the stream */
@@ -559,6 +562,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
(t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
}
+ t->ping_state.pings_before_data_required =
+ t->ping_policy.max_pings_without_data;
+ t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC);
+
/* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == NULL) {
diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c
index 7cc85dea9c..47cd22d177 100644
--- a/src/core/ext/transport/chttp2/transport/stream_lists.c
+++ b/src/core/ext/transport/chttp2/transport/stream_lists.c
@@ -20,6 +20,27 @@
#include <grpc/support/log.h>
+static char *stream_list_id_string(grpc_chttp2_stream_list_id id) {
+ switch (id) {
+ case GRPC_CHTTP2_LIST_WRITABLE:
+ return "writable";
+ case GRPC_CHTTP2_LIST_WRITING:
+ return "writing";
+ case GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT:
+ return "stalled_by_transport";
+ case GRPC_CHTTP2_LIST_STALLED_BY_STREAM:
+ return "stalled_by_stream";
+ case GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY:
+ return "waiting_for_concurrency";
+ case STREAM_LIST_COUNT:
+ GPR_UNREACHABLE_CODE(return "unknown");
+ }
+ GPR_UNREACHABLE_CODE(return "unknown");
+}
+
+grpc_tracer_flag grpc_trace_http2_stream_state =
+ GRPC_TRACER_INITIALIZER(false, "http2_stream_state");
+
/* core list management */
static bool stream_list_empty(grpc_chttp2_transport *t,
@@ -44,6 +65,10 @@ static bool stream_list_pop(grpc_chttp2_transport *t,
s->included[id] = 0;
}
*stream = s;
+ if (s && GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
return s != 0;
}
@@ -62,6 +87,10 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
} else {
t->lists[id].tail = s->links[id].prev;
}
+ if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
}
static bool stream_list_maybe_remove(grpc_chttp2_transport *t,
@@ -90,6 +119,10 @@ static void stream_list_add_tail(grpc_chttp2_transport *t,
}
t->lists[id].tail = s;
s->included[id] = 1;
+ if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) {
+ gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id,
+ t->is_client ? "cli" : "svr", stream_list_id_string(id));
+ }
}
static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s,
@@ -150,17 +183,12 @@ void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "stream %u stalled by transport", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
- bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
- GRPC_FLOW_CONTROL_IF_TRACING(if (ret) gpr_log(
- GPR_DEBUG, "stream %u un-stalled by transport", (*s)->id));
- return ret;
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT);
}
void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
@@ -170,23 +198,15 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t,
void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- GRPC_FLOW_CONTROL_IF_TRACING(
- gpr_log(GPR_DEBUG, "stream %u stalled by stream", s->id));
stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream **s) {
- bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
- GRPC_FLOW_CONTROL_IF_TRACING(
- if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", (*s)->id));
- return ret;
+ return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t,
grpc_chttp2_stream *s) {
- bool ret = stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
- GRPC_FLOW_CONTROL_IF_TRACING(
- if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", s->id));
- return ret;
+ return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
}
diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c
index 3ded801985..7d9d4867e1 100644
--- a/src/core/ext/transport/chttp2/transport/writing.c
+++ b/src/core/ext/transport/chttp2/transport/writing.c
@@ -68,7 +68,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
}
if (t->ping_state.pings_before_data_required == 0 &&
t->ping_policy.max_pings_without_data != 0) {
- /* need to send something of substance before sending a ping again */
+ /* need to receive something of substance before sending a ping again */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d",
@@ -78,11 +78,18 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
return;
}
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec elapsed = gpr_time_sub(now, t->ping_state.last_ping_sent_time);
- /*gpr_log(GPR_DEBUG, "elapsed:%d.%09d min:%d.%09d", (int)elapsed.tv_sec,
- elapsed.tv_nsec, (int)t->ping_policy.min_time_between_pings.tv_sec,
- (int)t->ping_policy.min_time_between_pings.tv_nsec);*/
- if (gpr_time_cmp(elapsed, t->ping_policy.min_time_between_pings) < 0) {
+ gpr_timespec next_allowed_ping =
+ gpr_time_add(t->ping_state.last_ping_sent_time,
+ t->ping_policy.min_sent_ping_interval_without_data);
+ if (t->keepalive_permit_without_calls == 0 &&
+ grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
+ next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time,
+ gpr_time_from_seconds(7200, GPR_TIMESPAN));
+ }
+ /* gpr_log(GPR_DEBUG, "next_allowed_ping:%d.%09d now:%d.%09d",
+ (int)next_allowed_ping.tv_sec, (int)next_allowed_ping.tv_nsec,
+ (int)now.tv_sec, (int)now.tv_nsec); */
+ if (gpr_time_cmp(next_allowed_ping, now) > 0) {
/* not enough elapsed time between successive pings */
if (GRPC_TRACER_ON(grpc_http_trace) ||
GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
@@ -93,9 +100,7 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
if (!t->ping_state.is_delayed_ping_timer_set) {
t->ping_state.is_delayed_ping_timer_set = true;
grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
- gpr_time_add(t->ping_state.last_ping_sent_time,
- t->ping_policy.min_time_between_pings),
- &t->retry_initiate_ping_locked,
+ next_allowed_ping, &t->retry_initiate_ping_locked,
gpr_now(GPR_CLOCK_MONOTONIC));
}
return;
@@ -119,6 +124,12 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
grpc_chttp2_ping_create(false, pq->inflight_id));
GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx);
t->ping_state.last_ping_sent_time = now;
+ if (GRPC_TRACER_ON(grpc_http_trace) ||
+ GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
+ gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string,
+ t->ping_state.pings_before_data_required,
+ t->ping_policy.max_pings_without_data);
+ }
t->ping_state.pings_before_data_required -=
(t->ping_state.pings_before_data_required != 0);
}
@@ -257,8 +268,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
.stats = &s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
s->send_initial_metadata, &hopt, &t->outbuf);
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
+ now_writing = true;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -297,8 +307,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce,
&s->stats.outgoing));
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -375,8 +383,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
send_bytes);
s->sending_bytes += send_bytes;
}
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
@@ -487,8 +493,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_slice_buffer_add(
&t->outbuf, grpc_chttp2_window_update_create(0, transport_announce,
&throwaway_stats));
- t->ping_state.pings_before_data_required =
- t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time =
gpr_inf_past(GPR_CLOCK_MONOTONIC);
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 97c2886525..c553fa3981 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -43,7 +43,8 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx,
grpc_httpcli_ssl_channel_security_connector *c =
(grpc_httpcli_ssl_channel_security_connector *)sc;
if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
+ tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory);
+ c->handshaker_factory = NULL;
}
if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name);
gpr_free(sc);
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index 7236e23cf7..00edefc6ae 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
GPR_TIMER_BEGIN("grpc_closure_sched", 0);
if (c != NULL) {
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
@@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
#ifndef NDEBUG
- GPR_ASSERT(!c->scheduled);
+ if (c->scheduled) {
+ gpr_log(GPR_ERROR,
+ "Closure already scheduled. (closure: %p, created: [%s:%d], "
+ "previously scheduled at: [%s: %d] run?: %s",
+ c, c->file_created, c->line_created, c->file_initiated,
+ c->line_initiated, c->run ? "true" : "false");
+ abort();
+ }
c->scheduled = true;
c->file_initiated = file;
c->line_initiated = line;
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index a7568b995f..2a9e939d40 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -455,14 +455,14 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create(
typedef struct {
grpc_channel_security_connector base;
- tsi_ssl_client_handshaker_factory *handshaker_factory;
+ tsi_ssl_client_handshaker_factory *client_handshaker_factory;
char *target_name;
char *overridden_target_name;
} grpc_ssl_channel_security_connector;
typedef struct {
grpc_server_security_connector base;
- tsi_ssl_server_handshaker_factory *handshaker_factory;
+ tsi_ssl_server_handshaker_factory *server_handshaker_factory;
} grpc_ssl_server_security_connector;
static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
@@ -470,9 +470,8 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx,
grpc_ssl_channel_security_connector *c =
(grpc_ssl_channel_security_connector *)sc;
grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
- if (c->handshaker_factory != NULL) {
- tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory);
- }
+ tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
+ c->client_handshaker_factory = NULL;
if (c->target_name != NULL) gpr_free(c->target_name);
if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name);
gpr_free(sc);
@@ -482,9 +481,8 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
- if (c->handshaker_factory != NULL) {
- tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory);
- }
+ tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
+ c->server_handshaker_factory = NULL;
gpr_free(sc);
}
@@ -496,7 +494,7 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker(
- c->handshaker_factory,
+ c->client_handshaker_factory,
c->overridden_target_name != NULL ? c->overridden_target_name
: c->target_name,
&tsi_hs);
@@ -521,7 +519,7 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx,
// Instantiate TSI handshaker.
tsi_handshaker *tsi_hs = NULL;
tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker(
- c->handshaker_factory, &tsi_hs);
+ c->server_handshaker_factory, &tsi_hs);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
@@ -852,7 +850,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create(
result = tsi_create_ssl_client_handshaker_factory(
has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs,
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
+ &c->client_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@@ -897,7 +895,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
config->pem_root_certs, get_tsi_client_certificate_request_type(
config->client_certificate_request),
ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols,
- &c->handshaker_factory);
+ &c->server_handshaker_factory);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c
index 1fd65928f9..7ebf9dd96f 100644
--- a/src/core/tsi/ssl_transport_security.c
+++ b/src/core/tsi/ssl_transport_security.c
@@ -67,7 +67,13 @@
/* --- Structure definitions. ---*/
+struct tsi_ssl_handshaker_factory {
+ const tsi_ssl_handshaker_factory_vtable *vtable;
+ gpr_refcount refcount;
+};
+
struct tsi_ssl_client_handshaker_factory {
+ tsi_ssl_handshaker_factory base;
SSL_CTX *ssl_context;
unsigned char *alpn_protocol_list;
size_t alpn_protocol_list_length;
@@ -77,6 +83,7 @@ struct tsi_ssl_server_handshaker_factory {
/* Several contexts to support SNI.
The tsi_peer array contains the subject names of the server certificates
associated with the contexts at the same index. */
+ tsi_ssl_handshaker_factory base;
SSL_CTX **ssl_contexts;
tsi_peer *ssl_context_x509_subject_names;
size_t ssl_context_count;
@@ -90,6 +97,7 @@ typedef struct {
BIO *into_ssl;
BIO *from_ssl;
tsi_result result;
+ tsi_ssl_handshaker_factory *factory_ref;
} tsi_ssl_handshaker;
typedef struct {
@@ -846,6 +854,47 @@ static const tsi_frame_protector_vtable frame_protector_vtable = {
ssl_protector_destroy,
};
+/* --- tsi_server_handshaker_factory methods implementation. --- */
+
+static void tsi_ssl_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return;
+
+ if (self->vtable != NULL && self->vtable->destroy != NULL) {
+ self->vtable->destroy(self);
+ }
+ /* Note, we don't free(self) here because this object is always directly
+ * embedded in another object. If tsi_ssl_handshaker_factory_init allocates
+ * any memory, it should be free'd here. */
+}
+
+static tsi_ssl_handshaker_factory *tsi_ssl_handshaker_factory_ref(
+ tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return NULL;
+ gpr_refn(&self->refcount, 1);
+ return self;
+}
+
+static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory *self) {
+ if (self == NULL) return;
+
+ if (gpr_unref(&self->refcount)) {
+ tsi_ssl_handshaker_factory_destroy(self);
+ }
+}
+
+static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {NULL};
+
+/* Initializes a tsi_ssl_handshaker_factory object. Caller is responsible for
+ * allocating memory for the factory. */
+static void tsi_ssl_handshaker_factory_init(
+ tsi_ssl_handshaker_factory *factory) {
+ GPR_ASSERT(factory != NULL);
+
+ factory->vtable = &handshaker_factory_vtable;
+ gpr_ref_init(&factory->refcount, 1);
+}
+
/* --- tsi_handshaker methods implementation. ---*/
static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self,
@@ -1013,6 +1062,7 @@ static tsi_result ssl_handshaker_create_frame_protector(
static void ssl_handshaker_destroy(tsi_handshaker *self) {
tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self;
SSL_free(impl->ssl); /* The BIO objects are owned by ssl */
+ tsi_ssl_handshaker_factory_unref(impl->factory_ref);
gpr_free(impl);
}
@@ -1030,6 +1080,7 @@ static const tsi_handshaker_vtable handshaker_vtable = {
static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
const char *server_name_indication,
+ tsi_ssl_handshaker_factory *factory,
tsi_handshaker **handshaker) {
SSL *ssl = SSL_new(ctx);
BIO *into_ssl = NULL;
@@ -1085,6 +1136,8 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client,
impl->from_ssl = from_ssl;
impl->result = TSI_HANDSHAKE_IN_PROGRESS;
impl->base.vtable = &handshaker_vtable;
+ impl->factory_ref = tsi_ssl_handshaker_factory_ref(factory);
+
*handshaker = &impl->base;
return TSI_OK;
}
@@ -1121,11 +1174,20 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker) {
return create_tsi_ssl_handshaker(self->ssl_context, 1, server_name_indication,
- handshaker);
+ &self->base, handshaker);
}
-void tsi_ssl_client_handshaker_factory_destroy(
+void tsi_ssl_client_handshaker_factory_unref(
tsi_ssl_client_handshaker_factory *self) {
+ if (self == NULL) return;
+ tsi_ssl_handshaker_factory_unref(&self->base);
+}
+
+static void tsi_ssl_client_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *factory) {
+ if (factory == NULL) return;
+ tsi_ssl_client_handshaker_factory *self =
+ (tsi_ssl_client_handshaker_factory *)factory;
if (self->ssl_context != NULL) SSL_CTX_free(self->ssl_context);
if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list);
gpr_free(self);
@@ -1150,11 +1212,21 @@ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
if (self->ssl_context_count == 0) return TSI_INVALID_ARGUMENT;
/* Create the handshaker with the first context. We will switch if needed
because of SNI in ssl_server_handshaker_factory_servername_callback. */
- return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, handshaker);
+ return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, &self->base,
+ handshaker);
}
-void tsi_ssl_server_handshaker_factory_destroy(
+void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self) {
+ if (self == NULL) return;
+ tsi_ssl_handshaker_factory_unref(&self->base);
+}
+
+static void tsi_ssl_server_handshaker_factory_destroy(
+ tsi_ssl_handshaker_factory *factory) {
+ if (factory == NULL) return;
+ tsi_ssl_server_handshaker_factory *self =
+ (tsi_ssl_server_handshaker_factory *)factory;
size_t i;
for (i = 0; i < self->ssl_context_count; i++) {
if (self->ssl_contexts[i] != NULL) {
@@ -1263,6 +1335,9 @@ static int server_handshaker_factory_npn_advertised_callback(
/* --- tsi_ssl_handshaker_factory constructors. --- */
+static tsi_ssl_handshaker_factory_vtable client_handshaker_factory_vtable = {
+ tsi_ssl_client_handshaker_factory_destroy};
+
tsi_result tsi_create_ssl_client_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair,
const char *pem_root_certs, const char *cipher_suites,
@@ -1285,6 +1360,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
impl = gpr_zalloc(sizeof(*impl));
+ tsi_ssl_handshaker_factory_init(&impl->base);
+ impl->base.vtable = &client_handshaker_factory_vtable;
+
impl->ssl_context = ssl_context;
do {
@@ -1322,7 +1400,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
}
} while (0);
if (result != TSI_OK) {
- tsi_ssl_client_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL);
@@ -1332,6 +1410,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory(
return TSI_OK;
}
+static tsi_ssl_handshaker_factory_vtable server_handshaker_factory_vtable = {
+ tsi_ssl_server_handshaker_factory_destroy};
+
tsi_result tsi_create_ssl_server_handshaker_factory(
const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs,
size_t num_key_cert_pairs, const char *pem_client_root_certs,
@@ -1364,12 +1445,15 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
}
impl = gpr_zalloc(sizeof(*impl));
+ tsi_ssl_handshaker_factory_init(&impl->base);
+ impl->base.vtable = &server_handshaker_factory_vtable;
+
impl->ssl_contexts = gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *));
impl->ssl_context_x509_subject_names =
gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer));
if (impl->ssl_contexts == NULL ||
impl->ssl_context_x509_subject_names == NULL) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return TSI_OUT_OF_RESOURCES;
}
impl->ssl_context_count = num_key_cert_pairs;
@@ -1379,7 +1463,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
&impl->alpn_protocol_list,
&impl->alpn_protocol_list_length);
if (result != TSI_OK) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
@@ -1451,10 +1535,11 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
} while (0);
if (result != TSI_OK) {
- tsi_ssl_server_handshaker_factory_destroy(impl);
+ tsi_ssl_handshaker_factory_unref(&impl->base);
return result;
}
}
+
*factory = impl;
return TSI_OK;
}
@@ -1501,3 +1586,15 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) {
return 0; /* Not found. */
}
+
+/* --- Testing support. --- */
+const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory *factory,
+ tsi_ssl_handshaker_factory_vtable *new_vtable) {
+ GPR_ASSERT(factory != NULL);
+ GPR_ASSERT(factory->vtable != NULL);
+
+ const tsi_ssl_handshaker_factory_vtable *orig_vtable = factory->vtable;
+ factory->vtable = new_vtable;
+ return orig_vtable;
+}
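
The practical effect of the refcounting above is that a security connector may drop its factory reference while handshakers created from it are still running: each handshaker now keeps the factory alive through factory_ref. A minimal lifetime sketch (error handling omitted; pem_roots is assumed to hold valid PEM root certificates):

    tsi_ssl_client_handshaker_factory *factory = NULL;
    tsi_handshaker *hs = NULL;
    tsi_create_ssl_client_handshaker_factory(NULL /* no client cert */, pem_roots,
                                             NULL /* default ciphers */, NULL, 0,
                                             &factory);
    tsi_ssl_client_handshaker_factory_create_handshaker(factory, "example.com",
                                                        &hs);
    tsi_ssl_client_handshaker_factory_unref(factory); /* safe: hs holds a ref */
    /* ... drive the handshake ... */
    tsi_handshaker_destroy(hs); /* releases the last ref; the factory is
                                   destroyed here, not at the unref above */
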
diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h
index 177599930b..3abfdf5ed8 100644
--- a/src/core/tsi/ssl_transport_security.h
+++ b/src/core/tsi/ssl_transport_security.h
@@ -96,10 +96,10 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker(
tsi_ssl_client_handshaker_factory *self, const char *server_name_indication,
tsi_handshaker **handshaker);
-/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
- while handshakers created with this factory are still in use. */
-void tsi_ssl_client_handshaker_factory_destroy(
- tsi_ssl_client_handshaker_factory *self);
+/* Decrements reference count of the handshaker factory. Handshaker factory will
+ * be destroyed once no references exist. */
+void tsi_ssl_client_handshaker_factory_unref(
+ tsi_ssl_client_handshaker_factory *factory);
/* --- tsi_ssl_server_handshaker_factory object ---
@@ -158,9 +158,9 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex(
tsi_result tsi_ssl_server_handshaker_factory_create_handshaker(
tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker);
-/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory
- while handshakers created with this factory are still in use. */
-void tsi_ssl_server_handshaker_factory_destroy(
+/* Decrements reference count of the handshaker factory. Handshaker factory will
+ * be destroyed once no references exist. */
+void tsi_ssl_server_handshaker_factory_unref(
tsi_ssl_server_handshaker_factory *self);
/* Util that checks that an ssl peer matches a specific name.
@@ -170,6 +170,29 @@ void tsi_ssl_server_handshaker_factory_destroy(
- handle public suffix wildchar more strictly (e.g. *.co.uk) */
int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name);
+/* --- Testing support. ---
+
+ These functions and typedefs are not intended to be used outside of testing.
+ */
+
+/* Base type of client and server handshaker factories. */
+typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory;
+
+/* Function pointer to handshaker_factory destructor. */
+typedef void (*tsi_ssl_handshaker_factory_destructor)(
+ tsi_ssl_handshaker_factory *factory);
+
+/* Virtual table for tsi_ssl_handshaker_factory. */
+typedef struct {
+ tsi_ssl_handshaker_factory_destructor destroy;
+} tsi_ssl_handshaker_factory_vtable;
+
+/* Set destructor of handshaker_factory to new_destructor, returns previous
+ destructor. */
+const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable(
+ tsi_ssl_handshaker_factory *factory,
+ tsi_ssl_handshaker_factory_vtable *new_vtable);
+
#ifdef __cplusplus
}
#endif
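
The testing hook exists so a test can observe when a factory is actually destroyed. A hypothetical use (not taken from the commit's test file) chains a counting destructor in front of the original vtable; it assumes the test can obtain a tsi_ssl_handshaker_factory pointer, e.g. because the base struct is the first member of the concrete factory types:

    static const tsi_ssl_handshaker_factory_vtable *original_vtable = NULL;
    static int factory_destroy_count = 0;

    static void counting_destroy(tsi_ssl_handshaker_factory *factory) {
      factory_destroy_count++;
      if (original_vtable != NULL && original_vtable->destroy != NULL) {
        original_vtable->destroy(factory); /* chain to the real destructor */
      }
    }

    static tsi_ssl_handshaker_factory_vtable counting_vtable = {counting_destroy};

    static void install_destroy_counter(tsi_ssl_handshaker_factory *factory) {
      original_vtable =
          tsi_ssl_handshaker_factory_swap_vtable(factory, &counting_vtable);
    }
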
diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
index abf326459c..bbcbd95be5 100755
--- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj
+++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj
@@ -15,7 +15,6 @@
<PackageTags>gRPC RPC Protocol HTTP/2 Auth OAuth2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
index 9ad6fd0c61..4d6767fa98 100755
--- a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
+++ b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj
@@ -15,7 +15,6 @@
<PackageTags>gRPC test testing</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
index 6df68fda58..18993a93e0 100755
--- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
+++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Core.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Core.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
@@ -21,7 +19,6 @@
<PackageReference Include="Newtonsoft.Json" Version="9.0.1" />
<PackageReference Include="NUnit" Version="3.6.0" />
<PackageReference Include="NUnitLite" Version="3.6.0" />
- <PackageReference Include="NUnit.ConsoleRunner" Version="3.6.0" />
<PackageReference Include="OpenCover" Version="4.6.519" />
<PackageReference Include="ReportGenerator" Version="2.4.4.0" />
</ItemGroup>
diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj
index dde800aadd..d9950b2f20 100755
--- a/src/csharp/Grpc.Core/Grpc.Core.csproj
+++ b/src/csharp/Grpc.Core/Grpc.Core.csproj
@@ -14,7 +14,6 @@
<PackageTags>gRPC RPC Protocol HTTP/2</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
@@ -65,7 +64,7 @@
<ItemGroup Condition=" '$(TargetFramework)' == 'netstandard1.5' ">
<PackageReference Include="System.Runtime.Loader" Version="4.0.0" />
<PackageReference Include="System.Threading.Thread" Version="4.0.0" />
- <PackageReference Include="System.Threading.ThreadPool" Version="4.0.0" />
+ <PackageReference Include="System.Threading.ThreadPool" Version="4.0.10" />
</ItemGroup>
<Import Project="NativeDeps.csproj.include" />
diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
index 74deed6584..db4e3ef4e3 100755
--- a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
+++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj
@@ -8,7 +8,6 @@
<AssemblyName>Grpc.Examples.MathClient</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.MathClient</PackageId>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
index 1abf261498..b12b418d01 100755
--- a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
+++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj
@@ -8,7 +8,6 @@
<AssemblyName>Grpc.Examples.MathServer</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.MathServer</PackageId>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
index d2a13ed6e1..3ccc9adfaf 100755
--- a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
+++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Examples.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Examples.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.csproj b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
index 491d313f17..baa3b4ce6c 100755
--- a/src/csharp/Grpc.Examples/Grpc.Examples.csproj
+++ b/src/csharp/Grpc.Examples/Grpc.Examples.csproj
@@ -7,7 +7,6 @@
<TargetFrameworks>net45;netcoreapp1.0</TargetFrameworks>
<AssemblyName>Grpc.Examples</AssemblyName>
<PackageId>Grpc.Examples</PackageId>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
index 2ccf46b9b9..9da0539dcb 100755
--- a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
+++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.HealthCheck.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.HealthCheck.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
index 3eb90434f3..681719d124 100755
--- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
+++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj
@@ -14,7 +14,6 @@
<PackageTags>gRPC health check</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
index c67beea7cd..35713156ea 100755
--- a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting.Client</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.Client</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
index e452257b1b..3ecefe3bc4 100755
--- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj
@@ -9,8 +9,6 @@
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.QpsWorker</PackageId>
<ServerGarbageCollection>true</ServerGarbageCollection>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
index a1fb316fdb..1092b2c21e 100755
--- a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting.Server</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.Server</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
index f64bea3d2b..22272547f6 100755
--- a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
+++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting.StressClient</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting.StressClient</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
index f5077fe0f7..c02c9844e3 100755
--- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
+++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.IntegrationTesting</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.IntegrationTesting</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
@@ -31,10 +29,6 @@
<Reference Include="Microsoft.CSharp" />
</ItemGroup>
- <ItemGroup Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">
- <PackageReference Include="System.Linq.Expressions" Version="4.1.1" />
- </ItemGroup>
-
<ItemGroup>
<Compile Include="..\Grpc.Core\Version.cs" />
</ItemGroup>
diff --git a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
index 17797e1e1e..108357e4eb 100644
--- a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
+++ b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Microbenchmarks</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Microbenchmarks</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
index cf756c68ad..d368697124 100755
--- a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
+++ b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
@@ -8,8 +8,6 @@
<AssemblyName>Grpc.Reflection.Tests</AssemblyName>
<OutputType>Exe</OutputType>
<PackageId>Grpc.Reflection.Tests</PackageId>
- <PackageTargetFallback Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">$(PackageTargetFallback);portable-net45</PackageTargetFallback>
- <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
<TreatWarningsAsErrors>true</TreatWarningsAsErrors>
</PropertyGroup>
diff --git a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
index b77fd69aee..704eea5c17 100755
--- a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
+++ b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
@@ -14,7 +14,6 @@
<PackageTags>gRPC reflection</PackageTags>
<PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
- <NetStandardImplicitPackageVersion Condition=" '$(TargetFramework)' == 'netstandard1.5' ">1.6.0</NetStandardImplicitPackageVersion>
<IncludeSymbols>true</IncludeSymbols>
<IncludeSource>true</IncludeSource>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
diff --git a/src/csharp/doc/.gitignore b/src/csharp/doc/.gitignore
new file mode 100644
index 0000000000..09ee235efc
--- /dev/null
+++ b/src/csharp/doc/.gitignore
@@ -0,0 +1,2 @@
+html
+obj
diff --git a/src/csharp/doc/README.md b/src/csharp/doc/README.md
index 585500b5ca..46cce013a1 100644
--- a/src/csharp/doc/README.md
+++ b/src/csharp/doc/README.md
@@ -1,2 +1,9 @@
+DocFX-generated C# API Reference
+--------------------------------
-SandCastle project files to generate HTML reference documentation. \ No newline at end of file
+Install docfx based on the instructions here: https://github.com/dotnet/docfx
+
+```
+# generate docfx documentation into ./html directory
+$ docfx
+```
diff --git a/src/csharp/doc/docfx.json b/src/csharp/doc/docfx.json
new file mode 100644
index 0000000000..7219d0e7a6
--- /dev/null
+++ b/src/csharp/doc/docfx.json
@@ -0,0 +1,37 @@
+{
+ "metadata": [
+ {
+ "src": [
+ {
+ "files": ["Grpc.Core/Grpc.Core.csproj",
+ "Grpc.Auth/Grpc.Auth.csproj",
+ "Grpc.Core.Testing/Grpc.Core.Testing.csproj",
+ "Grpc.HealthCheck/Grpc.HealthCheck.csproj",
+ "Grpc.Reflection/Grpc.HealthCheck.csproj"],
+ "exclude": [ "**/bin/**", "**/obj/**" ],
+ "cwd": ".."
+ }
+ ],
+ "properties": { "TargetFramework": "net45" },
+ "dest": "obj/api"
+ }
+ ],
+ "build": {
+ "content": [
+ {
+ "files": [ "**/*.yml" ],
+ "cwd": "obj/api",
+ "dest": "api"
+ },
+ {
+ "files": [ "toc.yml"],
+ }
+ ],
+ "globalMetadata": {
+ "_appTitle": "gRPC C#",
+ "_enableSearch": true,
+ "_disableContribution": true
+ },
+ "dest": "html"
+ }
+}
diff --git a/src/csharp/doc/grpc_csharp_public.shfbproj b/src/csharp/doc/grpc_csharp_public.shfbproj
deleted file mode 100644
index fab953da35..0000000000
--- a/src/csharp/doc/grpc_csharp_public.shfbproj
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <PropertyGroup>
- <!-- The configuration and platform will be used to determine which assemblies to include from solution and
- project documentation sources -->
- <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
- <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
- <SchemaVersion>2.0</SchemaVersion>
- <ProjectGuid>{77e3da09-fc92-486f-a90a-99ca788e8b59}</ProjectGuid>
- <SHFBSchemaVersion>2015.6.5.0</SHFBSchemaVersion>
- <!-- AssemblyName, Name, and RootNamespace are not used by SHFB but Visual Studio adds them anyway -->
- <AssemblyName>Documentation</AssemblyName>
- <RootNamespace>Documentation</RootNamespace>
- <Name>Documentation</Name>
- <!-- SHFB properties -->
- <FrameworkVersion>.NET Framework 4.5</FrameworkVersion>
- <OutputPath>..\..\..\doc\ref\csharp\html</OutputPath>
- <Language>en-US</Language>
- <DocumentationSources>
- <DocumentationSource sourceFile="..\Grpc.Auth\Grpc.Auth.csproj" />
-<DocumentationSource sourceFile="..\Grpc.Core\Grpc.Core.csproj" />
-<DocumentationSource sourceFile="..\Grpc.HealthCheck\Grpc.HealthCheck.csproj" />
-<DocumentationSource sourceFile="..\Grpc.Reflection\Grpc.Reflection.csproj" />
-<DocumentationSource sourceFile="..\Grpc.Core.Testing\Grpc.Core.Testing.csproj" /></DocumentationSources>
- <BuildAssemblerVerbosity>OnlyWarningsAndErrors</BuildAssemblerVerbosity>
- <HelpFileFormat>Website</HelpFileFormat>
- <IndentHtml>False</IndentHtml>
- <KeepLogFile>True</KeepLogFile>
- <DisableCodeBlockComponent>False</DisableCodeBlockComponent>
- <CleanIntermediates>True</CleanIntermediates>
- <HelpFileVersion>1.0.0.0</HelpFileVersion>
- <MaximumGroupParts>2</MaximumGroupParts>
- <NamespaceGrouping>False</NamespaceGrouping>
- <SyntaxFilters>Standard</SyntaxFilters>
- <SdkLinkTarget>Blank</SdkLinkTarget>
- <RootNamespaceContainer>True</RootNamespaceContainer>
- <PresentationStyle>VS2013</PresentationStyle>
- <Preliminary>False</Preliminary>
- <NamingMethod>MemberName</NamingMethod>
- <HelpTitle>gRPC C#</HelpTitle>
- <ContentPlacement>AboveNamespaces</ContentPlacement>
- <HtmlHelpName>Documentation</HtmlHelpName>
- <NamespaceSummaries>
- <NamespaceSummaryItem name="Grpc.Auth" isDocumented="True">Provides OAuth2 based authentication for gRPC. &lt;c&gt;Grpc.Auth&lt;/c&gt; currently consists of a set of very lightweight wrappers and uses C# &lt;a href="https://www.nuget.org/packages/Google.Apis.Auth/"&gt;Google.Apis.Auth&lt;/a&gt; library.</NamespaceSummaryItem>
- <NamespaceSummaryItem name="Grpc.Core" isDocumented="True">Main namespace for gRPC C# functionality. Contains concepts representing both client side and server side gRPC logic.
-
-&lt;seealso cref="Grpc.Core.Channel"/&gt;
-&lt;seealso cref="Grpc.Core.Server"/&gt;</NamespaceSummaryItem>
- <NamespaceSummaryItem name="Grpc.Core.Logging" isDocumented="True">Provides functionality to redirect gRPC logs to application-specified destination.</NamespaceSummaryItem>
- <NamespaceSummaryItem name="Grpc.Core.Utils" isDocumented="True">Various utilities for gRPC C#.</NamespaceSummaryItem>
- </NamespaceSummaries>
- <MissingTags>Summary, Parameter, AutoDocumentCtors, Namespace, TypeParameter, AutoDocumentDispose</MissingTags>
- </PropertyGroup>
- <!-- There are no properties for these groups. AnyCPU needs to appear in order for Visual Studio to perform
- the build. The others are optional common platform types that may appear. -->
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x86' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x64' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x64' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|Win32' ">
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|Win32' ">
- </PropertyGroup>
- <!-- Import the SHFB build targets -->
- <Import Project="$(SHFBROOT)\SandcastleHelpFileBuilder.targets" />
- <!-- The pre-build and post-build event properties must appear *after* the targets file import in order to be
- evaluated correctly. -->
- <PropertyGroup>
- <PreBuildEvent>
- </PreBuildEvent>
- <PostBuildEvent>
- </PostBuildEvent>
- <RunPostBuildEvent>OnBuildSuccess</RunPostBuildEvent>
- </PropertyGroup>
-</Project> \ No newline at end of file
diff --git a/src/csharp/doc/toc.yml b/src/csharp/doc/toc.yml
new file mode 100644
index 0000000000..c3a1e415ab
--- /dev/null
+++ b/src/csharp/doc/toc.yml
@@ -0,0 +1,3 @@
+- name: API Documentation
+ href: obj/api/
+ homepage: obj/api/Grpc.Core.yml
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index 28c30e5d35..237f430799 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -41,9 +41,8 @@ cdef class CompletionQueue:
cdef object user_tag = None
cdef Call operation_call = None
cdef CallDetails request_call_details = None
- cdef Metadata request_metadata = None
+ cdef object request_metadata = None
cdef Operations batch_operations = None
- cdef Operation batch_operation = None
if event.type == GRPC_QUEUE_TIMEOUT:
return Event(
event.type, False, None, None, None, None, False, None)
@@ -63,14 +62,8 @@ cdef class CompletionQueue:
operation_call = tag.operation_call
request_call_details = tag.request_call_details
if tag.request_metadata is not None:
- request_metadata = tag.request_metadata
- request_metadata._claim_slice_ownership()
+ request_metadata = tuple(tag.request_metadata)
batch_operations = tag.batch_operations
- if tag.batch_operations is not None:
- for op in batch_operations.operations:
- batch_operation = <Operation>op
- if batch_operation._received_metadata is not None:
- batch_operation._received_metadata._claim_slice_ownership()
if tag.is_new_request:
# Stuff in the tag not explicitly handled by us needs to live through
# the life of the call
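For orientation only (not code from this patch): once the Cython layer hands request metadata up as a plain tuple of pairs, everything above it can treat it as an ordinary sequence. A hedged sketch at the public grpcio API level; the handler and the printed key are illustrative assumptions.
```
# Illustrative sketch: request metadata consumed as a plain sequence of
# (key, value) pairs at the public API layer.
import grpc

def unary_echo(request, context):
    # invocation_metadata() is a sequence of (key, value) pairs.
    metadata = dict(context.invocation_metadata())
    print(metadata.get('user-agent'))
    return request

# Wrapping the handler the way a generated servicer binding would.
method_handler = grpc.unary_unary_rpc_method_handler(unary_echo)
```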
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
index 98d7a9820d..57816f1cab 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -76,7 +76,7 @@ cdef class CredentialsMetadataPlugin:
"""
Args:
plugin_callback (callable): Callback accepting a service URL (str/bytes)
- and callback object (accepting a Metadata,
+ and callback object (accepting a MetadataArray,
grpc_status_code, and a str/bytes error message). This argument
when called should be non-blocking and eventually call the callback
object with the appropriate status code/details and metadata (if
@@ -129,8 +129,7 @@ cdef void plugin_get_metadata(
def python_callback(
Metadata metadata, grpc_status_code status,
bytes error_details):
- cb(user_data, metadata.c_metadata_array.metadata,
- metadata.c_metadata_array.count, status, error_details)
+ cb(user_data, metadata.c_metadata, metadata.c_count, status, error_details)
called_flag[0] = True
cdef CredentialsMetadataPlugin self = <CredentialsMetadataPlugin>state
cdef AuthMetadataContext cy_context = AuthMetadataContext()
@@ -139,8 +138,8 @@ cdef void plugin_get_metadata(
self.plugin_callback(cy_context, python_callback)
except Exception as error:
if not called_flag[0]:
- cb(user_data, Metadata([]).c_metadata_array.metadata,
- 0, StatusCode.unknown, traceback.format_exc().encode())
+ cb(user_data, NULL, 0, StatusCode.unknown,
+ traceback.format_exc().encode())
cdef void plugin_destroy_c_plugin_state(void *state) with gil:
cpython.Py_DECREF(<CredentialsMetadataPlugin>state)
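For context (an interpretation, not part of the patch): the public-layer shape of a metadata plugin is unchanged by the marshaling rework above; the plugin still calls back with a sequence of (key, value) pairs plus an optional error. A hedged sketch using the public grpcio API, with a made-up token:
```
# Illustrative sketch only: a metadata plugin at the public grpcio layer.
import grpc

class StaticTokenAuth(grpc.AuthMetadataPlugin):
    """Attaches a fixed (made-up) bearer token to every call."""

    def __call__(self, context, callback):
        # Metadata is a tuple of (key, value) pairs; None means "no error".
        callback((('authorization', 'Bearer example-token'),), None)

call_creds = grpc.metadata_call_credentials(StaticTokenAuth())
channel_creds = grpc.composite_channel_credentials(
    grpc.ssl_channel_credentials(), call_creds)
```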
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index 5950bfa0e6..28cf114451 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -59,6 +59,7 @@ cdef extern from "grpc/grpc.h":
grpc_slice grpc_slice_malloc(size_t length) nogil
grpc_slice grpc_slice_from_copied_string(const char *source) nogil
grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t len) nogil
+ grpc_slice grpc_slice_copy(grpc_slice s) nogil
# Declare functions for function-like macros (because Cython)...
void *grpc_slice_start_ptr "GRPC_SLICE_START_PTR" (grpc_slice s) nogil
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
index 8ace6aeb52..9c40ebf0c2 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -37,7 +37,7 @@ cdef class OperationTag:
cdef Server shutting_down_server
cdef Call operation_call
cdef CallDetails request_call_details
- cdef Metadata request_metadata
+ cdef MetadataArray request_metadata
cdef Operations batch_operations
cdef bint is_new_request
@@ -51,7 +51,7 @@ cdef class Event:
# For Server.request_call
cdef readonly bint is_new_request
cdef readonly CallDetails request_call_details
- cdef readonly Metadata request_metadata
+ cdef readonly object request_metadata
# For server calls
cdef readonly Call operation_call
@@ -92,15 +92,20 @@ cdef class Metadatum:
cdef class Metadata:
+ cdef grpc_metadata *c_metadata
+ cdef readonly size_t c_count
+
+
+cdef class MetadataArray:
+
cdef grpc_metadata_array c_metadata_array
- cdef void _claim_slice_ownership(self)
cdef class Operation:
cdef grpc_op c_op
cdef ByteBuffer _received_message
- cdef Metadata _received_metadata
+ cdef MetadataArray _received_metadata
cdef grpc_status_code _received_status_code
cdef grpc_slice _status_details
cdef int _received_cancelled
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index 1b2ddd2469..0a2a6eee05 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -238,7 +238,7 @@ cdef class Event:
def __cinit__(self, grpc_completion_type type, bint success,
object tag, Call operation_call,
CallDetails request_call_details,
- Metadata request_metadata,
+ object request_metadata,
bint is_new_request,
Operations batch_operations):
self.type = type
@@ -437,48 +437,79 @@ cdef class Metadatum:
cdef class _MetadataIterator:
cdef size_t i
- cdef Metadata metadata
+ cdef size_t _length
+ cdef object _metadatum_indexable
- def __cinit__(self, Metadata metadata not None):
+ def __cinit__(self, length, metadatum_indexable):
+ self._length = length
+ self._metadatum_indexable = metadatum_indexable
self.i = 0
- self.metadata = metadata
def __iter__(self):
return self
def __next__(self):
- if self.i < len(self.metadata):
- result = self.metadata[self.i]
+ if self.i < self._length:
+ result = self._metadatum_indexable[self.i]
self.i = self.i + 1
return result
else:
raise StopIteration
+# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this; just use an
+# ordinary sequence of pairs of bytestrings all the way down to the
+# grpc_call_start_batch call.
cdef class Metadata:
+ """Metadata being passed from application to core."""
def __cinit__(self, metadata_iterable):
+ metadata_sequence = tuple(metadata_iterable)
+ cdef size_t count = len(metadata_sequence)
with nogil:
grpc_init()
- grpc_metadata_array_init(&self.c_metadata_array)
- metadata = list(metadata_iterable)
- for metadatum in metadata:
- if not isinstance(metadatum, Metadatum):
- raise TypeError("expected list of Metadatum")
- self.c_metadata_array.count = len(metadata)
- self.c_metadata_array.capacity = len(metadata)
+ self.c_metadata = <grpc_metadata *>gpr_malloc(
+ count * sizeof(grpc_metadata))
+ self.c_count = count
+ for index, metadatum in enumerate(metadata_sequence):
+ self.c_metadata[index].key = grpc_slice_copy(
+ (<Metadatum>metadatum).c_metadata.key)
+ self.c_metadata[index].value = grpc_slice_copy(
+ (<Metadatum>metadatum).c_metadata.value)
+
+ def __dealloc__(self):
+ with nogil:
+ for index in range(self.c_count):
+ grpc_slice_unref(self.c_metadata[index].key)
+ grpc_slice_unref(self.c_metadata[index].value)
+ gpr_free(self.c_metadata)
+ grpc_shutdown()
+
+ def __len__(self):
+ return self.c_count
+
+ def __getitem__(self, size_t index):
+ if index < self.c_count:
+ key = _slice_bytes(self.c_metadata[index].key)
+ value = _slice_bytes(self.c_metadata[index].value)
+ return Metadatum(key, value)
+ else:
+ raise IndexError()
+
+ def __iter__(self):
+ return _MetadataIterator(self.c_count, self)
+
+
+cdef class MetadataArray:
+ """Metadata being passed from core to application."""
+
+ def __cinit__(self):
with nogil:
- self.c_metadata_array.metadata = <grpc_metadata *>gpr_malloc(
- self.c_metadata_array.count*sizeof(grpc_metadata)
- )
- for i in range(self.c_metadata_array.count):
- (<Metadatum>metadata[i])._copy_metadatum(&self.c_metadata_array.metadata[i])
+ grpc_init()
+ grpc_metadata_array_init(&self.c_metadata_array)
def __dealloc__(self):
with nogil:
- # this frees the allocated memory for the grpc_metadata_array (although
- # it'd be nice if that were documented somewhere...)
- # TODO(atash): document this in the C core
grpc_metadata_array_destroy(&self.c_metadata_array)
grpc_shutdown()
@@ -493,21 +524,7 @@ cdef class Metadata:
return Metadatum(key=key, value=value)
def __iter__(self):
- return _MetadataIterator(self)
-
- cdef void _claim_slice_ownership(self):
- cdef grpc_metadata_array new_c_metadata_array
- grpc_metadata_array_init(&new_c_metadata_array)
- new_c_metadata_array.metadata = <grpc_metadata *>gpr_malloc(
- self.c_metadata_array.count*sizeof(grpc_metadata))
- new_c_metadata_array.count = self.c_metadata_array.count
- for i in range(self.c_metadata_array.count):
- new_c_metadata_array.metadata[i].key = _copy_slice(
- self.c_metadata_array.metadata[i].key)
- new_c_metadata_array.metadata[i].value = _copy_slice(
- self.c_metadata_array.metadata[i].value)
- grpc_metadata_array_destroy(&self.c_metadata_array)
- self.c_metadata_array = new_c_metadata_array
+ return _MetadataIterator(self.c_metadata_array.count, self)
cdef class Operation:
@@ -547,14 +564,13 @@ cdef class Operation:
if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and
self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT):
raise TypeError("self must be an operation receiving metadata")
- return self._received_metadata
-
- @property
- def received_metadata_or_none(self):
- if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and
- self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT):
- return None
- return self._received_metadata
+ # TODO(https://github.com/grpc/grpc/issues/7950): Drop the "all Cython
+ # objects must be legitimate for use from Python at any time" policy in
+ # place today, shift the policy toward "Operation objects are only usable
+ # while their calls are active", and move this making-a-copy-because-this-
+ # data-needs-to-live-much-longer-than-the-call-from-which-it-arose to the
+ # lowest Python layer.
+ return tuple(self._received_metadata)
@property
def received_status_code(self):
@@ -601,9 +617,8 @@ def operation_send_initial_metadata(Metadata metadata, int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
op.c_op.flags = flags
- op.c_op.data.send_initial_metadata.count = metadata.c_metadata_array.count
- op.c_op.data.send_initial_metadata.metadata = (
- metadata.c_metadata_array.metadata)
+ op.c_op.data.send_initial_metadata.count = metadata.c_count
+ op.c_op.data.send_initial_metadata.metadata = metadata.c_metadata
op.references.append(metadata)
op.is_valid = True
return op
@@ -631,9 +646,8 @@ def operation_send_status_from_server(
op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
op.c_op.flags = flags
op.c_op.data.send_status_from_server.trailing_metadata_count = (
- metadata.c_metadata_array.count)
- op.c_op.data.send_status_from_server.trailing_metadata = (
- metadata.c_metadata_array.metadata)
+ metadata.c_count)
+ op.c_op.data.send_status_from_server.trailing_metadata = metadata.c_metadata
op.c_op.data.send_status_from_server.status = code
grpc_slice_unref(op._status_details)
op._status_details = _slice_from_bytes(details)
@@ -646,7 +660,7 @@ def operation_receive_initial_metadata(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
op.c_op.flags = flags
- op._received_metadata = Metadata([])
+ op._received_metadata = MetadataArray()
op.c_op.data.receive_initial_metadata.receive_initial_metadata = (
&op._received_metadata.c_metadata_array)
op.is_valid = True
@@ -669,7 +683,7 @@ def operation_receive_status_on_client(int flags):
cdef Operation op = Operation()
op.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
op.c_op.flags = flags
- op._received_metadata = Metadata([])
+ op._received_metadata = MetadataArray()
op.c_op.data.receive_status_on_client.trailing_metadata = (
&op._received_metadata.c_metadata_array)
op.c_op.data.receive_status_on_client.status = (
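To make the split above easier to follow: Metadata now owns slice copies of application-supplied pairs (application to core), while MetadataArray wraps a grpc_metadata_array that core fills (core to application) and that is snapshotted into a plain tuple before leaving the Cython layer. A rough pure-Python sketch of that ownership model, an interpretation rather than the Cython code itself:
```
# Pure-Python sketch of the ownership split; names mirror the Cython classes
# but this is only an illustration of intent.
class MetadataSketch(object):
    """Application -> core: pairs are copied eagerly at construction."""

    def __init__(self, pairs):
        self._pairs = tuple((bytes(k), bytes(v)) for k, v in pairs)

    def __len__(self):
        return len(self._pairs)

    def __getitem__(self, index):
        return self._pairs[index]


class MetadataArraySketch(object):
    """Core -> application: a landing buffer filled during a batch."""

    def __init__(self):
        self._pairs = []  # core would write received pairs here

    def snapshot(self):
        # Copied into a plain tuple before being handed back to Python,
        # mirroring tuple(self._received_metadata) in the Operation property.
        return tuple(self._pairs)


send_side = MetadataSketch([(b'key', b'value')])
assert send_side[0] == (b'key', b'value')
```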
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index dd276fd57b..b8db27469f 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -44,7 +44,7 @@ cdef class Server:
cdef OperationTag operation_tag = OperationTag(tag)
operation_tag.operation_call = Call()
operation_tag.request_call_details = CallDetails()
- operation_tag.request_metadata = Metadata([])
+ operation_tag.request_metadata = MetadataArray()
operation_tag.references.extend([self, call_queue, server_queue])
operation_tag.is_new_request = True
operation_tag.batch_operations = Operations([])
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index 0299b4cca9..1f5e9c5130 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -34,7 +34,7 @@ CLASSIFIERS = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
-],
+]
PACKAGE_DIRECTORIES = {
'': '.',
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
index bed2311b59..9360550afb 100644
--- a/src/python/grpcio_reflection/setup.py
+++ b/src/python/grpcio_reflection/setup.py
@@ -35,7 +35,7 @@ CLASSIFIERS = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
-],
+]
PACKAGE_DIRECTORIES = {
'': '.',
diff --git a/src/python/grpcio_testing/grpc_version.py b/src/python/grpcio_testing/grpc_version.py
index 41a75d46f6..592d08efc3 100644
--- a/src/python/grpcio_testing/grpc_version.py
+++ b/src/python/grpcio_testing/grpc_version.py
@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
+# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!!
-VERSION = '1.5.0.dev0'
+VERSION = '1.7.0.dev0'
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
index 83f21ecbbb..424b153ff8 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
@@ -12,19 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import argparse
import contextlib
-import distutils.spawn
-import errno
-import itertools
+import importlib
import os
-import pkg_resources
+from os import path
+import pkgutil
import shutil
-import subprocess
import sys
import tempfile
import threading
-import time
import unittest
from six import moves
@@ -33,12 +29,22 @@ from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
+from grpc_tools import protoc
from tests.unit.framework.common import test_constants
-import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
-import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
-import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
-import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2
+_RELATIVE_PROTO_PATH = 'relative_proto_path'
+_RELATIVE_PYTHON_OUT = 'relative_python_out'
+
+_PROTO_FILES_PATH_COMPONENTS = (
+ ('beta_grpc_plugin_test', 'payload', 'test_payload.proto',),
+ ('beta_grpc_plugin_test', 'requests', 'r', 'test_requests.proto',),
+ ('beta_grpc_plugin_test', 'responses', 'test_responses.proto',),
+ ('beta_grpc_plugin_test', 'service', 'test_service.proto',),)
+
+_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
+_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
+_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2'
+_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2'
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
@@ -47,12 +53,50 @@ SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
+@contextlib.contextmanager
+def _system_path(path_insertion):
+ old_system_path = sys.path[:]
+ sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
+ yield
+ sys.path = old_system_path
+
+
+def _create_directory_tree(root, path_components_sequence):
+ created = set()
+ for path_components in path_components_sequence:
+ thus_far = ''
+ for path_component in path_components:
+ relative_path = path.join(thus_far, path_component)
+ if relative_path not in created:
+ os.makedirs(path.join(root, relative_path))
+ created.add(relative_path)
+ thus_far = path.join(thus_far, path_component)
+
+
+def _massage_proto_content(raw_proto_content):
+ imports_substituted = raw_proto_content.replace(
+ b'import "tests/protoc_plugin/protos/',
+ b'import "beta_grpc_plugin_test/')
+ package_statement_substituted = imports_substituted.replace(
+ b'package grpc_protoc_plugin;', b'package beta_grpc_protoc_plugin;')
+ return package_statement_substituted
+
+
+def _packagify(directory):
+ for subdirectory, _, _ in os.walk(directory):
+ init_file_name = path.join(subdirectory, '__init__.py')
+ with open(init_file_name, 'wb') as init_file:
+ init_file.write(b'')
+
+
class _ServicerMethods(object):
- def __init__(self):
+ def __init__(self, payload_pb2, responses_pb2):
self._condition = threading.Condition()
self._paused = False
self._fail = False
+ self._payload_pb2 = payload_pb2
+ self._responses_pb2 = responses_pb2
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
@@ -79,22 +123,22 @@ class _ServicerMethods(object):
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
- response = response_pb2.SimpleResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.SimpleResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
- response = response_pb2.StreamingInputCallResponse()
+ response = self._responses_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
@@ -105,8 +149,8 @@ class _ServicerMethods(object):
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
@@ -115,8 +159,8 @@ class _ServicerMethods(object):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
- response = response_pb2.StreamingOutputCallResponse()
- response.payload.payload_type = payload_pb2.COMPRESSABLE
+ response = self._responses_pb2.StreamingOutputCallResponse()
+ response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
@@ -125,7 +169,7 @@ class _ServicerMethods(object):
@contextlib.contextmanager
-def _CreateService():
+def _CreateService(payload_pb2, responses_pb2, service_pb2):
"""Provides a servicer backend and a stub.
The servicer is just the implementation of the actual servicer passed to the
@@ -136,7 +180,7 @@ def _CreateService():
the service bound to the stub and and stub is the stub on which to invoke
RPCs.
"""
- servicer_methods = _ServicerMethods()
+ servicer_methods = _ServicerMethods(payload_pb2, responses_pb2)
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
@@ -161,12 +205,12 @@ def _CreateService():
server.start()
channel = implementations.insecure_channel('localhost', port)
stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
- yield (servicer_methods, stub)
+ yield servicer_methods, stub,
server.stop(0)
@contextlib.contextmanager
-def _CreateIncompleteService():
+def _CreateIncompleteService(service_pb2):
"""Provides a servicer backend that fails to implement methods and its stub.
The servicer is just the implementation of the actual servicer passed to the
@@ -192,16 +236,16 @@ def _CreateIncompleteService():
server.stop(0)
-def _streaming_input_request_iterator():
+def _streaming_input_request_iterator(payload_pb2, requests_pb2):
for _ in range(3):
- request = request_pb2.StreamingInputCallRequest()
+ request = requests_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
-def _streaming_output_request():
- request = request_pb2.StreamingOutputCallRequest()
+def _streaming_output_request(requests_pb2):
+ request = requests_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
@@ -209,11 +253,11 @@ def _streaming_output_request():
return request
-def _full_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
+def _full_duplex_request_iterator(requests_pb2):
+ request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
- request = request_pb2.StreamingOutputCallRequest()
+ request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
@@ -227,22 +271,78 @@ class PythonPluginTest(unittest.TestCase):
methods and does not exist for response-streaming methods.
"""
+ def setUp(self):
+ self._directory = tempfile.mkdtemp(dir='.')
+ self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
+ self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
+
+ os.makedirs(self._proto_path)
+ os.makedirs(self._python_out)
+
+ directories_path_components = {
+ proto_file_path_components[:-1]
+ for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS
+ }
+ _create_directory_tree(self._proto_path, directories_path_components)
+ self._proto_file_names = set()
+ for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS:
+ raw_proto_content = pkgutil.get_data(
+ 'tests.protoc_plugin.protos',
+ path.join(*proto_file_path_components[1:]))
+ massaged_proto_content = _massage_proto_content(raw_proto_content)
+ proto_file_name = path.join(self._proto_path,
+ *proto_file_path_components)
+ with open(proto_file_name, 'wb') as proto_file:
+ proto_file.write(massaged_proto_content)
+ self._proto_file_names.add(proto_file_name)
+
+ def tearDown(self):
+ shutil.rmtree(self._directory)
+
+ def _protoc(self):
+ args = [
+ '',
+ '--proto_path={}'.format(self._proto_path),
+ '--python_out={}'.format(self._python_out),
+ '--grpc_python_out=grpc_1_0:{}'.format(self._python_out),
+ ] + list(self._proto_file_names)
+ protoc_exit_code = protoc.main(args)
+ self.assertEqual(0, protoc_exit_code)
+
+ _packagify(self._python_out)
+
+ with _system_path([
+ self._python_out,
+ ]):
+ self._payload_pb2 = importlib.import_module(_PAYLOAD_PB2)
+ self._requests_pb2 = importlib.import_module(_REQUESTS_PB2)
+ self._responses_pb2 = importlib.import_module(_RESPONSES_PB2)
+ self._service_pb2 = importlib.import_module(_SERVICE_PB2)
+
def testImportAttributes(self):
+ self._protoc()
+
# check that we can access the generated module and its members.
- self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None))
- self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(
- getattr(service_pb2, SERVER_FACTORY_IDENTIFIER, None))
+ getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
+ self.assertIsNotNone(getattr(self._service_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(
- getattr(service_pb2, STUB_FACTORY_IDENTIFIER, None))
+ getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None))
+ self.assertIsNotNone(
+ getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None))
def testUpDown(self):
- with _CreateService():
- request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2):
+ self._requests_pb2.SimpleRequest(response_size=13)
def testIncompleteServicer(self):
- with _CreateIncompleteService() as (_, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateIncompleteService(self._service_pb2) as (_, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
try:
stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
except face.AbortionError as error:
@@ -250,15 +350,21 @@ class PythonPluginTest(unittest.TestCase):
error.code)
def testUnaryCall(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
expected_response = methods.UnaryCall(request, 'not a real context!')
self.assertEqual(expected_response, response)
def testUnaryCallFuture(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with methods.pause():
response_future = stub.UnaryCall.future(
@@ -268,8 +374,11 @@ class PythonPluginTest(unittest.TestCase):
self.assertEqual(expected_response, response)
def testUnaryCallFutureExpired(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.pause():
response_future = stub.UnaryCall.future(
request, test_constants.SHORT_TIMEOUT)
@@ -277,24 +386,33 @@ class PythonPluginTest(unittest.TestCase):
response_future.result()
def testUnaryCallFutureCancelled(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.pause():
response_future = stub.UnaryCall.future(request, 1)
response_future.cancel()
self.assertTrue(response_future.cancelled())
def testUnaryCallFutureFailed(self):
- with _CreateService() as (methods, stub):
- request = request_pb2.SimpleRequest(response_size=13)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.fail():
response_future = stub.UnaryCall.future(
request, test_constants.LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testStreamingOutputCall(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
responses = stub.StreamingOutputCall(request,
test_constants.LONG_TIMEOUT)
expected_responses = methods.StreamingOutputCall(
@@ -304,8 +422,11 @@ class PythonPluginTest(unittest.TestCase):
self.assertEqual(expected_response, response)
def testStreamingOutputCallExpired(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
with methods.pause():
responses = stub.StreamingOutputCall(
request, test_constants.SHORT_TIMEOUT)
@@ -313,8 +434,11 @@ class PythonPluginTest(unittest.TestCase):
list(responses)
def testStreamingOutputCallCancelled(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
responses = stub.StreamingOutputCall(request,
test_constants.LONG_TIMEOUT)
next(responses)
@@ -323,8 +447,11 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testStreamingOutputCallFailed(self):
- with _CreateService() as (methods, stub):
- request = _streaming_output_request()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request = _streaming_output_request(self._requests_pb2)
with methods.fail():
responses = stub.StreamingOutputCall(request, 1)
self.assertIsNotNone(responses)
@@ -332,30 +459,46 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testStreamingInputCall(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
response = stub.StreamingInputCall(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
expected_response = methods.StreamingInputCall(
- _streaming_input_request_iterator(), 'not a real RpcContext!')
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
+ 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFuture(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
response = response_future.result()
expected_response = methods.StreamingInputCall(
- _streaming_input_request_iterator(), 'not a real RpcContext!')
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
+ 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFutureExpired(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
response_future.result()
@@ -363,10 +506,14 @@ class PythonPluginTest(unittest.TestCase):
face.ExpirationError)
def testStreamingInputCallFutureCancelled(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
response_future.cancel()
self.assertTrue(response_future.cancelled())
@@ -374,26 +521,38 @@ class PythonPluginTest(unittest.TestCase):
response_future.result()
def testStreamingInputCallFutureFailed(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.fail():
response_future = stub.StreamingInputCall.future(
- _streaming_input_request_iterator(),
+ _streaming_input_request_iterator(self._payload_pb2,
+ self._requests_pb2),
test_constants.LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testFullDuplexCall(self):
- with _CreateService() as (methods, stub):
- responses = stub.FullDuplexCall(_full_duplex_request_iterator(),
- test_constants.LONG_TIMEOUT)
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ responses = stub.FullDuplexCall(
+ _full_duplex_request_iterator(self._requests_pb2),
+ test_constants.LONG_TIMEOUT)
expected_responses = methods.FullDuplexCall(
- _full_duplex_request_iterator(), 'not a real RpcContext!')
+ _full_duplex_request_iterator(self._requests_pb2),
+ 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testFullDuplexCallExpired(self):
- request_iterator = _full_duplex_request_iterator()
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ request_iterator = _full_duplex_request_iterator(self._requests_pb2)
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.pause():
responses = stub.FullDuplexCall(request_iterator,
test_constants.SHORT_TIMEOUT)
@@ -401,8 +560,11 @@ class PythonPluginTest(unittest.TestCase):
list(responses)
def testFullDuplexCallCancelled(self):
- with _CreateService() as (methods, stub):
- request_iterator = _full_duplex_request_iterator()
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
+ request_iterator = _full_duplex_request_iterator(self._requests_pb2)
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
next(responses)
@@ -411,8 +573,11 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testFullDuplexCallFailed(self):
- request_iterator = _full_duplex_request_iterator()
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ request_iterator = _full_duplex_request_iterator(self._requests_pb2)
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with methods.fail():
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
@@ -421,13 +586,16 @@ class PythonPluginTest(unittest.TestCase):
next(responses)
def testHalfDuplexCall(self):
- with _CreateService() as (methods, stub):
+ self._protoc()
+
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
+ request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
- request = request_pb2.StreamingOutputCallRequest()
+ request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
@@ -441,6 +609,8 @@ class PythonPluginTest(unittest.TestCase):
self.assertEqual(expected_response, response)
def testHalfDuplexCallWedged(self):
+ self._protoc()
+
condition = threading.Condition()
wait_cell = [False]
@@ -455,14 +625,15 @@ class PythonPluginTest(unittest.TestCase):
condition.notify_all()
def half_duplex_request_iterator():
- request = request_pb2.StreamingOutputCallRequest()
+ request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
- with _CreateService() as (methods, stub):
+ with _CreateService(self._payload_pb2, self._responses_pb2,
+ self._service_pb2) as (methods, stub):
with wait():
responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
test_constants.SHORT_TIMEOUT)
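The reworked test above compiles its .proto files at runtime and imports the generated modules dynamically instead of depending on pre-generated code. A minimal sketch of that pattern with a hypothetical hello.proto (assumes the grpcio-tools package is installed; the paths and module names are made up):
```
# Sketch of the runtime-protoc pattern used by the test; hello.proto,
# 'protos' and 'generated' are hypothetical.
import importlib
import sys

from grpc_tools import protoc

PROTO_PATH = 'protos'
PYTHON_OUT = 'generated'

exit_code = protoc.main([
    '',  # placeholder for argv[0]
    '--proto_path={}'.format(PROTO_PATH),
    '--python_out={}'.format(PYTHON_OUT),
    '--grpc_python_out={}'.format(PYTHON_OUT),
    '{}/hello.proto'.format(PROTO_PATH),
])
assert exit_code == 0

sys.path.insert(0, PYTHON_OUT)
hello_pb2 = importlib.import_module('hello_pb2')
```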
diff --git a/src/ruby/lib/grpc/google_rpc_status_utils.rb b/src/ruby/lib/grpc/google_rpc_status_utils.rb
new file mode 100644
index 0000000000..fdadd6b76e
--- /dev/null
+++ b/src/ruby/lib/grpc/google_rpc_status_utils.rb
@@ -0,0 +1,28 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require_relative './grpc'
+require 'google/rpc/status_pb'
+
+# GRPC contains the General RPC module.
+module GRPC
+ # GoogleRpcStatusUtils provides utilities to convert between a
+ # GRPC::Core::Status and a deserialized Google::Rpc::Status proto
+ class GoogleRpcStatusUtils
+ def self.extract_google_rpc_status(status)
+ fail ArgumentError, 'bad type' unless status.is_a? Struct::Status
+ Google::Rpc::Status.decode(status.metadata['grpc-status-details-bin'])
+ end
+ end
+end
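The utility above reads the richer google.rpc.Status proto out of the reserved grpc-status-details-bin trailer. The same convention sketched from the Python client side, purely to illustrate the wire-level idea (assumes googleapis-common-protos for status_pb2; not part of this Ruby change):
```
# Illustrative Python counterpart: decode google.rpc.Status from the
# grpc-status-details-bin trailer of a failed call.
from google.rpc import status_pb2

def rich_status_from_call(call):
    """call is a grpc.Call (e.g. the exception raised by a failed unary RPC)."""
    for key, value in call.trailing_metadata() or ():
        if key == 'grpc-status-details-bin':
            return status_pb2.Status.FromString(value)
    return None
```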
diff --git a/src/ruby/spec/google_rpc_status_utils_spec.rb b/src/ruby/spec/google_rpc_status_utils_spec.rb
new file mode 100644
index 0000000000..fe221c30dd
--- /dev/null
+++ b/src/ruby/spec/google_rpc_status_utils_spec.rb
@@ -0,0 +1,223 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'grpc'
+require_relative '../lib/grpc/google_rpc_status_utils'
+require_relative '../pb/src/proto/grpc/testing/messages_pb'
+require 'google/protobuf/well_known_types'
+
+include GRPC::Core
+
+describe 'conversion from a status struct to a google protobuf status' do
+ it 'fails if the input is not a status struct' do
+ begin
+ GRPC::GoogleRpcStatusUtils.extract_google_rpc_status('string')
+ rescue => e
+ exception = e
+ end
+ expect(exception.is_a?(ArgumentError)).to be true
+ expect(exception.message.include?('bad type')).to be true
+ end
+
+ it 'fails with some error if the header key is missing' do
+ status = Struct::Status.new(1, 'details', key: 'val')
+ expect(status.metadata.nil?).to be false
+ expect do
+ GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ end.to raise_error(StandardError)
+ end
+
+ it 'fails with some error if the header key fails to deserialize' do
+ status = Struct::Status.new(1, 'details',
+ 'grpc-status-details-bin' => 'string_val')
+ expect do
+ GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ end.to raise_error(StandardError)
+ end
+
+ it 'silently ignores erroneous mismatch between messages in '\
+ 'status struct and protobuf status' do
+ proto = Google::Rpc::Status.new(code: 1, message: 'proto message')
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(1, 'struct message',
+ 'grpc-status-details-bin' => encoded_proto)
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(rpc_status).to eq(proto)
+ end
+
+ it 'silently ignores erroneous mismatch between codes in status struct '\
+ 'and protobuf status' do
+ proto = Google::Rpc::Status.new(code: 1, message: 'matching message')
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(2, 'matching message',
+ 'grpc-status-details-bin' => encoded_proto)
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(rpc_status).to eq(proto)
+ end
+
+ it 'can successfully convert a status struct into a google protobuf status '\
+ 'when there are no rpcstatus details' do
+ proto = Google::Rpc::Status.new(code: 1, message: 'matching message')
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(1, 'matching message',
+ 'grpc-status-details-bin' => encoded_proto)
+ out = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(out.code).to eq(1)
+ expect(out.message).to eq('matching message')
+ expect(out.details).to eq([])
+ end
+
+ it 'can successfully convert a status struct into a google protobuf '\
+ 'status when there are multiple rpcstatus details' do
+ simple_request_any = Google::Protobuf::Any.new
+ simple_request = Grpc::Testing::SimpleRequest.new(
+ payload: Grpc::Testing::Payload.new(body: 'request'))
+ simple_request_any.pack(simple_request)
+ simple_response_any = Google::Protobuf::Any.new
+ simple_response = Grpc::Testing::SimpleResponse.new(
+ payload: Grpc::Testing::Payload.new(body: 'response'))
+ simple_response_any.pack(simple_response)
+ payload_any = Google::Protobuf::Any.new
+ payload = Grpc::Testing::Payload.new(body: 'payload')
+ payload_any.pack(payload)
+ proto = Google::Rpc::Status.new(code: 1,
+ message: 'matching message',
+ details: [
+ simple_request_any,
+ simple_response_any,
+ payload_any
+ ])
+ encoded_proto = Google::Rpc::Status.encode(proto)
+ status = Struct::Status.new(1, 'matching message',
+ 'grpc-status-details-bin' => encoded_proto)
+ out = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status)
+ expect(out.code).to eq(1)
+ expect(out.message).to eq('matching message')
+ expect(out.details[0].unpack(
+ Grpc::Testing::SimpleRequest)).to eq(simple_request)
+ expect(out.details[1].unpack(
+ Grpc::Testing::SimpleResponse)).to eq(simple_response)
+ expect(out.details[2].unpack(
+ Grpc::Testing::Payload)).to eq(payload)
+ end
+end
+
+# Test message
+class EchoMsg
+ def self.marshal(_o)
+ ''
+ end
+
+ def self.unmarshal(_o)
+ EchoMsg.new
+ end
+end
+
+# A test service that fills in the "reserved" grpc-status-details-bin trailer,
+# for client-side testing of GoogleRpcStatus protobuf extraction from trailers.
+class GoogleRpcStatusTestService
+ include GRPC::GenericService
+ rpc :an_rpc, EchoMsg, EchoMsg
+
+ def initialize(encoded_rpc_status)
+ @encoded_rpc_status = encoded_rpc_status
+ end
+
+ def an_rpc(_, _)
+ # TODO: create a server-side utility API for sending a google rpc status.
+ # Applications are not expected to set the grpc-status-details-bin
+ # trailer manually (the "grpc-" prefix is reserved for library use).
+ # Doing so here is only for testing the client-side API for extracting
+ # a google rpc status, which is useful when interacting with a server
+ # that does fill in this trailer.
+ fail GRPC::Unknown.new('test message',
+ 'grpc-status-details-bin' => @encoded_rpc_status)
+ end
+end
+
+GoogleRpcStatusTestStub = GoogleRpcStatusTestService.rpc_stub_class
+
+describe 'receiving a google rpc status from a remote endpoint' do
+ def start_server(encoded_rpc_status)
+ @srv = GRPC::RpcServer.new(pool_size: 1)
+ @server_port = @srv.add_http2_port('localhost:0',
+ :this_port_is_insecure)
+ @srv.handle(GoogleRpcStatusTestService.new(encoded_rpc_status))
+ @server_thd = Thread.new { @srv.run }
+ @srv.wait_till_running
+ end
+
+ def stop_server
+ expect(@srv.stopped?).to be(false)
+ @srv.stop
+ @server_thd.join
+ expect(@srv.stopped?).to be(true)
+ end
+
+ before(:each) do
+ simple_request_any = Google::Protobuf::Any.new
+ simple_request = Grpc::Testing::SimpleRequest.new(
+ payload: Grpc::Testing::Payload.new(body: 'request'))
+ simple_request_any.pack(simple_request)
+ simple_response_any = Google::Protobuf::Any.new
+ simple_response = Grpc::Testing::SimpleResponse.new(
+ payload: Grpc::Testing::Payload.new(body: 'response'))
+ simple_response_any.pack(simple_response)
+ payload_any = Google::Protobuf::Any.new
+ payload = Grpc::Testing::Payload.new(body: 'payload')
+ payload_any.pack(payload)
+ @expected_proto = Google::Rpc::Status.new(
+ code: StatusCodes::UNKNOWN,
+ message: 'test message',
+ details: [simple_request_any, simple_response_any, payload_any])
+ start_server(Google::Rpc::Status.encode(@expected_proto))
+ end
+
+ after(:each) do
+ stop_server
+ end
+
+ it 'should be able to extract a google rpc status from the '\
+ 'status struct taken from a BadStatus exception' do
+ stub = GoogleRpcStatusTestStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ begin
+ stub.an_rpc(EchoMsg.new)
+ rescue GRPC::BadStatus => e
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ e.to_status)
+ end
+ expect(rpc_status).to eq(@expected_proto)
+ end
+
+ it 'should be able to extract a google rpc status from the '\
+ 'status struct taken from the op view of a call' do
+ stub = GoogleRpcStatusTestStub.new("localhost:#{@server_port}",
+ :this_channel_is_insecure)
+ op = stub.an_rpc(EchoMsg.new, return_op: true)
+ begin
+ op.execute
+ rescue GRPC::BadStatus => e
+ status_from_exception = e.to_status
+ end
+ rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ op.status)
+ expect(rpc_status).to eq(@expected_proto)
+ # "to_status" on the bad status should give the same result
+ # as "status" on the "op view".
+ expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(
+ status_from_exception)).to eq(rpc_status)
+ end
+end
diff --git a/templates/config.m4.template b/templates/config.m4.template
index f91893c2bd..cd93fbd0fb 100644
--- a/templates/config.m4.template
+++ b/templates/config.m4.template
@@ -14,7 +14,7 @@
LIBS="-lpthread $LIBS"
CFLAGS="-Wall -Werror -Wno-parentheses-equality -Wno-unused-value -std=c11"
- CXXFLAGS="-std=c++11"
+ CXXFLAGS="-std=c++11 -fno-exceptions -fno-rtti"
GRPC_SHARED_LIBADD="-lpthread $GRPC_SHARED_LIBADD"
PHP_REQUIRE_CXX()
PHP_ADD_LIBRARY(pthread)
diff --git a/templates/grpc.gemspec.template b/templates/grpc.gemspec.template
index e62e5b2721..215d5f9df9 100644
--- a/templates/grpc.gemspec.template
+++ b/templates/grpc.gemspec.template
@@ -31,6 +31,7 @@
s.add_dependency 'google-protobuf', '~> 3.1'
s.add_dependency 'googleauth', '~> 0.5.1'
+ s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0'
s.add_development_dependency 'bundler', '~> 1.9'
s.add_development_dependency 'facter', '~> 2.4'
diff --git a/templates/package.xml.template b/templates/package.xml.template
index 15da704a47..f10f75b8c0 100644
--- a/templates/package.xml.template
+++ b/templates/package.xml.template
@@ -12,7 +12,7 @@
<email>grpc-packages@google.com</email>
<active>yes</active>
</lead>
- <date>2017-05-22</date>
+ <date>2017-08-24</date>
<time>16:06:07</time>
<version>
<release>${settings.php_version.php()}</release>
@@ -27,6 +27,9 @@
- Channel are now by default persistent #11878
- Some bug fixes from 1.4 branch #12109, #12123
- Fixed hang bug when fork() was used #11814
+ - License changed to Apache 2.0
+ - Added support for php_namespace option in codegen plugin #11886
+ - Updated gRPC C Core library to version 1.6
</notes>
<contents>
<dir baseinstalldir="/" name="/">
diff --git a/templates/src/python/grpcio_testing/grpc_version.py.template b/templates/src/python/grpcio_testing/grpc_version.py.template
new file mode 100644
index 0000000000..74db811d60
--- /dev/null
+++ b/templates/src/python/grpcio_testing/grpc_version.py.template
@@ -0,0 +1,19 @@
+%YAML 1.2
+--- |
+ # Copyright 2017 gRPC authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!!
+
+ VERSION='${settings.python_version.pep440()}'
diff --git a/test/core/client_channel/lb_policies_test.c b/test/core/client_channel/lb_policies_test.c
index f70a9fc880..ba37cd673f 100644
--- a/test/core/client_channel/lb_policies_test.c
+++ b/test/core/client_channel/lb_policies_test.c
@@ -519,7 +519,7 @@ static grpc_channel *create_client(const servers_fixture *f) {
arg_array[1].key = GRPC_ARG_LB_POLICY_NAME;
arg_array[1].value.string = "ROUND_ROBIN";
arg_array[2].type = GRPC_ARG_INTEGER;
- arg_array[2].key = GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS;
+ arg_array[2].key = GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS;
arg_array[2].value.integer = 0;
args.num_args = GPR_ARRAY_SIZE(arg_array);
args.args = arg_array;
diff --git a/test/core/end2end/tests/bad_ping.c b/test/core/end2end/tests/bad_ping.c
index 12aceda688..c97d11b306 100644
--- a/test/core/end2end/tests/bad_ping.c
+++ b/test/core/end2end/tests/bad_ping.c
@@ -66,18 +66,19 @@ static void end_test(grpc_end2end_test_fixture *f) {
static void test_bad_ping(grpc_end2end_test_config config) {
grpc_end2end_test_fixture f = config.create_fixture(NULL, NULL);
cq_verifier *cqv = cq_verifier_create(f.cq);
- grpc_arg client_a[] = {{.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS,
- .value.integer = 0},
- {.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA,
- .value.integer = 20},
- {.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_BDP_PROBE,
- .value.integer = 0}};
+ grpc_arg client_a[] = {
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS,
+ .value.integer = 10},
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA,
+ .value.integer = 0},
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_HTTP2_BDP_PROBE,
+ .value.integer = 0}};
grpc_arg server_a[] = {
{.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS,
+ .key = GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS,
.value.integer = 300000 /* 5 minutes */},
{.type = GRPC_ARG_INTEGER,
.key = GRPC_ARG_HTTP2_MAX_PING_STRIKES,
diff --git a/test/core/end2end/tests/keepalive_timeout.c b/test/core/end2end/tests/keepalive_timeout.c
index e0ead4ab62..8d01f23c00 100644
--- a/test/core/end2end/tests/keepalive_timeout.c
+++ b/test/core/end2end/tests/keepalive_timeout.c
@@ -106,13 +106,13 @@ static void test_keepalive_timeout(grpc_end2end_test_config config) {
.value.integer = 0},
{.type = GRPC_ARG_INTEGER,
.key = GRPC_ARG_HTTP2_BDP_PROBE,
- .value.integer = 1}};
+ .value.integer = 0}};
- grpc_channel_args *client_args = NULL;
- client_args = grpc_channel_args_copy_and_add(client_args, keepalive_args, 2);
+ grpc_channel_args client_args = {.num_args = GPR_ARRAY_SIZE(keepalive_args),
+ .args = keepalive_args};
grpc_end2end_test_fixture f =
- begin_test(config, "keepalive_timeout", client_args, NULL);
+ begin_test(config, "keepalive_timeout", &client_args, NULL);
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
@@ -216,12 +216,6 @@ static void test_keepalive_timeout(grpc_end2end_test_config config) {
grpc_byte_buffer_destroy(response_payload);
grpc_byte_buffer_destroy(response_payload_recv);
- if (client_args != NULL) {
- grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
- grpc_channel_args_destroy(&exec_ctx, client_args);
- grpc_exec_ctx_finish(&exec_ctx);
- }
-
end_test(&f);
config.tear_down_data(&f);
}
diff --git a/test/core/end2end/tests/ping.c b/test/core/end2end/tests/ping.c
index 112ad9d7d2..23c82569ba 100644
--- a/test/core/end2end/tests/ping.c
+++ b/test/core/end2end/tests/ping.c
@@ -37,15 +37,19 @@ static void test_ping(grpc_end2end_test_config config,
grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
int i;
- grpc_arg client_a[] = {{.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_MIN_TIME_BETWEEN_PINGS_MS,
- .value.integer = 0},
- {.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA,
- .value.integer = 20}};
+ grpc_arg client_a[] = {
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS,
+ .value.integer = 10},
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA,
+ .value.integer = 0},
+ {.type = GRPC_ARG_INTEGER,
+ .key = GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS,
+ .value.integer = 1}};
grpc_arg server_a[] = {
{.type = GRPC_ARG_INTEGER,
- .key = GRPC_ARG_HTTP2_MIN_PING_INTERVAL_WITHOUT_DATA_MS,
+ .key = GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS,
.value.integer = 0},
{.type = GRPC_ARG_INTEGER,
.key = GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS,
diff --git a/test/core/tsi/ssl_transport_security_test.c b/test/core/tsi/ssl_transport_security_test.c
index 364dfa1b73..2399b054b1 100644
--- a/test/core/tsi/ssl_transport_security_test.c
+++ b/test/core/tsi/ssl_transport_security_test.c
@@ -23,7 +23,9 @@
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/security/transport/security_connector.h"
#include "src/core/tsi/ssl_transport_security.h"
+#include "src/core/tsi/transport_security.h"
#include "src/core/tsi/transport_security_adapter.h"
+#include "src/core/tsi/transport_security_interface.h"
#include "test/core/tsi/transport_security_test_lib.h"
#include "test/core/util/test_config.h"
@@ -312,10 +314,10 @@ static void ssl_test_destruct(tsi_test_fixture *fixture) {
key_cert_lib->bad_client_pem_key_cert_pair);
gpr_free(key_cert_lib->root_cert);
gpr_free(key_cert_lib);
- /* Destroy others. */
- tsi_ssl_server_handshaker_factory_destroy(
+ /* Unreference others. */
+ tsi_ssl_server_handshaker_factory_unref(
ssl_fixture->server_handshaker_factory);
- tsi_ssl_client_handshaker_factory_destroy(
+ tsi_ssl_client_handshaker_factory_unref(
ssl_fixture->client_handshaker_factory);
}
@@ -536,6 +538,118 @@ void ssl_tsi_test_do_round_trip_odd_buffer_size() {
}
}
+static const tsi_ssl_handshaker_factory_vtable *original_vtable;
+static bool handshaker_factory_destructor_called;
+
+static void ssl_tsi_test_handshaker_factory_destructor(
+ tsi_ssl_handshaker_factory *factory) {
+ GPR_ASSERT(factory != NULL);
+ handshaker_factory_destructor_called = true;
+ if (original_vtable != NULL && original_vtable->destroy != NULL) {
+ original_vtable->destroy(factory);
+ }
+}
+
+static tsi_ssl_handshaker_factory_vtable test_handshaker_factory_vtable = {
+ ssl_tsi_test_handshaker_factory_destructor};
+
+void test_tsi_ssl_client_handshaker_factory_refcounting() {
+ int i;
+ const char *cert_chain =
+ load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "client.pem");
+
+ tsi_ssl_client_handshaker_factory *client_handshaker_factory;
+ GPR_ASSERT(tsi_create_ssl_client_handshaker_factory(
+ NULL, cert_chain, NULL, NULL, 0, &client_handshaker_factory) ==
+ TSI_OK);
+
+ handshaker_factory_destructor_called = false;
+ original_vtable = tsi_ssl_handshaker_factory_swap_vtable(
+ (tsi_ssl_handshaker_factory *)client_handshaker_factory,
+ &test_handshaker_factory_vtable);
+
+ tsi_handshaker *handshaker[3];
+
+ for (i = 0; i < 3; ++i) {
+ GPR_ASSERT(tsi_ssl_client_handshaker_factory_create_handshaker(
+ client_handshaker_factory, "google.com", &handshaker[i]) ==
+ TSI_OK);
+ }
+
+ tsi_handshaker_destroy(handshaker[1]);
+ GPR_ASSERT(!handshaker_factory_destructor_called);
+
+ tsi_handshaker_destroy(handshaker[0]);
+ GPR_ASSERT(!handshaker_factory_destructor_called);
+
+ tsi_ssl_client_handshaker_factory_unref(client_handshaker_factory);
+ GPR_ASSERT(!handshaker_factory_destructor_called);
+
+ tsi_handshaker_destroy(handshaker[2]);
+ GPR_ASSERT(handshaker_factory_destructor_called);
+
+ gpr_free((void *)cert_chain);
+}
+
+void test_tsi_ssl_server_handshaker_factory_refcounting() {
+ int i;
+ tsi_ssl_server_handshaker_factory *server_handshaker_factory;
+ tsi_handshaker *handshaker[3];
+ const char *cert_chain =
+ load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server0.pem");
+ tsi_ssl_pem_key_cert_pair cert_pair;
+
+ cert_pair.cert_chain = cert_chain;
+ cert_pair.private_key =
+ load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server0.key");
+
+ GPR_ASSERT(tsi_create_ssl_server_handshaker_factory(
+ &cert_pair, 1, cert_chain, 0, NULL, NULL, 0,
+ &server_handshaker_factory) == TSI_OK);
+
+ handshaker_factory_destructor_called = false;
+ original_vtable = tsi_ssl_handshaker_factory_swap_vtable(
+ (tsi_ssl_handshaker_factory *)server_handshaker_factory,
+ &test_handshaker_factory_vtable);
+
+ for (i = 0; i < 3; ++i) {
+ GPR_ASSERT(tsi_ssl_server_handshaker_factory_create_handshaker(
+ server_handshaker_factory, &handshaker[i]) == TSI_OK);
+ }
+
+ tsi_handshaker_destroy(handshaker[1]);
+ GPR_ASSERT(!handshaker_factory_destructor_called);
+
+ tsi_handshaker_destroy(handshaker[0]);
+ GPR_ASSERT(!handshaker_factory_destructor_called);
+
+ tsi_ssl_server_handshaker_factory_unref(server_handshaker_factory);
+ GPR_ASSERT(!handshaker_factory_destructor_called);
+
+ tsi_handshaker_destroy(handshaker[2]);
+ GPR_ASSERT(handshaker_factory_destructor_called);
+
+ ssl_test_pem_key_cert_pair_destroy(cert_pair);
+}
+
+/* Attempting to create a handshaker factory with invalid parameters should fail
+ * but not crash. */
+void test_tsi_ssl_client_handshaker_factory_bad_params() {
+ const char *cert_chain = "This is not a valid PEM file.";
+
+ tsi_ssl_client_handshaker_factory *client_handshaker_factory;
+ GPR_ASSERT(tsi_create_ssl_client_handshaker_factory(
+ NULL, cert_chain, NULL, NULL, 0, &client_handshaker_factory) ==
+ TSI_INVALID_ARGUMENT);
+ tsi_ssl_client_handshaker_factory_unref(client_handshaker_factory);
+}
+
+void ssl_tsi_test_handshaker_factory_internals() {
+ test_tsi_ssl_client_handshaker_factory_refcounting();
+ test_tsi_ssl_server_handshaker_factory_refcounting();
+ test_tsi_ssl_client_handshaker_factory_bad_params();
+}
+
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
@@ -553,6 +667,7 @@ int main(int argc, char **argv) {
ssl_tsi_test_do_handshake_alpn_client_server_ok();
ssl_tsi_test_do_round_trip_for_all_configs();
ssl_tsi_test_do_round_trip_odd_buffer_size();
+ ssl_tsi_test_handshaker_factory_internals();
grpc_shutdown();
return 0;
}
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
index 5c0329bff0..e0e9226211 100644
--- a/tools/distrib/python/grpcio_tools/setup.py
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -47,7 +47,7 @@ CLASSIFIERS = [
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License',
-],
+]
PY3 = sys.version_info.major == 3
diff --git a/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile b/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile
index d13eecaa55..02ec4c278a 100644
--- a/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile
+++ b/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile
@@ -14,18 +14,15 @@
FROM debian:jessie
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
+RUN echo "deb http://download.mono-project.com/repo/debian jessie main" | tee /etc/apt/sources.list.d/mono-official.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
RUN apt-get update && apt-get install -y \
mono-devel \
ca-certificates-mono \
- nuget
+ nuget \
+ && apt-get clean
-# make sure we have nuget 2.12+ (in case there's an older cached docker image)
-RUN apt-get update && apt-get install -y nuget
-
-RUN apt-get update && apt-get install -y unzip
+RUN apt-get update && apt-get install -y unzip && apt-get clean
diff --git a/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile b/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile
index 71845b590b..758f314572 100644
--- a/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile
+++ b/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile
@@ -14,18 +14,15 @@
FROM 32bit/debian:jessie
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
+RUN echo "deb http://download.mono-project.com/repo/debian jessie main" | tee /etc/apt/sources.list.d/mono-official.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
RUN apt-get update && apt-get install -y \
mono-devel \
ca-certificates-mono \
- nuget
+ nuget \
+ && apt-get clean
-# make sure we have nuget 2.12+ (in case there's an older cached docker image)
-RUN apt-get update && apt-get install -y nuget
-
-RUN apt-get update && apt-get install -y unzip
+RUN apt-get update && apt-get install -y unzip && apt-get clean
diff --git a/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile b/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile
index 6604caa42c..0f40f18e38 100644
--- a/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile
+++ b/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile
@@ -17,11 +17,16 @@ FROM ubuntu:16.04
RUN apt-get update && apt-get install -y \
mono-devel \
ca-certificates-mono \
- nuget
+ nuget \
+ && apt-get clean
# make sure we have nuget 2.12+ (in case there's an older cached docker image)
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
-RUN apt-get update && apt-get install -y nuget
+RUN apt-get update && apt-get install -y nuget && apt-get clean
-RUN apt-get update && apt-get install -y unzip
+# Prevent "Error: SendFailure (Error writing headers)" when fetching nuget packages
+# See https://github.com/tianon/docker-brew-ubuntu-core/issues/86
+RUN apt-get update && apt-get install -y tzdata && apt-get clean
+
+RUN apt-get update && apt-get install -y unzip && apt-get clean
diff --git a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile
deleted file mode 100644
index 4ccfbc43c3..0000000000
--- a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM debian:jessie
-
-# Install Git and basic packages.
-RUN apt-get update && apt-get install -y \
- autoconf \
- autotools-dev \
- build-essential \
- bzip2 \
- ccache \
- curl \
- gcc \
- gcc-multilib \
- git \
- golang \
- gyp \
- lcov \
- libc6 \
- libc6-dbg \
- libc6-dev \
- libgtest-dev \
- libtool \
- make \
- perl \
- strace \
- python-dev \
- python-setuptools \
- python-yaml \
- telnet \
- unzip \
- wget \
- zip && apt-get clean
-
-#================
-# Build profiling
-RUN apt-get update && apt-get install -y time && apt-get clean
-
-#====================
-# Python dependencies
-
-# Install dependencies
-
-RUN apt-get update && apt-get install -y \
- python-all-dev \
- python3-all-dev \
- python-pip
-
-# Install Python packages from PyPI
-RUN pip install pip --upgrade
-RUN pip install virtualenv
-RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
-
-#================
-# C# dependencies
-
-# Update to a newer version of mono
-RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
-RUN echo "deb http://download.mono-project.com/repo/debian jessie main" | tee /etc/apt/sources.list.d/mono-official.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
-
-# Install dependencies
-RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \
- mono-devel \
- ca-certificates-mono \
- nuget \
- && apt-get clean
-
-RUN nuget update -self
-
-# Install dotnet SDK based on https://www.microsoft.com/net/core#debian
-RUN apt-get update && apt-get install -y curl libunwind8 gettext
-# dotnet-dev-1.0.0-preview2-003131
-RUN curl -sSL -o dotnet100.tar.gz https://go.microsoft.com/fwlink/?LinkID=827530
-RUN mkdir -p /opt/dotnet && tar zxf dotnet100.tar.gz -C /opt/dotnet
-# dotnet-dev-1.0.1
-RUN curl -sSL -o dotnet101.tar.gz https://go.microsoft.com/fwlink/?LinkID=843453
-RUN mkdir -p /opt/dotnet && tar zxf dotnet101.tar.gz -C /opt/dotnet
-RUN ln -s /opt/dotnet/dotnet /usr/local/bin
-
-# Trigger the population of the local package cache
-ENV NUGET_XMLDOC_MODE skip
-RUN mkdir warmup \
- && cd warmup \
- && dotnet new \
- && cd .. \
- && rm -rf warmup
-
-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
-
-
-RUN mkdir /var/local/jenkins
-
-# Define the default command.
-CMD ["bash"]
diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_c_dbg.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_c_dbg.cfg
new file mode 100644
index 0000000000..577cb28ae5
--- /dev/null
+++ b/tools/internal_ci/linux/pull_request/grpc_basictests_c_dbg.cfg
@@ -0,0 +1,30 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Config file for the internal CI (in protobuf text format)
+
+# Location of the continuous shell script in repository.
+build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
+timeout_mins: 240
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ regex: "github/grpc/reports/**"
+ }
+}
+
+env_vars {
+ key: "RUN_TESTS_FLAGS"
+ value: "-f basictests linux c dbg --inner_jobs 16 -j 1 --internal_ci --max_time=3600"
+}
diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_c_opt.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_c_opt.cfg
new file mode 100644
index 0000000000..9e0b724b2e
--- /dev/null
+++ b/tools/internal_ci/linux/pull_request/grpc_basictests_c_opt.cfg
@@ -0,0 +1,30 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Config file for the internal CI (in protobuf text format)
+
+# Location of the continuous shell script in repository.
+build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
+timeout_mins: 240
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ regex: "github/grpc/reports/**"
+ }
+}
+
+env_vars {
+ key: "RUN_TESTS_FLAGS"
+ value: "-f basictests linux c opt --inner_jobs 16 -j 1 --internal_ci --max_time=3600"
+}
diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_dbg.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_dbg.cfg
new file mode 100644
index 0000000000..0fda74cf44
--- /dev/null
+++ b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_dbg.cfg
@@ -0,0 +1,30 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Config file for the internal CI (in protobuf text format)
+
+# Location of the continuous shell script in repository.
+build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
+timeout_mins: 240
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ regex: "github/grpc/reports/**"
+ }
+}
+
+env_vars {
+ key: "RUN_TESTS_FLAGS"
+ value: "-f basictests linux c++ dbg --inner_jobs 16 -j 1 --internal_ci --max_time=3600"
+}
diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_opt.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_opt.cfg
new file mode 100644
index 0000000000..199a8905d9
--- /dev/null
+++ b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_opt.cfg
@@ -0,0 +1,30 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Config file for the internal CI (in protobuf text format)
+
+# Location of the continuous shell script in repository.
+build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
+timeout_mins: 240
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ regex: "github/grpc/reports/**"
+ }
+}
+
+env_vars {
+ key: "RUN_TESTS_FLAGS"
+ value: "-f basictests linux c++ opt --inner_jobs 16 -j 1 --internal_ci --max_time=3600"
+}
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index 12263282ae..2cc0dfceab 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -158,6 +158,7 @@ class PythonArtifact:
return create_jobspec(self.name,
['tools/run_tests/artifacts/build_artifact_python.sh'],
environ=environ,
+ timeout_seconds=60*60,
use_workspace=True)
def __str__(self):
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index fb1be383cd..797ed51c7f 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -105,7 +105,9 @@ class CSharpDistribTest(object):
use_workspace=True)
elif self.platform == 'windows':
if self.arch == 'x64':
- environ={'MSBUILD_EXTRA_ARGS': '/p:Platform=x64',
+ # Use double leading / as the first occurrence gets removed by msys bash
+ # when invoking the .bat file (side-effect of posix path conversion)
+ environ={'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
else:
environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
diff --git a/tools/run_tests/artifacts/package_targets.py b/tools/run_tests/artifacts/package_targets.py
index 0da13864f0..671d0f7b45 100644
--- a/tools/run_tests/artifacts/package_targets.py
+++ b/tools/run_tests/artifacts/package_targets.py
@@ -78,7 +78,7 @@ class CSharpPackage:
if self.linux:
return create_docker_jobspec(
self.name,
- 'tools/dockerfile/test/csharp_coreclr_x64',
+ 'tools/dockerfile/test/csharp_jessie_x64',
'src/csharp/build_packages_dotnetcli.sh')
else:
return create_jobspec(self.name,