355 files changed, 6641 insertions, 3005 deletions
diff --git a/.gitignore b/.gitignore index 3cc35ff7cd..1610bd40cd 100644 --- a/.gitignore +++ b/.gitignore @@ -96,7 +96,7 @@ DerivedData Pods/ # Artifacts directory -artifacts/ +/artifacts/ # Git generated files for conflicting *.orig diff --git a/.gitmodules b/.gitmodules index c32881cb95..04d155cfb4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -17,6 +17,6 @@ [submodule "third_party/thrift"] path = third_party/thrift url = https://github.com/apache/thrift.git -[submodule "third_party/google_benchmark"] - path = third_party/google_benchmark +[submodule "third_party/benchmark"] + path = third_party/benchmark url = https://github.com/google/benchmark @@ -1260,9 +1260,9 @@ pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc ifeq ($(EMBED_OPENSSL),true) -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a 
$(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBDIR)/$(CONFIG)/libbenchmark.a else -privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libbenchmark.a endif @@ -6998,43 +6998,43 @@ ifneq ($(NO_DEPS),true) endif -LIBGOOGLE_BENCHMARK_SRC = \ - third_party/google_benchmark/src/benchmark.cc \ - third_party/google_benchmark/src/benchmark_register.cc \ - third_party/google_benchmark/src/colorprint.cc \ - third_party/google_benchmark/src/commandlineflags.cc \ - third_party/google_benchmark/src/complexity.cc \ - third_party/google_benchmark/src/console_reporter.cc \ - third_party/google_benchmark/src/csv_reporter.cc \ - third_party/google_benchmark/src/json_reporter.cc \ - third_party/google_benchmark/src/reporter.cc \ - third_party/google_benchmark/src/sleep.cc \ - third_party/google_benchmark/src/string_util.cc \ - third_party/google_benchmark/src/sysinfo.cc \ - third_party/google_benchmark/src/timers.cc \ +LIBBENCHMARK_SRC = \ + third_party/benchmark/src/benchmark.cc \ + third_party/benchmark/src/benchmark_register.cc \ + third_party/benchmark/src/colorprint.cc \ + third_party/benchmark/src/commandlineflags.cc \ + third_party/benchmark/src/complexity.cc \ + third_party/benchmark/src/console_reporter.cc \ + third_party/benchmark/src/csv_reporter.cc \ + third_party/benchmark/src/json_reporter.cc \ + third_party/benchmark/src/reporter.cc \ + third_party/benchmark/src/sleep.cc \ + third_party/benchmark/src/string_util.cc \ + third_party/benchmark/src/sysinfo.cc \ + third_party/benchmark/src/timers.cc \ PUBLIC_HEADERS_CXX += \ -LIBGOOGLE_BENCHMARK_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, 
$(addsuffix .o, $(basename $(LIBGOOGLE_BENCHMARK_SRC)))) +LIBBENCHMARK_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBBENCHMARK_SRC)))) -$(LIBGOOGLE_BENCHMARK_OBJS): CPPFLAGS += -Ithird_party/google_benchmark/include -DHAVE_POSIX_REGEX +$(LIBBENCHMARK_OBJS): CPPFLAGS += -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX ifeq ($(NO_PROTOBUF),true) # You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay. -$(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a: protobuf_dep_error +$(LIBDIR)/$(CONFIG)/libbenchmark.a: protobuf_dep_error else -$(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBGOOGLE_BENCHMARK_OBJS) +$(LIBDIR)/$(CONFIG)/libbenchmark.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBBENCHMARK_OBJS) $(E) "[AR] Creating $@" $(Q) mkdir -p `dirname $@` - $(Q) rm -f $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a - $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBGOOGLE_BENCHMARK_OBJS) + $(Q) rm -f $(LIBDIR)/$(CONFIG)/libbenchmark.a + $(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBBENCHMARK_OBJS) ifeq ($(SYSTEM),Darwin) - $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a + $(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libbenchmark.a endif @@ -7043,7 +7043,7 @@ endif endif ifneq ($(NO_DEPS),true) --include $(LIBGOOGLE_BENCHMARK_OBJS:.o=.dep) +-include $(LIBBENCHMARK_OBJS:.o=.dep) endif @@ -11736,16 +11736,16 @@ $(BINDIR)/$(CONFIG)/bm_fullstack: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/bm_fullstack: $(PROTOBUF_DEP) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(BINDIR)/$(CONFIG)/bm_fullstack: $(PROTOBUF_DEP) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack + $(Q) $(LDXX) $(LDFLAGS) $(BM_FULLSTACK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/bm_fullstack endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack.o: $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/bm_fullstack.o: $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a 
$(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a deps_bm_fullstack: $(BM_FULLSTACK_OBJS:.o=.dep) @@ -13192,16 +13192,16 @@ $(BINDIR)/$(CONFIG)/noop-benchmark: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/noop-benchmark: $(PROTOBUF_DEP) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +$(BINDIR)/$(CONFIG)/noop-benchmark: $(PROTOBUF_DEP) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/noop-benchmark + $(Q) $(LDXX) $(LDFLAGS) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libbenchmark.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/noop-benchmark endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/noop-benchmark.o: $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a +$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/noop-benchmark.o: $(LIBDIR)/$(CONFIG)/libbenchmark.a deps_noop-benchmark: $(NOOP-BENCHMARK_OBJS:.o=.dep) diff --git a/build.yaml b/build.yaml index 68d19a6b44..de9d253ef1 100644 --- a/build.yaml +++ b/build.yaml @@ -2850,7 +2850,7 @@ targets: src: - test/cpp/microbenchmarks/bm_fullstack.cc deps: - - google_benchmark + - benchmark - grpc++_test_util - grpc_test_util - grpc++ @@ -3300,7 +3300,7 @@ targets: src: - test/cpp/microbenchmarks/noop-benchmark.cc deps: - - google_benchmark + - benchmark - name: proto_server_reflection_test gtest: true build: test @@ -3786,6 +3786,8 @@ configs: UBSAN_OPTIONS: halt_on_error=1:print_stacktrace=1 timeout_multiplier: 1.5 defaults: + benchmark: + CPPFLAGS: -Ithird_party/benchmark/include -DHAVE_POSIX_REGEX boringssl: CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-unknown-pragmas -Wno-implicit-function-declaration -Wno-unused-variable -Wno-sign-compare $(NO_W_EXTRA_SEMI) @@ -3794,8 +3796,6 @@ defaults: global: CPPFLAGS: -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter LDFLAGS: -g - google_benchmark: - CPPFLAGS: -Ithird_party/google_benchmark/include -DHAVE_POSIX_REGEX zlib: CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration $(W_NO_SHIFT_NEGATIVE_VALUE) -fvisibility=hidden diff --git a/doc/connectivity-semantics-and-api.md b/doc/connectivity-semantics-and-api.md index cc007eaae3..6d39619d65 100644 --- a/doc/connectivity-semantics-and-api.md +++ b/doc/connectivity-semantics-and-api.md @@ -16,7 +16,7 @@ reconnect, or in the case of HTTP/2 GO_AWAY, re-resolve the name and reconnect. To hide the details of all this activity from the user of the gRPC API (i.e., application code) while exposing meaningful information about the state of a -channel, we use a state machine with four states, defined below: +channel, we use a state machine with five states, defined below: CONNECTING: The channel is trying to establish a connection and is waiting to make progress on one of the steps involved in name resolution, TCP connection @@ -116,7 +116,7 @@ Channel State API All gRPC libraries will expose a channel-level API method to poll the current state of a channel. In C++, this method is called GetCurrentState and returns -an enum for one of the four legal states. +an enum for one of the five legal states. 
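For a concrete view of the five legal states mentioned above, grpcio exposes the same state machine through grpc.ChannelConnectivity and grpc.Channel.subscribe; a minimal Python sketch (assuming grpcio is installed and that localhost:50051 may or may not have a live server) could look like:

```python
# Illustrative sketch only: prints connectivity transitions for one channel.
# The five legal states are the members of grpc.ChannelConnectivity:
# IDLE, CONNECTING, READY, TRANSIENT_FAILURE, SHUTDOWN.
import time
import grpc

channel = grpc.insecure_channel('localhost:50051')

def on_state_change(state):
    print('channel state:', state)

# subscribe() registers a callback that fires on every connectivity change;
# try_to_connect=True nudges the channel out of IDLE so transitions are visible.
channel.subscribe(on_state_change, try_to_connect=True)
time.sleep(5)  # give the channel time to connect (or fail) and report states
```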
All libraries should also expose an API that enables the application (user of the gRPC API) to be notified when the channel state changes. Since state diff --git a/doc/negative-http2-interop-test-descriptions.md b/doc/negative-http2-interop-test-descriptions.md new file mode 100644 index 0000000000..5ea3a96dff --- /dev/null +++ b/doc/negative-http2-interop-test-descriptions.md @@ -0,0 +1,194 @@ +Negative HTTP/2 Interop Test Case Descriptions +======================================= + +Client and server use +[test.proto](../src/proto/grpc/testing/test.proto). + +Server +------ +The code for the custom http2 server can be found +[here](https://github.com/grpc/grpc/tree/master/test/http2_test). +It is responsible for handling requests and sending responses, and also for +fulfilling the behavior of each particular test case. + +Server should accept these arguments: +* --port=PORT + * The port the server will run on. For example, "8080" +* --test_case=TESTCASE + * The name of the test case to execute. For example, "goaway" + +Client +------ + +Clients implement test cases that test certain functionality. Each client is +provided the test case it is expected to run as a command-line parameter. Names +should be lowercase and without spaces. + +Clients should accept these arguments: +* --server_host=HOSTNAME + * The server host to connect to. For example, "localhost" or "127.0.0.1" +* --server_port=PORT + * The server port to connect to. For example, "8080" +* --test_case=TESTCASE + * The name of the test case to execute. For example, "goaway" + +Note +----- + +Note that the server and client must be invoked with the same test case or else +the test will be meaningless. For convenience, we provide a shell script wrapper +that invokes both server and client at the same time, with the same test_case. +This is the preferred way to run these tests. + +## Test Cases + +### goaway + +This test verifies that the client correctly responds to a goaway sent by the +server. The client should handle the goaway by switching to a new stream without +the user application having to do a thing. + +Client Procedure: + 1. Client sends two UnaryCall requests with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was successful. +* Response payload body is 314159 bytes in size. + +Server Procedure: + 1. Server sends a GOAWAY after receiving the first UnaryCall. + +Server asserts: +* The second UnaryCall has a different stream_id than the first one. + +### rst_after_header + +This test verifies that the client fails correctly when the server sends a +RST_STREAM immediately after sending headers to the client. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending headers to the client. + +*At the moment the error code and message returned are not standardized throughout all +languages. Those checks will be added once all client languages behave the same way. [#9142](https://github.com/grpc/grpc/issues/9142) is in flight.* + +### rst_during_data + +This test verifies that the client fails "correctly" when the server sends a +RST_STREAM halfway through sending data to the client. + +Procedure: + 1. 
Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending half of + the requested data to the client. + +### rst_after_data + +This test verifies that the client fails "correctly" when the server sends a +RST_STREAM after sending all of the data to the client. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* Call was not successful. + +Server Procedure: + 1. Server sends a RST_STREAM with error code 0 after sending all of the + data to the client. + +*Certain client languages allow the data to be accessed even though a RST_STREAM +was encountered. Once all client languages behave this way, checks will be added on +the incoming data.* + +### ping + +This test verifies that the client correctly acknowledges all pings it gets from the +server. + +Procedure: + 1. Client sends UnaryCall with: + + ``` + { + response_size: 314159 + payload:{ + body: 271828 bytes of zeros + } + } + ``` + +Client asserts: +* call was successful. +* response payload body is 314159 bytes in size. + +Server Procedure: + 1. Server tracks the number of outstanding pings (i.e. +1 when it sends a ping, and -1 + when it receives an ack from the client). + 2. Server sends pings before and after sending headers, also before and after sending data. + +Server Asserts: +* Number of outstanding pings is 0 when the connection is lost. + +### max_streams + +This test verifies that the client observes the MAX_CONCURRENT_STREAMS limit set by the server. + +Client Procedure: + 1. Client sends initial UnaryCall to allow the server to update its MAX_CONCURRENT_STREAMS settings. + 2. Client concurrently sends 10 UnaryCalls. + +Client Asserts: +* All UnaryCalls were successful, and had the correct type and payload size. + +Server Procedure: + 1. Sets MAX_CONCURRENT_STREAMS to one after the connection is made. + +*The assertion that the MAX_CONCURRENT_STREAMS limit is upheld occurs in the http2 library we used.* diff --git a/examples/cpp/helloworld/greeter_client.cc b/examples/cpp/helloworld/greeter_client.cc index 12209f37df..61f3953056 100644 --- a/examples/cpp/helloworld/greeter_client.cc +++ b/examples/cpp/helloworld/greeter_client.cc @@ -51,7 +51,7 @@ class GreeterClient { GreeterClient(std::shared_ptr<Channel> channel) : stub_(Greeter::NewStub(channel)) {} - // Assambles the client's payload, sends it and presents the response back + // Assembles the client's payload, sends it and presents the response back // from the server. std::string SayHello(const std::string& user) { // Data we are sending to the server. 
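Looping back to the negative HTTP/2 interop doc added above: the --port and --test_case flags it describes could be wired up as in the following hypothetical sketch (the real server under test/http2_test defines its own flag handling; names here are illustrative only):

```python
# Hypothetical flag handling for the interop test server described above;
# the actual server under test/http2_test has its own implementation.
import argparse

parser = argparse.ArgumentParser(description='Negative HTTP/2 interop test server')
parser.add_argument('--port', type=int, default=8080,
                    help='The port the server will run on, e.g. 8080')
parser.add_argument('--test_case', default='goaway',
                    help='The test case to execute, e.g. "goaway" or "rst_after_header"')
args = parser.parse_args()
print('Running test case %r on port %d' % (args.test_case, args.port))
```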
diff --git a/examples/python/helloworld/greeter_client.py b/examples/python/helloworld/greeter_client.py index 44d42c102b..281a68f3c3 100644 --- a/examples/python/helloworld/greeter_client.py +++ b/examples/python/helloworld/greeter_client.py @@ -34,11 +34,12 @@ from __future__ import print_function import grpc import helloworld_pb2 +import helloworld_pb2_grpc def run(): channel = grpc.insecure_channel('localhost:50051') - stub = helloworld_pb2.GreeterStub(channel) + stub = helloworld_pb2_grpc.GreeterStub(channel) response = stub.SayHello(helloworld_pb2.HelloRequest(name='you')) print("Greeter client received: " + response.message) diff --git a/examples/python/helloworld/greeter_server.py b/examples/python/helloworld/greeter_server.py index 37d8bd49cc..0afc21d243 100644 --- a/examples/python/helloworld/greeter_server.py +++ b/examples/python/helloworld/greeter_server.py @@ -35,11 +35,12 @@ import time import grpc import helloworld_pb2 +import helloworld_pb2_grpc _ONE_DAY_IN_SECONDS = 60 * 60 * 24 -class Greeter(helloworld_pb2.GreeterServicer): +class Greeter(helloworld_pb2_grpc.GreeterServicer): def SayHello(self, request, context): return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name) @@ -47,7 +48,7 @@ class Greeter(helloworld_pb2.GreeterServicer): def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - helloworld_pb2.add_GreeterServicer_to_server(Greeter(), server) + helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) server.add_insecure_port('[::]:50051') server.start() try: diff --git a/examples/python/helloworld/helloworld_pb2.py b/examples/python/helloworld/helloworld_pb2.py index 3ce33fbf2b..6665b1f687 100644 --- a/examples/python/helloworld/helloworld_pb2.py +++ b/examples/python/helloworld/helloworld_pb2.py @@ -107,98 +107,123 @@ _sym_db.RegisterMessage(HelloReply) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class GreeterStub(object): + """The greeting service definition. + """ + def __init__(self, channel): + """Constructor. -class GreeterStub(object): - """The greeting service definition. - """ + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=HelloRequest.SerializeToString, + response_deserializer=HelloReply.FromString, + ) - def __init__(self, channel): - """Constructor. - Args: - channel: A grpc.Channel. + class GreeterServicer(object): + """The greeting service definition. """ - self.SayHello = channel.unary_unary( - '/helloworld.Greeter/SayHello', - request_serializer=HelloRequest.SerializeToString, - response_deserializer=HelloReply.FromString, - ) - - -class GreeterServicer(object): - """The greeting service definition. 
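The greeter_client.py and greeter_server.py hunks above move the generated stub and servicer registration into the new *_pb2_grpc module. A minimal sketch of the updated client flow (assuming helloworld_pb2 / helloworld_pb2_grpc have been generated with grpcio-tools and a Greeter server is listening on localhost:50051):

```python
# Minimal client sketch matching the change above: the stub class now comes from
# helloworld_pb2_grpc, while request/response messages stay in helloworld_pb2.
import grpc
import helloworld_pb2
import helloworld_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
stub = helloworld_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
print('Greeter client received: ' + response.message)
```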
- """ - def SayHello(self, request, context): - """Sends a greeting + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=HelloRequest.FromString, + response_serializer=HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaGreeterServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_GreeterServicer_to_server(servicer, server): - rpc_method_handlers = { - 'SayHello': grpc.unary_unary_rpc_method_handler( - servicer.SayHello, - request_deserializer=HelloRequest.FromString, - response_serializer=HelloReply.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'helloworld.Greeter', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaGreeterServicer(object): - """The greeting service definition. - """ - def SayHello(self, request, context): - """Sends a greeting - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SayHello(self, request, context): + """Sends a greeting + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + class BetaGreeterStub(object): + """The Beta API is deprecated for 0.15.0 and later. -class BetaGreeterStub(object): - """The greeting service definition. - """ - def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Sends a greeting + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. 
""" - raise NotImplementedError() - SayHello.future = None - - -def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, - } - response_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, - } - method_implementations = { - ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, - } - response_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, - } - cardinalities = { - 'SayHello': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) + def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Sends a greeting + """ + raise NotImplementedError() + SayHello.future = None + + + def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, + } + response_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, + } + method_implementations = { + ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, + } + response_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, + } + cardinalities = { + 'SayHello': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/helloworld/helloworld_pb2_grpc.py b/examples/python/helloworld/helloworld_pb2_grpc.py new file mode 100644 index 0000000000..682dc36cd8 --- /dev/null +++ b/examples/python/helloworld/helloworld_pb2_grpc.py @@ -0,0 +1,47 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import helloworld_pb2 as helloworld__pb2 + + +class GreeterStub(object): + """The greeting service definition. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=helloworld__pb2.HelloRequest.SerializeToString, + response_deserializer=helloworld__pb2.HelloReply.FromString, + ) + + +class GreeterServicer(object): + """The greeting service definition. + """ + + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=helloworld__pb2.HelloRequest.FromString, + response_serializer=helloworld__pb2.HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/multiplex/helloworld_pb2.py b/examples/python/multiplex/helloworld_pb2.py index 3ce33fbf2b..6665b1f687 100644 --- a/examples/python/multiplex/helloworld_pb2.py +++ b/examples/python/multiplex/helloworld_pb2.py @@ -107,98 +107,123 @@ _sym_db.RegisterMessage(HelloReply) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class GreeterStub(object): + """The greeting service definition. 
+ """ + def __init__(self, channel): + """Constructor. -class GreeterStub(object): - """The greeting service definition. - """ + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=HelloRequest.SerializeToString, + response_deserializer=HelloReply.FromString, + ) - def __init__(self, channel): - """Constructor. - Args: - channel: A grpc.Channel. + class GreeterServicer(object): + """The greeting service definition. """ - self.SayHello = channel.unary_unary( - '/helloworld.Greeter/SayHello', - request_serializer=HelloRequest.SerializeToString, - response_deserializer=HelloReply.FromString, - ) - - -class GreeterServicer(object): - """The greeting service definition. - """ - def SayHello(self, request, context): - """Sends a greeting + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=HelloRequest.FromString, + response_serializer=HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaGreeterServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_GreeterServicer_to_server(servicer, server): - rpc_method_handlers = { - 'SayHello': grpc.unary_unary_rpc_method_handler( - servicer.SayHello, - request_deserializer=HelloRequest.FromString, - response_serializer=HelloReply.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'helloworld.Greeter', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaGreeterServicer(object): - """The greeting service definition. - """ - def SayHello(self, request, context): - """Sends a greeting - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SayHello(self, request, context): + """Sends a greeting + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + class BetaGreeterStub(object): + """The Beta API is deprecated for 0.15.0 and later. -class BetaGreeterStub(object): - """The greeting service definition. - """ - def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Sends a greeting + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """The greeting service definition. 
""" - raise NotImplementedError() - SayHello.future = None - - -def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, - } - response_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, - } - method_implementations = { - ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, - } - response_deserializers = { - ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, - } - cardinalities = { - 'SayHello': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) + def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Sends a greeting + """ + raise NotImplementedError() + SayHello.future = None + + + def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString, + } + response_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString, + } + method_implementations = { + ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString, + } + response_deserializers = { + ('helloworld.Greeter', 'SayHello'): HelloReply.FromString, + } + cardinalities = { + 'SayHello': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/multiplex/helloworld_pb2_grpc.py b/examples/python/multiplex/helloworld_pb2_grpc.py new file mode 100644 index 0000000000..682dc36cd8 --- /dev/null +++ b/examples/python/multiplex/helloworld_pb2_grpc.py @@ -0,0 +1,47 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import helloworld_pb2 as helloworld__pb2 + + +class GreeterStub(object): + """The greeting service definition. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SayHello = channel.unary_unary( + '/helloworld.Greeter/SayHello', + request_serializer=helloworld__pb2.HelloRequest.SerializeToString, + response_deserializer=helloworld__pb2.HelloReply.FromString, + ) + + +class GreeterServicer(object): + """The greeting service definition. + """ + + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GreeterServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=helloworld__pb2.HelloRequest.FromString, + response_serializer=helloworld__pb2.HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Greeter', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/multiplex/multiplex_client.py b/examples/python/multiplex/multiplex_client.py index 2e8162926b..b2d2021e02 100644 --- a/examples/python/multiplex/multiplex_client.py +++ b/examples/python/multiplex/multiplex_client.py @@ -37,7 +37,9 @@ import time import grpc import helloworld_pb2 +import helloworld_pb2_grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources @@ -120,8 +122,8 @@ def guide_route_chat(route_guide_stub): def run(): channel = grpc.insecure_channel('localhost:50051') - greeter_stub = helloworld_pb2.GreeterStub(channel) - route_guide_stub = route_guide_pb2.RouteGuideStub(channel) + greeter_stub = helloworld_pb2_grpc.GreeterStub(channel) + route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel) greeter_response = greeter_stub.SayHello( helloworld_pb2.HelloRequest(name='you')) print("Greeter client received: " + greeter_response.message) diff --git a/examples/python/multiplex/multiplex_server.py b/examples/python/multiplex/multiplex_server.py index 32a4ee4a49..b8b32e7bf8 100644 --- a/examples/python/multiplex/multiplex_server.py +++ b/examples/python/multiplex/multiplex_server.py @@ -36,7 +36,9 @@ import math import 
grpc import helloworld_pb2 +import helloworld_pb2_grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources _ONE_DAY_IN_SECONDS = 60 * 60 * 24 @@ -70,13 +72,13 @@ def _get_distance(start, end): return R * c; -class _GreeterServicer(helloworld_pb2.GreeterServicer): +class _GreeterServicer(helloworld_pb2_grpc.GreeterServicer): def SayHello(self, request, context): return helloworld_pb2.HelloReply(message='Hello, {}!'.format(request.name)) -class _RouteGuideServicer(route_guide_pb2.RouteGuideServicer): +class _RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer): """Provides methods that implement functionality of route guide server.""" def __init__(self): @@ -133,8 +135,8 @@ class _RouteGuideServicer(route_guide_pb2.RouteGuideServicer): def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - helloworld_pb2.add_GreeterServicer_to_server(_GreeterServicer(), server) - route_guide_pb2.add_RouteGuideServicer_to_server( + helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(), server) + route_guide_pb2_grpc.add_RouteGuideServicer_to_server( _RouteGuideServicer(), server) server.add_insecure_port('[::]:50051') server.start() diff --git a/examples/python/multiplex/route_guide_pb2.py b/examples/python/multiplex/route_guide_pb2.py index 924e186e06..e6775eb814 100644 --- a/examples/python/multiplex/route_guide_pb2.py +++ b/examples/python/multiplex/route_guide_pb2.py @@ -277,240 +277,265 @@ _sym_db.RegisterMessage(RouteSummary) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class RouteGuideStub(object): - """Interface exported by the server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class RouteGuideStub(object): + """Interface exported by the server. """ - self.GetFeature = channel.unary_unary( - '/routeguide.RouteGuide/GetFeature', - request_serializer=Point.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.ListFeatures = channel.unary_stream( - '/routeguide.RouteGuide/ListFeatures', - request_serializer=Rectangle.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.RecordRoute = channel.stream_unary( - '/routeguide.RouteGuide/RecordRoute', - request_serializer=Point.SerializeToString, - response_deserializer=RouteSummary.FromString, - ) - self.RouteChat = channel.stream_stream( - '/routeguide.RouteGuide/RouteChat', - request_serializer=RouteNote.SerializeToString, - response_deserializer=RouteNote.FromString, - ) - - -class RouteGuideServicer(object): - """Interface exported by the server. - """ - - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. 
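The multiplex_client.py and multiplex_server.py hunks above apply the same *_pb2_grpc split to a client that shares one channel between two services. A minimal sketch of that pattern (assuming the generated helloworld and route_guide modules are importable and a combined server is listening on localhost:50051):

```python
# Sketch of multiplexing two services over a single channel, mirroring the
# multiplex_client.py change above; stubs come from the *_pb2_grpc modules.
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
import route_guide_pb2
import route_guide_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
greeter_stub = helloworld_pb2_grpc.GreeterStub(channel)
route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel)

print(greeter_stub.SayHello(helloworld_pb2.HelloRequest(name='you')).message)
feature = route_guide_stub.GetFeature(route_guide_pb2.Point(latitude=0, longitude=0))
print(feature.name)
```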
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=Point.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=Rectangle.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=Point.SerializeToString, + response_deserializer=RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=RouteNote.SerializeToString, + response_deserializer=RouteNote.FromString, + ) + + + class RouteGuideServicer(object): + """Interface exported by the server. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=Point.FromString, + response_serializer=Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=Rectangle.FromString, + response_serializer=Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=Point.FromString, + response_serializer=RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=RouteNote.FromString, + response_serializer=RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaRouteGuideServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_RouteGuideServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetFeature': grpc.unary_unary_rpc_method_handler( - servicer.GetFeature, - request_deserializer=Point.FromString, - response_serializer=Feature.SerializeToString, - ), - 'ListFeatures': grpc.unary_stream_rpc_method_handler( - servicer.ListFeatures, - request_deserializer=Rectangle.FromString, - response_serializer=Feature.SerializeToString, - ), - 'RecordRoute': grpc.stream_unary_rpc_method_handler( - servicer.RecordRoute, - request_deserializer=Point.FromString, - response_serializer=RouteSummary.SerializeToString, - ), - 'RouteChat': grpc.stream_stream_rpc_method_handler( - servicer.RouteChat, - request_deserializer=RouteNote.FromString, - response_serializer=RouteNote.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'routeguide.RouteGuide', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaRouteGuideServicer(object): - """Interface exported by the server. - """ - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. 
in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaRouteGuideStub(object): - """Interface exported by the server. - """ - def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - raise NotImplementedError() - GetFeature.future = None - def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - raise NotImplementedError() - def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A client-to-server streaming RPC. - - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - raise NotImplementedError() - RecordRoute.future = None - def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + + class BetaRouteGuideStub(object): + """The Beta API is deprecated for 0.15.0 and later. 
+ + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. """ - raise NotImplementedError() - - -def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - response_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - method_implementations = { - ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), - ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), - ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), - ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - response_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - cardinalities = { - 'GetFeature': cardinality.Cardinality.UNARY_UNARY, - 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, - 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, - 'RouteChat': cardinality.Cardinality.STREAM_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) + def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. 
+ """ + raise NotImplementedError() + GetFeature.future = None + def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + raise NotImplementedError() + def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + raise NotImplementedError() + RecordRoute.future = None + def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + raise NotImplementedError() + + + def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + response_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + method_implementations = { + ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), + ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), + ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), + ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + response_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + cardinalities = { + 'GetFeature': cardinality.Cardinality.UNARY_UNARY, + 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, + 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, + 'RouteChat': cardinality.Cardinality.STREAM_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/multiplex/route_guide_pb2_grpc.py b/examples/python/multiplex/route_guide_pb2_grpc.py new file mode 100644 index 0000000000..27b24c747d --- /dev/null +++ b/examples/python/multiplex/route_guide_pb2_grpc.py @@ -0,0 +1,114 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import route_guide_pb2 as route__guide__pb2 + + +class RouteGuideStub(object): + """Interface exported by the server. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=route__guide__pb2.Rectangle.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=route__guide__pb2.RouteNote.SerializeToString, + response_deserializer=route__guide__pb2.RouteNote.FromString, + ) + + +class RouteGuideServicer(object): + """Interface exported by the server. + """ + + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. 
in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=route__guide__pb2.Rectangle.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=route__guide__pb2.RouteNote.FromString, + response_serializer=route__guide__pb2.RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/multiplex/run_codegen.py b/examples/python/multiplex/run_codegen.py index 7922a0f5c7..89ac9c8fae 100755..100644 --- a/examples/python/multiplex/run_codegen.py +++ b/examples/python/multiplex/run_codegen.py @@ -29,7 +29,7 @@ """Generates protocol messages and gRPC stubs.""" -from grpc.tools import protoc +from grpc_tools import protoc protoc.main( ( diff --git a/examples/python/route_guide/route_guide_client.py b/examples/python/route_guide/route_guide_client.py index 8a80ed892d..d2955231c3 100644 --- a/examples/python/route_guide/route_guide_client.py +++ b/examples/python/route_guide/route_guide_client.py @@ -37,6 +37,7 @@ import time import grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources @@ -116,7 +117,7 @@ def guide_route_chat(stub): def run(): channel = grpc.insecure_channel('localhost:50051') - stub = route_guide_pb2.RouteGuideStub(channel) + stub = route_guide_pb2_grpc.RouteGuideStub(channel) print("-------------- GetFeature --------------") guide_get_feature(stub) print("-------------- ListFeatures --------------") diff --git a/examples/python/route_guide/route_guide_pb2.py b/examples/python/route_guide/route_guide_pb2.py index 924e186e06..e6775eb814 100644 --- a/examples/python/route_guide/route_guide_pb2.py +++ b/examples/python/route_guide/route_guide_pb2.py @@ -277,240 +277,265 @@ 
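(Illustrative sketch, not part of the patch: the new *_pb2_grpc modules above are produced by the grpc_tools package rather than the old grpc.tools import path, and the example clients now build their stubs from route_guide_pb2_grpc instead of route_guide_pb2. A minimal client-side usage sketch follows, assuming the generated names shown in this diff; the include path and proto filename are placeholders.)

# Regenerate the message and stub modules, mirroring run_codegen.py above.
from grpc_tools import protoc

protoc.main((
    '',
    '-I.',                    # placeholder include path
    '--python_out=.',         # writes route_guide_pb2.py
    '--grpc_python_out=.',    # writes route_guide_pb2_grpc.py
    './route_guide.proto',    # placeholder proto path
))

# Client side: the stub now comes from the *_pb2_grpc module.
import grpc
import route_guide_pb2
import route_guide_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
stub = route_guide_pb2_grpc.RouteGuideStub(channel)
feature = stub.GetFeature(route_guide_pb2.Point(latitude=0, longitude=0))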
_sym_db.RegisterMessage(RouteSummary) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class RouteGuideStub(object): - """Interface exported by the server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + + + class RouteGuideStub(object): + """Interface exported by the server. """ - self.GetFeature = channel.unary_unary( - '/routeguide.RouteGuide/GetFeature', - request_serializer=Point.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.ListFeatures = channel.unary_stream( - '/routeguide.RouteGuide/ListFeatures', - request_serializer=Rectangle.SerializeToString, - response_deserializer=Feature.FromString, - ) - self.RecordRoute = channel.stream_unary( - '/routeguide.RouteGuide/RecordRoute', - request_serializer=Point.SerializeToString, - response_deserializer=RouteSummary.FromString, - ) - self.RouteChat = channel.stream_stream( - '/routeguide.RouteGuide/RouteChat', - request_serializer=RouteNote.SerializeToString, - response_deserializer=RouteNote.FromString, - ) - - -class RouteGuideServicer(object): - """Interface exported by the server. - """ - - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=Point.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=Rectangle.SerializeToString, + response_deserializer=Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=Point.SerializeToString, + response_deserializer=RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=RouteNote.SerializeToString, + response_deserializer=RouteNote.FromString, + ) + + + class RouteGuideServicer(object): + """Interface exported by the server. 
""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=Point.FromString, + response_serializer=Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=Rectangle.FromString, + response_serializer=Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=Point.FromString, + response_serializer=RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=RouteNote.FromString, + response_serializer=RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaRouteGuideServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. 
- - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_RouteGuideServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetFeature': grpc.unary_unary_rpc_method_handler( - servicer.GetFeature, - request_deserializer=Point.FromString, - response_serializer=Feature.SerializeToString, - ), - 'ListFeatures': grpc.unary_stream_rpc_method_handler( - servicer.ListFeatures, - request_deserializer=Rectangle.FromString, - response_serializer=Feature.SerializeToString, - ), - 'RecordRoute': grpc.stream_unary_rpc_method_handler( - servicer.RecordRoute, - request_deserializer=Point.FromString, - response_serializer=RouteSummary.SerializeToString, - ), - 'RouteChat': grpc.stream_stream_rpc_method_handler( - servicer.RouteChat, - request_deserializer=RouteNote.FromString, - response_serializer=RouteNote.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'routeguide.RouteGuide', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaRouteGuideServicer(object): - """Interface exported by the server. - """ - def GetFeature(self, request, context): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListFeatures(self, request, context): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RecordRoute(self, request_iterator, context): - """A client-to-server streaming RPC. - - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def RouteChat(self, request_iterator, context): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaRouteGuideStub(object): - """Interface exported by the server. - """ - def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A simple RPC. - - Obtains the feature at a given position. - - A feature with an empty name is returned if there's no feature at the given - position. - """ - raise NotImplementedError() - GetFeature.future = None - def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """A server-to-client streaming RPC. - - Obtains the Features available within the given Rectangle. Results are - streamed rather than returned at once (e.g. in a response message with a - repeated field), as the rectangle may cover a large area and contain a - huge number of features. - """ - raise NotImplementedError() - def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A client-to-server streaming RPC. 
- - Accepts a stream of Points on a route being traversed, returning a - RouteSummary when traversal is completed. - """ - raise NotImplementedError() - RecordRoute.future = None - def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """A Bidirectional streaming RPC. - - Accepts a stream of RouteNotes sent while a route is being traversed, - while receiving other RouteNotes (e.g. from other users). + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + + class BetaRouteGuideStub(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Interface exported by the server. 
""" - raise NotImplementedError() - - -def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - response_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - method_implementations = { - ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), - ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), - ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), - ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, - ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, - ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, - } - response_deserializers = { - ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, - ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, - ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, - ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, - } - cardinalities = { - 'GetFeature': cardinality.Cardinality.UNARY_UNARY, - 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, - 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, - 'RouteChat': cardinality.Cardinality.STREAM_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) + def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + raise NotImplementedError() + GetFeature.future = None + def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. 
+ """ + raise NotImplementedError() + def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + raise NotImplementedError() + RecordRoute.future = None + def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + raise NotImplementedError() + + + def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + response_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + method_implementations = { + ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature), + ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures), + ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute), + ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString, + ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString, + ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString, + } + response_deserializers = { + ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString, + ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString, + ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString, + ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString, + } + cardinalities = { + 'GetFeature': cardinality.Cardinality.UNARY_UNARY, + 'ListFeatures': cardinality.Cardinality.UNARY_STREAM, + 'RecordRoute': cardinality.Cardinality.STREAM_UNARY, + 'RouteChat': cardinality.Cardinality.STREAM_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options) +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/examples/python/route_guide/route_guide_pb2_grpc.py b/examples/python/route_guide/route_guide_pb2_grpc.py new file mode 100644 index 0000000000..27b24c747d --- /dev/null +++ b/examples/python/route_guide/route_guide_pb2_grpc.py @@ -0,0 +1,114 @@ +import grpc +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + +import route_guide_pb2 as route__guide__pb2 + + +class RouteGuideStub(object): + """Interface exported by the server. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetFeature = channel.unary_unary( + '/routeguide.RouteGuide/GetFeature', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.ListFeatures = channel.unary_stream( + '/routeguide.RouteGuide/ListFeatures', + request_serializer=route__guide__pb2.Rectangle.SerializeToString, + response_deserializer=route__guide__pb2.Feature.FromString, + ) + self.RecordRoute = channel.stream_unary( + '/routeguide.RouteGuide/RecordRoute', + request_serializer=route__guide__pb2.Point.SerializeToString, + response_deserializer=route__guide__pb2.RouteSummary.FromString, + ) + self.RouteChat = channel.stream_stream( + '/routeguide.RouteGuide/RouteChat', + request_serializer=route__guide__pb2.RouteNote.SerializeToString, + response_deserializer=route__guide__pb2.RouteNote.FromString, + ) + + +class RouteGuideServicer(object): + """Interface exported by the server. + """ + + def GetFeature(self, request, context): + """A simple RPC. + + Obtains the feature at a given position. + + A feature with an empty name is returned if there's no feature at the given + position. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListFeatures(self, request, context): + """A server-to-client streaming RPC. + + Obtains the Features available within the given Rectangle. Results are + streamed rather than returned at once (e.g. 
in a response message with a + repeated field), as the rectangle may cover a large area and contain a + huge number of features. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RecordRoute(self, request_iterator, context): + """A client-to-server streaming RPC. + + Accepts a stream of Points on a route being traversed, returning a + RouteSummary when traversal is completed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RouteChat(self, request_iterator, context): + """A Bidirectional streaming RPC. + + Accepts a stream of RouteNotes sent while a route is being traversed, + while receiving other RouteNotes (e.g. from other users). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RouteGuideServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetFeature': grpc.unary_unary_rpc_method_handler( + servicer.GetFeature, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'ListFeatures': grpc.unary_stream_rpc_method_handler( + servicer.ListFeatures, + request_deserializer=route__guide__pb2.Rectangle.FromString, + response_serializer=route__guide__pb2.Feature.SerializeToString, + ), + 'RecordRoute': grpc.stream_unary_rpc_method_handler( + servicer.RecordRoute, + request_deserializer=route__guide__pb2.Point.FromString, + response_serializer=route__guide__pb2.RouteSummary.SerializeToString, + ), + 'RouteChat': grpc.stream_stream_rpc_method_handler( + servicer.RouteChat, + request_deserializer=route__guide__pb2.RouteNote.FromString, + response_serializer=route__guide__pb2.RouteNote.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'routeguide.RouteGuide', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/examples/python/route_guide/route_guide_server.py b/examples/python/route_guide/route_guide_server.py index 3ffe678476..bf49217932 100644 --- a/examples/python/route_guide/route_guide_server.py +++ b/examples/python/route_guide/route_guide_server.py @@ -36,6 +36,7 @@ import math import grpc import route_guide_pb2 +import route_guide_pb2_grpc import route_guide_resources _ONE_DAY_IN_SECONDS = 60 * 60 * 24 @@ -68,7 +69,7 @@ def get_distance(start, end): R = 6371000; # metres return R * c; -class RouteGuideServicer(route_guide_pb2.RouteGuideServicer): +class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer): """Provides methods that implement functionality of route guide server.""" def __init__(self): @@ -125,7 +126,7 @@ class RouteGuideServicer(route_guide_pb2.RouteGuideServicer): def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - route_guide_pb2.add_RouteGuideServicer_to_server( + route_guide_pb2_grpc.add_RouteGuideServicer_to_server( RouteGuideServicer(), server) server.add_insecure_port('[::]:50051') server.start() diff --git a/examples/python/route_guide/run_codegen.py b/examples/python/route_guide/run_codegen.py index c7c6008580..3751e019c9 100644 --- a/examples/python/route_guide/run_codegen.py +++ b/examples/python/route_guide/run_codegen.py @@ -29,7 +29,7 @@ """Runs protoc with the gRPC plugin to generate messages and gRPC stubs.""" 
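(Illustrative sketch, not part of the patch: on the server side, route_guide_server.py now derives its servicer from route_guide_pb2_grpc.RouteGuideServicer and registers it with add_RouteGuideServicer_to_server, as shown in the diff above. The wiring below uses only names visible in this diff; the GetFeature body and the Feature field names from route_guide.proto are placeholders for the real implementation.)

from concurrent import futures
import time

import grpc
import route_guide_pb2
import route_guide_pb2_grpc


class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
    """Implements the RouteGuide service; only GetFeature is sketched here."""

    def GetFeature(self, request, context):
        # A Feature with an empty name signals that nothing is known at the position.
        return route_guide_pb2.Feature(name='', location=request)


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    route_guide_pb2_grpc.add_RouteGuideServicer_to_server(RouteGuideServicer(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    try:
        while True:
            time.sleep(60 * 60 * 24)
    except KeyboardInterrupt:
        server.stop(0)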
-from grpc.tools import protoc +from grpc_tools import protoc protoc.main( ( diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 04f7211d21..e10e534a5e 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -35,7 +35,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -44,7 +44,9 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + # TODO(mxyan): Change back to "v#{version}" for next release + #:tag => "v#{version}", + :tag => "objective-c-v#{version}", # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. :submodules => true, } diff --git a/gRPC-ProtoRPC.podspec b/gRPC-ProtoRPC.podspec index 61d4b62d39..62eaa2aaf7 100644 --- a/gRPC-ProtoRPC.podspec +++ b/gRPC-ProtoRPC.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-ProtoRPC' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'RPC library for Protocol Buffers, based on gRPC' s.homepage = 'http://www.grpc.io' @@ -39,7 +39,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/gRPC-RxLibrary.podspec b/gRPC-RxLibrary.podspec index d59385c039..2e8fffd2f1 100644 --- a/gRPC-RxLibrary.podspec +++ b/gRPC-RxLibrary.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-RxLibrary' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'Reactive Extensions library for iOS/OSX.' s.homepage = 'http://www.grpc.io' @@ -39,7 +39,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/gRPC.podspec b/gRPC.podspec index 76410b17d2..e8b7709449 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'gRPC client library for iOS/OSX' s.homepage = 'http://www.grpc.io' @@ -39,7 +39,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + :tag => "objective-c-v#{version}", } s.ios.deployment_target = '7.1' diff --git a/grpc.gemspec b/grpc.gemspec index 6019b97f67..9cafd1f2f9 100755 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -27,7 +27,7 @@ Gem::Specification.new do |s| s.require_paths = %w( src/ruby/bin src/ruby/lib src/ruby/pb ) s.platform = Gem::Platform::RUBY - s.add_dependency 'google-protobuf', '~> 3.0.2' + s.add_dependency 'google-protobuf', '~> 3.1.0' s.add_dependency 'googleauth', '~> 0.5.1' s.add_development_dependency 'bundler', '~> 1.9' diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index 9252c6a63a..2ac2f0a1ef 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -187,7 +187,7 @@ class ServerBuilder { struct SyncServerSettings { SyncServerSettings() - : num_cqs(GPR_MAX(gpr_cpu_num_cores(), 4)), + : num_cqs(1), min_pollers(1), max_pollers(INT_MAX), cq_timeout_msec(1000) {} diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index 5e486215e0..898f4d533b 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -202,9 +202,15 @@ GRPCAPI grpc_call *grpc_channel_create_registered_call( completion of type 'tag' to the completion queue bound to the call. 
The order of ops specified in the batch has no significance. Only one operation of each type can be active at once in any given - batch. You must call grpc_completion_queue_next or - grpc_completion_queue_pluck on the completion queue associated with 'call' - for work to be performed. + batch. + If a call to grpc_call_start_batch returns GRPC_CALL_OK you must call + grpc_completion_queue_next or grpc_completion_queue_pluck on the completion + queue associated with 'call' for work to be performed. If a call to + grpc_call_start_batch returns any value other than GRPC_CALL_OK it is + guaranteed that no state associated with 'call' is changed and it is not + appropriate to call grpc_completion_queue_next or + grpc_completion_queue_pluck consequent to the failed grpc_call_start_batch + call. THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment needs to be synchronized. As an optimization, you may synchronize batches containing just send operations independently from batches containing just diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index da1bd9dcbf..4471ccf745 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -206,18 +206,12 @@ typedef struct { /** If non-zero, allow the use of SO_REUSEPORT if it's available (default 1) */ #define GRPC_ARG_ALLOW_REUSEPORT "grpc.so_reuseport" /** If non-zero, a pointer to a buffer pool (use grpc_resource_quota_arg_vtable - to fetch an appropriate pointer arg vtable */ + to fetch an appropriate pointer arg vtable) */ #define GRPC_ARG_RESOURCE_QUOTA "grpc.resource_quota" -/** Service config data, to be passed to subchannels. - Not intended for external use. */ +/** Service config data in JSON form. Not intended for use outside of tests. */ #define GRPC_ARG_SERVICE_CONFIG "grpc.service_config" /** LB policy name. */ #define GRPC_ARG_LB_POLICY_NAME "grpc.lb_policy_name" -/** Server name. Not intended for external use. */ -#define GRPC_ARG_SERVER_NAME "grpc.server_name" -/** Resolved addresses in a form used by the LB policy. - Not intended for external use. */ -#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses" /** The grpc_socket_mutator instance that set the socket options. A pointer. */ #define GRPC_ARG_SOCKET_MUTATOR "grpc.socket_mutator" /** \} */ @@ -218,15 +218,18 @@ SETUP_REQUIRES = INSTALL_REQUIRES + ( 'six>=1.10', ) if ENABLE_DOCUMENTATION_BUILD else () -if BUILD_WITH_CYTHON: - sys.stderr.write( - "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, " - "but do not have Cython installed. We won't stop you from using " - "other commands, but the extension files will fail to build.\n") -elif need_cython: - sys.stderr.write( - 'We could not find Cython. Setup may take 10-20 minutes.\n') - SETUP_REQUIRES += ('cython>=0.23',) +try: + import Cython +except ImportError: + if BUILD_WITH_CYTHON: + sys.stderr.write( + "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, " + "but do not have Cython installed. We won't stop you from using " + "other commands, but the extension files will fail to build.\n") + elif need_cython: + sys.stderr.write( + 'We could not find Cython. 
Setup may take 10-20 minutes.\n') + SETUP_REQUIRES += ('cython>=0.23',) COMMAND_CLASS = { 'doc': commands.SphinxDocumentation, diff --git a/src/google_benchmark/gen_build_yaml.py b/src/benchmark/gen_build_yaml.py index 302e08737a..09b76115a8 100755 --- a/src/google_benchmark/gen_build_yaml.py +++ b/src/benchmark/gen_build_yaml.py @@ -39,15 +39,15 @@ os.chdir(os.path.dirname(sys.argv[0])+'/../..') out = {} out['libs'] = [{ - 'name': 'google_benchmark', + 'name': 'benchmark', 'build': 'private', 'language': 'c++', 'secure': 'no', - 'defaults': 'google_benchmark', - 'src': sorted(glob.glob('third_party/google_benchmark/src/*.cc')), + 'defaults': 'benchmark', + 'src': sorted(glob.glob('third_party/benchmark/src/*.cc')), 'headers': sorted( - glob.glob('third_party/google_benchmark/src/*.h') + - glob.glob('third_party/google_benchmark/include/benchmark/*.h')), + glob.glob('third_party/benchmark/src/*.h') + + glob.glob('third_party/benchmark/include/benchmark/*.h')), }] print yaml.dump(out) diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc index a3af258d9c..cc7a7a96ae 100644 --- a/src/compiler/csharp_generator.cc +++ b/src/compiler/csharp_generator.cc @@ -68,13 +68,13 @@ namespace { // Currently, we cannot easily reuse the functionality as // google/protobuf/compiler/csharp/csharp_doc_comment.h is not a public header. // TODO(jtattermusch): reuse the functionality from google/protobuf. -void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer, +bool GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer, grpc::protobuf::SourceLocation location) { grpc::string comments = location.leading_comments.empty() ? location.trailing_comments : location.leading_comments; if (comments.empty()) { - return; + return false; } // XML escaping... 
no need for apostrophes etc as the whole text is going to // be a child @@ -107,18 +107,84 @@ void GenerateDocCommentBodyImpl(grpc::protobuf::io::Printer *printer, printer->Print("///\n"); } last_was_empty = false; - printer->Print("/// $line$\n", "line", *it); + printer->Print("///$line$\n", "line", *it); } } printer->Print("/// </summary>\n"); + return true; } template <typename DescriptorType> -void GenerateDocCommentBody(grpc::protobuf::io::Printer *printer, +bool GenerateDocCommentBody(grpc::protobuf::io::Printer *printer, const DescriptorType *descriptor) { grpc::protobuf::SourceLocation location; - if (descriptor->GetSourceLocation(&location)) { - GenerateDocCommentBodyImpl(printer, location); + if (!descriptor->GetSourceLocation(&location)) { + return false; + } + return GenerateDocCommentBodyImpl(printer, location); +} + +void GenerateDocCommentServerMethod(grpc::protobuf::io::Printer *printer, + const MethodDescriptor *method) { + if (GenerateDocCommentBody(printer, method)) { + if (method->client_streaming()) { + printer->Print( + "/// <param name=\"requestStream\">Used for reading requests from " + "the client.</param>\n"); + } else { + printer->Print( + "/// <param name=\"request\">The request received from the " + "client.</param>\n"); + } + if (method->server_streaming()) { + printer->Print( + "/// <param name=\"responseStream\">Used for sending responses back " + "to the client.</param>\n"); + } + printer->Print( + "/// <param name=\"context\">The context of the server-side call " + "handler being invoked.</param>\n"); + if (method->server_streaming()) { + printer->Print( + "/// <returns>A task indicating completion of the " + "handler.</returns>\n"); + } else { + printer->Print( + "/// <returns>The response to send back to the client (wrapped by a " + "task).</returns>\n"); + } + } +} + +void GenerateDocCommentClientMethod(grpc::protobuf::io::Printer *printer, + const MethodDescriptor *method, + bool is_sync, bool use_call_options) { + if (GenerateDocCommentBody(printer, method)) { + if (!method->client_streaming()) { + printer->Print( + "/// <param name=\"request\">The request to send to the " + "server.</param>\n"); + } + if (!use_call_options) { + printer->Print( + "/// <param name=\"headers\">The initial metadata to send with the " + "call. This parameter is optional.</param>\n"); + printer->Print( + "/// <param name=\"deadline\">An optional deadline for the call. 
The " + "call will be cancelled if deadline is hit.</param>\n"); + printer->Print( + "/// <param name=\"cancellationToken\">An optional token for " + "canceling the call.</param>\n"); + } else { + printer->Print( + "/// <param name=\"options\">The options for the call.</param>\n"); + } + if (is_sync) { + printer->Print( + "/// <returns>The response received from the server.</returns>\n"); + } else { + printer->Print("/// <returns>The call object.</returns>\n"); + } } } @@ -319,7 +385,7 @@ void GenerateServerClass(Printer *out, const ServiceDescriptor *service) { out->Indent(); for (int i = 0; i < service->method_count(); i++) { const MethodDescriptor *method = service->method(i); - GenerateDocCommentBody(out, method); + GenerateDocCommentServerMethod(out, method); out->Print( "public virtual $returntype$ " "$methodname$($request$$response_stream_maybe$, " @@ -393,7 +459,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { if (method_type == METHODTYPE_NO_STREAMING) { // unary calls have an extra synchronous stub method - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, true, false); out->Print( "public virtual $response$ $methodname$($request$ request, Metadata " "headers = null, DateTime? deadline = null, CancellationToken " @@ -411,7 +477,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { out->Print("}\n"); // overload taking CallOptions as a param - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, true, true); out->Print( "public virtual $response$ $methodname$($request$ request, " "CallOptions options)\n", @@ -432,7 +498,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { if (method_type == METHODTYPE_NO_STREAMING) { method_name += "Async"; // prevent name clash with synchronous method. } - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, false, false); out->Print( "public virtual $returntype$ $methodname$($request_maybe$Metadata " "headers = null, DateTime? 
deadline = null, CancellationToken " @@ -452,7 +518,7 @@ void GenerateClientStub(Printer *out, const ServiceDescriptor *service) { out->Print("}\n"); // overload taking CallOptions as a param - GenerateDocCommentBody(out, method); + GenerateDocCommentClientMethod(out, method, false, true); out->Print( "public virtual $returntype$ $methodname$($request_maybe$CallOptions " "options)\n", @@ -518,6 +584,9 @@ void GenerateBindServiceMethod(Printer *out, const ServiceDescriptor *service) { "/// <summary>Creates service definition that can be registered with a " "server</summary>\n"); out->Print( + "/// <param name=\"serviceImpl\">An object implementing the server-side" + " handling logic.</param>\n"); + out->Print( "public static ServerServiceDefinition BindService($implclass$ " "serviceImpl)\n", "implclass", GetServerClassName(service)); diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc index b0a60092ab..4841da8da8 100644 --- a/src/compiler/python_generator.cc +++ b/src/compiler/python_generator.cc @@ -40,6 +40,7 @@ #include <map> #include <memory> #include <ostream> +#include <set> #include <sstream> #include <tuple> #include <vector> @@ -64,7 +65,9 @@ using std::make_pair; using std::map; using std::pair; using std::replace; +using std::tuple; using std::vector; +using std::set; namespace grpc_python_generator { @@ -73,6 +76,8 @@ namespace { typedef vector<const Descriptor*> DescriptorVector; typedef map<grpc::string, grpc::string> StringMap; typedef vector<grpc::string> StringVector; +typedef tuple<grpc::string, grpc::string> StringPair; +typedef set<StringPair> StringPairSet; // Provides RAII indentation handling. Use as: // { @@ -651,6 +656,7 @@ bool PrivateGenerator::PrintPreamble() { "face_utilities\n"); if (generate_in_pb2_grpc) { out->Print("\n"); + StringPairSet imports_set; for (int i = 0; i < file->service_count(); ++i) { const ServiceDescriptor* service = file->service(i); for (int j = 0; j < service->method_count(); ++j) { @@ -662,11 +668,15 @@ bool PrivateGenerator::PrintPreamble() { grpc::string type_file_name = type->file()->name(); grpc::string module_name = ModuleName(type_file_name); grpc::string module_alias = ModuleAlias(type_file_name); - out->Print("import $ModuleName$ as $ModuleAlias$\n", "ModuleName", - module_name, "ModuleAlias", module_alias); + imports_set.insert(std::make_tuple(module_name, module_alias)); } } } + for (StringPairSet::iterator it = imports_set.begin(); + it != imports_set.end(); ++it) { + out->Print("import $ModuleName$ as $ModuleAlias$\n", "ModuleName", + std::get<0>(*it), "ModuleAlias", std::get<1>(*it)); + } } return true; } @@ -714,6 +724,9 @@ pair<bool, grpc::string> PrivateGenerator::GetGrpcServices() { out = &out_printer; if (generate_in_pb2_grpc) { + out->Print( + "# Generated by the gRPC Python protocol compiler plugin. 
" + "DO NOT EDIT!\n"); if (!PrintPreamble()) { return make_pair(false, ""); } diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c index 397dbc40a8..3e8acc85e1 100644 --- a/src/core/ext/census/grpc_filter.c +++ b/src/core/ext/census/grpc_filter.c @@ -167,11 +167,12 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */ } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; GPR_ASSERT(chand != NULL); + return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c index 1fcff4388a..9d46338428 100644 --- a/src/core/ext/client_channel/client_channel.c +++ b/src/core/ext/client_channel/client_channel.c @@ -44,6 +44,7 @@ #include <grpc/support/useful.h> #include "src/core/ext/client_channel/lb_policy_registry.h" +#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/client_channel/subchannel.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/connected_channel.h" @@ -499,24 +500,39 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx, } /* Constructor for channel_data */ -static void cc_init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; - memset(chand, 0, sizeof(*chand)); - GPR_ASSERT(args->is_last); GPR_ASSERT(elem->filter == &grpc_client_channel_filter); - + // Initialize data members. gpr_mu_init(&chand->mu); + chand->owning_stack = args->channel_stack; grpc_closure_init(&chand->on_resolver_result_changed, on_resolver_result_changed, chand); - chand->owning_stack = args->channel_stack; - + chand->interested_parties = grpc_pollset_set_create(); grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, "client_channel"); - chand->interested_parties = grpc_pollset_set_create(); + // Record client channel factory. + const grpc_arg *arg = grpc_channel_args_find(args->channel_args, + GRPC_ARG_CLIENT_CHANNEL_FACTORY); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_POINTER); + grpc_client_channel_factory_ref(arg->value.pointer.p); + chand->client_channel_factory = arg->value.pointer.p; + // Instantiate resolver. 
+ arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_STRING); + chand->resolver = + grpc_resolver_create(exec_ctx, arg->value.string, args->channel_args, + chand->interested_parties); + if (chand->resolver == NULL) { + return GRPC_ERROR_CREATE("resolver creation failed"); + } + return GRPC_ERROR_NONE; } /* Destructor for channel_data */ @@ -683,9 +699,15 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, "Failed to create subchannel", &error, 1)); } else if (GET_CALL(calld) == CANCELLED_CALL) { /* already cancelled before subchannel became ready */ - fail_locked(exec_ctx, calld, - GRPC_ERROR_CREATE_REFERENCING( - "Cancelled before creating subchannel", &error, 1)); + grpc_error *cancellation_error = GRPC_ERROR_CREATE_REFERENCING( + "Cancelled before creating subchannel", &error, 1); + /* if due to deadline, attach the deadline exceeded status to the error */ + if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) { + cancellation_error = + grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_DEADLINE_EXCEEDED); + } + fail_locked(exec_ctx, calld, cancellation_error); } else { /* Create call on subchannel. */ grpc_subchannel_call *subchannel_call = NULL; @@ -809,7 +831,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY; } } - // TODO(dgq): make this deadline configurable somehow. const grpc_lb_policy_pick_args inputs = { initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem, gpr_inf_future(GPR_CLOCK_MONOTONIC)}; @@ -1130,30 +1151,6 @@ const grpc_channel_filter grpc_client_channel_filter = { "client-channel", }; -void grpc_client_channel_finish_initialization( - grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack, - grpc_resolver *resolver, - grpc_client_channel_factory *client_channel_factory) { - /* post construction initialization: set the transport setup pointer */ - GPR_ASSERT(client_channel_factory != NULL); - grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack); - channel_data *chand = elem->channel_data; - gpr_mu_lock(&chand->mu); - GPR_ASSERT(!chand->resolver); - chand->resolver = resolver; - GRPC_RESOLVER_REF(resolver, "channel"); - if (!grpc_closure_list_empty(chand->waiting_for_config_closures) || - chand->exit_idle_when_lb_policy_arrives) { - chand->started_resolving = true; - GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); - grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result, - &chand->on_resolver_result_changed); - } - chand->client_channel_factory = client_channel_factory; - grpc_client_channel_factory_ref(client_channel_factory); - gpr_mu_unlock(&chand->mu); -} - grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) { channel_data *chand = elem->channel_data; diff --git a/src/core/ext/client_channel/client_channel.h b/src/core/ext/client_channel/client_channel.h index ab5a84fdfb..f02587d0c1 100644 --- a/src/core/ext/client_channel/client_channel.h +++ b/src/core/ext/client_channel/client_channel.h @@ -38,6 +38,9 @@ #include "src/core/ext/client_channel/resolver.h" #include "src/core/lib/channel/channel_stack.h" +// Channel arg key for server URI string. 
+#define GRPC_ARG_SERVER_URI "grpc.server_uri" + /* A client channel is a channel that begins disconnected, and can connect to some endpoint on demand. If that endpoint disconnects, it will be connected to again later. @@ -47,13 +50,6 @@ extern const grpc_channel_filter grpc_client_channel_filter; -/* Post-construction initializer to give the client channel its resolver - and factory. */ -void grpc_client_channel_finish_initialization( - grpc_exec_ctx *exec_ctx, grpc_channel_stack *channel_stack, - grpc_resolver *resolver, - grpc_client_channel_factory *client_channel_factory); - grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect); diff --git a/src/core/ext/client_channel/client_channel_factory.c b/src/core/ext/client_channel/client_channel_factory.c index 4900832d57..4eb35dfcf7 100644 --- a/src/core/ext/client_channel/client_channel_factory.c +++ b/src/core/ext/client_channel/client_channel_factory.c @@ -55,3 +55,35 @@ grpc_channel* grpc_client_channel_factory_create_channel( return factory->vtable->create_client_channel(exec_ctx, factory, target, type, args); } + +static void* factory_arg_copy(void* factory) { + grpc_client_channel_factory_ref(factory); + return factory; +} + +static void factory_arg_destroy(void* factory) { + // TODO(roth): Remove local exec_ctx when + // https://github.com/grpc/grpc/pull/8705 is merged. + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_client_channel_factory_unref(&exec_ctx, factory); + grpc_exec_ctx_finish(&exec_ctx); +} + +static int factory_arg_cmp(void* factory1, void* factory2) { + if (factory1 < factory2) return -1; + if (factory1 > factory2) return 1; + return 0; +} + +static const grpc_arg_pointer_vtable factory_arg_vtable = { + factory_arg_copy, factory_arg_destroy, factory_arg_cmp}; + +grpc_arg grpc_client_channel_factory_create_channel_arg( + grpc_client_channel_factory* factory) { + grpc_arg arg; + arg.type = GRPC_ARG_POINTER; + arg.key = GRPC_ARG_CLIENT_CHANNEL_FACTORY; + arg.value.pointer.p = factory; + arg.value.pointer.vtable = &factory_arg_vtable; + return arg; +} diff --git a/src/core/ext/client_channel/client_channel_factory.h b/src/core/ext/client_channel/client_channel_factory.h index 2b8fc577b3..bf2764b537 100644 --- a/src/core/ext/client_channel/client_channel_factory.h +++ b/src/core/ext/client_channel/client_channel_factory.h @@ -39,6 +39,9 @@ #include "src/core/ext/client_channel/subchannel.h" #include "src/core/lib/channel/channel_stack.h" +// Channel arg key for client channel factory. 
+#define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory" + typedef struct grpc_client_channel_factory grpc_client_channel_factory; typedef struct grpc_client_channel_factory_vtable grpc_client_channel_factory_vtable; @@ -83,4 +86,7 @@ grpc_channel *grpc_client_channel_factory_create_channel( const char *target, grpc_client_channel_type type, const grpc_channel_args *args); +grpc_arg grpc_client_channel_factory_create_channel_arg( + grpc_client_channel_factory *factory); + #endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */ diff --git a/src/core/ext/client_channel/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c index 572af52dfd..76c78ee853 100644 --- a/src/core/ext/client_channel/http_connect_handshaker.c +++ b/src/core/ext/client_channel/http_connect_handshaker.c @@ -40,6 +40,8 @@ #include <grpc/support/log.h> #include <grpc/support/string_util.h> +#include "src/core/ext/client_channel/client_channel.h" +#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/client_channel/uri_parser.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/http/format_request.h" @@ -51,7 +53,6 @@ typedef struct http_connect_handshaker { grpc_handshaker base; char* proxy_server; - char* server_name; gpr_refcount refcount; gpr_mu mu; @@ -86,7 +87,6 @@ static void http_connect_handshaker_unref(grpc_exec_ctx* exec_ctx, gpr_free(handshaker->read_buffer_to_destroy); } gpr_free(handshaker->proxy_server); - gpr_free(handshaker->server_name); grpc_slice_buffer_destroy(&handshaker->write_buffer); grpc_http_parser_destroy(&handshaker->http_parser); grpc_http_response_destroy(&handshaker->http_response); @@ -265,18 +265,27 @@ static void http_connect_handshaker_do_handshake( grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args) { http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in; - gpr_mu_lock(&handshaker->mu); + // Get server name from channel args. + const grpc_arg* arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_STRING); + char* canonical_uri = + grpc_resolver_factory_add_default_prefix_if_needed(arg->value.string); + grpc_uri* uri = grpc_uri_parse(canonical_uri, 1); + char* server_name = uri->path; + if (server_name[0] == '/') ++server_name; // Save state in the handshaker object. + gpr_mu_lock(&handshaker->mu); handshaker->args = args; handshaker->on_handshake_done = on_handshake_done; // Send HTTP CONNECT request. - gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s", - handshaker->server_name, handshaker->proxy_server); + gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s", server_name, + handshaker->proxy_server); grpc_httpcli_request request; memset(&request, 0, sizeof(request)); - request.host = handshaker->proxy_server; + request.host = server_name; request.http.method = "CONNECT"; - request.http.path = handshaker->server_name; + request.http.path = server_name; request.handshaker = &grpc_httpcli_plaintext; grpc_slice request_slice = grpc_httpcli_format_connect_request(&request); grpc_slice_buffer_add(&handshaker->write_buffer, request_slice); @@ -285,23 +294,23 @@ static void http_connect_handshaker_do_handshake( grpc_endpoint_write(exec_ctx, args->endpoint, &handshaker->write_buffer, &handshaker->request_done_closure); gpr_mu_unlock(&handshaker->mu); + // Clean up. 
+ gpr_free(canonical_uri); + grpc_uri_destroy(uri); } static const grpc_handshaker_vtable http_connect_handshaker_vtable = { http_connect_handshaker_destroy, http_connect_handshaker_shutdown, http_connect_handshaker_do_handshake}; -grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server, - const char* server_name) { +grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server) { GPR_ASSERT(proxy_server != NULL); - GPR_ASSERT(server_name != NULL); http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker)); memset(handshaker, 0, sizeof(*handshaker)); grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base); gpr_mu_init(&handshaker->mu); gpr_ref_init(&handshaker->refcount, 1); handshaker->proxy_server = gpr_strdup(proxy_server); - handshaker->server_name = gpr_strdup(server_name); grpc_slice_buffer_init(&handshaker->write_buffer); grpc_closure_init(&handshaker->request_done_closure, on_write_done, handshaker); diff --git a/src/core/ext/client_channel/http_connect_handshaker.h b/src/core/ext/client_channel/http_connect_handshaker.h index c689df2b2b..ea293852e6 100644 --- a/src/core/ext/client_channel/http_connect_handshaker.h +++ b/src/core/ext/client_channel/http_connect_handshaker.h @@ -36,9 +36,8 @@ #include "src/core/lib/channel/handshaker.h" -/// Does NOT take ownership of \a proxy_server or \a server_name. -grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server, - const char* server_name); +/// Does NOT take ownership of \a proxy_server. +grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server); /// Returns the name of the proxy to use, or NULL if no proxy is configured. /// Caller takes ownership of result. diff --git a/src/core/ext/client_channel/lb_policy_factory.h b/src/core/ext/client_channel/lb_policy_factory.h index e2b8080a32..79b3dee259 100644 --- a/src/core/ext/client_channel/lb_policy_factory.h +++ b/src/core/ext/client_channel/lb_policy_factory.h @@ -40,6 +40,9 @@ #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/resolve_address.h" +// Channel arg key for grpc_lb_addresses. 
+#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses" + typedef struct grpc_lb_policy_factory grpc_lb_policy_factory; typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable; diff --git a/src/core/ext/client_channel/resolver_factory.c b/src/core/ext/client_channel/resolver_factory.c index 7c3d644257..00bbb92dd0 100644 --- a/src/core/ext/client_channel/resolver_factory.c +++ b/src/core/ext/client_channel/resolver_factory.c @@ -43,9 +43,10 @@ void grpc_resolver_factory_unref(grpc_resolver_factory* factory) { /** Create a resolver instance for a name */ grpc_resolver* grpc_resolver_factory_create_resolver( - grpc_resolver_factory* factory, grpc_resolver_args* args) { + grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory, + grpc_resolver_args* args) { if (factory == NULL) return NULL; - return factory->vtable->create_resolver(factory, args); + return factory->vtable->create_resolver(exec_ctx, factory, args); } char* grpc_resolver_factory_get_default_authority( diff --git a/src/core/ext/client_channel/resolver_factory.h b/src/core/ext/client_channel/resolver_factory.h index 4da42e84d2..3792ddca18 100644 --- a/src/core/ext/client_channel/resolver_factory.h +++ b/src/core/ext/client_channel/resolver_factory.h @@ -37,6 +37,7 @@ #include "src/core/ext/client_channel/client_channel_factory.h" #include "src/core/ext/client_channel/resolver.h" #include "src/core/ext/client_channel/uri_parser.h" +#include "src/core/lib/iomgr/pollset_set.h" typedef struct grpc_resolver_factory grpc_resolver_factory; typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable; @@ -48,6 +49,7 @@ struct grpc_resolver_factory { typedef struct grpc_resolver_args { grpc_uri *uri; const grpc_channel_args *args; + grpc_pollset_set *pollset_set; } grpc_resolver_args; struct grpc_resolver_factory_vtable { @@ -55,7 +57,8 @@ struct grpc_resolver_factory_vtable { void (*unref)(grpc_resolver_factory *factory); /** Implementation of grpc_resolver_factory_create_resolver */ - grpc_resolver *(*create_resolver)(grpc_resolver_factory *factory, + grpc_resolver *(*create_resolver)(grpc_exec_ctx *exec_ctx, + grpc_resolver_factory *factory, grpc_resolver_args *args); /** Implementation of grpc_resolver_factory_get_default_authority */ @@ -70,7 +73,8 @@ void grpc_resolver_factory_unref(grpc_resolver_factory *resolver); /** Create a resolver instance for a name */ grpc_resolver *grpc_resolver_factory_create_resolver( - grpc_resolver_factory *factory, grpc_resolver_args *args); + grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, + grpc_resolver_args *args); /** Return a (freshly allocated with gpr_malloc) string representing the default authority to use for this scheme. 
*/ diff --git a/src/core/ext/client_channel/resolver_registry.c b/src/core/ext/client_channel/resolver_registry.c index d0f0fc3f33..5110a7cad9 100644 --- a/src/core/ext/client_channel/resolver_registry.c +++ b/src/core/ext/client_channel/resolver_registry.c @@ -109,8 +109,8 @@ static grpc_resolver_factory *lookup_factory_by_uri(grpc_uri *uri) { } static grpc_resolver_factory *resolve_factory(const char *target, - grpc_uri **uri) { - char *tmp; + grpc_uri **uri, + char **canonical_target) { grpc_resolver_factory *factory = NULL; GPR_ASSERT(uri != NULL); @@ -118,37 +118,54 @@ static grpc_resolver_factory *resolve_factory(const char *target, factory = lookup_factory_by_uri(*uri); if (factory == NULL) { grpc_uri_destroy(*uri); - gpr_asprintf(&tmp, "%s%s", g_default_resolver_prefix, target); - *uri = grpc_uri_parse(tmp, 1); + gpr_asprintf(canonical_target, "%s%s", g_default_resolver_prefix, target); + *uri = grpc_uri_parse(*canonical_target, 1); factory = lookup_factory_by_uri(*uri); if (factory == NULL) { grpc_uri_destroy(grpc_uri_parse(target, 0)); - grpc_uri_destroy(grpc_uri_parse(tmp, 0)); - gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target, tmp); + grpc_uri_destroy(grpc_uri_parse(*canonical_target, 0)); + gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target, + *canonical_target); } - gpr_free(tmp); } return factory; } -grpc_resolver *grpc_resolver_create(const char *target, - const grpc_channel_args *args) { +grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target, + const grpc_channel_args *args, + grpc_pollset_set *pollset_set) { grpc_uri *uri = NULL; - grpc_resolver_factory *factory = resolve_factory(target, &uri); + char *canonical_target = NULL; + grpc_resolver_factory *factory = + resolve_factory(target, &uri, &canonical_target); grpc_resolver *resolver; grpc_resolver_args resolver_args; memset(&resolver_args, 0, sizeof(resolver_args)); resolver_args.uri = uri; resolver_args.args = args; - resolver = grpc_resolver_factory_create_resolver(factory, &resolver_args); + resolver_args.pollset_set = pollset_set; + resolver = + grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args); grpc_uri_destroy(uri); + gpr_free(canonical_target); return resolver; } char *grpc_get_default_authority(const char *target) { grpc_uri *uri = NULL; - grpc_resolver_factory *factory = resolve_factory(target, &uri); + char *canonical_target = NULL; + grpc_resolver_factory *factory = + resolve_factory(target, &uri, &canonical_target); char *authority = grpc_resolver_factory_get_default_authority(factory, uri); grpc_uri_destroy(uri); + gpr_free(canonical_target); return authority; } + +char *grpc_resolver_factory_add_default_prefix_if_needed(const char *target) { + grpc_uri *uri = NULL; + char *canonical_target = NULL; + resolve_factory(target, &uri, &canonical_target); + grpc_uri_destroy(uri); + return canonical_target == NULL ? 
gpr_strdup(target) : canonical_target; +} diff --git a/src/core/ext/client_channel/resolver_registry.h b/src/core/ext/client_channel/resolver_registry.h index 2a95a669f0..4fb16131db 100644 --- a/src/core/ext/client_channel/resolver_registry.h +++ b/src/core/ext/client_channel/resolver_registry.h @@ -35,6 +35,7 @@ #define GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H #include "src/core/ext/client_channel/resolver_factory.h" +#include "src/core/lib/iomgr/pollset_set.h" void grpc_resolver_registry_init(); void grpc_resolver_registry_shutdown(void); @@ -60,8 +61,9 @@ void grpc_register_resolver_type(grpc_resolver_factory *factory); If a resolver factory was not found, return NULL. \a args is a set of channel arguments to be included in the result (typically the set of arguments passed in from the client API). */ -grpc_resolver *grpc_resolver_create(const char *target, - const grpc_channel_args *args); +grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target, + const grpc_channel_args *args, + grpc_pollset_set *pollset_set); /** Find a resolver factory given a name and return an (owned-by-the-caller) * reference to it */ @@ -71,4 +73,8 @@ grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name); representing the default authority to pass from a client. */ char *grpc_get_default_authority(const char *target); +/** Returns a newly allocated string containing \a target, adding the + default prefix if needed. */ +char *grpc_resolver_factory_add_default_prefix_if_needed(const char *target); + #endif /* GRPC_CORE_EXT_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */ diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c index 08632079d6..f294e69392 100644 --- a/src/core/ext/client_channel/subchannel.c +++ b/src/core/ext/client_channel/subchannel.c @@ -604,14 +604,20 @@ static void publish_transport_locked(grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder_set_transport(builder, c->connecting_result.transport); - if (grpc_channel_init_create_stack(exec_ctx, builder, - GRPC_CLIENT_SUBCHANNEL)) { - con = grpc_channel_stack_builder_finish(exec_ctx, builder, 0, 1, - connection_destroy, NULL); - } else { + if (!grpc_channel_init_create_stack(exec_ctx, builder, + GRPC_CLIENT_SUBCHANNEL)) { grpc_channel_stack_builder_destroy(builder); abort(); /* TODO(ctiller): what to do here (previously we just crashed) */ } + grpc_error *error = grpc_channel_stack_builder_finish( + exec_ctx, builder, 0, 1, connection_destroy, NULL, (void **)&con); + if (error != GRPC_ERROR_NONE) { + const char *msg = grpc_error_string(error); + gpr_log(GPR_ERROR, "error initializing subchannel stack: %s", msg); + grpc_error_free_string(msg); + GRPC_ERROR_UNREF(error); + abort(); /* TODO(ctiller): what to do here? 
*/ + } stk = CHANNEL_STACK_FROM_CONNECTION(con); memset(&c->connecting_result, 0, sizeof(c->connecting_result)); diff --git a/src/core/ext/client_channel/subchannel.h b/src/core/ext/client_channel/subchannel.h index 10bae620df..24aa9f73dc 100644 --- a/src/core/ext/client_channel/subchannel.h +++ b/src/core/ext/client_channel/subchannel.h @@ -164,8 +164,6 @@ struct grpc_subchannel_args { size_t filter_count; /** Channel arguments to be supplied to the newly created channel */ const grpc_channel_args *args; - /** Server name */ - const char *server_name; /** Address to connect to */ grpc_resolved_address *addr; }; diff --git a/src/core/ext/client_channel/subchannel_index.c b/src/core/ext/client_channel/subchannel_index.c index 227013a7d7..a1ba5e945c 100644 --- a/src/core/ext/client_channel/subchannel_index.c +++ b/src/core/ext/client_channel/subchannel_index.c @@ -86,7 +86,6 @@ static grpc_subchannel_key *create_key( } else { k->args.filters = NULL; } - k->args.server_name = gpr_strdup(args->server_name); k->args.addr = gpr_malloc(sizeof(grpc_resolved_address)); k->args.addr->len = args->addr->len; if (k->args.addr->len > 0) { @@ -113,8 +112,6 @@ static int subchannel_key_compare(grpc_subchannel_key *a, if (c != 0) return c; c = GPR_ICMP(a->args.filter_count, b->args.filter_count); if (c != 0) return c; - c = strcmp(a->args.server_name, b->args.server_name); - if (c != 0) return c; if (a->args.addr->len) { c = memcmp(a->args.addr->addr, b->args.addr->addr, a->args.addr->len); if (c != 0) return c; @@ -132,7 +129,6 @@ void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx, grpc_connector_unref(exec_ctx, k->connector); gpr_free((grpc_channel_args *)k->args.filters); grpc_channel_args_destroy((grpc_channel_args *)k->args.args); - gpr_free((void *)k->args.server_name); gpr_free(k->args.addr); gpr_free(k); } diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c index df0db61c22..bed5e6c901 100644 --- a/src/core/ext/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/lb_policy/grpclb/grpclb.c @@ -106,6 +106,7 @@ #include <grpc/support/string_util.h> #include <grpc/support/time.h> +#include "src/core/ext/client_channel/client_channel.h" #include "src/core/ext/client_channel/client_channel_factory.h" #include "src/core/ext/client_channel/lb_policy_factory.h" #include "src/core/ext/client_channel/lb_policy_registry.h" @@ -743,12 +744,6 @@ static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg, static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory, grpc_lb_policy_args *args) { - /* Get server name. */ - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME); - const char *server_name = - arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL; - /* Count the number of gRPC-LB addresses. There must be at least one. * TODO(roth): For now, we ignore non-balancer addresses, but in the * future, we may change the behavior such that we fall back to using @@ -756,7 +751,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, * time, this should be changed to allow a list with no balancer addresses, * since the resolver might fail to return a balancer address even when * this is the right LB policy to use. 
*/ - arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); + const grpc_arg *arg = + grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER); grpc_lb_addresses *addresses = arg->value.pointer.p; size_t num_grpclb_addrs = 0; @@ -768,13 +764,25 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy = gpr_malloc(sizeof(*glb_policy)); memset(glb_policy, 0, sizeof(*glb_policy)); + /* Get server name. */ + arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI); + GPR_ASSERT(arg != NULL); + GPR_ASSERT(arg->type == GRPC_ARG_STRING); + grpc_uri *uri = grpc_uri_parse(arg->value.string, true); + GPR_ASSERT(uri->path[0] != '\0'); + glb_policy->server_name = + gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path); + if (grpc_lb_glb_trace) { + gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.", + glb_policy->server_name); + } + grpc_uri_destroy(uri); + /* All input addresses in addresses come from a resolver that claims * they are LB services. It's the resolver's responsibility to make sure - * this - * policy is only instantiated and used in that case. + * this policy is only instantiated and used in that case. * * Create a client channel over them to communicate with a LB service */ - glb_policy->server_name = gpr_strdup(server_name); glb_policy->cc_factory = args->client_channel_factory; glb_policy->args = grpc_channel_args_copy(args->args); GPR_ASSERT(glb_policy->cc_factory != NULL); @@ -818,9 +826,14 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, * We need the LB channel to return addresses with is_balancer=false * so that it does not wind up recursively using the grpclb LB policy, * as per the special case logic in client_channel.c. + * + * Finally, we also strip out the channel arg for the server URI, + * since that will be different for the LB channel than for the parent + * channel. (The client channel factory will re-add this arg with + * the right value.) */ - static const char *keys_to_remove[] = {GRPC_ARG_LB_POLICY_NAME, - GRPC_ARG_LB_ADDRESSES}; + static const char *keys_to_remove[] = { + GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI}; grpc_channel_args *new_args = grpc_channel_args_copy_and_remove( args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove)); glb_policy->lb_channel = grpc_client_channel_factory_create_channel( diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c index afecb716fb..e352e0396f 100644 --- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c @@ -1,35 +1,3 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. 
nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ /* Automatically generated nanopb constant definitions */ /* Generated by nanopb-0.3.7-dev */ diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h index e36d0966f8..725aa7e386 100644 --- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h @@ -1,35 +1,3 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ /* Automatically generated nanopb header */ /* Generated by nanopb-0.3.7-dev */ diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c index c69f773e78..b9cfe6b5c0 100644 --- a/src/core/ext/lb_policy/pick_first/pick_first.c +++ b/src/core/ext/lb_policy/pick_first/pick_first.c @@ -438,15 +438,10 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx, grpc_lb_policy_args *args) { GPR_ASSERT(args->client_channel_factory != NULL); - /* Get server name. 
*/ - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME); - const char *server_name = - arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL; - /* Find the number of backend addresses. We ignore balancer * addresses, since we don't know how to handle them. */ - arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); + const grpc_arg *arg = + grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER); grpc_lb_addresses *addresses = arg->value.pointer.p; size_t num_addrs = 0; @@ -472,9 +467,6 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx, } memset(&sc_args, 0, sizeof(grpc_subchannel_args)); - /* server_name will be copied as part of the subchannel creation. This makes - * the copying of server_name (a borrowed pointer) OK. */ - sc_args.server_name = server_name; sc_args.addr = &addresses->addresses[i].address; sc_args.args = args->args; diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c index 59f84054c4..f0305473d2 100644 --- a/src/core/ext/lb_policy/round_robin/round_robin.c +++ b/src/core/ext/lb_policy/round_robin/round_robin.c @@ -703,15 +703,10 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx, grpc_lb_policy_args *args) { GPR_ASSERT(args->client_channel_factory != NULL); - /* Get server name. */ - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_SERVER_NAME); - const char *server_name = - arg != NULL && arg->type == GRPC_ARG_STRING ? arg->value.string : NULL; - /* Find the number of backend addresses. We ignore balancer * addresses, since we don't know how to handle them. */ - arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); + const grpc_arg *arg = + grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); GPR_ASSERT(arg != NULL && arg->type == GRPC_ARG_POINTER); grpc_lb_addresses *addresses = arg->value.pointer.p; size_t num_addrs = 0; @@ -734,9 +729,6 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx, if (addresses->addresses[i].is_balancer) continue; memset(&sc_args, 0, sizeof(grpc_subchannel_args)); - /* server_name will be copied as part of the subchannel creation. This makes - * the copying of server_name (a borrowed pointer) OK. 
*/ - sc_args.server_name = server_name; sc_args.addr = &addresses->addresses[i].address; sc_args.args = args->args; diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c index b810e20bb9..18bb826948 100644 --- a/src/core/ext/load_reporting/load_reporting_filter.c +++ b/src/core/ext/load_reporting/load_reporting_filter.c @@ -152,9 +152,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(!args->is_last); channel_data *chand = elem->channel_data; @@ -171,6 +171,8 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, NULL, NULL}; */ + + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c index 15476f5792..2675fa931f 100644 --- a/src/core/ext/resolver/dns/native/dns_resolver.c +++ b/src/core/ext/resolver/dns/native/dns_resolver.c @@ -61,6 +61,8 @@ typedef struct { char *default_port; /** channel args. */ grpc_channel_args *channel_args; + /** pollset_set to drive the name resolution process */ + grpc_pollset_set *interested_parties; /** mutex guarding the rest of the state */ gpr_mu mu; @@ -218,6 +220,7 @@ static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx, r->resolving = true; r->addresses = NULL; grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port, + r->interested_parties, grpc_closure_create(dns_on_resolved, r), &r->addresses); } @@ -240,13 +243,15 @@ static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) { if (r->resolved_result != NULL) { grpc_channel_args_destroy(r->resolved_result); } + grpc_pollset_set_destroy(r->interested_parties); gpr_free(r->name_to_resolve); gpr_free(r->default_port); grpc_channel_args_destroy(r->channel_args); gpr_free(r); } -static grpc_resolver *dns_create(grpc_resolver_args *args, +static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx, + grpc_resolver_args *args, const char *default_port) { if (0 != strcmp(args->uri->authority, "")) { gpr_log(GPR_ERROR, "authority based dns uri's not supported"); @@ -264,12 +269,12 @@ static grpc_resolver *dns_create(grpc_resolver_args *args, grpc_resolver_init(&r->base, &dns_resolver_vtable); r->name_to_resolve = proxy_name == NULL ? 
gpr_strdup(path) : proxy_name; r->default_port = gpr_strdup(default_port); - grpc_arg server_name_arg; - server_name_arg.type = GRPC_ARG_STRING; - server_name_arg.key = GRPC_ARG_SERVER_NAME; - server_name_arg.value.string = (char *)path; - r->channel_args = - grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1); + r->channel_args = grpc_channel_args_copy(args->args); + r->interested_parties = grpc_pollset_set_create(); + if (args->pollset_set != NULL) { + grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties, + args->pollset_set); + } gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, GRPC_DNS_RECONNECT_JITTER, @@ -287,8 +292,9 @@ static void dns_factory_ref(grpc_resolver_factory *factory) {} static void dns_factory_unref(grpc_resolver_factory *factory) {} static grpc_resolver *dns_factory_create_resolver( - grpc_resolver_factory *factory, grpc_resolver_args *args) { - return dns_create(args, "https"); + grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, + grpc_resolver_args *args) { + return dns_create(exec_ctx, args, "https"); } static char *dns_factory_get_default_host_name(grpc_resolver_factory *factory, diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c index 26a650aadd..88808c674f 100644 --- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c +++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c @@ -198,12 +198,7 @@ static grpc_resolver *sockaddr_create(grpc_resolver_args *args, sockaddr_resolver *r = gpr_malloc(sizeof(sockaddr_resolver)); memset(r, 0, sizeof(*r)); r->addresses = addresses; - grpc_arg server_name_arg; - server_name_arg.type = GRPC_ARG_STRING; - server_name_arg.key = GRPC_ARG_SERVER_NAME; - server_name_arg.value.string = args->uri->path; - r->channel_args = - grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1); + r->channel_args = grpc_channel_args_copy(args->args); gpr_mu_init(&r->mu); grpc_resolver_init(&r->base, &sockaddr_resolver_vtable); return &r->base; @@ -219,7 +214,8 @@ static void sockaddr_factory_unref(grpc_resolver_factory *factory) {} #define DECL_FACTORY(name) \ static grpc_resolver *name##_factory_create_resolver( \ - grpc_resolver_factory *factory, grpc_resolver_args *args) { \ + grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, \ + grpc_resolver_args *args) { \ return sockaddr_create(args, parse_##name); \ } \ static const grpc_resolver_factory_vtable name##_factory_vtable = { \ diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c index 568b114d64..114bb07222 100644 --- a/src/core/ext/transport/chttp2/client/chttp2_connector.c +++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c @@ -58,9 +58,8 @@ typedef struct { bool shutdown; bool connecting; - char *server_name; - grpc_chttp2_create_handshakers_func create_handshakers; - void *create_handshakers_user_data; + grpc_chttp2_add_handshakers_func add_handshakers; + void *add_handshakers_user_data; grpc_closure *notify; grpc_connect_in_args args; @@ -89,7 +88,6 @@ static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx, // If handshaking is not yet in progress, destroy the endpoint. // Otherwise, the handshaker will do this for us. 
if (c->endpoint != NULL) grpc_endpoint_destroy(exec_ctx, c->endpoint); - gpr_free(c->server_name); gpr_free(c); } } @@ -155,14 +153,13 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx, c->handshake_mgr = grpc_handshake_manager_create(); char *proxy_name = grpc_get_http_proxy_server(); if (proxy_name != NULL) { - grpc_handshake_manager_add( - c->handshake_mgr, - grpc_http_connect_handshaker_create(proxy_name, c->server_name)); + grpc_handshake_manager_add(c->handshake_mgr, + grpc_http_connect_handshaker_create(proxy_name)); gpr_free(proxy_name); } - if (c->create_handshakers != NULL) { - c->create_handshakers(exec_ctx, c->create_handshakers_user_data, - c->handshake_mgr); + if (c->add_handshakers != NULL) { + c->add_handshakers(exec_ctx, c->add_handshakers_user_data, + c->handshake_mgr); } grpc_handshake_manager_do_handshake( exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args, @@ -254,16 +251,14 @@ static const grpc_connector_vtable chttp2_connector_vtable = { chttp2_connector_connect}; grpc_connector *grpc_chttp2_connector_create( - grpc_exec_ctx *exec_ctx, const char *server_name, - grpc_chttp2_create_handshakers_func create_handshakers, - void *create_handshakers_user_data) { + grpc_exec_ctx *exec_ctx, grpc_chttp2_add_handshakers_func add_handshakers, + void *add_handshakers_user_data) { chttp2_connector *c = gpr_malloc(sizeof(*c)); memset(c, 0, sizeof(*c)); c->base.vtable = &chttp2_connector_vtable; gpr_mu_init(&c->mu); gpr_ref_init(&c->refs, 1); - c->server_name = gpr_strdup(server_name); - c->create_handshakers = create_handshakers; - c->create_handshakers_user_data = create_handshakers_user_data; + c->add_handshakers = add_handshakers; + c->add_handshakers_user_data = add_handshakers_user_data; return &c->base; } diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.h b/src/core/ext/transport/chttp2/client/chttp2_connector.h index 6c34ce1af1..58eba22417 100644 --- a/src/core/ext/transport/chttp2/client/chttp2_connector.h +++ b/src/core/ext/transport/chttp2/client/chttp2_connector.h @@ -38,15 +38,14 @@ #include "src/core/lib/channel/handshaker.h" #include "src/core/lib/iomgr/exec_ctx.h" -typedef void (*grpc_chttp2_create_handshakers_func)( +typedef void (*grpc_chttp2_add_handshakers_func)( grpc_exec_ctx* exec_ctx, void* user_data, grpc_handshake_manager* handshake_mgr); -/// If \a create_handshakers is non-NULL, it will be called with -/// \a create_handshakers_user_data to add handshakers. +/// If \a add_handshakers is non-NULL, it will be called with +/// \a add_handshakers_user_data to add handshakers. 
grpc_connector* grpc_chttp2_connector_create( - grpc_exec_ctx* exec_ctx, const char* server_name, - grpc_chttp2_create_handshakers_func create_handshakers, - void* create_handshakers_user_data); + grpc_exec_ctx* exec_ctx, grpc_chttp2_add_handshakers_func add_handshakers, + void* add_handshakers_user_data); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H */ diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c index 29f3759d00..a0d0652ce7 100644 --- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c +++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c @@ -39,8 +39,8 @@ #include <grpc/support/string_util.h> #include "src/core/ext/client_channel/client_channel.h" -#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/transport/chttp2/client/chttp2_connector.h" +#include "src/core/lib/channel/channel_args.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/channel.h" @@ -54,8 +54,7 @@ static grpc_subchannel *client_channel_factory_create_subchannel( grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, const grpc_subchannel_args *args) { grpc_connector *connector = grpc_chttp2_connector_create( - exec_ctx, args->server_name, NULL /* create_handshakers */, - NULL /* user_data */); + exec_ctx, NULL /* add_handshakers */, NULL /* user_data */); grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args); grpc_connector_unref(exec_ctx, connector); return s; @@ -65,17 +64,15 @@ static grpc_channel *client_channel_factory_create_channel( grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, const char *target, grpc_client_channel_type type, const grpc_channel_args *args) { - grpc_channel *channel = - grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL); - grpc_resolver *resolver = grpc_resolver_create(target, args); - if (resolver == NULL) { - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, - "client_channel_factory_create_channel"); - return NULL; - } - grpc_client_channel_finish_initialization( - exec_ctx, grpc_channel_get_channel_stack(channel), resolver, cc_factory); - GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create_channel"); + // Add channel arg containing the server URI. + grpc_arg arg; + arg.type = GRPC_ARG_STRING; + arg.key = GRPC_ARG_SERVER_URI; + arg.value.string = (char *)target; + grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); + grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args, + GRPC_CLIENT_CHANNEL, NULL); + grpc_channel_args_destroy(new_args); return channel; } @@ -101,8 +98,14 @@ grpc_channel *grpc_insecure_channel_create(const char *target, GPR_ASSERT(reserved == NULL); grpc_client_channel_factory *factory = (grpc_client_channel_factory *)&client_channel_factory; + // Add channel arg containing the client channel factory. + grpc_arg arg = grpc_client_channel_factory_create_channel_arg(factory); + grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); + // Create channel. grpc_channel *channel = client_channel_factory_create_channel( - &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args); + &exec_ctx, factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args); + // Clean up. + grpc_channel_args_destroy(new_args); grpc_client_channel_factory_unref(&exec_ctx, factory); grpc_exec_ctx_finish(&exec_ctx); return channel != NULL ? 
channel : grpc_lame_client_channel_create( diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c index 35e1e1f716..f35439cd44 100644 --- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c +++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c @@ -39,7 +39,6 @@ #include <grpc/support/string_util.h> #include "src/core/ext/client_channel/client_channel.h" -#include "src/core/ext/client_channel/resolver_registry.h" #include "src/core/ext/transport/chttp2/client/chttp2_connector.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/security/credentials/credentials.h" @@ -69,11 +68,10 @@ static void client_channel_factory_unref( } } -static void create_handshakers(grpc_exec_ctx *exec_ctx, - void *security_connector, - grpc_handshake_manager *handshake_mgr) { - grpc_channel_security_connector_create_handshakers( - exec_ctx, security_connector, handshake_mgr); +static void add_handshakers(grpc_exec_ctx *exec_ctx, void *security_connector, + grpc_handshake_manager *handshake_mgr) { + grpc_channel_security_connector_add_handshakers(exec_ctx, security_connector, + handshake_mgr); } static grpc_subchannel *client_channel_factory_create_subchannel( @@ -81,7 +79,7 @@ static grpc_subchannel *client_channel_factory_create_subchannel( const grpc_subchannel_args *args) { client_channel_factory *f = (client_channel_factory *)cc_factory; grpc_connector *connector = grpc_chttp2_connector_create( - exec_ctx, args->server_name, create_handshakers, f->security_connector); + exec_ctx, add_handshakers, f->security_connector); grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args); grpc_connector_unref(exec_ctx, connector); return s; @@ -91,18 +89,15 @@ static grpc_channel *client_channel_factory_create_channel( grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, const char *target, grpc_client_channel_type type, const grpc_channel_args *args) { - client_channel_factory *f = (client_channel_factory *)cc_factory; - grpc_channel *channel = - grpc_channel_create(exec_ctx, target, args, GRPC_CLIENT_CHANNEL, NULL); - grpc_resolver *resolver = grpc_resolver_create(target, args); - if (resolver == NULL) { - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, - "client_channel_factory_create_channel"); - return NULL; - } - grpc_client_channel_finish_initialization( - exec_ctx, grpc_channel_get_channel_stack(channel), resolver, &f->base); - GRPC_RESOLVER_UNREF(exec_ctx, resolver, "create_channel"); + // Add channel arg containing the server URI. + grpc_arg arg; + arg.type = GRPC_ARG_STRING; + arg.key = GRPC_ARG_SERVER_URI; + arg.value.string = (char *)target; + grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); + grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args, + GRPC_CLIENT_CHANNEL, NULL); + grpc_channel_args_destroy(new_args); return channel; } @@ -143,14 +138,6 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds, return grpc_lame_client_channel_create( target, GRPC_STATUS_INTERNAL, "Failed to create security connector."); } - grpc_arg connector_arg = - grpc_security_connector_to_arg(&security_connector->base); - grpc_channel_args *new_args = grpc_channel_args_copy_and_add( - new_args_from_connector != NULL ? 
new_args_from_connector : args, - &connector_arg, 1); - if (new_args_from_connector != NULL) { - grpc_channel_args_destroy(new_args_from_connector); - } // Create client channel factory. client_channel_factory *f = gpr_malloc(sizeof(*f)); memset(f, 0, sizeof(*f)); @@ -159,13 +146,24 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds, GRPC_SECURITY_CONNECTOR_REF(&security_connector->base, "grpc_secure_channel_create"); f->security_connector = security_connector; + // Add channel args containing the client channel factory and security + // connector. + grpc_arg new_args[2]; + new_args[0] = grpc_client_channel_factory_create_channel_arg(&f->base); + new_args[1] = grpc_security_connector_to_arg(&security_connector->base); + grpc_channel_args *args_copy = grpc_channel_args_copy_and_add( + new_args_from_connector != NULL ? new_args_from_connector : args, + new_args, GPR_ARRAY_SIZE(new_args)); + if (new_args_from_connector != NULL) { + grpc_channel_args_destroy(new_args_from_connector); + } // Create channel. grpc_channel *channel = client_channel_factory_create_channel( - &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args); + &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, args_copy); // Clean up. GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base, "secure_client_channel_factory_create_channel"); - grpc_channel_args_destroy(new_args); + grpc_channel_args_destroy(args_copy); grpc_client_channel_factory_unref(&exec_ctx, &f->base); grpc_exec_ctx_finish(&exec_ctx); return channel; /* may be NULL */ diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c index 8ee7e29316..f0857714fc 100644 --- a/src/core/ext/transport/chttp2/server/chttp2_server.c +++ b/src/core/ext/transport/chttp2/server/chttp2_server.c @@ -53,13 +53,13 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/server.h" -void grpc_chttp2_server_handshaker_factory_create_handshakers( +void grpc_chttp2_server_handshaker_factory_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *handshaker_factory, grpc_handshake_manager *handshake_mgr) { if (handshaker_factory != NULL) { - handshaker_factory->vtable->create_handshakers(exec_ctx, handshaker_factory, - handshake_mgr); + handshaker_factory->vtable->add_handshakers(exec_ctx, handshaker_factory, + handshake_mgr); } } @@ -139,7 +139,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, const char *error_str = grpc_error_string(error); gpr_log(GPR_ERROR, "Handshaking failed: %s", error_str); grpc_error_free_string(error_str); - if (error == GRPC_ERROR_NONE) { + if (error == GRPC_ERROR_NONE && args->endpoint != NULL) { // We were shut down after handshaking completed successfully, so // destroy the endpoint here. 
// TODO(ctiller): It is currently necessary to shutdown endpoints @@ -153,19 +153,26 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, gpr_free(args->read_buffer); } } else { - grpc_transport *transport = - grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0); - grpc_server_setup_transport( - exec_ctx, connection_state->server_state->server, transport, - connection_state->accepting_pollset, args->args); - grpc_chttp2_transport_start_reading(exec_ctx, transport, args->read_buffer); - grpc_channel_args_destroy(args->args); + // If the handshaking succeeded but there is no endpoint, then the + // handshaker may have handed off the connection to some external + // code, so we can just clean up here without creating a transport. + if (args->endpoint != NULL) { + grpc_transport *transport = + grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0); + grpc_server_setup_transport( + exec_ctx, connection_state->server_state->server, transport, + connection_state->accepting_pollset, args->args); + grpc_chttp2_transport_start_reading(exec_ctx, transport, + args->read_buffer); + grpc_channel_args_destroy(args->args); + } } pending_handshake_manager_remove_locked(connection_state->server_state, connection_state->handshake_mgr); gpr_mu_unlock(&connection_state->server_state->mu); grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr); grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server); + gpr_free(connection_state->acceptor); gpr_free(connection_state); } @@ -177,6 +184,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, if (state->shutdown) { gpr_mu_unlock(&state->mu); grpc_endpoint_destroy(exec_ctx, tcp); + gpr_free(acceptor); return; } grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create(); @@ -189,7 +197,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, connection_state->accepting_pollset = accepting_pollset; connection_state->acceptor = acceptor; connection_state->handshake_mgr = handshake_mgr; - grpc_chttp2_server_handshaker_factory_create_handshakers( + grpc_chttp2_server_handshaker_factory_add_handshakers( exec_ctx, state->handshaker_factory, connection_state->handshake_mgr); // TODO(roth): We should really get this timeout value from channel // args instead of hard-coding it. 
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.h b/src/core/ext/transport/chttp2/server/chttp2_server.h index 3073399267..aa364b565d 100644 --- a/src/core/ext/transport/chttp2/server/chttp2_server.h +++ b/src/core/ext/transport/chttp2/server/chttp2_server.h @@ -45,7 +45,7 @@ typedef struct grpc_chttp2_server_handshaker_factory grpc_chttp2_server_handshaker_factory; typedef struct { - void (*create_handshakers)( + void (*add_handshakers)( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *handshaker_factory, grpc_handshake_manager *handshake_mgr); @@ -57,7 +57,7 @@ struct grpc_chttp2_server_handshaker_factory { const grpc_chttp2_server_handshaker_factory_vtable *vtable; }; -void grpc_chttp2_server_handshaker_factory_create_handshakers( +void grpc_chttp2_server_handshaker_factory_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *handshaker_factory, grpc_handshake_manager *handshake_mgr); diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c index 85c21f0ca2..a33a7a3f7d 100644 --- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c +++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c @@ -54,12 +54,12 @@ typedef struct { grpc_server_security_connector *security_connector; } server_security_handshaker_factory; -static void server_security_handshaker_factory_create_handshakers( +static void server_security_handshaker_factory_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_chttp2_server_handshaker_factory *hf, grpc_handshake_manager *handshake_mgr) { server_security_handshaker_factory *handshaker_factory = (server_security_handshaker_factory *)hf; - grpc_server_security_connector_create_handshakers( + grpc_server_security_connector_add_handshakers( exec_ctx, handshaker_factory->security_connector, handshake_mgr); } @@ -74,7 +74,7 @@ static void server_security_handshaker_factory_destroy( static const grpc_chttp2_server_handshaker_factory_vtable server_security_handshaker_factory_vtable = { - server_security_handshaker_factory_create_handshakers, + server_security_handshaker_factory_add_handshakers, server_security_handshaker_factory_destroy}; int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr, diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 3b84898fee..6bc054866b 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -1622,6 +1622,9 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx, if (s->id != 0) { remove_stream(exec_ctx, t, s->id, removal_error(GRPC_ERROR_REF(error), s, "Stream removed")); + } else { + /* Purge streams waiting on concurrency still waiting for id assignment */ + grpc_chttp2_list_remove_waiting_for_concurrency(t, s); } GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2"); } diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 31eb1e01ac..b727965d43 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -496,6 +496,8 @@ void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t, grpc_chttp2_stream *s); int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t, grpc_chttp2_stream **s); +void 
grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t, + grpc_chttp2_stream *s); void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t, grpc_chttp2_stream *s); diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c index 6d25b3ae57..a60264cc51 100644 --- a/src/core/ext/transport/chttp2/transport/stream_lists.c +++ b/src/core/ext/transport/chttp2/transport/stream_lists.c @@ -158,6 +158,11 @@ int grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t, return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY); } +void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t, + grpc_chttp2_stream *s) { + stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY); +} + void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t, grpc_chttp2_stream *s) { stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c index a4c110101e..afc59f4b12 100644 --- a/src/core/ext/transport/cronet/transport/cronet_transport.c +++ b/src/core/ext/transport/cronet/transport/cronet_transport.c @@ -149,6 +149,9 @@ struct write_state { struct op_state { bool state_op_done[OP_NUM_OPS]; bool state_callback_received[OP_NUM_OPS]; + bool fail_state; + bool flush_read; + grpc_error *cancel_error; /* data structure for storing data coming from server */ struct read_state rs; /* data structure for storing data going to the server */ @@ -248,6 +251,12 @@ static void free_read_buffer(stream_obj *s) { } } +static grpc_error *make_error_with_desc(int error_code, const char *desc) { + grpc_error *error = GRPC_ERROR_CREATE(desc); + error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, error_code); + return error; +} + /* Add a new stream op to op storage. 
*/ @@ -433,6 +442,18 @@ static void on_response_headers_received( grpc_mdstr_from_string(headers->headers[i].value))); } s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true; + if (!(s->state.state_op_done[OP_CANCEL_ERROR] || + s->state.state_callback_received[OP_FAILED])) { + /* Do an extra read to trigger on_succeeded() callback in case connection + is closed */ + GPR_ASSERT(s->state.rs.length_field_received == false); + s->state.rs.read_buffer = s->state.rs.grpc_header_bytes; + s->state.rs.received_bytes = 0; + s->state.rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES; + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs); + cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, + s->state.rs.remaining_bytes); + } gpr_mu_unlock(&s->mu); execute_from_storage(s); } @@ -464,7 +485,11 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data, count); gpr_mu_lock(&s->mu); s->state.state_callback_received[OP_RECV_MESSAGE] = true; - if (count > 0) { + if (count > 0 && s->state.flush_read) { + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs); + cronet_bidirectional_stream_read(s->cbs, s->state.rs.read_buffer, 4096); + gpr_mu_unlock(&s->mu); + } else if (count > 0) { s->state.rs.received_bytes += count; s->state.rs.remaining_bytes -= count; if (s->state.rs.remaining_bytes > 0) { @@ -479,6 +504,10 @@ static void on_read_completed(cronet_bidirectional_stream *stream, char *data, execute_from_storage(s); } } else { + if (s->state.flush_read) { + gpr_free(s->state.rs.read_buffer); + s->state.rs.read_buffer = NULL; + } s->state.rs.read_stream_closed = true; gpr_mu_unlock(&s->mu); execute_from_storage(s); @@ -508,10 +537,27 @@ static void on_response_trailers_received( grpc_mdstr_from_string(trailers->headers[i].key), grpc_mdstr_from_string(trailers->headers[i].value))); s->state.rs.trailing_metadata_valid = true; + if (0 == strcmp(trailers->headers[i].key, "grpc-status") && + 0 != strcmp(trailers->headers[i].value, "0")) { + s->state.fail_state = true; + } } s->state.state_callback_received[OP_RECV_TRAILING_METADATA] = true; - gpr_mu_unlock(&s->mu); - execute_from_storage(s); + /* Send a EOS when server terminates the stream (testServerFinishesRequest) to + * trigger on_succeeded */ + if (!s->state.state_op_done[OP_SEND_TRAILING_METADATA] && + !(s->state.state_op_done[OP_CANCEL_ERROR] || + s->state.state_callback_received[OP_FAILED])) { + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_write (%p, 0)", s->cbs); + s->state.state_callback_received[OP_SEND_MESSAGE] = false; + cronet_bidirectional_stream_write(s->cbs, "", 0, true); + s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true; + + gpr_mu_unlock(&s->mu); + } else { + gpr_mu_unlock(&s->mu); + execute_from_storage(s); + } } /* @@ -632,9 +678,9 @@ static bool op_can_be_run(grpc_transport_stream_op *curr_op, /* When call is canceled, every op can be run, except under following conditions */ - bool is_canceled_of_failed = stream_state->state_op_done[OP_CANCEL_ERROR] || + bool is_canceled_or_failed = stream_state->state_op_done[OP_CANCEL_ERROR] || stream_state->state_callback_received[OP_FAILED]; - if (is_canceled_of_failed) { + if (is_canceled_or_failed) { if (op_id == OP_SEND_INITIAL_METADATA) result = false; if (op_id == OP_SEND_MESSAGE) result = false; if (op_id == OP_SEND_TRAILING_METADATA) result = false; @@ -778,16 +824,10 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, op_can_be_run(stream_op, stream_state, &oas->state, 
OP_SEND_INITIAL_METADATA)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_INITIAL_METADATA", oas); - /* This OP is the beginning. Reset various states */ - memset(&s->header_array, 0, sizeof(s->header_array)); - memset(&stream_state->rs, 0, sizeof(stream_state->rs)); - memset(&stream_state->ws, 0, sizeof(stream_state->ws)); - memset(stream_state->state_op_done, 0, sizeof(stream_state->state_op_done)); - memset(stream_state->state_callback_received, 0, - sizeof(stream_state->state_callback_received)); /* Start new cronet stream. It is destroyed in on_succeeded, on_canceled, * on_failed */ GPR_ASSERT(s->cbs == NULL); + GPR_ASSERT(!stream_state->state_op_done[OP_SEND_INITIAL_METADATA]); s->cbs = cronet_bidirectional_stream_create(s->curr_ct.engine, s->curr_gs, &cronet_callbacks); CRONET_LOG(GPR_DEBUG, "%p = cronet_bidirectional_stream_create()", s->cbs); @@ -808,10 +848,13 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, op_can_be_run(stream_op, stream_state, &oas->state, OP_RECV_INITIAL_METADATA)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas); - if (stream_state->state_op_done[OP_CANCEL_ERROR] || - stream_state->state_callback_received[OP_FAILED]) { + if (stream_state->state_op_done[OP_CANCEL_ERROR]) { grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready, GRPC_ERROR_CANCELLED, NULL); + } else if (stream_state->state_callback_received[OP_FAILED]) { + grpc_exec_ctx_sched( + exec_ctx, stream_op->recv_initial_metadata_ready, + make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL); } else { grpc_chttp2_incoming_metadata_buffer_publish( &oas->s->state.rs.initial_metadata, stream_op->recv_initial_metadata); @@ -865,12 +908,19 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, op_can_be_run(stream_op, stream_state, &oas->state, OP_RECV_MESSAGE)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas); - if (stream_state->state_op_done[OP_CANCEL_ERROR] || - stream_state->state_callback_received[OP_FAILED]) { - CRONET_LOG(GPR_DEBUG, "Stream is either cancelled or failed."); + if (stream_state->state_op_done[OP_CANCEL_ERROR]) { + CRONET_LOG(GPR_DEBUG, "Stream is cancelled."); grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready, GRPC_ERROR_CANCELLED, NULL); stream_state->state_op_done[OP_RECV_MESSAGE] = true; + result = ACTION_TAKEN_NO_CALLBACK; + } else if (stream_state->state_callback_received[OP_FAILED]) { + CRONET_LOG(GPR_DEBUG, "Stream failed."); + grpc_exec_ctx_sched( + exec_ctx, stream_op->recv_message_ready, + make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL); + stream_state->state_op_done[OP_RECV_MESSAGE] = true; + result = ACTION_TAKEN_NO_CALLBACK; } else if (stream_state->rs.read_stream_closed == true) { /* No more data will be received */ CRONET_LOG(GPR_DEBUG, "read stream closed"); @@ -878,6 +928,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, GRPC_ERROR_NONE, NULL); stream_state->state_op_done[OP_RECV_MESSAGE] = true; oas->state.state_op_done[OP_RECV_MESSAGE] = true; + result = ACTION_TAKEN_NO_CALLBACK; } else if (stream_state->rs.length_field_received == false) { if (stream_state->rs.received_bytes == GRPC_HEADER_SIZE_IN_BYTES && stream_state->rs.remaining_bytes == 0) { @@ -946,10 +997,15 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, GRPC_ERROR_NONE, NULL); stream_state->state_op_done[OP_RECV_MESSAGE] = true; oas->state.state_op_done[OP_RECV_MESSAGE] = true; - /* Clear read state of the stream, so next read op (if it 
were to come) - * will work */ - stream_state->rs.received_bytes = stream_state->rs.remaining_bytes = - stream_state->rs.length_field_received = 0; + /* Do an extra read to trigger on_succeeded() callback in case connection + is closed */ + stream_state->rs.read_buffer = stream_state->rs.grpc_header_bytes; + stream_state->rs.received_bytes = 0; + stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES; + stream_state->rs.length_field_received = false; + CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_read(%p)", s->cbs); + cronet_bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer, + stream_state->rs.remaining_bytes); result = ACTION_TAKEN_NO_CALLBACK; } } else if (stream_op->recv_trailing_metadata && @@ -986,17 +1042,25 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, CRONET_LOG(GPR_DEBUG, "W: cronet_bidirectional_stream_cancel(%p)", s->cbs); if (s->cbs) { cronet_bidirectional_stream_cancel(s->cbs); + result = ACTION_TAKEN_WITH_CALLBACK; + } else { + result = ACTION_TAKEN_NO_CALLBACK; } stream_state->state_op_done[OP_CANCEL_ERROR] = true; - result = ACTION_TAKEN_WITH_CALLBACK; + if (!stream_state->cancel_error) { + stream_state->cancel_error = GRPC_ERROR_REF(stream_op->cancel_error); + } } else if (stream_op->on_complete && op_can_be_run(stream_op, stream_state, &oas->state, OP_ON_COMPLETE)) { CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas); - if (stream_state->state_op_done[OP_CANCEL_ERROR] || - stream_state->state_callback_received[OP_FAILED]) { + if (stream_state->state_op_done[OP_CANCEL_ERROR]) { grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, - GRPC_ERROR_CANCELLED, NULL); + GRPC_ERROR_REF(stream_state->cancel_error), NULL); + } else if (stream_state->state_callback_received[OP_FAILED]) { + grpc_exec_ctx_sched( + exec_ctx, stream_op->on_complete, + make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL); } else { /* All actions in this stream_op are complete. 
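Once a stream is marked fail_state, the transport above switches to flush_read mode: it keeps issuing 4096-byte reads into a scratch buffer and discards the bytes until the read callback reports end-of-stream, which lets Cronet deliver its final on_succeeded/on_failed callback so the stream can be destroyed. A standalone sketch of that drain-until-EOF idea over a plain POSIX descriptor (no Cronet involved, names are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

/* Read and discard everything remaining on fd, 4096 bytes at a time,
   until EOF (read returns 0) or an error (read returns -1). */
static int drain_fd(int fd) {
  char *scratch = malloc(4096);
  ssize_t n;
  while ((n = read(fd, scratch, 4096)) > 0) {
    /* bytes intentionally ignored: we only want the stream to finish */
  }
  free(scratch);
  return n == 0 ? 0 : -1;
}

int main(void) {
  int fd = open("/dev/null", O_RDONLY);
  if (fd < 0) return 1;
  printf("drain result: %d\n", drain_fd(fd));
  close(fd);
  return 0;
}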
Call the on_complete * callback @@ -1017,6 +1081,15 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx, make a note */ if (stream_op->recv_message) stream_state->state_op_done[OP_RECV_MESSAGE_AND_ON_COMPLETE] = true; + } else if (stream_state->fail_state && !stream_state->flush_read) { + CRONET_LOG(GPR_DEBUG, "running: %p flush read", oas); + if (stream_state->rs.read_buffer && + stream_state->rs.read_buffer != stream_state->rs.grpc_header_bytes) { + gpr_free(stream_state->rs.read_buffer); + stream_state->rs.read_buffer = NULL; + } + stream_state->rs.read_buffer = gpr_malloc(4096); + stream_state->flush_read = true; } else { result = NO_ACTION_POSSIBLE; } @@ -1042,6 +1115,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, memset(s->state.state_op_done, 0, sizeof(s->state.state_op_done)); memset(s->state.state_callback_received, 0, sizeof(s->state.state_callback_received)); + s->state.fail_state = s->state.flush_read = false; + s->state.cancel_error = NULL; gpr_mu_init(&s->mu); return 0; } @@ -1088,7 +1163,10 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, } static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, void *and_free_memory) {} + grpc_stream *gs, void *and_free_memory) { + stream_obj *s = (stream_obj *)gs; + GRPC_ERROR_UNREF(s->state.cancel_error); +} static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {} diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c index 999ad5f507..1d0b7d4f31 100644 --- a/src/core/lib/channel/channel_stack.c +++ b/src/core/lib/channel/channel_stack.c @@ -102,13 +102,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack, return CALL_ELEMS_FROM_STACK(call_stack) + index; } -void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, - grpc_iomgr_cb_func destroy, void *destroy_arg, - const grpc_channel_filter **filters, - size_t filter_count, - const grpc_channel_args *channel_args, - grpc_transport *optional_transport, - const char *name, grpc_channel_stack *stack) { +grpc_error *grpc_channel_stack_init( + grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, + const grpc_channel_args *channel_args, grpc_transport *optional_transport, + const char *name, grpc_channel_stack *stack) { size_t call_size = ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) + ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element)); @@ -126,6 +124,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element)); /* init per-filter data */ + grpc_error *first_error = GRPC_ERROR_NONE; for (i = 0; i < filter_count; i++) { args.channel_stack = stack; args.channel_args = channel_args; @@ -134,7 +133,15 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, args.is_last = i == (filter_count - 1); elems[i].filter = filters[i]; elems[i].channel_data = user_data; - elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args); + grpc_error *error = + elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args); + if (error != GRPC_ERROR_NONE) { + if (first_error == GRPC_ERROR_NONE) { + first_error = error; + } else { + GRPC_ERROR_UNREF(error); + } + } user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data); call_size += 
ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data); } @@ -144,6 +151,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, grpc_channel_stack_size(filters, filter_count)); stack->call_stack_size = call_size; + return first_error; } void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index 004643d45f..d9d3a85233 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -34,6 +34,13 @@ #ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H #define GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H +////////////////////////////////////////////////////////////////////////////// +// IMPORTANT NOTE: +// +// When you update this API, please make the corresponding changes to +// the C++ API in src/cpp/common/channel_filter.{h,cc} +////////////////////////////////////////////////////////////////////////////// + /* A channel filter defines how operations on a channel are implemented. Channel filters are chained together to create full channels, and if those chains are linear, then channel stacks provide a mechanism to minimize @@ -146,8 +153,9 @@ typedef struct { is_first, is_last designate this elements position in the stack, and are useful for asserting correct configuration by upper layer code. The filter does not need to do any chaining */ - void (*init_channel_elem)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_channel_element_args *args); + grpc_error *(*init_channel_elem)(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args); /* Destroy per channel data. The filter does not need to do any chaining */ void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx, @@ -214,12 +222,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i); size_t grpc_channel_stack_size(const grpc_channel_filter **filters, size_t filter_count); /* Initialize a channel stack given some filters */ -void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs, - grpc_iomgr_cb_func destroy, void *destroy_arg, - const grpc_channel_filter **filters, - size_t filter_count, const grpc_channel_args *args, - grpc_transport *optional_transport, - const char *name, grpc_channel_stack *stack); +grpc_error *grpc_channel_stack_init( + grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, + const grpc_channel_args *args, grpc_transport *optional_transport, + const char *name, grpc_channel_stack *stack); /* Destroy a channel stack */ void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_stack *stack); diff --git a/src/core/lib/channel/channel_stack_builder.c b/src/core/lib/channel/channel_stack_builder.c index eda4968f48..b959517afb 100644 --- a/src/core/lib/channel/channel_stack_builder.c +++ b/src/core/lib/channel/channel_stack_builder.c @@ -227,11 +227,10 @@ void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder) { gpr_free(builder); } -void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, - grpc_iomgr_cb_func destroy, - void *destroy_arg) { +grpc_error *grpc_channel_stack_builder_finish( + grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, + size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, void **result) { // count the number of filters size_t 
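With the change above, grpc_channel_stack_init() still initializes every filter even when an earlier one fails; it keeps only the first error and unrefs any later ones, then returns that first error to the caller. The sketch below isolates that first-error policy, using a plain string in place of grpc_error (err_t and the init functions are hypothetical, not the gRPC API):

#include <stdio.h>
#include <stddef.h>

typedef const char *err_t;          /* stand-in for grpc_error*; NULL means no error */
#define ERR_NONE NULL

typedef err_t (*init_fn)(void);

static err_t init_ok(void)    { return ERR_NONE; }
static err_t init_fail1(void) { return "filter A failed"; }
static err_t init_fail2(void) { return "filter B failed"; }

/* Run every initializer, remember only the first failure. */
static err_t init_all(const init_fn *fns, size_t n) {
  err_t first_error = ERR_NONE;
  for (size_t i = 0; i < n; i++) {
    err_t error = fns[i]();
    if (error != ERR_NONE && first_error == ERR_NONE) {
      first_error = error;          /* the real code GRPC_ERROR_UNREFs any later errors */
    }
  }
  return first_error;
}

int main(void) {
  init_fn fns[] = {init_ok, init_fail1, init_ok, init_fail2};
  err_t e = init_all(fns, 4);
  printf("first error: %s\n", e ? e : "(none)");
  return 0;
}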
num_filters = 0; for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) { @@ -250,28 +249,35 @@ void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx, size_t channel_stack_size = grpc_channel_stack_size(filters, num_filters); // allocate memory, with prefix_bytes followed by channel_stack_size - char *result = gpr_malloc(prefix_bytes + channel_stack_size); + *result = gpr_malloc(prefix_bytes + channel_stack_size); // fetch a pointer to the channel stack grpc_channel_stack *channel_stack = - (grpc_channel_stack *)(result + prefix_bytes); + (grpc_channel_stack *)((char *)(*result) + prefix_bytes); // and initialize it - grpc_channel_stack_init(exec_ctx, initial_refs, destroy, - destroy_arg == NULL ? result : destroy_arg, filters, - num_filters, builder->args, builder->transport, - builder->name, channel_stack); - - // run post-initialization functions - i = 0; - for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) { - if (p->init != NULL) { - p->init(channel_stack, grpc_channel_stack_element(channel_stack, i), - p->init_arg); + grpc_error *error = grpc_channel_stack_init( + exec_ctx, initial_refs, destroy, + destroy_arg == NULL ? *result : destroy_arg, filters, num_filters, + builder->args, builder->transport, builder->name, channel_stack); + + if (error != GRPC_ERROR_NONE) { + grpc_channel_stack_destroy(exec_ctx, channel_stack); + gpr_free(*result); + *result = NULL; + } else { + // run post-initialization functions + i = 0; + for (filter_node *p = builder->begin.next; p != &builder->end; + p = p->next) { + if (p->init != NULL) { + p->init(channel_stack, grpc_channel_stack_element(channel_stack, i), + p->init_arg); + } + i++; } - i++; } grpc_channel_stack_builder_destroy(builder); gpr_free((grpc_channel_filter **)filters); - return result; + return error; } diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h index 4a00f7bfdb..65bfebcabc 100644 --- a/src/core/lib/channel/channel_stack_builder.h +++ b/src/core/lib/channel/channel_stack_builder.h @@ -146,16 +146,15 @@ bool grpc_channel_stack_builder_append_filter( void grpc_channel_stack_builder_iterator_destroy( grpc_channel_stack_builder_iterator *iterator); -/// Destroy the builder, return the freshly minted channel stack +/// Destroy the builder, return the freshly minted channel stack in \a result. 
/// Allocates \a prefix_bytes bytes before the channel stack /// Returns the base pointer of the allocated block /// \a initial_refs, \a destroy, \a destroy_arg are as per /// grpc_channel_stack_init -void *grpc_channel_stack_builder_finish(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, - grpc_iomgr_cb_func destroy, - void *destroy_arg); +grpc_error *grpc_channel_stack_builder_finish( + grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, + size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, + void *destroy_arg, void **result); /// Destroy the builder without creating a channel stack void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder *builder); diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c index 2874d63fc7..0e336dc330 100644 --- a/src/core/lib/channel/compress_filter.c +++ b/src/core/lib/channel/compress_filter.c @@ -285,9 +285,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *channeld = elem->channel_data; channeld->enabled_algorithms_bitset = @@ -315,6 +315,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, } GPR_ASSERT(!args->is_last); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/channel/connected_channel.c b/src/core/lib/channel/connected_channel.c index 038e819f72..c2a36b5558 100644 --- a/src/core/lib/channel/connected_channel.c +++ b/src/core/lib/channel/connected_channel.c @@ -114,12 +114,13 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *cd = (channel_data *)elem->channel_data; GPR_ASSERT(args->is_last); cd->transport = NULL; + return GRPC_ERROR_NONE; } /* Destructor for channel_data */ diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h index 071c5f695c..6c931ad28a 100644 --- a/src/core/lib/channel/context.h +++ b/src/core/lib/channel/context.h @@ -47,6 +47,9 @@ typedef enum { /// Value is a \a census_context. GRPC_CONTEXT_TRACING, + /// Reserved for traffic_class_context. + GRPC_CONTEXT_TRAFFIC, + GRPC_CONTEXT_COUNT } grpc_context_index; diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c index 0e703d8d27..470ccfea57 100644 --- a/src/core/lib/channel/deadline_filter.c +++ b/src/core/lib/channel/deadline_filter.c @@ -207,10 +207,11 @@ void grpc_deadline_state_client_start_transport_stream_op( // // Constructor for channel_data. Used for both client and server filters. -static void init_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem, - grpc_channel_element_args* args) { +static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); + return GRPC_ERROR_NONE; } // Destructor for channel_data. Used for both client and server filters. 
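grpc_channel_stack_builder_finish() now returns a grpc_error* and hands the allocated block back through a void **result, freeing the allocation and nulling the out-parameter when stack initialization fails. A minimal standalone sketch of that allocate / init / clean-up-on-error shape follows; thing_init and make_thing are hypothetical, not gRPC calls.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef const char *err_t;  /* stand-in for grpc_error*; NULL means success */

/* Hypothetical initializer that may fail. */
static err_t thing_init(char *buf, size_t len, int should_fail) {
  if (should_fail) return "init failed";
  memset(buf, 0, len);
  return NULL;
}

/* Allocate, initialize, and either hand the block to the caller or free it
   and null the out-parameter, mirroring the new builder_finish() contract. */
static err_t make_thing(size_t len, int should_fail, void **result) {
  *result = malloc(len);
  err_t error = thing_init(*result, len, should_fail);
  if (error != NULL) {
    free(*result);
    *result = NULL;
  }
  return error;
}

int main(void) {
  void *thing = NULL;
  err_t e = make_thing(64, 1, &thing);
  printf("error=%s thing=%p\n", e ? e : "(none)", thing);
  if (e == NULL) free(thing);
  return 0;
}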
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c index 90626dc2d1..23edc826ca 100644 --- a/src/core/lib/channel/handshaker.c +++ b/src/core/lib/channel/handshaker.c @@ -49,21 +49,21 @@ void grpc_handshaker_init(const grpc_handshaker_vtable* vtable, handshaker->vtable = vtable; } -static void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker) { +void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker) { handshaker->vtable->destroy(exec_ctx, handshaker); } -static void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker) { +void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker) { handshaker->vtable->shutdown(exec_ctx, handshaker); } -static void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, - grpc_tcp_server_acceptor* acceptor, - grpc_closure* on_handshake_done, - grpc_handshaker_args* args) { +void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker, + grpc_tcp_server_acceptor* acceptor, + grpc_closure* on_handshake_done, + grpc_handshaker_args* args) { handshaker->vtable->do_handshake(exec_ctx, handshaker, acceptor, on_handshake_done, args); } @@ -157,10 +157,11 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, grpc_error* error) { GPR_ASSERT(mgr->index <= mgr->count); - // If we got an error or we've been shut down or we've finished the last - // handshaker, invoke the on_handshake_done callback. Otherwise, call the - // next handshaker. - if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->index == mgr->count) { + // If we got an error or we've been shut down or we're exiting early or + // we've finished the last handshaker, invoke the on_handshake_done + // callback. Otherwise, call the next handshaker. + if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early || + mgr->index == mgr->count) { // Cancel deadline timer, since we're invoking the on_handshake_done // callback now. grpc_timer_cancel(exec_ctx, &mgr->deadline_timer); diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h index ebbc1ff7f3..450b7adaee 100644 --- a/src/core/lib/channel/handshaker.h +++ b/src/core/lib/channel/handshaker.h @@ -72,6 +72,9 @@ typedef struct { grpc_endpoint* endpoint; grpc_channel_args* args; grpc_slice_buffer* read_buffer; + // A handshaker may set this to true before invoking on_handshake_done + // to indicate that subsequent handshakers should be skipped. + bool exit_early; // User data passed through the handshake manager. Not used by // individual handshakers. 
void* user_data; @@ -105,6 +108,16 @@ struct grpc_handshaker { void grpc_handshaker_init(const grpc_handshaker_vtable* vtable, grpc_handshaker* handshaker); +void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker); +void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker); +void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, + grpc_handshaker* handshaker, + grpc_tcp_server_acceptor* acceptor, + grpc_closure* on_handshake_done, + grpc_handshaker_args* args); + /// /// grpc_handshake_manager /// diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c index fd8b46afcb..1a2d08dda5 100644 --- a/src/core/lib/channel/http_client_filter.c +++ b/src/core/lib/channel/http_client_filter.c @@ -457,9 +457,9 @@ static grpc_mdstr *user_agent_from_args(const grpc_channel_args *args, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; GPR_ASSERT(!args->is_last); GPR_ASSERT(args->optional_transport != NULL); @@ -470,6 +470,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, GRPC_MDSTR_USER_AGENT, user_agent_from_args(args->channel_args, args->optional_transport->vtable->name)); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c index b42ff06039..a5134ee21b 100644 --- a/src/core/lib/channel/http_server_filter.c +++ b/src/core/lib/channel/http_server_filter.c @@ -350,10 +350,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(!args->is_last); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/channel/message_size_filter.c b/src/core/lib/channel/message_size_filter.c index 1cf68d790d..f05c789010 100644 --- a/src/core/lib/channel/message_size_filter.c +++ b/src/core/lib/channel/message_size_filter.c @@ -192,9 +192,9 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, void* ignored) {} // Constructor for channel_data. -static void init_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem, - grpc_channel_element_args* args) { +static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, + grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); channel_data* chand = elem->channel_data; memset(chand, 0, sizeof(*chand)); @@ -231,6 +231,7 @@ static void init_channel_elem(grpc_exec_ctx* exec_ctx, grpc_service_config_destroy(service_config); } } + return GRPC_ERROR_NONE; } // Destructor for channel_data. 
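The handshake-manager change above adds exit_early to grpc_handshaker_args: a handshaker can set it before invoking on_handshake_done, and call_next_handshaker_locked() then finishes the chain instead of calling the remaining handshakers, alongside the existing error, shutdown, and end-of-list conditions. A standalone sketch of that control flow (hs_args and handshake_fn are hypothetical, not the real grpc_handshaker vtable):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
  bool exit_early;   /* set by a handshaker to skip the rest of the chain */
} hs_args;

typedef int (*handshake_fn)(hs_args *args);  /* 0 means success */

static int hs_ok(hs_args *a)    { (void)a; puts("handshaker ran"); return 0; }
static int hs_short(hs_args *a) { puts("handshaker asked to exit early"); a->exit_early = true; return 0; }

/* Call handshakers in order; stop on error, shutdown, exit_early, or end of list. */
static int run_handshake(handshake_fn *fns, size_t count, bool shutdown) {
  hs_args args = {false};
  for (size_t i = 0; i < count; i++) {
    int err = fns[i](&args);
    if (err != 0 || shutdown || args.exit_early) {
      return err;  /* the real code invokes on_handshake_done here */
    }
  }
  return 0;
}

int main(void) {
  handshake_fn chain[] = {hs_ok, hs_short, hs_ok /* skipped because of exit_early */};
  return run_handshake(chain, 3, false);
}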
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c index fdb8abaa2d..1035f31109 100644 --- a/src/core/lib/http/httpcli.c +++ b/src/core/lib/http/httpcli.c @@ -278,6 +278,7 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx, grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent, req->context->pollset_set); grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port, + req->context->pollset_set, grpc_closure_create(on_resolved, req), &req->addresses); } diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c index 0ab34d00e4..14cdb1dab3 100644 --- a/src/core/lib/http/httpcli_security_connector.c +++ b/src/core/lib/http/httpcli_security_connector.c @@ -60,9 +60,9 @@ static void httpcli_ssl_destroy(grpc_security_connector *sc) { gpr_free(sc); } -static void httpcli_ssl_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { +static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_channel_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { grpc_httpcli_ssl_channel_security_connector *c = (grpc_httpcli_ssl_channel_security_connector *)sc; tsi_handshaker *handshaker = NULL; @@ -74,8 +74,9 @@ static void httpcli_ssl_create_handshakers( tsi_result_to_string(result)); } } - grpc_security_create_handshakers(exec_ctx, handshaker, &sc->base, - handshake_mgr); + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create(exec_ctx, handshaker, &sc->base)); } static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx, @@ -132,7 +133,7 @@ static grpc_security_status httpcli_ssl_channel_security_connector_create( *sc = NULL; return GRPC_SECURITY_ERROR; } - c->base.create_handshakers = httpcli_ssl_create_handshakers; + c->base.add_handshakers = httpcli_ssl_add_handshakers; *sc = &c->base; return GRPC_SECURITY_OK; } @@ -185,8 +186,8 @@ static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg, GPR_ASSERT(httpcli_ssl_channel_security_connector_create( pem_root_certs, pem_root_certs_size, host, &sc) == GRPC_SECURITY_OK); - grpc_channel_security_connector_create_handshakers(exec_ctx, sc, - c->handshake_mgr); + grpc_channel_security_connector_add_handshakers(exec_ctx, sc, + c->handshake_mgr); grpc_handshake_manager_do_handshake( exec_ctx, c->handshake_mgr, tcp, NULL /* channel_args */, deadline, NULL /* acceptor */, on_handshake_done, c /* user_data */); diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c index 07fbfd849e..1b15e0eb4f 100644 --- a/src/core/lib/iomgr/ev_epoll_linux.c +++ b/src/core/lib/iomgr/ev_epoll_linux.c @@ -69,6 +69,9 @@ static int grpc_polling_trace = 0; /* Disabled by default */ gpr_log(GPR_INFO, (fmt), __VA_ARGS__); \ } +/* Uncomment the following enable extra checks on poll_object operations */ +/* #define PO_DEBUG */ + static int grpc_wakeup_signal = -1; static bool is_grpc_wakeup_signal_initialized = false; @@ -95,10 +98,42 @@ void grpc_use_signal(int signum) { struct polling_island; +typedef enum { + POLL_OBJ_FD, + POLL_OBJ_POLLSET, + POLL_OBJ_POLLSET_SET +} poll_obj_type; + +typedef struct poll_obj { +#ifdef PO_DEBUG + poll_obj_type obj_type; +#endif + gpr_mu mu; + struct polling_island *pi; +} poll_obj; + +const char *poll_obj_string(poll_obj_type po_type) { + switch (po_type) { + case POLL_OBJ_FD: + return "fd"; + case POLL_OBJ_POLLSET: + return "pollset"; + case POLL_OBJ_POLLSET_SET: + return "pollset_set"; + } + + 
GPR_UNREACHABLE_CODE(return "UNKNOWN"); +} + /******************************************************************************* * Fd Declarations */ + +#define FD_FROM_PO(po) ((grpc_fd *)(po)) + struct grpc_fd { + poll_obj po; + int fd; /* refst format: bit 0 : 1=Active / 0=Orphaned @@ -106,8 +141,6 @@ struct grpc_fd { Ref/Unref by two to avoid altering the orphaned bit */ gpr_atm refst; - gpr_mu mu; - /* Indicates that the fd is shutdown and that any pending read/write closures should fail */ bool shutdown; @@ -120,9 +153,6 @@ struct grpc_fd { grpc_closure *read_closure; grpc_closure *write_closure; - /* The polling island to which this fd belongs to (protected by mu) */ - struct polling_island *polling_island; - struct grpc_fd *freelist_next; grpc_closure *on_done_closure; @@ -225,41 +255,21 @@ struct grpc_pollset_worker { }; struct grpc_pollset { - gpr_mu mu; + poll_obj po; + grpc_pollset_worker root_worker; bool kicked_without_pollers; bool shutting_down; /* Is the pollset shutting down ? */ bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */ grpc_closure *shutdown_done; /* Called after after shutdown is complete */ - - /* The polling island to which this pollset belongs to */ - struct polling_island *polling_island; }; /******************************************************************************* * Pollset-set Declarations */ -/* TODO: sreek - Change the pollset_set implementation such that a pollset_set - * directly points to a polling_island (and adding an fd/pollset/pollset_set to - * the current pollset_set would result in polling island merges. This would - * remove the need to maintain fd_count here. This will also significantly - * simplify the grpc_fd structure since we would no longer need to explicitly - * maintain the orphaned state */ struct grpc_pollset_set { - gpr_mu mu; - - size_t pollset_count; - size_t pollset_capacity; - grpc_pollset **pollsets; - - size_t pollset_set_count; - size_t pollset_set_capacity; - struct grpc_pollset_set **pollset_sets; - - size_t fd_count; - size_t fd_capacity; - grpc_fd **fds; + poll_obj po; }; /******************************************************************************* @@ -915,7 +925,7 @@ static void fd_global_shutdown(void) { while (fd_freelist != NULL) { grpc_fd *fd = fd_freelist; fd_freelist = fd_freelist->freelist_next; - gpr_mu_destroy(&fd->mu); + gpr_mu_destroy(&fd->po.mu); gpr_free(fd); } gpr_mu_destroy(&fd_freelist_mu); @@ -933,13 +943,17 @@ static grpc_fd *fd_create(int fd, const char *name) { if (new_fd == NULL) { new_fd = gpr_malloc(sizeof(grpc_fd)); - gpr_mu_init(&new_fd->mu); + gpr_mu_init(&new_fd->po.mu); } - /* Note: It is not really needed to get the new_fd->mu lock here. If this is a - newly created fd (or an fd we got from the freelist), no one else would be - holding a lock to it anyway. */ - gpr_mu_lock(&new_fd->mu); + /* Note: It is not really needed to get the new_fd->po.mu lock here. If this + * is a newly created fd (or an fd we got from the freelist), no one else + * would be holding a lock to it anyway. 
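The epoll engine refactor above folds the mutex and polling-island pointer that grpc_fd, grpc_pollset, and grpc_pollset_set each carried into a shared poll_obj placed as the first member of every struct, so one routine can operate on any of them and FD_FROM_PO() can recover the containing grpc_fd. Below is a standalone sketch of that first-member embedding trick with hypothetical names; it is legal C because a pointer to a struct and a pointer to its first member are interchangeable.

#include <stdio.h>

/* Common state shared by several "poll object" kinds. */
typedef struct poll_obj {
  int kind;          /* stands in for the PO_DEBUG obj_type field */
  void *pi;          /* stands in for the polling_island pointer */
} poll_obj;

/* Two distinct objects that embed poll_obj as their FIRST member. */
typedef struct {
  poll_obj po;
  int fd;
} my_fd;

typedef struct {
  poll_obj po;
  int worker_count;
} my_pollset;

#define FD_FROM_PO(po_ptr) ((my_fd *)(po_ptr))   /* valid only when kind says "fd" */

static void describe(poll_obj *po) {
  if (po->kind == 0) {
    printf("fd object, fd=%d\n", FD_FROM_PO(po)->fd);
  } else {
    printf("pollset object\n");
  }
}

int main(void) {
  my_fd f = {{0, NULL}, 42};
  my_pollset p = {{1, NULL}, 0};
  describe(&f.po);
  describe(&p.po);
  return 0;
}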
*/ + gpr_mu_lock(&new_fd->po.mu); + new_fd->po.pi = NULL; +#ifdef PO_DEBUG + new_fd->po.obj_type = POLL_OBJ_FD; +#endif gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); new_fd->fd = fd; @@ -947,12 +961,11 @@ static grpc_fd *fd_create(int fd, const char *name) { new_fd->orphaned = false; new_fd->read_closure = CLOSURE_NOT_READY; new_fd->write_closure = CLOSURE_NOT_READY; - new_fd->polling_island = NULL; new_fd->freelist_next = NULL; new_fd->on_done_closure = NULL; new_fd->read_notifier_pollset = NULL; - gpr_mu_unlock(&new_fd->mu); + gpr_mu_unlock(&new_fd->po.mu); char *fd_name; gpr_asprintf(&fd_name, "%s fd=%d", name, fd); @@ -964,17 +977,13 @@ static grpc_fd *fd_create(int fd, const char *name) { return new_fd; } -static bool fd_is_orphaned(grpc_fd *fd) { - return (gpr_atm_acq_load(&fd->refst) & 1) == 0; -} - static int fd_wrapped_fd(grpc_fd *fd) { int ret_fd = -1; - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); if (!fd->orphaned) { ret_fd = fd->fd; } - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); return ret_fd; } @@ -986,7 +995,7 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *error = GRPC_ERROR_NONE; polling_island *unref_pi = NULL; - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); fd->on_done_closure = on_done; /* If release_fd is not NULL, we should be relinquishing control of the file @@ -1006,25 +1015,25 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, /* Remove the fd from the polling island: - Get a lock on the latest polling island (i.e the last island in the - linked list pointed by fd->polling_island). This is the island that + linked list pointed by fd->po.pi). This is the island that would actually contain the fd - Remove the fd from the latest polling island - Unlock the latest polling island - - Set fd->polling_island to NULL (but remove the ref on the polling island + - Set fd->po.pi to NULL (but remove the ref on the polling island before doing this.) */ - if (fd->polling_island != NULL) { - polling_island *pi_latest = polling_island_lock(fd->polling_island); + if (fd->po.pi != NULL) { + polling_island *pi_latest = polling_island_lock(fd->po.pi); polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error); gpr_mu_unlock(&pi_latest->mu); - unref_pi = fd->polling_island; - fd->polling_island = NULL; + unref_pi = fd->po.pi; + fd->po.pi = NULL; } grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error), NULL); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); UNREF_BY(fd, 2, reason); /* Drop the reference */ if (unref_pi != NULL) { /* Unref stale polling island here, outside the fd lock above. 
@@ -1089,23 +1098,23 @@ static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { grpc_pollset *notifier = NULL; - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); notifier = fd->read_notifier_pollset; - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); return notifier; } static bool fd_is_shutdown(grpc_fd *fd) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); const bool r = fd->shutdown; - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); return r; } /* Might be called multiple times */ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); /* Do the actual shutdown only once */ if (!fd->shutdown) { fd->shutdown = true; @@ -1116,28 +1125,28 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { set_ready_locked(exec_ctx, fd, &fd->read_closure); set_ready_locked(exec_ctx, fd, &fd->write_closure); } - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); notify_on_locked(exec_ctx, fd, &fd->read_closure, closure); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - gpr_mu_lock(&fd->mu); + gpr_mu_lock(&fd->po.mu); notify_on_locked(exec_ctx, fd, &fd->write_closure, closure); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { - gpr_mu_lock(&fd->mu); - grpc_workqueue *workqueue = GRPC_WORKQUEUE_REF( - (grpc_workqueue *)fd->polling_island, "fd_get_workqueue"); - gpr_mu_unlock(&fd->mu); + gpr_mu_lock(&fd->po.mu); + grpc_workqueue *workqueue = + GRPC_WORKQUEUE_REF((grpc_workqueue *)fd->po.pi, "fd_get_workqueue"); + gpr_mu_unlock(&fd->po.mu); return workqueue; } @@ -1277,8 +1286,12 @@ static grpc_error *kick_poller(void) { } static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - gpr_mu_init(&pollset->mu); - *mu = &pollset->mu; + gpr_mu_init(&pollset->po.mu); + *mu = &pollset->po.mu; + pollset->po.pi = NULL; +#ifdef PO_DEBUG + pollset->po.obj_type = POLL_OBJ_POLLSET; +#endif pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker; pollset->kicked_without_pollers = false; @@ -1286,8 +1299,6 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->shutting_down = false; pollset->finish_shutdown_called = false; pollset->shutdown_done = NULL; - - pollset->polling_island = NULL; } /* Convert a timespec to milliseconds: @@ -1317,26 +1328,26 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { - /* Need the fd->mu since we might be racing with fd_notify_on_read */ - gpr_mu_lock(&fd->mu); + /* Need the fd->po.mu since we might be racing with fd_notify_on_read */ + gpr_mu_lock(&fd->po.mu); set_ready_locked(exec_ctx, fd, &fd->read_closure); fd->read_notifier_pollset = notifier; - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - /* Need the fd->mu since we might be racing with fd_notify_on_write */ - gpr_mu_lock(&fd->mu); + /* Need the fd->po.mu since we might be racing with fd_notify_on_write */ + gpr_mu_lock(&fd->po.mu); set_ready_locked(exec_ctx, fd, &fd->write_closure); - gpr_mu_unlock(&fd->mu); + gpr_mu_unlock(&fd->po.mu); } static void 
pollset_release_polling_island(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, char *reason) { - if (ps->polling_island != NULL) { - PI_UNREF(exec_ctx, ps->polling_island, reason); + if (ps->po.pi != NULL) { + PI_UNREF(exec_ctx, ps->po.pi, reason); } - ps->polling_island = NULL; + ps->po.pi = NULL; } static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, @@ -1346,12 +1357,12 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, pollset->finish_shutdown_called = true; - /* Release the ref and set pollset->polling_island to NULL */ + /* Release the ref and set pollset->po.pi to NULL */ pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown"); grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL); } -/* pollset->mu lock must be held by the caller before calling this */ +/* pollset->po.mu lock must be held by the caller before calling this */ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure) { GPR_TIMER_BEGIN("pollset_shutdown", 0); @@ -1376,7 +1387,7 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, * here */ static void pollset_destroy(grpc_pollset *pollset) { GPR_ASSERT(!pollset_has_workers(pollset)); - gpr_mu_destroy(&pollset->mu); + gpr_mu_destroy(&pollset->po.mu); } static void pollset_reset(grpc_pollset *pollset) { @@ -1386,7 +1397,7 @@ static void pollset_reset(grpc_pollset *pollset) { pollset->finish_shutdown_called = false; pollset->kicked_without_pollers = false; pollset->shutdown_done = NULL; - GPR_ASSERT(pollset->polling_island == NULL); + GPR_ASSERT(pollset->po.pi == NULL); } static bool maybe_do_workqueue_work(grpc_exec_ctx *exec_ctx, @@ -1426,7 +1437,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, GPR_TIMER_BEGIN("pollset_work_and_unlock", 0); /* We need to get the epoll_fd to wait on. The epoll_fd is in inside the - latest polling island pointed by pollset->polling_island. + latest polling island pointed by pollset->po.pi Since epoll_fd is immutable, we can read it without obtaining the polling island lock. There is however a possibility that the polling island (from @@ -1435,36 +1446,36 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, right-away from epoll_wait() and pick up the latest polling_island the next this function (i.e pollset_work_and_unlock()) is called */ - if (pollset->polling_island == NULL) { - pollset->polling_island = polling_island_create(exec_ctx, NULL, error); - if (pollset->polling_island == NULL) { + if (pollset->po.pi == NULL) { + pollset->po.pi = polling_island_create(exec_ctx, NULL, error); + if (pollset->po.pi == NULL) { GPR_TIMER_END("pollset_work_and_unlock", 0); return; /* Fatal error. 
We cannot continue */ } - PI_ADD_REF(pollset->polling_island, "ps"); + PI_ADD_REF(pollset->po.pi, "ps"); GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p", - (void *)pollset, (void *)pollset->polling_island); + (void *)pollset, (void *)pollset->po.pi); } - pi = polling_island_maybe_get_latest(pollset->polling_island); + pi = polling_island_maybe_get_latest(pollset->po.pi); epoll_fd = pi->epoll_fd; - /* Update the pollset->polling_island since the island being pointed by - pollset->polling_island maybe older than the one pointed by pi) */ - if (pollset->polling_island != pi) { + /* Update the pollset->po.pi since the island being pointed by + pollset->po.pi maybe older than the one pointed by pi) */ + if (pollset->po.pi != pi) { /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the polling island to be deleted */ PI_ADD_REF(pi, "ps"); - PI_UNREF(exec_ctx, pollset->polling_island, "ps"); - pollset->polling_island = pi; + PI_UNREF(exec_ctx, pollset->po.pi, "ps"); + pollset->po.pi = pi; } /* Add an extra ref so that the island does not get destroyed (which means the epoll_fd won't be closed) while we are are doing an epoll_wait() on the epoll_fd */ PI_ADD_REF(pi, "ps_work"); - gpr_mu_unlock(&pollset->mu); + gpr_mu_unlock(&pollset->po.mu); /* If we get some workqueue work to do, it might end up completing an item on the completion queue, so there's no need to poll... so we skip that and @@ -1537,17 +1548,17 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, GPR_ASSERT(pi != NULL); /* Before leaving, release the extra ref we added to the polling island. It - is important to use "pi" here (i.e our old copy of pollset->polling_island + is important to use "pi" here (i.e our old copy of pollset->po.pi that we got before releasing the polling island lock). This is because - pollset->polling_island pointer might get udpated in other parts of the + pollset->po.pi pointer might get udpated in other parts of the code when there is an island merge while we are doing epoll_wait() above */ PI_UNREF(exec_ctx, pi, "ps_work"); GPR_TIMER_END("pollset_work_and_unlock", 0); } -/* pollset->mu lock must be held by the caller before calling this. - The function pollset_work() may temporarily release the lock (pollset->mu) +/* pollset->po.mu lock must be held by the caller before calling this. 
+ The function pollset_work() may temporarily release the lock (pollset->po.mu) during the course of its execution but it will always re-acquire the lock and ensure that it is held by the time the function returns */ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, @@ -1617,7 +1628,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, &g_orig_sigmask, &error); grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->mu); + gpr_mu_lock(&pollset->po.mu); /* Note: There is no need to reset worker.is_kicked to 0 since we are no longer going to use this worker */ @@ -1637,9 +1648,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0); finish_shutdown_locked(exec_ctx, pollset); - gpr_mu_unlock(&pollset->mu); + gpr_mu_unlock(&pollset->po.mu); grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->mu); + gpr_mu_lock(&pollset->po.mu); } *worker_hdl = NULL; @@ -1653,130 +1664,160 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, return error; } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { - GPR_TIMER_BEGIN("pollset_add_fd", 0); - - grpc_error *error = GRPC_ERROR_NONE; +static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, + poll_obj_type bag_type, poll_obj *item, + poll_obj_type item_type) { + GPR_TIMER_BEGIN("add_poll_object", 0); - gpr_mu_lock(&pollset->mu); - gpr_mu_lock(&fd->mu); +#ifdef PO_DEBUG + GPR_ASSERT(item->obj_type == item_type); + GPR_ASSERT(bag->obj_type == bag_type); +#endif + grpc_error *error = GRPC_ERROR_NONE; polling_island *pi_new = NULL; + gpr_mu_lock(&bag->mu); + gpr_mu_lock(&item->mu); + retry: - /* 1) If fd->polling_island and pollset->polling_island are both non-NULL and - * equal, do nothing. - * 2) If fd->polling_island and pollset->polling_island are both NULL, create - * a new polling island (with a refcount of 2) and make the polling_island - * fields in both fd and pollset to point to the new island - * 3) If one of fd->polling_island or pollset->polling_island is NULL, update - * the NULL polling_island field to point to the non-NULL polling_island - * field (ensure that the refcount on the polling island is incremented by - * 1 to account for the newly added reference) - * 4) Finally, if fd->polling_island and pollset->polling_island are non-NULL - * and different, merge both the polling islands and update the - * polling_island fields in both fd and pollset to point to the merged - * polling island. 
+ /* + * 1) If item->pi and bag->pi are both non-NULL and equal, do nothing + * 2) If item->pi and bag->pi are both NULL, create a new polling island (with + * a refcount of 2) and point item->pi and bag->pi to the new island + * 3) If exactly one of item->pi or bag->pi is NULL, update it to point to + * the other's non-NULL pi + * 4) Finally if item->pi and bag-pi are non-NULL and not-equal, merge the + * polling islands and update item->pi and bag->pi to point to the new + * island */ - if (fd->orphaned) { - gpr_mu_unlock(&fd->mu); - gpr_mu_unlock(&pollset->mu); - /* early out */ + /* Early out if we are trying to add an 'fd' to a 'bag' but the fd is already + * orphaned */ + if (item_type == POLL_OBJ_FD && (FD_FROM_PO(item))->orphaned) { + gpr_mu_unlock(&item->mu); + gpr_mu_unlock(&bag->mu); return; } - if (fd->polling_island == pollset->polling_island) { - pi_new = fd->polling_island; + if (item->pi == bag->pi) { + pi_new = item->pi; if (pi_new == NULL) { - /* Unlock before creating a new polling island: the polling island will - create a workqueue which creates a file descriptor, and holding an fd - lock here can eventually cause a loop to appear to TSAN (making it - unhappy). We don't think it's a real loop (there's an epoch point where - that loop possibility disappears), but the advantages of keeping TSAN - happy outweigh any performance advantage we might have by keeping the - lock held. */ - gpr_mu_unlock(&fd->mu); - pi_new = polling_island_create(exec_ctx, fd, &error); - gpr_mu_lock(&fd->mu); - /* Need to reverify any assumptions made between the initial lock and - getting to this branch: if they've changed, we need to throw away our - work and figure things out again. */ - if (fd->polling_island != NULL) { - GRPC_POLLING_TRACE( - "pollset_add_fd: Raced creating new polling island. pi_new: %p " - "(fd: %d, pollset: %p)", - (void *)pi_new, fd->fd, (void *)pollset); - - /* No need to lock 'pi_new' here since this is a new polling island and - * no one has a reference to it yet */ - polling_island_remove_all_fds_locked(pi_new, true, &error); - - /* Ref and unref so that the polling island gets deleted during unref */ - PI_ADD_REF(pi_new, "dance_of_destruction"); - PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); - goto retry; + /* GPR_ASSERT(item->pi == bag->pi == NULL) */ + + /* If we are adding an fd to a bag (i.e pollset or pollset_set), then + * we need to do some extra work to make TSAN happy */ + if (item_type == POLL_OBJ_FD) { + /* Unlock before creating a new polling island: the polling island will + create a workqueue which creates a file descriptor, and holding an fd + lock here can eventually cause a loop to appear to TSAN (making it + unhappy). We don't think it's a real loop (there's an epoch point + where that loop possibility disappears), but the advantages of + keeping TSAN happy outweigh any performance advantage we might have + by keeping the lock held. */ + gpr_mu_unlock(&item->mu); + pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error); + gpr_mu_lock(&item->mu); + + /* Need to reverify any assumptions made between the initial lock and + getting to this branch: if they've changed, we need to throw away our + work and figure things out again. */ + if (item->pi != NULL) { + GRPC_POLLING_TRACE( + "add_poll_object: Raced creating new polling island. 
pi_new: %p " + "(fd: %d, %s: %p)", + (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type), + (void *)bag); + /* No need to lock 'pi_new' here since this is a new polling island + * and no one has a reference to it yet */ + polling_island_remove_all_fds_locked(pi_new, true, &error); + + /* Ref and unref so that the polling island gets deleted during unref + */ + PI_ADD_REF(pi_new, "dance_of_destruction"); + PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); + goto retry; + } } else { - GRPC_POLLING_TRACE( - "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, " - "pollset: %p)", - (void *)pi_new, fd->fd, (void *)pollset); + pi_new = polling_island_create(exec_ctx, NULL, &error); } + + GRPC_POLLING_TRACE( + "add_poll_object: Created new polling island. pi_new: %p (%s: %p, " + "%s: %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); + } else { + GRPC_POLLING_TRACE( + "add_poll_object: Same polling island. pi: %p (%s, %s)", + (void *)pi_new, poll_obj_string(item_type), + poll_obj_string(bag_type)); + } + } else if (item->pi == NULL) { + /* GPR_ASSERT(bag->pi != NULL) */ + /* Make pi_new point to latest pi*/ + pi_new = polling_island_lock(bag->pi); + + if (item_type == POLL_OBJ_FD) { + grpc_fd *fd = FD_FROM_PO(item); + polling_island_add_fds_locked(pi_new, &fd, 1, true, &error); } - } else if (fd->polling_island == NULL) { - pi_new = polling_island_lock(pollset->polling_island); - polling_island_add_fds_locked(pi_new, &fd, 1, true, &error); - gpr_mu_unlock(&pi_new->mu); + gpr_mu_unlock(&pi_new->mu); GRPC_POLLING_TRACE( - "pollset_add_fd: fd->pi was NULL. pi_new: %p (fd: %d, pollset: %p, " - "pollset->pi: %p)", - (void *)pi_new, fd->fd, (void *)pollset, - (void *)pollset->polling_island); - } else if (pollset->polling_island == NULL) { - pi_new = polling_island_lock(fd->polling_island); + "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, " + "bag(%s): %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); + } else if (bag->pi == NULL) { + /* GPR_ASSERT(item->pi != NULL) */ + /* Make pi_new to point to latest pi */ + pi_new = polling_island_lock(item->pi); gpr_mu_unlock(&pi_new->mu); - GRPC_POLLING_TRACE( - "pollset_add_fd: pollset->pi was NULL. pi_new: %p (fd: %d, pollset: " - "%p, fd->pi: %p", - (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island); + "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, " + "bag(%s): %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); } else { - pi_new = polling_island_merge(fd->polling_island, pollset->polling_island, - &error); + pi_new = polling_island_merge(item->pi, bag->pi, &error); GRPC_POLLING_TRACE( - "pollset_add_fd: polling islands merged. pi_new: %p (fd: %d, pollset: " - "%p, fd->pi: %p, pollset->pi: %p)", - (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island, - (void *)pollset->polling_island); + "add_poll_obj: polling islands merged. 
pi_new: %p (item(%s): %p, " + "bag(%s): %p)", + (void *)pi_new, poll_obj_string(item_type), (void *)item, + poll_obj_string(bag_type), (void *)bag); } - /* At this point, pi_new is the polling island that both fd->polling_island - and pollset->polling_island must be pointing to */ + /* At this point, pi_new is the polling island that both item->pi and bag->pi + MUST be pointing to */ - if (fd->polling_island != pi_new) { - PI_ADD_REF(pi_new, "fd"); - if (fd->polling_island != NULL) { - PI_UNREF(exec_ctx, fd->polling_island, "fd"); + if (item->pi != pi_new) { + PI_ADD_REF(pi_new, poll_obj_string(item_type)); + if (item->pi != NULL) { + PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type)); } - fd->polling_island = pi_new; + item->pi = pi_new; } - if (pollset->polling_island != pi_new) { - PI_ADD_REF(pi_new, "ps"); - if (pollset->polling_island != NULL) { - PI_UNREF(exec_ctx, pollset->polling_island, "ps"); + if (bag->pi != pi_new) { + PI_ADD_REF(pi_new, poll_obj_string(bag_type)); + if (bag->pi != NULL) { + PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type)); } - pollset->polling_island = pi_new; + bag->pi = pi_new; } - gpr_mu_unlock(&fd->mu); - gpr_mu_unlock(&pollset->mu); + gpr_mu_unlock(&item->mu); + gpr_mu_unlock(&bag->mu); - GRPC_LOG_IF_ERROR("pollset_add_fd", error); + GRPC_LOG_IF_ERROR("add_poll_object", error); + GPR_TIMER_END("add_poll_object", 0); +} - GPR_TIMER_END("pollset_add_fd", 0); +static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, + grpc_fd *fd) { + add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po, + POLL_OBJ_FD); } /******************************************************************************* @@ -1784,142 +1825,60 @@ retry: */ static grpc_pollset_set *pollset_set_create(void) { - grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set)); - memset(pollset_set, 0, sizeof(*pollset_set)); - gpr_mu_init(&pollset_set->mu); - return pollset_set; + grpc_pollset_set *pss = gpr_malloc(sizeof(*pss)); + gpr_mu_init(&pss->po.mu); + pss->po.pi = NULL; +#ifdef PO_DEBUG + pss->po.obj_type = POLL_OBJ_POLLSET_SET; +#endif + return pss; } -static void pollset_set_destroy(grpc_pollset_set *pollset_set) { - size_t i; - gpr_mu_destroy(&pollset_set->mu); - for (i = 0; i < pollset_set->fd_count; i++) { - GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); +static void pollset_set_destroy(grpc_pollset_set *pss) { + gpr_mu_destroy(&pss->po.mu); + + if (pss->po.pi != NULL) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + PI_UNREF(&exec_ctx, pss->po.pi, "pss_destroy"); + grpc_exec_ctx_finish(&exec_ctx); } - gpr_free(pollset_set->pollsets); - gpr_free(pollset_set->pollset_sets); - gpr_free(pollset_set->fds); - gpr_free(pollset_set); + + gpr_free(pss); } -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - size_t i; - gpr_mu_lock(&pollset_set->mu); - if (pollset_set->fd_count == pollset_set->fd_capacity) { - pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity); - pollset_set->fds = gpr_realloc( - pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds)); - } - GRPC_FD_REF(fd, "pollset_set"); - pollset_set->fds[pollset_set->fd_count++] = fd; - for (i = 0; i < pollset_set->pollset_count; i++) { - pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd); - } - for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd); - } - gpr_mu_unlock(&pollset_set->mu); +static void pollset_set_add_fd(grpc_exec_ctx 
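add_poll_object() above replaces the separate pollset_add_fd and pollset_set bodies with one routine that reconciles two polling-island pointers: both NULL creates a fresh island, exactly one NULL adopts the other's island, both equal is a no-op, and both different merges them. The sketch below shows only that decision logic over a refcounted island; it is a standalone simplification with hypothetical types, and the locking, fd registration, and retry path of the real code are omitted.

#include <stdio.h>
#include <stdlib.h>

typedef struct island { int refs; } island;
typedef struct { island *pi; } poll_obj;

static island *island_create(void) {
  island *pi = malloc(sizeof(*pi));
  pi->refs = 0;
  return pi;
}
static void island_ref(island *pi)   { pi->refs++; }
static void island_unref(island *pi) { if (--pi->refs == 0) free(pi); }

/* The real code moves fds from one island to the other; here we just pick one. */
static island *island_merge(island *a, island *b) { (void)b; return a; }

/* Decide which island both objects should end up pointing at. */
static void add_poll_object(poll_obj *bag, poll_obj *item) {
  island *pi_new;
  if (item->pi == bag->pi) {
    pi_new = (item->pi != NULL) ? item->pi : island_create();  /* same island, or both NULL */
  } else if (item->pi == NULL) {
    pi_new = bag->pi;                                          /* item adopts bag's island */
  } else if (bag->pi == NULL) {
    pi_new = item->pi;                                         /* bag adopts item's island */
  } else {
    pi_new = island_merge(item->pi, bag->pi);                  /* both set and different: merge */
  }
  /* Point both objects at pi_new, moving refs as the original does. */
  if (item->pi != pi_new) {
    island_ref(pi_new);
    if (item->pi != NULL) island_unref(item->pi);
    item->pi = pi_new;
  }
  if (bag->pi != pi_new) {
    island_ref(pi_new);
    if (bag->pi != NULL) island_unref(bag->pi);
    bag->pi = pi_new;
  }
}

int main(void) {
  poll_obj fd = {NULL}, pollset = {NULL};
  add_poll_object(&pollset, &fd);              /* both NULL: new island with two refs */
  printf("shared island refs: %d\n", fd.pi->refs);
  island_unref(fd.pi);
  island_unref(pollset.pi);
  return 0;
}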
*exec_ctx, grpc_pollset_set *pss, + grpc_fd *fd) { + add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po, + POLL_OBJ_FD); } -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - size_t i; - gpr_mu_lock(&pollset_set->mu); - for (i = 0; i < pollset_set->fd_count; i++) { - if (pollset_set->fds[i] == fd) { - pollset_set->fd_count--; - GPR_SWAP(grpc_fd *, pollset_set->fds[i], - pollset_set->fds[pollset_set->fd_count]); - GRPC_FD_UNREF(fd, "pollset_set"); - break; - } - } - for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd); - } - gpr_mu_unlock(&pollset_set->mu); +static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, + grpc_fd *fd) { + /* Nothing to do */ } static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { - size_t i, j; - gpr_mu_lock(&pollset_set->mu); - if (pollset_set->pollset_count == pollset_set->pollset_capacity) { - pollset_set->pollset_capacity = - GPR_MAX(8, 2 * pollset_set->pollset_capacity); - pollset_set->pollsets = - gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity * - sizeof(*pollset_set->pollsets)); - } - pollset_set->pollsets[pollset_set->pollset_count++] = pollset; - for (i = 0, j = 0; i < pollset_set->fd_count; i++) { - if (fd_is_orphaned(pollset_set->fds[i])) { - GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); - } else { - pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]); - pollset_set->fds[j++] = pollset_set->fds[i]; - } - } - pollset_set->fd_count = j; - gpr_mu_unlock(&pollset_set->mu); + grpc_pollset_set *pss, grpc_pollset *ps) { + add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po, + POLL_OBJ_POLLSET); } static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { - size_t i; - gpr_mu_lock(&pollset_set->mu); - for (i = 0; i < pollset_set->pollset_count; i++) { - if (pollset_set->pollsets[i] == pollset) { - pollset_set->pollset_count--; - GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i], - pollset_set->pollsets[pollset_set->pollset_count]); - break; - } - } - gpr_mu_unlock(&pollset_set->mu); + grpc_pollset_set *pss, grpc_pollset *ps) { + /* Nothing to do */ } static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, grpc_pollset_set *bag, grpc_pollset_set *item) { - size_t i, j; - gpr_mu_lock(&bag->mu); - if (bag->pollset_set_count == bag->pollset_set_capacity) { - bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity); - bag->pollset_sets = - gpr_realloc(bag->pollset_sets, - bag->pollset_set_capacity * sizeof(*bag->pollset_sets)); - } - bag->pollset_sets[bag->pollset_set_count++] = item; - for (i = 0, j = 0; i < bag->fd_count; i++) { - if (fd_is_orphaned(bag->fds[i])) { - GRPC_FD_UNREF(bag->fds[i], "pollset_set"); - } else { - pollset_set_add_fd(exec_ctx, item, bag->fds[i]); - bag->fds[j++] = bag->fds[i]; - } - } - bag->fd_count = j; - gpr_mu_unlock(&bag->mu); + add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po, + POLL_OBJ_POLLSET_SET); } static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, grpc_pollset_set *bag, grpc_pollset_set *item) { - size_t i; - gpr_mu_lock(&bag->mu); - for (i = 0; i < bag->pollset_set_count; i++) { - if (bag->pollset_sets[i] == item) { - bag->pollset_set_count--; - GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i], - bag->pollset_sets[bag->pollset_set_count]); - break; - 
} - } - gpr_mu_unlock(&bag->mu); + /* Nothing to do */ } /* Test helper functions @@ -1927,9 +1886,9 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, void *grpc_fd_get_polling_island(grpc_fd *fd) { polling_island *pi; - gpr_mu_lock(&fd->mu); - pi = fd->polling_island; - gpr_mu_unlock(&fd->mu); + gpr_mu_lock(&fd->po.mu); + pi = fd->po.pi; + gpr_mu_unlock(&fd->po.mu); return pi; } @@ -1937,9 +1896,9 @@ void *grpc_fd_get_polling_island(grpc_fd *fd) { void *grpc_pollset_get_polling_island(grpc_pollset *ps) { polling_island *pi; - gpr_mu_lock(&ps->mu); - pi = ps->polling_island; - gpr_mu_unlock(&ps->mu); + gpr_mu_lock(&ps->po.mu); + pi = ps->po.pi; + gpr_mu_unlock(&ps->po.mu); return pi; } diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h index 275924448a..e03d16fa4e 100644 --- a/src/core/lib/iomgr/resolve_address.h +++ b/src/core/lib/iomgr/resolve_address.h @@ -36,6 +36,7 @@ #include <stddef.h> #include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/pollset_set.h" #define GRPC_MAX_SOCKADDR_SIZE 128 @@ -54,6 +55,7 @@ typedef struct { /* TODO(ctiller): add a timeout here */ extern void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *addr, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addresses); /* Destroy resolved addresses */ diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c index de791b2b67..821932e562 100644 --- a/src/core/lib/iomgr/resolve_address_posix.c +++ b/src/core/lib/iomgr/resolve_address_posix.c @@ -181,6 +181,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) { request *r = gpr_malloc(sizeof(request)); @@ -192,9 +193,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE); } -void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = - resolve_address_impl; +void (*grpc_resolve_address)( + grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, + grpc_resolved_addresses **addrs) = resolve_address_impl; #endif diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c index b8295acfa1..3269c4f09f 100644 --- a/src/core/lib/iomgr/resolve_address_uv.c +++ b/src/core/lib/iomgr/resolve_address_uv.c @@ -181,6 +181,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) { uv_getaddrinfo_t *req; @@ -223,9 +224,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, } } -void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = - resolve_address_impl; +void (*grpc_resolve_address)( + grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, + grpc_resolved_addresses 
**addrs) = resolve_address_impl; #endif /* GRPC_UV */ diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c index e139293c03..fada5ecbe8 100644 --- a/src/core/lib/iomgr/resolve_address_windows.c +++ b/src/core/lib/iomgr/resolve_address_windows.c @@ -169,6 +169,7 @@ void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addresses) { request *r = gpr_malloc(sizeof(request)); @@ -180,9 +181,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE); } -void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, grpc_closure *on_done, - grpc_resolved_addresses **addresses) = - resolve_address_impl; +void (*grpc_resolve_address)( + grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, + grpc_pollset_set *interested_parties, grpc_closure *on_done, + grpc_resolved_addresses **addresses) = resolve_address_impl; #endif diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c index 12a4797e6f..540305e4fa 100644 --- a/src/core/lib/iomgr/tcp_posix.c +++ b/src/core/lib/iomgr/tcp_posix.c @@ -107,6 +107,12 @@ typedef struct { grpc_resource_user_slice_allocator slice_allocator; } grpc_tcp; +static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) { + return grpc_error_set_str( + grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd), + GRPC_ERROR_STR_TARGET_ADDRESS, tcp->peer_string); +} + static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, grpc_error *error); static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, @@ -230,13 +236,15 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure); } else { grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer); - call_read_cb(exec_ctx, tcp, GRPC_OS_ERROR(errno, "recvmsg")); + call_read_cb(exec_ctx, tcp, + tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp)); TCP_UNREF(exec_ctx, tcp, "read"); } } else if (read_bytes == 0) { /* 0 read size ==> end of stream */ grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_CREATE("Socket closed")); + call_read_cb(exec_ctx, tcp, + tcp_annotate_error(GRPC_ERROR_CREATE("Socket closed"), tcp)); TCP_UNREF(exec_ctx, tcp, "read"); } else { GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); @@ -365,8 +373,13 @@ static bool tcp_flush(grpc_tcp *tcp, grpc_error **error) { tcp->outgoing_slice_idx = unwind_slice_idx; tcp->outgoing_byte_idx = unwind_byte_idx; return false; + } else if (errno == EPIPE) { + *error = grpc_error_set_int(GRPC_OS_ERROR(errno, "sendmsg"), + GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_UNAVAILABLE); + return true; } else { - *error = GRPC_OS_ERROR(errno, "sendmsg"); + *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp); return true; } } @@ -447,9 +460,10 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (buf->length == 0) { GPR_TIMER_END("tcp_write", 0); - grpc_exec_ctx_sched(exec_ctx, cb, grpc_fd_is_shutdown(tcp->em_fd) - ? GRPC_ERROR_CREATE("EOF") - : GRPC_ERROR_NONE, + grpc_exec_ctx_sched(exec_ctx, cb, + grpc_fd_is_shutdown(tcp->em_fd) + ? 
tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp) + : GRPC_ERROR_NONE, NULL); return; } diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h index 6eba8c4057..437a94beff 100644 --- a/src/core/lib/iomgr/tcp_server.h +++ b/src/core/lib/iomgr/tcp_server.h @@ -52,7 +52,8 @@ typedef struct grpc_tcp_server_acceptor { unsigned fd_index; } grpc_tcp_server_acceptor; -/* Called for newly connected TCP connections. */ +/* Called for newly connected TCP connections. + Takes ownership of acceptor. */ typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *ep, grpc_pollset *accepting_pollset, diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c index 7e2fb0f1f9..179f47ef76 100644 --- a/src/core/lib/iomgr/tcp_server_posix.c +++ b/src/core/lib/iomgr/tcp_server_posix.c @@ -381,16 +381,12 @@ error: /* event manager callback when reads are ready */ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { grpc_tcp_listener *sp = arg; - grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, - sp->fd_index}; - grpc_pollset *read_notifier_pollset = NULL; - grpc_fd *fdobj; if (err != GRPC_ERROR_NONE) { goto error; } - read_notifier_pollset = + grpc_pollset *read_notifier_pollset = sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add( &sp->server->next_pollset_to_assign, 1) % sp->server->pollset_count]; @@ -426,7 +422,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str); } - fdobj = grpc_fd_create(fd, name); + grpc_fd *fdobj = grpc_fd_create(fd, name); if (read_notifier_pollset == NULL) { gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd"); @@ -435,11 +431,17 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj); + // Create acceptor. + grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = sp->fd_index; + sp->server->on_accept_cb( exec_ctx, sp->server->on_accept_cb_arg, grpc_tcp_create(fdobj, sp->server->resource_quota, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str), - read_notifier_pollset, &acceptor); + read_notifier_pollset, acceptor); gpr_free(name); gpr_free(addr_str); diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c index b5b9b92a20..e1a174cfa2 100644 --- a/src/core/lib/iomgr/tcp_server_uv.c +++ b/src/core/lib/iomgr/tcp_server_uv.c @@ -188,7 +188,6 @@ static void accepted_connection_close_cb(uv_handle_t *handle) { static void on_connect(uv_stream_t *server, int status) { grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data; - grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0}; uv_tcp_t *client; grpc_endpoint *ep = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; @@ -201,6 +200,7 @@ static void on_connect(uv_stream_t *server, int status) { uv_strerror(status)); return; } + client = gpr_malloc(sizeof(uv_tcp_t)); uv_tcp_init(uv_default_loop(), client); // UV documentation says this is guaranteed to succeed @@ -220,8 +220,13 @@ static void on_connect(uv_stream_t *server, int status) { gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status)); } ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string); + // Create acceptor. 
+ grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = 0; sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - &acceptor); + acceptor); grpc_exec_ctx_finish(&exec_ctx); } } diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c index b8a391c059..b0c8586bac 100644 --- a/src/core/lib/iomgr/tcp_server_windows.c +++ b/src/core/lib/iomgr/tcp_server_windows.c @@ -323,7 +323,6 @@ failure: /* Event manager callback when reads are ready. */ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { grpc_tcp_listener *sp = arg; - grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0}; SOCKET sock = sp->new_socket; grpc_winsocket_callback_info *info = &sp->socket->read_info; grpc_endpoint *ep = NULL; @@ -396,8 +395,13 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { /* The only time we should call our callback, is where we successfully managed to accept a connection, and created an endpoint. */ if (ep) { + // Create acceptor. + grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = 0; sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - &acceptor); + acceptor); } /* As we were notified from the IOCP of one and exactly one accept, the former socket we created has now either been destroyed or assigned diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c index 42bd89dd0a..03097a57c0 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.c +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c @@ -39,6 +39,7 @@ #include "src/core/lib/http/httpcli.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/security/util/b64.h" +#include "src/core/lib/support/string.h" #include "src/core/lib/tsi/ssl_types.h" #include <grpc/support/alloc.h> @@ -305,6 +306,17 @@ grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, return GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE; } + /* This should probably be up to the upper layer to decide, but let's hardcode + the 99% use case here for email issuers, where the JWT must be self + issued. */ + if (grpc_jwt_issuer_email_domain(claims->iss) != NULL && + claims->sub != NULL && strcmp(claims->iss, claims->sub) != 0) { + gpr_log(GPR_ERROR, + "Email issuer (%s) cannot assert another subject (%s) than itself.", + claims->iss, claims->sub); + return GRPC_JWT_VERIFIER_BAD_SUBJECT; + } + if (audience == NULL) { audience_ok = claims->aud == NULL; } else { @@ -705,10 +717,26 @@ static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain, GPR_ASSERT(v->num_mappings <= v->allocated_mappings); } +/* Very non-sophisticated way to detect an email address. Should be good + enough for now... */ +const char *grpc_jwt_issuer_email_domain(const char *issuer) { + const char *at_sign = strchr(issuer, '@'); + if (at_sign == NULL) return NULL; + const char *email_domain = at_sign + 1; + if (*email_domain == '\0') return NULL; + const char *dot = strrchr(email_domain, '.'); + if (dot == NULL || dot == email_domain) return email_domain; + GPR_ASSERT(dot > email_domain); + /* There may be a subdomain, we just want the domain. 
*/ + dot = gpr_memrchr(email_domain, '.', (size_t)(dot - email_domain)); + if (dot == NULL) return email_domain; + return dot + 1; +} + /* Takes ownership of ctx. */ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) { - const char *at_sign; + const char *email_domain; grpc_closure *http_cb; char *path_prefix = NULL; const char *iss; @@ -733,13 +761,9 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, Nobody seems to implement the account/email/webfinger part 2. of the spec so we will rely instead on email/url mappings if we detect such an issuer. Part 4, on the other hand is implemented by both google and salesforce. */ - - /* Very non-sophisticated way to detect an email address. Should be good - enough for now... */ - at_sign = strchr(iss, '@'); - if (at_sign != NULL) { + email_domain = grpc_jwt_issuer_email_domain(iss); + if (email_domain != NULL) { email_key_mapping *mapping; - const char *email_domain = at_sign + 1; GPR_ASSERT(ctx->verifier != NULL); mapping = verifier_get_mapping(ctx->verifier, email_domain); if (mapping == NULL) { diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h index f09f9d5d47..54ff9b05e5 100644 --- a/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -43,8 +43,7 @@ /* --- Constants. --- */ #define GRPC_OPENID_CONFIG_URL_SUFFIX "/.well-known/openid-configuration" -#define GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN \ - "developer.gserviceaccount.com" +#define GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN "gserviceaccount.com" #define GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX \ "www.googleapis.com/robot/v1/metadata/x509" @@ -57,6 +56,7 @@ typedef enum { GRPC_JWT_VERIFIER_BAD_AUDIENCE, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE, + GRPC_JWT_VERIFIER_BAD_SUBJECT, GRPC_JWT_VERIFIER_GENERIC_ERROR } grpc_jwt_verifier_status; @@ -132,5 +132,6 @@ void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, grpc_slice buffer); grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, const char *audience); +const char *grpc_jwt_issuer_email_domain(const char *issuer); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H */ diff --git a/src/core/lib/security/transport/client_auth_filter.c b/src/core/lib/security/transport/client_auth_filter.c index 053bf5972c..da897296e4 100644 --- a/src/core/lib/security/transport/client_auth_filter.c +++ b/src/core/lib/security/transport/client_auth_filter.c @@ -303,9 +303,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, } /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { grpc_security_connector *sc = grpc_find_security_connector_in_args(args->channel_args); grpc_auth_context *auth_context = @@ -327,6 +327,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, sc, "client_auth_filter"); chand->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "client_auth_filter"); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c index 1b278410e8..331a8f1835 
100644 --- a/src/core/lib/security/transport/secure_endpoint.c +++ b/src/core/lib/security/transport/secure_endpoint.c @@ -372,7 +372,10 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) { return grpc_endpoint_get_peer(ep->wrapped_ep); } -static int endpoint_get_fd(grpc_endpoint *secure_ep) { return -1; } +static int endpoint_get_fd(grpc_endpoint *secure_ep) { + secure_endpoint *ep = (secure_endpoint *)secure_ep; + return grpc_endpoint_get_fd(ep->wrapped_ep); +} static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) { secure_endpoint *ep = (secure_endpoint *)secure_ep; diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c index a2e0c7c7c7..5b088aa58d 100644 --- a/src/core/lib/security/transport/security_connector.c +++ b/src/core/lib/security/transport/security_connector.c @@ -43,6 +43,7 @@ #include <grpc/support/string_util.h> #include "src/core/ext/transport/chttp2/alpn/alpn.h" +#include "src/core/lib/channel/handshaker.h" #include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/security/context/security_context.h" #include "src/core/lib/security/credentials/credentials.h" @@ -111,19 +112,19 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer, return NULL; } -void grpc_channel_security_connector_create_handshakers( +void grpc_channel_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, grpc_handshake_manager *handshake_mgr) { if (connector != NULL) { - connector->create_handshakers(exec_ctx, connector, handshake_mgr); + connector->add_handshakers(exec_ctx, connector, handshake_mgr); } } -void grpc_server_security_connector_create_handshakers( +void grpc_server_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector, grpc_handshake_manager *handshake_mgr) { if (connector != NULL) { - connector->create_handshakers(exec_ctx, connector, handshake_mgr); + connector->add_handshakers(exec_ctx, connector, handshake_mgr); } } @@ -285,20 +286,24 @@ static void fake_channel_check_call_host(grpc_exec_ctx *exec_ctx, cb(exec_ctx, user_data, GRPC_SECURITY_OK); } -static void fake_channel_create_handshakers( +static void fake_channel_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, grpc_handshake_manager *handshake_mgr) { - grpc_security_create_handshakers( - exec_ctx, tsi_create_fake_handshaker(true /* is_client */), &sc->base, - handshake_mgr); + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + exec_ctx, tsi_create_fake_handshaker(true /* is_client */), + &sc->base)); } -static void fake_server_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { - grpc_security_create_handshakers( - exec_ctx, tsi_create_fake_handshaker(false /* is_client */), &sc->base, - handshake_mgr); +static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_server_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + exec_ctx, tsi_create_fake_handshaker(false /* is_client */), + &sc->base)); } static grpc_security_connector_vtable fake_channel_vtable = { @@ -316,7 +321,7 @@ grpc_channel_security_connector *grpc_fake_channel_security_connector_create( c->base.vtable = &fake_channel_vtable; c->request_metadata_creds = 
grpc_call_credentials_ref(request_metadata_creds); c->check_call_host = fake_channel_check_call_host; - c->create_handshakers = fake_channel_create_handshakers; + c->add_handshakers = fake_channel_add_handshakers; return c; } @@ -328,7 +333,7 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create( gpr_ref_init(&c->base.refcount, 1); c->base.vtable = &fake_server_vtable; c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME; - c->create_handshakers = fake_server_create_handshakers; + c->add_handshakers = fake_server_add_handshakers; return c; } @@ -382,9 +387,9 @@ static grpc_security_status ssl_create_handshaker( return GRPC_SECURITY_OK; } -static void ssl_channel_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { +static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_channel_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { grpc_ssl_channel_security_connector *c = (grpc_ssl_channel_security_connector *)sc; // Instantiate TSI handshaker. @@ -395,12 +400,13 @@ static void ssl_channel_create_handshakers( : c->target_name, &tsi_hs); // Create handshakers. - grpc_security_create_handshakers(exec_ctx, tsi_hs, &sc->base, handshake_mgr); + grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create( + exec_ctx, tsi_hs, &sc->base)); } -static void ssl_server_create_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { +static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, + grpc_server_security_connector *sc, + grpc_handshake_manager *handshake_mgr) { grpc_ssl_server_security_connector *c = (grpc_ssl_server_security_connector *)sc; // Instantiate TSI handshaker. @@ -408,7 +414,8 @@ static void ssl_server_create_handshakers( ssl_create_handshaker(c->handshaker_factory, false /* is_client */, NULL /* peer_name */, &tsi_hs); // Create handshakers. - grpc_security_create_handshakers(exec_ctx, tsi_hs, &sc->base, handshake_mgr); + grpc_handshake_manager_add(handshake_mgr, grpc_security_handshaker_create( + exec_ctx, tsi_hs, &sc->base)); } static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) { @@ -708,7 +715,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create( c->base.request_metadata_creds = grpc_call_credentials_ref(request_metadata_creds); c->base.check_call_host = ssl_channel_check_call_host; - c->base.create_handshakers = ssl_channel_create_handshakers; + c->base.add_handshakers = ssl_channel_add_handshakers; gpr_split_host_port(target_name, &c->target_name, &port); gpr_free(port); if (overridden_target_name != NULL) { @@ -783,7 +790,7 @@ grpc_security_status grpc_ssl_server_security_connector_create( *sc = NULL; goto error; } - c->base.create_handshakers = ssl_server_create_handshakers; + c->base.add_handshakers = ssl_server_add_handshakers; *sc = &c->base; gpr_free((void *)alpn_protocol_strings); gpr_free(alpn_protocol_string_lengths); diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h index 696db0e02e..a84b359051 100644 --- a/src/core/lib/security/transport/security_connector.h +++ b/src/core/lib/security/transport/security_connector.h @@ -98,7 +98,7 @@ void grpc_security_connector_unref(grpc_security_connector *policy); #endif /* Check the peer. Callee takes ownership of the peer object. - Sets *auth_context and invokes on_peer_checked when done. 
*/ + When done, sets *auth_context and invokes on_peer_checked. */ void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc, tsi_peer peer, @@ -133,9 +133,9 @@ struct grpc_channel_security_connector { grpc_channel_security_connector *sc, const char *host, grpc_auth_context *auth_context, grpc_security_call_host_check_cb cb, void *user_data); - void (*create_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr); + void (*add_handshakers)(grpc_exec_ctx *exec_ctx, + grpc_channel_security_connector *sc, + grpc_handshake_manager *handshake_mgr); }; /* Checks that the host that will be set for a call is acceptable. */ @@ -145,7 +145,7 @@ void grpc_channel_security_connector_check_call_host( grpc_security_call_host_check_cb cb, void *user_data); /* Registers handshakers with \a handshake_mgr. */ -void grpc_channel_security_connector_create_handshakers( +void grpc_channel_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, grpc_handshake_manager *handshake_mgr); @@ -158,12 +158,12 @@ typedef struct grpc_server_security_connector grpc_server_security_connector; struct grpc_server_security_connector { grpc_security_connector base; - void (*create_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr); + void (*add_handshakers)(grpc_exec_ctx *exec_ctx, + grpc_server_security_connector *sc, + grpc_handshake_manager *handshake_mgr); }; -void grpc_server_security_connector_create_handshakers( +void grpc_server_security_connector_add_handshakers( grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, grpc_handshake_manager *handshake_mgr); diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c index fc01bec2f2..41a775db85 100644 --- a/src/core/lib/security/transport/security_handshaker.c +++ b/src/core/lib/security/transport/security_handshaker.c @@ -131,6 +131,9 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx, // Not shutting down, so the write failed. Clean up before // invoking the callback. cleanup_args_for_failure_locked(h); + // Set shutdown to true so that subsequent calls to + // security_handshaker_shutdown() do nothing. + h->shutdown = true; } // Invoke callback. grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, error, NULL); @@ -434,17 +437,14 @@ static grpc_handshaker *fail_handshaker_create() { // exported functions // -void grpc_security_create_handshakers(grpc_exec_ctx *exec_ctx, - tsi_handshaker *handshaker, - grpc_security_connector *connector, - grpc_handshake_manager *handshake_mgr) { - // If no TSI handshaker was created, add a handshaker that always fails. - // Otherwise, add a real security handshaker. +grpc_handshaker *grpc_security_handshaker_create( + grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, + grpc_security_connector *connector) { + // If no TSI handshaker was created, return a handshaker that always fails. + // Otherwise, return a real security handshaker. 
if (handshaker == NULL) { - grpc_handshake_manager_add(handshake_mgr, fail_handshaker_create()); + return fail_handshaker_create(); } else { - grpc_handshake_manager_add( - handshake_mgr, - security_handshaker_create(exec_ctx, handshaker, connector)); + return security_handshaker_create(exec_ctx, handshaker, connector); } } diff --git a/src/core/lib/security/transport/security_handshaker.h b/src/core/lib/security/transport/security_handshaker.h index f71f43a359..5ddbf4b451 100644 --- a/src/core/lib/security/transport/security_handshaker.h +++ b/src/core/lib/security/transport/security_handshaker.h @@ -34,14 +34,13 @@ #ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H #define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H -#include "src/core/lib/iomgr/endpoint.h" +#include "src/core/lib/channel/handshaker.h" +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/security/transport/security_connector.h" -/// Creates any necessary security handshakers and adds them to -/// \a handshake_mgr. -void grpc_security_create_handshakers(grpc_exec_ctx *exec_ctx, - tsi_handshaker *handshaker, - grpc_security_connector *connector, - grpc_handshake_manager *handshake_mgr); +/// Creates a security handshaker using \a handshaker. +grpc_handshaker *grpc_security_handshaker_create( + grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, + grpc_security_connector *connector); #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H */ diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c index eaa1d0720b..e6a242e68f 100644 --- a/src/core/lib/security/transport/server_auth_filter.c +++ b/src/core/lib/security/transport/server_auth_filter.c @@ -238,9 +238,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, void *ignored) {} /* Constructor for channel_data */ -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { grpc_auth_context *auth_context = grpc_find_auth_context_in_args(args->channel_args); grpc_server_credentials *creds = @@ -256,6 +256,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, chand->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter"); chand->creds = grpc_server_credentials_ref(creds); + return GRPC_ERROR_NONE; } /* Destructor for channel data */ diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c index f10a30f0fd..426fce28f8 100644 --- a/src/core/lib/support/string.c +++ b/src/core/lib/support/string.c @@ -275,3 +275,15 @@ int gpr_stricmp(const char *a, const char *b) { } while (ca == cb && ca && cb); return ca - cb; } + +void *gpr_memrchr(const void *s, int c, size_t n) { + if (s == NULL) return NULL; + char *b = (char *)s; + size_t i; + for (i = 0; i < n; i++) { + if (b[n - i - 1] == c) { + return &b[n - i - 1]; + } + } + return NULL; +} diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h index e933e2eb46..6d1f7cc632 100644 --- a/src/core/lib/support/string.h +++ b/src/core/lib/support/string.h @@ -118,6 +118,8 @@ char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length); lower(a)==lower(b), >0 if lower(a)>lower(b) */ int gpr_stricmp(const char *a, const char *b); +void *gpr_memrchr(const void *s, int c, size_t n); + #ifdef __cplusplus } #endif diff --git 
a/src/core/lib/surface/channel.c b/src/core/lib/surface/channel.c index 1389df6886..9405015c50 100644 --- a/src/core/lib/surface/channel.c +++ b/src/core/lib/surface/channel.c @@ -86,87 +86,90 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target, const grpc_channel_args *input_args, grpc_channel_stack_type channel_stack_type, grpc_transport *optional_transport) { - bool is_client = grpc_channel_stack_type_is_client(channel_stack_type); - grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create(); grpc_channel_stack_builder_set_channel_arguments(builder, input_args); grpc_channel_stack_builder_set_target(builder, target); grpc_channel_stack_builder_set_transport(builder, optional_transport); - grpc_channel *channel; - grpc_channel_args *args; if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) { grpc_channel_stack_builder_destroy(builder); return NULL; - } else { - args = grpc_channel_args_copy( - grpc_channel_stack_builder_get_channel_arguments(builder)); - channel = grpc_channel_stack_builder_finish( - exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL); + } + grpc_channel_args *args = grpc_channel_args_copy( + grpc_channel_stack_builder_get_channel_arguments(builder)); + grpc_channel *channel; + grpc_error *error = grpc_channel_stack_builder_finish( + exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL, + (void **)&channel); + if (error != GRPC_ERROR_NONE) { + const char *msg = grpc_error_string(error); + gpr_log(GPR_ERROR, "channel stack builder failed: %s", msg); + grpc_error_free_string(msg); + GRPC_ERROR_UNREF(error); + goto done; } memset(channel, 0, sizeof(*channel)); channel->target = gpr_strdup(target); - channel->is_client = is_client; + channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type); gpr_mu_init(&channel->registered_call_mu); channel->registered_calls = NULL; grpc_compression_options_init(&channel->compression_options); - if (args) { - for (size_t i = 0; i < args->num_args; i++) { - if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) { - if (args->args[i].type != GRPC_ARG_STRING) { - gpr_log(GPR_ERROR, "%s ignored: it must be a string", - GRPC_ARG_DEFAULT_AUTHORITY); - } else { - if (channel->default_authority) { - /* setting this takes precedence over anything else */ - GRPC_MDELEM_UNREF(channel->default_authority); - } - channel->default_authority = grpc_mdelem_from_strings( - ":authority", args->args[i].value.string); + + for (size_t i = 0; i < args->num_args; i++) { + if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) { + if (args->args[i].type != GRPC_ARG_STRING) { + gpr_log(GPR_ERROR, "%s ignored: it must be a string", + GRPC_ARG_DEFAULT_AUTHORITY); + } else { + if (channel->default_authority) { + /* setting this takes precedence over anything else */ + GRPC_MDELEM_UNREF(channel->default_authority); } - } else if (0 == - strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) { - if (args->args[i].type != GRPC_ARG_STRING) { - gpr_log(GPR_ERROR, "%s ignored: it must be a string", + channel->default_authority = + grpc_mdelem_from_strings(":authority", args->args[i].value.string); + } + } else if (0 == + strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) { + if (args->args[i].type != GRPC_ARG_STRING) { + gpr_log(GPR_ERROR, "%s ignored: it must be a string", + GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); + } else { + if (channel->default_authority) { + /* other ways of setting this (notably ssl) take precedence */ + 
gpr_log(GPR_ERROR, + "%s ignored: default host already set some other way", GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); } else { - if (channel->default_authority) { - /* other ways of setting this (notably ssl) take precedence */ - gpr_log(GPR_ERROR, - "%s ignored: default host already set some other way", - GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); - } else { - channel->default_authority = grpc_mdelem_from_strings( - ":authority", args->args[i].value.string); - } + channel->default_authority = grpc_mdelem_from_strings( + ":authority", args->args[i].value.string); } - } else if (0 == strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) { - channel->compression_options.default_level.is_set = true; - GPR_ASSERT(args->args[i].value.integer >= 0 && - args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT); - channel->compression_options.default_level.level = - (grpc_compression_level)args->args[i].value.integer; - } else if (0 == strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) { - channel->compression_options.default_algorithm.is_set = true; - GPR_ASSERT(args->args[i].value.integer >= 0 && - args->args[i].value.integer < - GRPC_COMPRESS_ALGORITHMS_COUNT); - channel->compression_options.default_algorithm.algorithm = - (grpc_compression_algorithm)args->args[i].value.integer; - } else if (0 == - strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) { - channel->compression_options.enabled_algorithms_bitset = - (uint32_t)args->args[i].value.integer | - 0x1; /* always support no compression */ } + } else if (0 == strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) { + channel->compression_options.default_level.is_set = true; + GPR_ASSERT(args->args[i].value.integer >= 0 && + args->args[i].value.integer < GRPC_COMPRESS_LEVEL_COUNT); + channel->compression_options.default_level.level = + (grpc_compression_level)args->args[i].value.integer; + } else if (0 == strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) { + channel->compression_options.default_algorithm.is_set = true; + GPR_ASSERT(args->args[i].value.integer >= 0 && + args->args[i].value.integer < GRPC_COMPRESS_ALGORITHMS_COUNT); + channel->compression_options.default_algorithm.algorithm = + (grpc_compression_algorithm)args->args[i].value.integer; + } else if (0 == + strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) { + channel->compression_options.enabled_algorithms_bitset = + (uint32_t)args->args[i].value.integer | + 0x1; /* always support no compression */ } - grpc_channel_args_destroy(args); } +done: + grpc_channel_args_destroy(args); return channel; } diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c index 4e0feb56ac..184c1a1a16 100644 --- a/src/core/lib/surface/completion_queue.c +++ b/src/core/lib/surface/completion_queue.c @@ -354,11 +354,13 @@ static void dump_pending_tags(grpc_completion_queue *cc) { gpr_strvec v; gpr_strvec_init(&v); gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:")); + gpr_mu_lock(cc->mu); for (size_t i = 0; i < cc->outstanding_tag_count; i++) { char *s; gpr_asprintf(&s, " %p", cc->outstanding_tags[i]); gpr_strvec_add(&v, s); } + gpr_mu_unlock(cc->mu); char *out = gpr_strvec_flatten(&v, NULL); gpr_strvec_destroy(&v); gpr_log(GPR_DEBUG, "%s", out); diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c index d0df8e7e17..57da94ac1e 100644 --- a/src/core/lib/surface/lame_client.c +++ b/src/core/lib/surface/lame_client.c @@ 
-123,11 +123,12 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, gpr_free(and_free_memory); } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(args->is_first); GPR_ASSERT(args->is_last); + return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c index fe73aa375c..62d7afc8da 100644 --- a/src/core/lib/surface/server.c +++ b/src/core/lib/surface/server.c @@ -914,9 +914,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, server_unref(exec_ctx, chand->server); } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { channel_data *chand = elem->channel_data; GPR_ASSERT(args->is_first); GPR_ASSERT(!args->is_last); @@ -927,6 +927,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx, chand->connectivity_state = GRPC_CHANNEL_IDLE; grpc_closure_init(&chand->channel_connectivity_changed, channel_connectivity_changed, chand); + return GRPC_ERROR_NONE; } static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/surface/validate_metadata.c b/src/core/lib/surface/validate_metadata.c index 84f0a083bc..f49dd8584b 100644 --- a/src/core/lib/surface/validate_metadata.c +++ b/src/core/lib/surface/validate_metadata.c @@ -53,7 +53,7 @@ int grpc_header_key_is_legal(const char *key, size_t length) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 0x00, 0x00, 0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - if (length == 0) { + if (length == 0 || key[0] == ':') { return 0; } return conforms_to(key, length, legal_header_bits); diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h index e420efc71c..c9f50df732 100644 --- a/src/cpp/common/channel_filter.h +++ b/src/cpp/common/channel_filter.h @@ -216,12 +216,13 @@ class TransportStreamOp { /// Represents channel data. class ChannelData { public: - virtual ~ChannelData() { - if (peer_) gpr_free((void *)peer_); - } + virtual ~ChannelData() {} - /// Caller does NOT take ownership of result. - const char *peer() const { return peer_; } + /// Initializes the channel data. + virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; + } // TODO(roth): Find a way to avoid passing elem into these methods. @@ -232,11 +233,7 @@ class ChannelData { const grpc_channel_info *channel_info); protected: - /// Takes ownership of \a peer. - ChannelData(const grpc_channel_args &args, const char *peer) : peer_(peer) {} - - private: - const char *peer_; + ChannelData() {} }; /// Represents call data. @@ -245,7 +242,10 @@ class CallData { virtual ~CallData() {} /// Initializes the call data. - virtual grpc_error *Init() { return GRPC_ERROR_NONE; } + virtual grpc_error *Init(grpc_exec_ctx *exec_ctx, ChannelData *channel_data, + grpc_call_element_args *args) { + return GRPC_ERROR_NONE; + } // TODO(roth): Find a way to avoid passing elem into these methods. 
@@ -263,7 +263,7 @@ class CallData { virtual char *GetPeer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem); protected: - explicit CallData(const ChannelData &) {} + CallData() {} }; namespace internal { @@ -276,15 +276,11 @@ class ChannelFilter final { public: static const size_t channel_data_size = sizeof(ChannelDataType); - static void InitChannelElement(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - const char *peer = - args->optional_transport - ? grpc_transport_get_peer(exec_ctx, args->optional_transport) - : nullptr; - // Construct the object in the already-allocated memory. - new (elem->channel_data) ChannelDataType(*args->channel_args, peer); + static grpc_error *InitChannelElement(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + ChannelDataType *channel_data = new (elem->channel_data) ChannelDataType(); + return channel_data->Init(exec_ctx, args); } static void DestroyChannelElement(grpc_exec_ctx *exec_ctx, @@ -312,11 +308,10 @@ class ChannelFilter final { static grpc_error *InitCallElement(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, grpc_call_element_args *args) { - const ChannelDataType &channel_data = - *(ChannelDataType *)elem->channel_data; + ChannelDataType *channel_data = (ChannelDataType *)elem->channel_data; // Construct the object in the already-allocated memory. - CallDataType *call_data = new (elem->call_data) CallDataType(channel_data); - return call_data->Init(); + CallDataType *call_data = new (elem->call_data) CallDataType(); + return call_data->Init(exec_ctx, channel_data, args); } static void DestroyCallElement(grpc_exec_ctx *exec_ctx, diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj index 99e8c1a3da..db55ed5a6c 100644 --- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj +++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj @@ -29,15 +29,6 @@ <WarningLevel>4</WarningLevel> <ConsolePause>false</ConsolePause> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj index 19a68ab9ea..646effe21a 100644 --- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj +++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj @@ -25,15 +25,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="nunit.framework"> diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj index 5bfb978ca6..23e1ddcf7f 100644 --- a/src/csharp/Grpc.Core/Grpc.Core.csproj +++ b/src/csharp/Grpc.Core/Grpc.Core.csproj @@ -27,16 +27,6 @@ 
<ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <DefineConstants>SIGNED</DefineConstants> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Interactive.Async"> diff --git a/src/csharp/Grpc.Core/NativeDeps.targets b/src/csharp/Grpc.Core/NativeDeps.targets index 66c5ec1292..e187f72d26 100644 --- a/src/csharp/Grpc.Core/NativeDeps.targets +++ b/src/csharp/Grpc.Core/NativeDeps.targets @@ -4,13 +4,11 @@ <PropertyGroup Condition=" '$(NativeDependenciesConfiguration)' == '' "> <NativeDependenciesConfiguration Condition=" '$(Configuration)' == 'Debug' ">Debug</NativeDependenciesConfiguration> <NativeDependenciesConfiguration Condition=" '$(Configuration)' == 'Release' ">Release</NativeDependenciesConfiguration> - <NativeDependenciesConfiguration Condition=" '$(Configuration)' == 'ReleaseSigned' ">Release</NativeDependenciesConfiguration> </PropertyGroup> <PropertyGroup Condition=" '$(NativeDependenciesConfigurationUnix)' == '' "> <NativeDependenciesConfigurationUnix Condition=" '$(Configuration)' == 'Debug' ">dbg</NativeDependenciesConfigurationUnix> <NativeDependenciesConfigurationUnix Condition=" '$(Configuration)' == 'Release' ">opt</NativeDependenciesConfigurationUnix> - <NativeDependenciesConfigurationUnix Condition=" '$(Configuration)' == 'ReleaseSigned' ">opt</NativeDependenciesConfigurationUnix> </PropertyGroup> <!-- Autodetect platform --> diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj index 65bf236def..de4005c2f6 100644 --- a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj +++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj index 26b42b6936..3f38de2b71 100644 --- a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj +++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference 
Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj index 16d7a44f92..d22fe87825 100644 --- a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj +++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj @@ -25,15 +25,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="nunit.framework"> diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.csproj b/src/csharp/Grpc.Examples/Grpc.Examples.csproj index c1ea2e2833..44acb6c2e3 100644 --- a/src/csharp/Grpc.Examples/Grpc.Examples.csproj +++ b/src/csharp/Grpc.Examples/Grpc.Examples.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="nunit.framework"> <HintPath>..\packages\NUnit.3.2.0\lib\net45\nunit.framework.dll</HintPath> diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs index 8b431c7218..3364b8ce8e 100644 --- a/src/csharp/Grpc.Examples/MathGrpc.cs +++ b/src/csharp/Grpc.Examples/MathGrpc.cs @@ -85,39 +85,53 @@ namespace Math { public abstract partial class MathBase { /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Math.DivReply> Div(global::Math.DivArgs request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// DivMany accepts an arbitrary number of division args from the client stream - /// and sends back the results in the reply stream. The stream continues until - /// the client closes its end; the server does the same after sending all the - /// replies. The stream ends immediately if either end aborts. + /// DivMany accepts an arbitrary number of division args from the client stream + /// and sends back the results in the reply stream. The stream continues until + /// the client closes its end; the server does the same after sending all the + /// replies. The stream ends immediately if either end aborts. 
/// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task DivMany(IAsyncStreamReader<global::Math.DivArgs> requestStream, IServerStreamWriter<global::Math.DivReply> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib - /// generates up to limit numbers; otherwise it continues until the call is - /// canceled. Unlike Fib above, Fib has no final FibReply. + /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib + /// generates up to limit numbers; otherwise it continues until the call is + /// canceled. Unlike Fib above, Fib has no final FibReply. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task Fib(global::Math.FibArgs request, IServerStreamWriter<global::Math.Num> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Sum sums a stream of numbers, returning the final result once the stream - /// is closed. + /// Sum sums a stream of numbers, returning the final result once the stream + /// is closed. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Math.Num> Sum(IAsyncStreamReader<global::Math.Num> requestStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -149,87 +163,123 @@ namespace Math { } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Math.DivReply Div(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return Div(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. 
+ /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Math.DivReply Div(global::Math.DivArgs request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_Div, null, options, request); } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return DivAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient - /// and remainder. + /// Div divides DivArgs.dividend by DivArgs.divisor and returns the quotient + /// and remainder. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Math.DivReply> DivAsync(global::Math.DivArgs request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_Div, null, options, request); } /// <summary> - /// DivMany accepts an arbitrary number of division args from the client stream - /// and sends back the results in the reply stream. The stream continues until - /// the client closes its end; the server does the same after sending all the - /// replies. The stream ends immediately if either end aborts. + /// DivMany accepts an arbitrary number of division args from the client stream + /// and sends back the results in the reply stream. The stream continues until + /// the client closes its end; the server does the same after sending all the + /// replies. The stream ends immediately if either end aborts. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return DivMany(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// DivMany accepts an arbitrary number of division args from the client stream - /// and sends back the results in the reply stream. 
The stream continues until - /// the client closes its end; the server does the same after sending all the - /// replies. The stream ends immediately if either end aborts. + /// DivMany accepts an arbitrary number of division args from the client stream + /// and sends back the results in the reply stream. The stream continues until + /// the client closes its end; the server does the same after sending all the + /// replies. The stream ends immediately if either end aborts. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Math.DivArgs, global::Math.DivReply> DivMany(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_DivMany, null, options); } /// <summary> - /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib - /// generates up to limit numbers; otherwise it continues until the call is - /// canceled. Unlike Fib above, Fib has no final FibReply. + /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib + /// generates up to limit numbers; otherwise it continues until the call is + /// canceled. Unlike Fib above, Fib has no final FibReply. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return Fib(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib - /// generates up to limit numbers; otherwise it continues until the call is - /// canceled. Unlike Fib above, Fib has no final FibReply. + /// Fib generates numbers in the Fibonacci sequence. If FibArgs.limit > 0, Fib + /// generates up to limit numbers; otherwise it continues until the call is + /// canceled. Unlike Fib above, Fib has no final FibReply. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Math.Num> Fib(global::Math.FibArgs request, CallOptions options) { return CallInvoker.AsyncServerStreamingCall(__Method_Fib, null, options, request); } /// <summary> - /// Sum sums a stream of numbers, returning the final result once the stream - /// is closed. + /// Sum sums a stream of numbers, returning the final result once the stream + /// is closed. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return Sum(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Sum sums a stream of numbers, returning the final result once the stream - /// is closed. + /// Sum sums a stream of numbers, returning the final result once the stream + /// is closed. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Math.Num, global::Math.Num> Sum(CallOptions options) { return CallInvoker.AsyncClientStreamingCall(__Method_Sum, null, options); @@ -242,6 +292,7 @@ namespace Math { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(MathBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj index 93c3b3a55f..b82f976861 100644 --- a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj +++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj @@ -27,15 +27,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Core" /> diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj index 7418768316..63aa18584d 100644 --- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj +++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj @@ -28,15 +28,6 @@ <ErrorReport>prompt</ErrorReport> <WarningLevel>4</WarningLevel> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Core" /> diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs index ad5cf11b75..020c2df565 100644 --- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs +++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs @@ -115,6 +115,7 @@ namespace Grpc.Health.V1 { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(HealthBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj index ae58073b52..6bb5f33966 100644 --- 
a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj +++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj @@ -29,15 +29,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj index 593bf0939d..3b9587e315 100644 --- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj +++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj @@ -27,15 +27,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj index d5c40ba948..081dc24fbf 100644 --- a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj +++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj @@ -29,15 +29,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj index 8bd3d78913..0f28340450 100644 --- a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj +++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj @@ -27,15 +27,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - 
<AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> </ItemGroup> diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj index 161b015300..f7abcf8046 100644 --- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj +++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj @@ -28,15 +28,6 @@ <WarningLevel>4</WarningLevel> <PlatformTarget>AnyCPU</PlatformTarget> </PropertyGroup> - <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' "> - <DebugType>pdbonly</DebugType> - <Optimize>true</Optimize> - <OutputPath>bin\ReleaseSigned</OutputPath> - <ErrorReport>prompt</ErrorReport> - <WarningLevel>4</WarningLevel> - <SignAssembly>True</SignAssembly> - <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile> - </PropertyGroup> <ItemGroup> <Reference Include="System" /> <Reference Include="System.Net" /> diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs index d0bf0afc1d..8b58622d53 100644 --- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs +++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs @@ -76,17 +76,24 @@ namespace Grpc.Testing { public abstract partial class MetricsServiceBase { /// <summary> - /// Returns the values of all the gauges that are currently being maintained by - /// the service + /// Returns the values of all the gauges that are currently being maintained by + /// the service /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task GetAllGauges(global::Grpc.Testing.EmptyMessage request, IServerStreamWriter<global::Grpc.Testing.GaugeResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.GaugeResponse> GetGauge(global::Grpc.Testing.GaugeRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -118,45 +125,69 @@ namespace Grpc.Testing { } /// <summary> - /// Returns the values of all the gauges that are currently being maintained by - /// the service + /// Returns the values of all the gauges that are currently being maintained by + /// the service /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return GetAllGauges(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Returns the values of all the gauges that are currently being maintained by - /// the service + /// Returns the values of all the gauges that are currently being maintained by + /// the service /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.GaugeResponse> GetAllGauges(global::Grpc.Testing.EmptyMessage request, CallOptions options) { return CallInvoker.AsyncServerStreamingCall(__Method_GetAllGauges, null, options, request); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return GetGauge(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.GaugeResponse GetGauge(global::Grpc.Testing.GaugeRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_GetGauge, null, options, request); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return GetGaugeAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Returns the value of one gauge + /// Returns the value of one gauge /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.GaugeResponse> GetGaugeAsync(global::Grpc.Testing.GaugeRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_GetGauge, null, options, request); @@ -169,6 +200,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(MetricsServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs index 3cc4ed9f3c..5135d9ab66 100644 --- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs +++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs @@ -71,18 +71,25 @@ namespace Grpc.Testing { public abstract partial class BenchmarkServiceBase { /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task StreamingCall(IAsyncStreamReader<global::Grpc.Testing.SimpleRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.SimpleResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -114,49 +121,71 @@ namespace Grpc.Testing { } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return StreamingCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. - /// The server returns the client payload as-is. + /// One request followed by one response. + /// The server returns the client payload as-is. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.SimpleRequest, global::Grpc.Testing.SimpleResponse> StreamingCall(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_StreamingCall, null, options); @@ -169,6 +198,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(BenchmarkServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() @@ -227,42 +257,56 @@ namespace Grpc.Testing { public abstract partial class WorkerServiceBase { /// <summary> - /// Start server with specified workload. - /// First request sent specifies the ServerConfig followed by ServerStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test server - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start server with specified workload. + /// First request sent specifies the ServerConfig followed by ServerStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test server + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task RunServer(IAsyncStreamReader<global::Grpc.Testing.ServerArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ServerStatus> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Start client with specified workload. - /// First request sent specifies the ClientConfig followed by ClientStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test client - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start client with specified workload. + /// First request sent specifies the ClientConfig followed by ClientStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test client + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. 
/// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task RunClient(IAsyncStreamReader<global::Grpc.Testing.ClientArgs> requestStream, IServerStreamWriter<global::Grpc.Testing.ClientStatus> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.CoreResponse> CoreCount(global::Grpc.Testing.CoreRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Void> QuitWorker(global::Grpc.Testing.Void request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -294,105 +338,149 @@ namespace Grpc.Testing { } /// <summary> - /// Start server with specified workload. - /// First request sent specifies the ServerConfig followed by ServerStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test server - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start server with specified workload. + /// First request sent specifies the ServerConfig followed by ServerStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test server + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return RunServer(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Start server with specified workload. - /// First request sent specifies the ServerConfig followed by ServerStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. 
Closing the stream will initiate shutdown of the test server - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start server with specified workload. + /// First request sent specifies the ServerConfig followed by ServerStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test server + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ServerArgs, global::Grpc.Testing.ServerStatus> RunServer(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_RunServer, null, options); } /// <summary> - /// Start client with specified workload. - /// First request sent specifies the ClientConfig followed by ClientStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test client - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start client with specified workload. + /// First request sent specifies the ClientConfig followed by ClientStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test client + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return RunClient(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Start client with specified workload. - /// First request sent specifies the ClientConfig followed by ClientStatus - /// response. After that, a "Mark" can be sent anytime to request the latest - /// stats. Closing the stream will initiate shutdown of the test client - /// and once the shutdown has finished, the OK status is sent to terminate - /// this RPC. + /// Start client with specified workload. + /// First request sent specifies the ClientConfig followed by ClientStatus + /// response. After that, a "Mark" can be sent anytime to request the latest + /// stats. Closing the stream will initiate shutdown of the test client + /// and once the shutdown has finished, the OK status is sent to terminate + /// this RPC. 
/// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.ClientArgs, global::Grpc.Testing.ClientStatus> RunClient(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_RunClient, null, options); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CoreCount(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.CoreResponse CoreCount(global::Grpc.Testing.CoreRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_CoreCount, null, options, request); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CoreCountAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Just return the core count - unary call + /// Just return the core count - unary call /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.CoreResponse> CoreCountAsync(global::Grpc.Testing.CoreRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_CoreCount, null, options, request); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return QuitWorker(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Void QuitWorker(global::Grpc.Testing.Void request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_QuitWorker, null, options, request); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return QuitWorkerAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// Quit this worker + /// Quit this worker /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Void> QuitWorkerAsync(global::Grpc.Testing.Void request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_QuitWorker, null, options, request); @@ -405,6 +493,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(WorkerServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs index 43dbc2865f..0265f8e821 100644 --- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs +++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs @@ -42,8 +42,8 @@ using Grpc.Core; namespace Grpc.Testing { /// <summary> - /// A simple service to test the various types of RPCs and experiment with - /// performance with various types of payload. + /// A simple service to test the various types of RPCs and experiment with + /// performance with various types of payload. /// </summary> public static partial class TestService { @@ -123,74 +123,101 @@ namespace Grpc.Testing { public abstract partial class TestServiceBase { /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. 
/// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> EmptyCall(global::Grpc.Testing.Empty request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> UnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.SimpleResponse> CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// One request followed by a sequence of responses (streamed download). - /// The server returns the payload with client desired type and sizes. + /// One request followed by a sequence of responses (streamed download). + /// The server returns the payload with client desired type and sizes. /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// A sequence of requests followed by one response (streamed upload). - /// The server returns the aggregated size of client payload as the result. + /// A sequence of requests followed by one response (streamed upload). + /// The server returns the aggregated size of client payload as the result. 
/// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(IAsyncStreamReader<global::Grpc.Testing.StreamingInputCallRequest> requestStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// A sequence of requests with each request served by the server immediately. - /// As one request could lead to multiple responses, this interface - /// demonstrates the idea of full duplexing. + /// A sequence of requests with each request served by the server immediately. + /// As one request could lead to multiple responses, this interface + /// demonstrates the idea of full duplexing. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task FullDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// A sequence of requests followed by a sequence of responses. - /// The server buffers all the client requests and then serves them in order. A - /// stream of responses are returned to the client when the server starts with - /// first request. + /// A sequence of requests followed by a sequence of responses. + /// The server buffers all the client requests and then serves them in order. A + /// stream of responses are returned to the client when the server starts with + /// first request. /// </summary> + /// <param name="requestStream">Used for reading requests from the client.</param> + /// <param name="responseStream">Used for sending responses back to the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>A task indicating completion of the handler.</returns> public virtual global::System.Threading.Tasks.Task HalfDuplexCall(IAsyncStreamReader<global::Grpc.Testing.StreamingOutputCallRequest> requestStream, IServerStreamWriter<global::Grpc.Testing.StreamingOutputCallResponse> responseStream, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. 
/// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -222,195 +249,285 @@ namespace Grpc.Testing { } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return EmptyCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty EmptyCall(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_EmptyCall, null, options, request); } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return EmptyCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One empty request followed by one empty response. + /// One empty request followed by one empty response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> EmptyCallAsync(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_EmptyCall, null, options, request); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. 
/// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse UnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. + /// One request followed by one response. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> UnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. 
The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CacheableUnaryCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.SimpleResponse CacheableUnaryCall(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_CacheableUnaryCall, null, options, request); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return CacheableUnaryCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by one response. Response has cache control - /// headers set such that a caching HTTP proxy (such as GFE) can - /// satisfy subsequent requests. + /// One request followed by one response. Response has cache control + /// headers set such that a caching HTTP proxy (such as GFE) can + /// satisfy subsequent requests. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.SimpleResponse> CacheableUnaryCallAsync(global::Grpc.Testing.SimpleRequest request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_CacheableUnaryCall, null, options, request); } /// <summary> - /// One request followed by a sequence of responses (streamed download). - /// The server returns the payload with client desired type and sizes. + /// One request followed by a sequence of responses (streamed download). 
+ /// The server returns the payload with client desired type and sizes. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return StreamingOutputCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// One request followed by a sequence of responses (streamed download). - /// The server returns the payload with client desired type and sizes. + /// One request followed by a sequence of responses (streamed download). + /// The server returns the payload with client desired type and sizes. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncServerStreamingCall<global::Grpc.Testing.StreamingOutputCallResponse> StreamingOutputCall(global::Grpc.Testing.StreamingOutputCallRequest request, CallOptions options) { return CallInvoker.AsyncServerStreamingCall(__Method_StreamingOutputCall, null, options, request); } /// <summary> - /// A sequence of requests followed by one response (streamed upload). - /// The server returns the aggregated size of client payload as the result. + /// A sequence of requests followed by one response (streamed upload). + /// The server returns the aggregated size of client payload as the result. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return StreamingInputCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A sequence of requests followed by one response (streamed upload). - /// The server returns the aggregated size of client payload as the result. + /// A sequence of requests followed by one response (streamed upload). + /// The server returns the aggregated size of client payload as the result. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncClientStreamingCall<global::Grpc.Testing.StreamingInputCallRequest, global::Grpc.Testing.StreamingInputCallResponse> StreamingInputCall(CallOptions options) { return CallInvoker.AsyncClientStreamingCall(__Method_StreamingInputCall, null, options); } /// <summary> - /// A sequence of requests with each request served by the server immediately. 
- /// As one request could lead to multiple responses, this interface - /// demonstrates the idea of full duplexing. + /// A sequence of requests with each request served by the server immediately. + /// As one request could lead to multiple responses, this interface + /// demonstrates the idea of full duplexing. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return FullDuplexCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A sequence of requests with each request served by the server immediately. - /// As one request could lead to multiple responses, this interface - /// demonstrates the idea of full duplexing. + /// A sequence of requests with each request served by the server immediately. + /// As one request could lead to multiple responses, this interface + /// demonstrates the idea of full duplexing. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> FullDuplexCall(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_FullDuplexCall, null, options); } /// <summary> - /// A sequence of requests followed by a sequence of responses. - /// The server buffers all the client requests and then serves them in order. A - /// stream of responses are returned to the client when the server starts with - /// first request. + /// A sequence of requests followed by a sequence of responses. + /// The server buffers all the client requests and then serves them in order. A + /// stream of responses are returned to the client when the server starts with + /// first request. /// </summary> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return HalfDuplexCall(new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A sequence of requests followed by a sequence of responses. - /// The server buffers all the client requests and then serves them in order. A - /// stream of responses are returned to the client when the server starts with - /// first request. + /// A sequence of requests followed by a sequence of responses. + /// The server buffers all the client requests and then serves them in order. 
A + /// stream of responses are returned to the client when the server starts with + /// first request. /// </summary> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncDuplexStreamingCall<global::Grpc.Testing.StreamingOutputCallRequest, global::Grpc.Testing.StreamingOutputCallResponse> HalfDuplexCall(CallOptions options) { return CallInvoker.AsyncDuplexStreamingCall(__Method_HalfDuplexCall, null, options); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnimplementedCall, null, options, request); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// The test server will not implement this method. It will be used - /// to test the behavior when clients call unimplemented methods. + /// The test server will not implement this method. 
It will be used + /// to test the behavior when clients call unimplemented methods. /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnimplementedCall, null, options, request); @@ -423,6 +540,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(TestServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() @@ -438,8 +556,8 @@ namespace Grpc.Testing { } /// <summary> - /// A simple service NOT implemented at servers so clients can test for - /// that case. + /// A simple service NOT implemented at servers so clients can test for + /// that case. /// </summary> public static partial class UnimplementedService { @@ -464,8 +582,11 @@ namespace Grpc.Testing { public abstract partial class UnimplementedServiceBase { /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request received from the client.</param> + /// <param name="context">The context of the server-side call handler being invoked.</param> + /// <returns>The response to send back to the client (wrapped by a task).</returns> public virtual global::System.Threading.Tasks.Task<global::Grpc.Testing.Empty> UnimplementedCall(global::Grpc.Testing.Empty request, ServerCallContext context) { throw new RpcException(new Status(StatusCode.Unimplemented, "")); @@ -497,29 +618,45 @@ namespace Grpc.Testing { } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? 
deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCall(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The response received from the server.</returns> public virtual global::Grpc.Testing.Empty UnimplementedCall(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.BlockingUnaryCall(__Method_UnimplementedCall, null, options, request); } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param> + /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param> + /// <param name="cancellationToken">An optional token for canceling the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken)) { return UnimplementedCallAsync(request, new CallOptions(headers, deadline, cancellationToken)); } /// <summary> - /// A call that no server should implement + /// A call that no server should implement /// </summary> + /// <param name="request">The request to send to the server.</param> + /// <param name="options">The options for the call.</param> + /// <returns>The call object.</returns> public virtual AsyncUnaryCall<global::Grpc.Testing.Empty> UnimplementedCallAsync(global::Grpc.Testing.Empty request, CallOptions options) { return CallInvoker.AsyncUnaryCall(__Method_UnimplementedCall, null, options, request); @@ -532,6 +669,7 @@ namespace Grpc.Testing { } /// <summary>Creates service definition that can be registered with a server</summary> + /// <param name="serviceImpl">An object implementing the server-side handling logic.</param> public static ServerServiceDefinition BindService(UnimplementedServiceBase serviceImpl) { return ServerServiceDefinition.CreateBuilder() @@ -540,7 +678,7 @@ namespace Grpc.Testing { } /// <summary> - /// A service used to control reconnect server. + /// A service used to control reconnect server. 
/// </summary>
public static partial class ReconnectService
{
@@ -648,6 +786,7 @@ namespace Grpc.Testing {
}
/// <summary>Creates service definition that can be registered with a server</summary>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
public static ServerServiceDefinition BindService(ReconnectServiceBase serviceImpl)
{
return ServerServiceDefinition.CreateBuilder()
diff --git a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
index cebcf59ce8..c5918b194e 100644
--- a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
+++ b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj
@@ -27,15 +27,6 @@
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' ">
- <DebugType>pdbonly</DebugType>
- <Optimize>true</Optimize>
- <OutputPath>bin\ReleaseSigned</OutputPath>
- <ErrorReport>prompt</ErrorReport>
- <WarningLevel>4</WarningLevel>
- <SignAssembly>True</SignAssembly>
- <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile>
- </PropertyGroup>
<ItemGroup>
<Reference Include="System" />
<Reference Include="System.Core" />
diff --git a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
index ea65998ce3..4e254a0b53 100644
--- a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
+++ b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj
@@ -28,15 +28,6 @@
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'ReleaseSigned|AnyCPU' ">
- <DebugType>pdbonly</DebugType>
- <Optimize>true</Optimize>
- <OutputPath>bin\ReleaseSigned</OutputPath>
- <ErrorReport>prompt</ErrorReport>
- <WarningLevel>4</WarningLevel>
- <SignAssembly>True</SignAssembly>
- <AssemblyOriginatorKeyFile>..\keys\Grpc.snk</AssemblyOriginatorKeyFile>
- </PropertyGroup>
<ItemGroup>
<Reference Include="System" />
<Reference Include="System.Core" />
diff --git a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
index 1b6f96ce7c..5bd7558be5 100644
--- a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
+++ b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
@@ -64,9 +64,13 @@ namespace Grpc.Reflection.V1Alpha {
public abstract partial class ServerReflectionBase
{
/// <summary>
- /// The reflection service is structured as a bidirectional stream, ensuring
- /// all related requests go to a single server.
+ /// The reflection service is structured as a bidirectional stream, ensuring
+ /// all related requests go to a single server.
/// </summary>
+ /// <param name="requestStream">Used for reading requests from the client.</param>
+ /// <param name="responseStream">Used for sending responses back to the client.</param>
+ /// <param name="context">The context of the server-side call handler being invoked.</param>
+ /// <returns>A task indicating completion of the handler.</returns>
public virtual global::System.Threading.Tasks.Task ServerReflectionInfo(IAsyncStreamReader<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest> requestStream, IServerStreamWriter<global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> responseStream, ServerCallContext context)
{
throw new RpcException(new Status(StatusCode.Unimplemented, ""));
@@ -98,17 +102,23 @@ namespace Grpc.Reflection.V1Alpha {
}
/// <summary>
- /// The reflection service is structured as a bidirectional stream, ensuring
- /// all related requests go to a single server.
+ /// The reflection service is structured as a bidirectional stream, ensuring
+ /// all related requests go to a single server.
/// </summary>
+ /// <param name="headers">The initial metadata to send with the call. This parameter is optional.</param>
+ /// <param name="deadline">An optional deadline for the call. The call will be cancelled if deadline is hit.</param>
+ /// <param name="cancellationToken">An optional token for canceling the call.</param>
+ /// <returns>The call object.</returns>
public virtual AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(Metadata headers = null, DateTime? deadline = null, CancellationToken cancellationToken = default(CancellationToken))
{
return ServerReflectionInfo(new CallOptions(headers, deadline, cancellationToken));
}
/// <summary>
- /// The reflection service is structured as a bidirectional stream, ensuring
- /// all related requests go to a single server.
+ /// The reflection service is structured as a bidirectional stream, ensuring
+ /// all related requests go to a single server.
/// </summary>
+ /// <param name="options">The options for the call.</param>
+ /// <returns>The call object.</returns>
public virtual AsyncDuplexStreamingCall<global::Grpc.Reflection.V1Alpha.ServerReflectionRequest, global::Grpc.Reflection.V1Alpha.ServerReflectionResponse> ServerReflectionInfo(CallOptions options)
{
return CallInvoker.AsyncDuplexStreamingCall(__Method_ServerReflectionInfo, null, options);
@@ -121,6 +131,7 @@ namespace Grpc.Reflection.V1Alpha {
}
/// <summary>Creates service definition that can be registered with a server</summary>
+ /// <param name="serviceImpl">An object implementing the server-side handling logic.</param>
public static ServerServiceDefinition BindService(ServerReflectionBase serviceImpl)
{
return ServerServiceDefinition.CreateBuilder()
diff --git a/src/csharp/Grpc.sln b/src/csharp/Grpc.sln
index 2e6a8fd435..179e731380 100644
--- a/src/csharp/Grpc.sln
+++ b/src/csharp/Grpc.sln
@@ -44,105 +44,72 @@ Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
- ReleaseSigned|Any CPU = ReleaseSigned|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.Release|Any CPU.Build.0 = Release|Any CPU
- {143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {143B1C29-C442-4BE0-BF3F-A8F92288AC9F}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3D166931-BA2D-416E-95A3-D36E8F6E90B9}.Release|Any CPU.Build.0 = Release|Any CPU
- {3D166931-BA2D-416E-95A3-D36E8F6E90B9}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {3D166931-BA2D-416E-95A3-D36E8F6E90B9}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Debug|Any CPU.Build.0 = Debug|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Release|Any CPU.ActiveCfg = Release|Any CPU
{4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.Release|Any CPU.Build.0 = Release|Any CPU
- {4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {4F18CF52-B3DB-4A77-97C5-7F7F4B6C1715}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.Release|Any CPU.Build.0 = Release|Any CPU
- {61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {61ECB8EE-0C96-4F8E-B187-8E4D227417C0}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Debug|Any CPU.Build.0 = Debug|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Release|Any CPU.ActiveCfg = Release|Any CPU
{7DC1433E-3225-42C7-B7EA-546D56E27A4B}.Release|Any CPU.Build.0 = Release|Any CPU
- {7DC1433E-3225-42C7-B7EA-546D56E27A4B}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {7DC1433E-3225-42C7-B7EA-546D56E27A4B}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Debug|Any CPU.Build.0 = Debug|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Release|Any CPU.ActiveCfg = Release|Any CPU
{86EC5CB4-4EA2-40A2-8057-86542A0353BB}.Release|Any CPU.Build.0 = Release|Any CPU
- {86EC5CB4-4EA2-40A2-8057-86542A0353BB}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {86EC5CB4-4EA2-40A2-8057-86542A0353BB}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{A654F3B8-E859-4E6A-B30D-227527DBEF0D}.Release|Any CPU.Build.0 = Release|Any CPU
- {A654F3B8-E859-4E6A-B30D-227527DBEF0D}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {A654F3B8-E859-4E6A-B30D-227527DBEF0D}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{AA5E328A-8835-49D7-98ED-C29F2B3049F0}.Release|Any CPU.Build.0 = Release|Any CPU
- {AA5E328A-8835-49D7-98ED-C29F2B3049F0}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {AA5E328A-8835-49D7-98ED-C29F2B3049F0}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Debug|Any CPU.Build.0 = Debug|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Release|Any CPU.ActiveCfg = Release|Any CPU
{ADEBA147-80AE-4710-82E9-5B7F93690266}.Release|Any CPU.Build.0 = Release|Any CPU
- {ADEBA147-80AE-4710-82E9-5B7F93690266}.ReleaseSigned|Any CPU.ActiveCfg = Release|Any CPU
- {ADEBA147-80AE-4710-82E9-5B7F93690266}.ReleaseSigned|Any CPU.Build.0 = Release|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Debug|Any CPU.Build.0 = Debug|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Release|Any CPU.ActiveCfg = Release|Any CPU
{AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.Release|Any CPU.Build.0 = Release|Any CPU
- {AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {AE21D0EE-9A2C-4C15-AB7F-5224EED5B0EA}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.Release|Any CPU.Build.0 = Release|Any CPU
- {B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.ReleaseSigned|Any CPU.ActiveCfg = Release|Any CPU
- {B82B7DFE-7F7B-40EF-B3D6-064FF2B01294}.ReleaseSigned|Any CPU.Build.0 = Release|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Debug|Any CPU.Build.0 = Debug|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Release|Any CPU.ActiveCfg = Release|Any CPU
{B88F91D6-436D-4C78-8B99-47800FA8DE03}.Release|Any CPU.Build.0 = Release|Any CPU
- {B88F91D6-436D-4C78-8B99-47800FA8DE03}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {B88F91D6-436D-4C78-8B99-47800FA8DE03}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Debug|Any CPU.Build.0 = Debug|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Release|Any CPU.ActiveCfg = Release|Any CPU
{BF62FE08-373A-43D6-9D73-41CAA38B7011}.Release|Any CPU.Build.0 = Release|Any CPU
- {BF62FE08-373A-43D6-9D73-41CAA38B7011}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {BF62FE08-373A-43D6-9D73-41CAA38B7011}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Debug|Any CPU.Build.0 = Debug|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Release|Any CPU.ActiveCfg = Release|Any CPU
{C61154BA-DD4A-4838-8420-0162A28925E0}.Release|Any CPU.Build.0 = Release|Any CPU
- {C61154BA-DD4A-4838-8420-0162A28925E0}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {C61154BA-DD4A-4838-8420-0162A28925E0}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Release|Any CPU.ActiveCfg = Release|Any CPU
{CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.Release|Any CPU.Build.0 = Release|Any CPU
- {CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {CCC4440E-49F7-4790-B0AF-FEABB0837AE7}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.Release|Any CPU.Build.0 = Release|Any CPU
- {F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.ReleaseSigned|Any CPU.ActiveCfg = ReleaseSigned|Any CPU
- {F8C6D937-C44B-4EE3-A431-B0FBAEACE47D}.ReleaseSigned|Any CPU.Build.0 = ReleaseSigned|Any CPU
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
EndGlobalSection
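
The generated C# clients documented above all follow the same two-overload pattern: each RPC can be invoked either with individual headers, deadline and cancellationToken arguments or with a single CallOptions value. The Python surface touched further down in this diff exposes the equivalent per-call controls through keyword arguments and the returned future. A rough, hypothetical sketch follows; the generated module, stub and message names (test_pb2, TestServiceStub, SimpleRequest) are assumptions for illustration only, not part of this change.

    # Hypothetical sketch: per-call metadata, deadline and cancellation with grpcio.
    import grpc
    import test_pb2  # assumed generated module, for illustration only

    channel = grpc.insecure_channel('localhost:50051')
    stub = test_pb2.TestServiceStub(channel)

    # Blocking unary call with a 5 second deadline and initial metadata,
    # mirroring the headers/deadline parameters on the C# stubs above.
    response = stub.UnaryCall(
        test_pb2.SimpleRequest(response_size=1),
        timeout=5,
        metadata=(('x-client', 'demo'),))

    # Asynchronous variant: the returned future doubles as the cancellation
    # handle, the closest analogue of the C# CancellationToken parameter.
    future = stub.UnaryCall.future(test_pb2.SimpleRequest(response_size=1), timeout=5)
    future.cancel()
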
diff --git a/src/node/performance/worker_service_impl.js b/src/node/performance/worker_service_impl.js index 3f317f6429..38888a7219 100644 --- a/src/node/performance/worker_service_impl.js +++ b/src/node/performance/worker_service_impl.js @@ -55,9 +55,8 @@ module.exports = function WorkerServiceImpl(benchmark_impl, server) { } this.quitWorker = function quitWorker(call, callback) { - server.tryShutdown(function() { - callback(null, {}); - }); + callback(null, {}); + server.tryShutdown(function() {}); }; this.runClient = function runClient(call) { diff --git a/src/node/src/client.js b/src/node/src/client.js index 9c1562e8b8..134ef239c2 100644 --- a/src/node/src/client.js +++ b/src/node/src/client.js @@ -99,7 +99,18 @@ function ClientWritableStream(call, serialize) { function _write(chunk, encoding, callback) { /* jshint validthis: true */ var batch = {}; - var message = this.serialize(chunk); + var message; + try { + message = this.serialize(chunk); + } catch (e) { + /* Sending this error to the server and emitting it immediately on the + client may put the call in a slightly weird state on the client side, + but passing an object that causes a serialization failure is a misuse + of the API anyway, so that's OK. The primary purpose here is to give the + programmer a useful error and to stop the stream properly */ + this.call.cancelWithStatus(grpc.status.INTERNAL, "Serialization failure"); + callback(e); + } if (_.isFinite(encoding)) { /* Attach the encoding if it is a finite number. This is the closest we * can get to checking that it is valid flags */ @@ -184,14 +195,15 @@ function _emitStatusIfDone() { } else { status = this.received_status; } - this.emit('status', status); - if (status.code !== grpc.status.OK) { + if (status.code === grpc.status.OK) { + this.push(null); + } else { var error = new Error(status.details); error.code = status.code; error.metadata = status.metadata; this.emit('error', error); - return; } + this.emit('status', status); } } @@ -224,9 +236,11 @@ function _read(size) { } catch (e) { self._readsDone({code: grpc.status.INTERNAL, details: 'Failed to parse server response'}); + return; } if (data === null) { self._readsDone(); + return; } if (self.push(deserialized) && data !== null) { var read_batch = {}; @@ -396,6 +410,8 @@ function makeUnaryRequestFunction(method, serialize, deserialize) { var status = response.status; var error; var deserialized; + emitter.emit('metadata', Metadata._fromCoreRepresentation( + response.metadata)); if (status.code === grpc.status.OK) { if (err) { // Got a batch error, but OK status. 
Something went wrong @@ -423,8 +439,6 @@ function makeUnaryRequestFunction(method, serialize, deserialize) { args.callback(null, deserialized); } emitter.emit('status', status); - emitter.emit('metadata', Metadata._fromCoreRepresentation( - response.metadata)); }); return emitter; } diff --git a/src/node/src/server.js b/src/node/src/server.js index b3b414969a..da9c6b2d7f 100644 --- a/src/node/src/server.js +++ b/src/node/src/server.js @@ -127,7 +127,14 @@ function sendUnaryResponse(call, value, serialize, metadata, flags) { (new Metadata())._getCoreRepresentation(); call.metadataSent = true; } - var message = serialize(value); + var message; + try { + message = serialize(value); + } catch (e) { + e.code = grpc.status.INTERNAL; + handleError(e); + return; + } message.grpcWriteFlags = flags; end_batch[grpc.opType.SEND_MESSAGE] = message; end_batch[grpc.opType.SEND_STATUS_FROM_SERVER] = status; @@ -278,7 +285,14 @@ function _write(chunk, encoding, callback) { (new Metadata())._getCoreRepresentation(); this.call.metadataSent = true; } - var message = this.serialize(chunk); + var message; + try { + message = this.serialize(chunk); + } catch (e) { + e.code = grpc.status.INTERNAL; + callback(e); + return; + } if (_.isFinite(encoding)) { /* Attach the encoding if it is a finite number. This is the closest we * can get to checking that it is valid flags */ diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js index d8b36dc55c..2a42dd5db5 100644 --- a/src/node/test/surface_test.js +++ b/src/node/test/surface_test.js @@ -179,8 +179,8 @@ describe('Server.prototype.addProtoService', function() { call.on('data', function(value) { assert.fail('No messages expected'); }); - call.on('status', function(status) { - assert.strictEqual(status.code, grpc.status.UNIMPLEMENTED); + call.on('error', function(err) { + assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); done(); }); }); @@ -189,8 +189,8 @@ describe('Server.prototype.addProtoService', function() { call.on('data', function(value) { assert.fail('No messages expected'); }); - call.on('status', function(status) { - assert.strictEqual(status.code, grpc.status.UNIMPLEMENTED); + call.on('error', function(err) { + assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); done(); }); call.end(); diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec index 6e594fd3ed..bcc2bb6126 100644 --- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec +++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed # before them. s.name = '!ProtoCompiler-gRPCPlugin' - v = '1.0.1' + v = '1.0.2' s.version = v s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.' 
s.description = <<-DESC @@ -84,7 +84,10 @@ Pod::Spec.new do |s| repo = 'grpc/grpc' file = "grpc_objective_c_plugin-#{v}-macos-x86_64.zip" s.source = { - :http => "https://github.com/#{repo}/releases/download/v#{v}/#{file}", + # TODO(mxyan): Change back to "https://github.com/#{repo}/releases/download/v#{v}/#{file}" for + # next release + # :http => "https://github.com/#{repo}/releases/download/v#{v}/#{file}", + :http => "https://github.com/#{repo}/releases/download/objective-c-v#{v}/#{file}", # TODO(jcanizales): Add sha1 or sha256 # :sha1 => '??', } diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m index 31065cbf01..450bec36e0 100644 --- a/src/objective-c/GRPCClient/private/GRPCHost.m +++ b/src/objective-c/GRPCClient/private/GRPCHost.m @@ -50,7 +50,7 @@ NS_ASSUME_NONNULL_BEGIN // TODO(jcanizales): Generate the version in a standalone header, from templates. Like // templates/src/core/surface/version.c.template . -#define GRPC_OBJC_VERSION_STRING @"1.0.1" +#define GRPC_OBJC_VERSION_STRING @"1.0.2" static NSMutableDictionary *kHostCache; diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m index 4a92cc8e0d..4ba7badd86 100644 --- a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m +++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m @@ -316,7 +316,8 @@ static char *roots_filename; } - (void)testInvokeLargeRequest { - [self testIndividualCase:"invoke_large_request"]; + // NOT SUPPORTED (frame size) + // [self testIndividualCase:"invoke_large_request"]; } - (void)testLargeMetadata { @@ -329,7 +330,8 @@ static char *roots_filename; } - (void)testMaxMessageLength { - [self testIndividualCase:"max_message_length"]; + // NOT SUPPORTED (close_error) + // [self testIndividualCase:"max_message_length"]; } - (void)testNegativeDeadline { diff --git a/src/php/lib/Grpc/AbstractCall.php b/src/php/lib/Grpc/AbstractCall.php index c4d56790f7..9f0b02b8bb 100644 --- a/src/php/lib/Grpc/AbstractCall.php +++ b/src/php/lib/Grpc/AbstractCall.php @@ -62,7 +62,7 @@ abstract class AbstractCall Channel $channel, $method, $deserialize, - $options = [] + array $options = [] ) { if (array_key_exists('timeout', $options) && is_numeric($timeout = $options['timeout']) @@ -89,7 +89,7 @@ abstract class AbstractCall } /** - * @return mixed The metadata sent by the server. + * @return mixed The metadata sent by the server */ public function getMetadata() { @@ -97,7 +97,7 @@ abstract class AbstractCall } /** - * @return mixed The trailing metadata sent by the server. + * @return mixed The trailing metadata sent by the server */ public function getTrailingMetadata() { @@ -105,7 +105,7 @@ abstract class AbstractCall } /** - * @return string The URI of the endpoint. + * @return string The URI of the endpoint */ public function getPeer() { @@ -167,8 +167,7 @@ abstract class AbstractCall /** * Set the CallCredentials for the underlying Call. 
* - * @param CallCredentials $call_credentials The CallCredentials - * object + * @param CallCredentials $call_credentials The CallCredentials object */ public function setCallCredentials($call_credentials) { diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php index d0baeae955..aec60af094 100644 --- a/src/php/lib/Grpc/BaseStub.php +++ b/src/php/lib/Grpc/BaseStub.php @@ -48,14 +48,14 @@ class BaseStub private $update_metadata; /** - * @param $hostname string - * @param $opts array + * @param string $hostname + * @param array $opts * - 'update_metadata': (optional) a callback function which takes in a * metadata array, and returns an updated metadata array * - 'grpc.primary_user_agent': (optional) a user-agent string - * @param $channel Channel An already created Channel object + * @param Channel $channel An already created Channel object (optional) */ - public function __construct($hostname, $opts, $channel = null) + public function __construct($hostname, $opts, Channel $channel = null) { $ssl_roots = file_get_contents( dirname(__FILE__).'/../../../../etc/roots.pem'); @@ -98,7 +98,7 @@ class BaseStub } /** - * @return string The URI of the endpoint. + * @return string The URI of the endpoint */ public function getTarget() { @@ -106,7 +106,7 @@ class BaseStub } /** - * @param $try_to_connect bool + * @param bool $try_to_connect (optional) * * @return int The grpc connectivity state */ @@ -145,6 +145,12 @@ class BaseStub return $this->_checkConnectivityState($new_state); } + /** + * @param $new_state Connect state + * + * @return bool true if state is CHANNEL_READY + * @throw Exception if state is CHANNEL_FATAL_FAILURE + */ private function _checkConnectivityState($new_state) { if ($new_state == \Grpc\CHANNEL_READY) { @@ -167,6 +173,10 @@ class BaseStub /** * constructs the auth uri for the jwt. + * + * @param string $method The method string + * + * @return string The URL string */ private function _get_jwt_aud_uri($method) { @@ -191,7 +201,7 @@ class BaseStub * * @param array $metadata The metadata map * - * @return $metadata Validated and key-normalized metadata map + * @return array $metadata Validated and key-normalized metadata map * @throw InvalidArgumentException if key contains invalid characters */ private function _validate_and_normalize_metadata($metadata) @@ -220,14 +230,16 @@ class BaseStub * @param mixed $argument The argument to the method * @param callable $deserialize A function that deserializes the response * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return SimpleSurfaceActiveCall The active call object */ public function _simpleRequest($method, $argument, $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new UnaryCall($this->channel, $method, @@ -251,17 +263,17 @@ class BaseStub * output. 
* * @param string $method The name of the method to call - * @param array $arguments An array or Traversable of arguments to stream to the - * server * @param callable $deserialize A function that deserializes the response * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return ClientStreamingSurfaceActiveCall The active call object */ public function _clientStreamRequest($method, callable $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new ClientStreamingCall($this->channel, $method, @@ -281,21 +293,23 @@ class BaseStub } /** - * Call a remote method that takes a single argument and returns a stream of - * responses. + * Call a remote method that takes a single argument and returns a stream + * of responses. * * @param string $method The name of the method to call * @param mixed $argument The argument to the method * @param callable $deserialize A function that deserializes the responses * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return ServerStreamingSurfaceActiveCall The active call object */ public function _serverStreamRequest($method, $argument, callable $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new ServerStreamingCall($this->channel, $method, @@ -320,13 +334,15 @@ class BaseStub * @param string $method The name of the method to call * @param callable $deserialize A function that deserializes the responses * @param array $metadata A metadata map to send to the server + * (optional) + * @param array $options An array of options (optional) * * @return BidiStreamingSurfaceActiveCall The active call object */ public function _bidiRequest($method, callable $deserialize, - $metadata = [], - $options = []) + array $metadata = [], + array $options = []) { $call = new BidiStreamingCall($this->channel, $method, diff --git a/src/php/lib/Grpc/BidiStreamingCall.php b/src/php/lib/Grpc/BidiStreamingCall.php index f0e1e811de..b03bbd204f 100644 --- a/src/php/lib/Grpc/BidiStreamingCall.php +++ b/src/php/lib/Grpc/BidiStreamingCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that allows for sending and recieving messages in - * streams in any order. + * Represents an active call that allows for sending and recieving messages + * in streams in any order. */ class BidiStreamingCall extends AbstractCall { @@ -44,6 +44,7 @@ class BidiStreamingCall extends AbstractCall * Start the call. * * @param array $metadata Metadata to send with the call, if applicable + * (optional) */ public function start(array $metadata = []) { @@ -76,10 +77,10 @@ class BidiStreamingCall extends AbstractCall * writesDone is called. * * @param ByteBuffer $data The data to write - * @param array $options an array of options, possible keys: - * 'flags' => a number + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ - public function write($data, $options = []) + public function write($data, array $options = []) { $message_array = ['message' => $this->serializeMessage($data)]; if (array_key_exists('flags', $options)) { @@ -103,8 +104,8 @@ class BidiStreamingCall extends AbstractCall /** * Wait for the server to send the status, and return it. 
* - * @return \stdClass The status object, with integer $code, string $details, - * and array $metadata members + * @return \stdClass The status object, with integer $code, string + * $details, and array $metadata members */ public function getStatus() { diff --git a/src/php/lib/Grpc/ClientStreamingCall.php b/src/php/lib/Grpc/ClientStreamingCall.php index 20db809ea3..c542f08872 100644 --- a/src/php/lib/Grpc/ClientStreamingCall.php +++ b/src/php/lib/Grpc/ClientStreamingCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that sends a stream of messages and then gets a - * single response. + * Represents an active call that sends a stream of messages and then gets + * a single response. */ class ClientStreamingCall extends AbstractCall { @@ -44,8 +44,9 @@ class ClientStreamingCall extends AbstractCall * Start the call. * * @param array $metadata Metadata to send with the call, if applicable + * (optional) */ - public function start($metadata = []) + public function start(array $metadata = []) { $this->call->startBatch([ OP_SEND_INITIAL_METADATA => $metadata, @@ -57,8 +58,8 @@ class ClientStreamingCall extends AbstractCall * wait is called. * * @param ByteBuffer $data The data to write - * @param array $options an array of options, possible keys: - * 'flags' => a number + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ public function write($data, array $options = []) { diff --git a/src/php/lib/Grpc/ServerStreamingCall.php b/src/php/lib/Grpc/ServerStreamingCall.php index 5aeeafa94a..406512bf57 100644 --- a/src/php/lib/Grpc/ServerStreamingCall.php +++ b/src/php/lib/Grpc/ServerStreamingCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that sends a single message and then gets a stream - * of responses. + * Represents an active call that sends a single message and then gets a + * stream of responses. */ class ServerStreamingCall extends AbstractCall { @@ -45,10 +45,11 @@ class ServerStreamingCall extends AbstractCall * * @param mixed $data The data to send * @param array $metadata Metadata to send with the call, if applicable - * @param array $options an array of options, possible keys: - * 'flags' => a number + * (optional) + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ - public function start($data, $metadata = [], $options = []) + public function start($data, array $metadata = [], array $options = []) { $message_array = ['message' => $this->serializeMessage($data)]; if (array_key_exists('flags', $options)) { @@ -82,8 +83,8 @@ class ServerStreamingCall extends AbstractCall /** * Wait for the server to send the status, and return it. * - * @return \stdClass The status object, with integer $code, string $details, - * and array $metadata members + * @return \stdClass The status object, with integer $code, string + * $details, and array $metadata members */ public function getStatus() { diff --git a/src/php/lib/Grpc/UnaryCall.php b/src/php/lib/Grpc/UnaryCall.php index e8eb6487a8..3c1cb158ea 100644 --- a/src/php/lib/Grpc/UnaryCall.php +++ b/src/php/lib/Grpc/UnaryCall.php @@ -35,8 +35,8 @@ namespace Grpc; /** - * Represents an active call that sends a single message and then gets a single - * response. + * Represents an active call that sends a single message and then gets a + * single response. 
*/ class UnaryCall extends AbstractCall { @@ -45,10 +45,11 @@ class UnaryCall extends AbstractCall * * @param mixed $data The data to send * @param array $metadata Metadata to send with the call, if applicable - * @param array $options an array of options, possible keys: - * 'flags' => a number + * (optional) + * @param array $options An array of options, possible keys: + * 'flags' => a number (optional) */ - public function start($data, $metadata = [], $options = []) + public function start($data, array $metadata = [], array $options = []) { $message_array = ['message' => $this->serializeMessage($data)]; if (isset($options['flags'])) { diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py index ea3b6f3391..701c6af017 100644 --- a/src/python/grpcio/commands.py +++ b/src/python/grpcio/commands.py @@ -62,6 +62,7 @@ napoleon_numpy_docstring = True napoleon_include_special_with_doc = True html_theme = 'sphinx_rtd_theme' +copyright = "2016, The gRPC Authors" """ API_GLOSSARY = """ diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py index 6087276d51..e3c10156d0 100644 --- a/src/python/grpcio/grpc/__init__.py +++ b/src/python/grpcio/grpc/__init__.py @@ -31,6 +31,7 @@ import abc import enum +import sys import six @@ -767,8 +768,8 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): gRPC runtime to determine the status code of the RPC. Args: - code: The integer status code of the RPC to be transmitted to the - invocation side of the RPC. + code: A StatusCode value to be transmitted to the invocation side of the + RPC as the status code of the RPC. """ raise NotImplementedError() @@ -780,8 +781,8 @@ class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)): details to transmit. Args: - details: The details string of the RPC to be transmitted to - the invocation side of the RPC. + details: A string to be transmitted to the invocation side of the RPC as + the status details of the RPC. """ raise NotImplementedError() @@ -849,6 +850,26 @@ class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() +class ServiceRpcHandler(six.with_metaclass(abc.ABCMeta, GenericRpcHandler)): + """An implementation of RPC methods belonging to a service. + + A service handles RPC methods with structured names of the form + '/Service.Name/Service.MethodX', where 'Service.Name' is the value + returned by service_name(), and 'Service.MethodX' is the service method + name. A service can have multiple service methods names, but only a single + service name. + """ + + @abc.abstractmethod + def service_name(self): + """Returns this services name. + + Returns: + The service name. + """ + raise NotImplementedError() + + ############################# Server Interface ############################### @@ -905,21 +926,6 @@ class Server(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() @abc.abstractmethod - def add_shutdown_handler(self, shutdown_handler): - """Adds a handler to be called on server shutdown. - - Shutdown handlers are run on server stop() or in the event that a running - server is destroyed unexpectedly. The handlers are run in series before - the stop grace period. - - Args: - shutdown_handler: A function taking a single arg, a time in seconds - within which the handler should complete. None indicates the handler can - run for any duration. - """ - raise NotImplementedError() - - @abc.abstractmethod def start(self): """Starts this Server's service of RPCs. 
@@ -929,7 +935,7 @@ class Server(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() @abc.abstractmethod - def stop(self, grace, shutdown_handler_grace=None): + def stop(self, grace): """Stops this Server's service of RPCs. All calls to this method immediately stop service of new RPCs. When existing @@ -952,8 +958,6 @@ class Server(six.with_metaclass(abc.ABCMeta)): aborted by this Server's stopping. If None, all RPCs will be aborted immediately and this method will block until this Server is completely stopped. - shutdown_handler_grace: A duration of time in seconds or None. This - value is passed to all shutdown handlers. Returns: A threading.Event that will be set when this Server has completely @@ -1248,8 +1252,7 @@ def secure_channel(target, credentials, options=None): credentials._credentials) -def server(thread_pool, handlers=None, options=None, exit_grace=None, - exit_shutdown_handler_grace=None): +def server(thread_pool, handlers=None, options=None): """Creates a Server with which RPCs can be serviced. Args: @@ -1262,19 +1265,13 @@ def server(thread_pool, handlers=None, options=None, exit_grace=None, returned Server is started. options: A sequence of string-value pairs according to which to configure the created server. - exit_grace: The grace period to use when terminating - running servers at interpreter exit. None indicates unspecified. - exit_shutdown_handler_grace: The shutdown handler grace to use when - terminating running servers at interpreter exit. None indicates - unspecified. Returns: A Server with which RPCs can be serviced. """ from grpc import _server return _server.Server(thread_pool, () if handlers is None else handlers, - () if options is None else options, exit_grace, - exit_shutdown_handler_grace) + () if options is None else options) ################################### __all__ ################################# @@ -1304,6 +1301,7 @@ __all__ = ( 'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler', + 'ServiceRpcHandler', 'Server', 'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler', @@ -1321,3 +1319,24 @@ __all__ = ( 'secure_channel', 'server', ) + + +############################### Extension Shims ################################ + + +# Here to maintain backwards compatibility; avoid using these in new code! +try: + import grpc_tools + sys.modules.update({'grpc.tools': grpc_tools}) +except ImportError: + pass +try: + import grpc_health + sys.modules.update({'grpc.health': grpc_health}) +except ImportError: + pass +try: + import grpc_reflection + sys.modules.update({'grpc.reflection': grpc_reflection}) +except ImportError: + pass diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py index 53a26727ab..41e9163cd6 100644 --- a/src/python/grpcio/grpc/_channel.py +++ b/src/python/grpcio/grpc/_channel.py @@ -36,8 +36,8 @@ import time import grpc from grpc import _common from grpc import _grpcio_metadata -from grpc.framework.foundation import callable_util from grpc._cython import cygrpc +from grpc.framework.foundation import callable_util _USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__) @@ -99,6 +99,22 @@ def _wait_once_until(condition, until): else: condition.wait(timeout=remaining) +_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = ( + 'Internal gRPC call error %d. 
' + + 'Please report to https://github.com/grpc/grpc/issues') + +def _check_call_error(call_error, metadata): + if call_error == cygrpc.CallError.invalid_metadata: + raise ValueError('metadata was invalid: %s' % metadata) + elif call_error != cygrpc.CallError.ok: + raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error) + +def _call_error_set_RPCstate(state, call_error, metadata): + if call_error == cygrpc.CallError.invalid_metadata: + _abort(state, grpc.StatusCode.INTERNAL, 'metadata was invalid: %s' % metadata) + else: + _abort(state, grpc.StatusCode.INTERNAL, + _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error) class _RPCState(object): @@ -358,7 +374,7 @@ class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): if self._state.callbacks is None: return False else: - self._state.callbacks.append(lambda: callback()) + self._state.callbacks.append(callback) return True def initial_metadata(self): @@ -435,10 +451,10 @@ def _end_unary_response_blocking(state, with_call, deadline): class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -472,7 +488,8 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): None, 0, completion_queue, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) - call.start_client_batch(cygrpc.Operations(operations), None) + call_error = call.start_client_batch(cygrpc.Operations(operations), None) + _check_call_error(call_error, metadata) _handle_event(completion_queue.poll(), state, self._response_deserializer) return state, deadline @@ -490,23 +507,28 @@ class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable): if rendezvous: return rendezvous else: - call = self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) event_handler = _event_handler(state, call, self._response_deserializer) with state.condition: - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() return _Rendezvous(state, call, self._response_deserializer, deadline) class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -518,7 +540,7 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): raise rendezvous else: state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None) - call = self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not 
None: call.set_credentials(credentials._credentials) @@ -535,17 +557,22 @@ class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable): cygrpc.operation_send_close_from_client(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() return _Rendezvous(state, call, self._response_deserializer, deadline) class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -569,7 +596,8 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), None) + call_error = call.start_client_batch(cygrpc.Operations(operations), None) + _check_call_error(call_error, metadata) _consume_request_iterator( request_iterator, state, call, self._request_serializer) while True: @@ -597,7 +625,7 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): self, request_iterator, timeout=None, metadata=None, credentials=None): deadline, deadline_timespec = _deadline(timeout) state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None) - call = self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) @@ -613,7 +641,12 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): cygrpc.operation_receive_message(_EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() _consume_request_iterator( request_iterator, state, call, self._request_serializer) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -622,10 +655,10 @@ class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable): class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): def __init__( - self, channel, create_managed_call, method, request_serializer, + self, channel, managed_call, method, request_serializer, response_deserializer): self._channel = channel - self._create_managed_call = create_managed_call + self._managed_call = managed_call self._method = method self._request_serializer = request_serializer self._response_deserializer = response_deserializer @@ -634,7 +667,7 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): self, request_iterator, timeout=None, metadata=None, credentials=None): deadline, deadline_timespec = _deadline(timeout) state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None) - call = 
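
The _channel.py changes above stop ignoring the cygrpc.CallError returned by start_client_batch: blocking invocations raise ValueError, while future and streaming invocations return an already-terminated rendezvous. A minimal sketch of the resulting caller-visible behavior, mirroring the new _invalid_metadata_test (the address and method path are illustrative; no server needs to be running):

import grpc

channel = grpc.insecure_channel('localhost:50051')
unary_unary = channel.unary_unary('/test/UnaryUnary')
# Metadata keys containing uppercase characters are rejected by the core.
invalid_metadata = (('InVaLiD', 'example-value'),)

# Blocking call: _check_call_error turns the CallError into a ValueError.
try:
    unary_unary(b'\x07\x08', metadata=invalid_metadata)
except ValueError as error:
    print(error)  # metadata was invalid: ...

# Future call: _call_error_set_RPCstate folds the error into the RPC state,
# so the returned future terminates with StatusCode.INTERNAL.
response_future = unary_unary.future(b'\x07\x08', metadata=invalid_metadata)
try:
    response_future.result()
except grpc.RpcError as error:
    assert error.code() == grpc.StatusCode.INTERNAL
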
self._create_managed_call( + call, drive_call = self._managed_call( None, 0, self._method, None, deadline_timespec) if credentials is not None: call.set_credentials(credentials._credentials) @@ -649,7 +682,12 @@ class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable): _common.cygrpc_metadata(metadata), _EMPTY_FLAGS), cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS), ) - call.start_client_batch(cygrpc.Operations(operations), event_handler) + call_error = call.start_client_batch(cygrpc.Operations(operations), + event_handler) + if call_error != cygrpc.CallError.ok: + _call_error_set_RPCstate(state, call_error, metadata) + return _Rendezvous(state, None, None, deadline) + drive_call() _consume_request_iterator( request_iterator, state, call, self._request_serializer) return _Rendezvous(state, call, self._response_deserializer, deadline) @@ -687,16 +725,13 @@ def _run_channel_spin_thread(state): channel_spin_thread.start() -def _create_channel_managed_call(state): - def create_channel_managed_call(parent, flags, method, host, deadline): - """Creates a managed cygrpc.Call. +def _channel_managed_call_management(state): + def create(parent, flags, method, host, deadline): + """Creates a managed cygrpc.Call and a function to call to drive it. - Callers of this function must conduct at least one operation on the returned - call. The tags associated with operations conducted on the returned call - must be no-argument callables that return None to indicate that this channel - should continue polling for events associated with the call and return the - call itself to indicate that no more events associated with the call will be - generated. + If operations are successfully added to the returned cygrpc.Call, the + returned function must be called. If operations are not successfully added + to the returned cygrpc.Call, the returned function must not be called. Args: parent: A cygrpc.Call to be used as the parent of the created call. @@ -706,18 +741,22 @@ def _create_channel_managed_call(state): deadline: A cygrpc.Timespec to be the deadline of the created call. Returns: - A cygrpc.Call with which to conduct an RPC. + A cygrpc.Call with which to conduct an RPC and a function to call if + operations are successfully started on the call. """ - with state.lock: - call = state.channel.create_call( - parent, flags, state.completion_queue, method, host, deadline) - if state.managed_calls is None: - state.managed_calls = set((call,)) - _run_channel_spin_thread(state) - else: - state.managed_calls.add(call) - return call - return create_channel_managed_call + call = state.channel.create_call( + parent, flags, state.completion_queue, method, host, deadline) + + def drive(): + with state.lock: + if state.managed_calls is None: + state.managed_calls = set((call,)) + _run_channel_spin_thread(state) + else: + state.managed_calls.add(call) + + return call, drive + return create class _ChannelConnectivityState(object): @@ -847,6 +886,7 @@ def _options(options): class Channel(grpc.Channel): + """A cygrpc.Channel-backed implementation of grpc.Channel.""" def __init__(self, target, options, credentials): """Constructor. 
@@ -871,25 +911,25 @@ class Channel(grpc.Channel): def unary_unary( self, method, request_serializer=None, response_deserializer=None): return _UnaryUnaryMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def unary_stream( self, method, request_serializer=None, response_deserializer=None): return _UnaryStreamMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def stream_unary( self, method, request_serializer=None, response_deserializer=None): return _StreamUnaryMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def stream_stream( self, method, request_serializer=None, response_deserializer=None): return _StreamStreamMultiCallable( - self._channel, _create_channel_managed_call(self._call_state), + self._channel, _channel_managed_call_management(self._call_state), _common.encode(method), request_serializer, response_deserializer) def __del__(self): diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py index d83a2e6ded..5223712dfa 100644 --- a/src/python/grpcio/grpc/_server.py +++ b/src/python/grpcio/grpc/_server.py @@ -60,8 +60,7 @@ _CANCELLED = 'cancelled' _EMPTY_FLAGS = 0 _EMPTY_METADATA = cygrpc.Metadata(()) -_DEFAULT_EXIT_GRACE = 1.0 -_DEFAULT_EXIT_SHUTDOWN_HANDLER_GRACE = 5.0 +_UNEXPECTED_EXIT_SERVER_GRACE = 1.0 def _serialized_request(request_event): @@ -596,18 +595,14 @@ class _ServerStage(enum.Enum): class _ServerState(object): - def __init__(self, completion_queue, server, generic_handlers, thread_pool, - exit_grace, exit_shutdown_handler_grace): + def __init__(self, completion_queue, server, generic_handlers, thread_pool): self.lock = threading.Lock() self.completion_queue = completion_queue self.server = server self.generic_handlers = list(generic_handlers) self.thread_pool = thread_pool - self.exit_grace = exit_grace - self.exit_shutdown_handler_grace = exit_shutdown_handler_grace self.stage = _ServerStage.STOPPED self.shutdown_events = None - self.shutdown_handlers = [] # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields. 
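
The _server.py rework removes the shutdown-handler plumbing, so Server.stop() now takes only a grace period and still returns a threading.Event. A short usage sketch of the simplified API, with an illustrative port and pool size:

from concurrent import futures
import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
server.add_insecure_port('[::]:50051')
server.start()

# Refuse new RPCs immediately; give in-flight RPCs up to 5 seconds before they
# are cancelled. Passing None instead cancels all in-flight RPCs right away.
shutdown_event = server.stop(5.0)
shutdown_event.wait()
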
self.rpc_states = set() @@ -677,45 +672,41 @@ def _serve(state): return -def _stop(state, grace, shutdown_handler_grace): - shutdown_event = threading.Event() - - def cancel_all_calls_after_grace(): - with state.lock: - if state.stage is _ServerStage.STOPPED: - shutdown_event.set() - return - elif state.stage is _ServerStage.STARTED: - do_shutdown = True - state.stage = _ServerStage.GRACE - state.shutdown_events = [] - else: - do_shutdown = False - state.shutdown_events.append(shutdown_event) - - if do_shutdown: - # Run Shutdown Handlers without the lock - for handler in state.shutdown_handlers: - handler(shutdown_handler_grace) - with state.lock: +def _stop(state, grace): + with state.lock: + if state.stage is _ServerStage.STOPPED: + shutdown_event = threading.Event() + shutdown_event.set() + return shutdown_event + else: + if state.stage is _ServerStage.STARTED: state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG) state.stage = _ServerStage.GRACE + state.shutdown_events = [] state.due.add(_SHUTDOWN_TAG) - - if not shutdown_event.wait(timeout=grace): - with state.lock: + shutdown_event = threading.Event() + state.shutdown_events.append(shutdown_event) + if grace is None: state.server.cancel_all_calls() # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop. for rpc_state in state.rpc_states: with rpc_state.condition: rpc_state.client = _CANCELLED rpc_state.condition.notify_all() - - if grace is None: - cancel_all_calls_after_grace() - else: - threading.Thread(target=cancel_all_calls_after_grace).start() - + else: + def cancel_all_calls_after_grace(): + shutdown_event.wait(timeout=grace) + with state.lock: + state.server.cancel_all_calls() + # TODO(https://github.com/grpc/grpc/issues/6597): delete this loop. + for rpc_state in state.rpc_states: + with rpc_state.condition: + rpc_state.client = _CANCELLED + rpc_state.condition.notify_all() + thread = threading.Thread(target=cancel_all_calls_after_grace) + thread.start() + return shutdown_event + shutdown_event.wait() return shutdown_event @@ -725,12 +716,12 @@ def _start(state): raise ValueError('Cannot start already-started server!') state.server.start() state.stage = _ServerStage.STARTED - _request_call(state) + _request_call(state) def cleanup_server(timeout): if timeout is None: - _stop(state, state.exit_grace, state.exit_shutdown_handler_grace).wait() + _stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait() else: - _stop(state, timeout, 0).wait() + _stop(state, timeout).wait() thread = _common.CleanupThread( cleanup_server, target=_serve, args=(state,)) @@ -738,16 +729,12 @@ def _start(state): class Server(grpc.Server): - def __init__(self, thread_pool, generic_handlers, options, exit_grace, - exit_shutdown_handler_grace): + def __init__(self, thread_pool, generic_handlers, options): completion_queue = cygrpc.CompletionQueue() server = cygrpc.Server(_common.channel_args(options)) server.register_completion_queue(completion_queue) self._state = _ServerState( - completion_queue, server, generic_handlers, thread_pool, - _DEFAULT_EXIT_GRACE if exit_grace is None else exit_grace, - _DEFAULT_EXIT_SHUTDOWN_HANDLER_GRACE if exit_shutdown_handler_grace - is None else exit_shutdown_handler_grace) + completion_queue, server, generic_handlers, thread_pool) def add_generic_rpc_handlers(self, generic_rpc_handlers): _add_generic_handlers(self._state, generic_rpc_handlers) @@ -758,14 +745,11 @@ class Server(grpc.Server): def add_secure_port(self, address, server_credentials): return _add_secure_port(self._state, _common.encode(address), 
server_credentials) - def add_shutdown_handler(self, handler): - self._state.shutdown_handlers.append(handler) - def start(self): _start(self._state) - def stop(self, grace, shutdown_handler_grace=None): - return _stop(self._state, grace, shutdown_handler_grace) + def stop(self, grace): + return _stop(self._state, grace) def __del__(self): - _stop(self._state, None, None) + _stop(self._state, None) diff --git a/src/python/grpcio/grpc/_utilities.py b/src/python/grpcio/grpc/_utilities.py index 4850967fbc..a375896e6e 100644 --- a/src/python/grpcio/grpc/_utilities.py +++ b/src/python/grpcio/grpc/_utilities.py @@ -53,13 +53,17 @@ class RpcMethodHandler( pass -class DictionaryGenericHandler(grpc.GenericRpcHandler): +class DictionaryGenericHandler(grpc.ServiceRpcHandler): def __init__(self, service, method_handlers): + self._name = service self._method_handlers = { _common.fully_qualified_method(service, method): method_handler for method, method_handler in six.iteritems(method_handlers)} + def service_name(self): + return self._name + def service(self, handler_call_details): return self._method_handlers.get(handler_call_details.method) diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py index f363f5fdc5..b226e690fd 100644 --- a/src/python/grpcio/support.py +++ b/src/python/grpcio/support.py @@ -100,9 +100,15 @@ def diagnose_compile_error(build_ext, error): .format(source) ) +def diagnose_attribute_error(build_ext, error): + if any('_needs_stub' in arg for arg in error.args): + raise commands.CommandError( + "We expect a missing `_needs_stub` attribute from older versions of " + "setuptools. Consider upgrading setuptools.") _ERROR_DIAGNOSES = { - errors.CompileError: diagnose_compile_error + errors.CompileError: diagnose_compile_error, + AttributeError: diagnose_attribute_error } def diagnose_build_ext_error(build_ext, error, formatted): diff --git a/src/python/grpcio_health_checking/MANIFEST.in b/src/python/grpcio_health_checking/MANIFEST.in index 7407f646d1..5255e4c403 100644 --- a/src/python/grpcio_health_checking/MANIFEST.in +++ b/src/python/grpcio_health_checking/MANIFEST.in @@ -1,4 +1,4 @@ include grpc_version.py include health_commands.py -graft grpc +graft grpc_health global-exclude *.pyc diff --git a/src/python/grpcio_health_checking/grpc/health/__init__.py b/src/python/grpcio_health_checking/grpc_health/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio_health_checking/grpc/health/__init__.py +++ b/src/python/grpcio_health_checking/grpc_health/__init__.py diff --git a/src/python/grpcio_health_checking/grpc/health/v1/__init__.py b/src/python/grpcio_health_checking/grpc_health/v1/__init__.py index 7086519106..7086519106 100644 --- a/src/python/grpcio_health_checking/grpc/health/v1/__init__.py +++ b/src/python/grpcio_health_checking/grpc_health/v1/__init__.py diff --git a/src/python/grpcio_health_checking/grpc/health/v1/health.py b/src/python/grpcio_health_checking/grpc_health/v1/health.py index 8108ac1096..0df679b0e2 100644 --- a/src/python/grpcio_health_checking/grpc/health/v1/health.py +++ b/src/python/grpcio_health_checking/grpc_health/v1/health.py @@ -33,7 +33,7 @@ import threading import grpc -from grpc.health.v1 import health_pb2 +from grpc_health.v1 import health_pb2 class HealthServicer(health_pb2.HealthServicer): diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py index 66df25da63..0c420a655f 100644 --- a/src/python/grpcio_health_checking/health_commands.py +++ 
b/src/python/grpcio_health_checking/health_commands.py @@ -54,7 +54,7 @@ class CopyProtoModules(setuptools.Command): if os.path.isfile(HEALTH_PROTO): shutil.copyfile( HEALTH_PROTO, - os.path.join(ROOT_DIR, 'grpc/health/v1/health.proto')) + os.path.join(ROOT_DIR, 'grpc_health/v1/health.proto')) class BuildPackageProtos(setuptools.Command): @@ -74,5 +74,5 @@ class BuildPackageProtos(setuptools.Command): # directory is provided as an 'include' directory. We assume it's the '' key # to `self.distribution.package_dir` (and get a key error if it's not # there). - from grpc.tools import command + from grpc_tools import command command.build_package_protos(self.distribution.package_dir['']) diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py index 8c92ee16a9..e88f389ba8 100644 --- a/src/python/grpcio_health_checking/setup.py +++ b/src/python/grpcio_health_checking/setup.py @@ -66,7 +66,6 @@ setuptools.setup( license='3-clause BSD', package_dir=PACKAGE_DIRECTORIES, packages=setuptools.find_packages('.'), - namespace_packages=['grpc'], install_requires=INSTALL_REQUIRES, setup_requires=SETUP_REQUIRES, cmdclass=COMMAND_CLASS diff --git a/src/python/grpcio_reflection/grpc/reflection/__init__.py b/src/python/grpcio_reflection/grpc_reflection/__init__.py index d5ad73a74a..d5ad73a74a 100644 --- a/src/python/grpcio_reflection/grpc/reflection/__init__.py +++ b/src/python/grpcio_reflection/grpc_reflection/__init__.py diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py index d5ad73a74a..d5ad73a74a 100644 --- a/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py +++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/__init__.py diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py index 3c399b0d79..bfcbce8e04 100644 --- a/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py +++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py @@ -35,7 +35,7 @@ import grpc from google.protobuf import descriptor_pb2 from google.protobuf import descriptor_pool -from grpc.reflection.v1alpha import reflection_pb2 +from grpc_reflection.v1alpha import reflection_pb2 _POOL = descriptor_pool.Default() diff --git a/src/python/grpcio_reflection/reflection_commands.py b/src/python/grpcio_reflection/reflection_commands.py index d189aee577..dee5491e0a 100644 --- a/src/python/grpcio_reflection/reflection_commands.py +++ b/src/python/grpcio_reflection/reflection_commands.py @@ -54,7 +54,7 @@ class CopyProtoModules(setuptools.Command): if os.path.isfile(HEALTH_PROTO): shutil.copyfile( HEALTH_PROTO, - os.path.join(ROOT_DIR, 'grpc/reflection/v1alpha/reflection.proto')) + os.path.join(ROOT_DIR, 'grpc_reflection/v1alpha/reflection.proto')) class BuildPackageProtos(setuptools.Command): @@ -74,5 +74,5 @@ class BuildPackageProtos(setuptools.Command): # directory is provided as an 'include' directory. We assume it's the '' key # to `self.distribution.package_dir` (and get a key error if it's not # there). 
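
The grpcio_health_checking and grpcio_reflection packages move out of the shared grpc namespace package into standalone grpc_health and grpc_reflection top-level packages, matching grpc_tools, and the namespace_packages=['grpc'] declarations are dropped accordingly. New code should import the underscored packages directly; the sys.modules shims added in grpc/__init__.py only keep the old dotted paths working for existing callers. A hedged sketch of the new-style imports, assuming the grpcio-health-checking, grpcio-reflection and grpcio-tools distributions are installed:

# Import the standalone packages directly; grpc.health, grpc.reflection and
# grpc.tools now resolve only through the backwards-compatibility shims.
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_reflection.v1alpha import reflection_pb2
from grpc_tools import protoc

servicer = health.HealthServicer()  # same servicer class, new import path
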
- from grpc.tools import command + from grpc_tools import command command.build_package_protos(self.distribution.package_dir['']) diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py index df95af4de1..cfc41f4fe7 100644 --- a/src/python/grpcio_reflection/setup.py +++ b/src/python/grpcio_reflection/setup.py @@ -66,7 +66,6 @@ setuptools.setup( license='3-clause BSD', package_dir=PACKAGE_DIRECTORIES, packages=setuptools.find_packages('.'), - namespace_packages=['grpc'], install_requires=INSTALL_REQUIRES, setup_requires=SETUP_REQUIRES, cmdclass=COMMAND_CLASS diff --git a/src/python/grpcio_tests/commands.py b/src/python/grpcio_tests/commands.py index 5ee551cfe1..e822971fe0 100644 --- a/src/python/grpcio_tests/commands.py +++ b/src/python/grpcio_tests/commands.py @@ -100,7 +100,7 @@ class BuildProtoModules(setuptools.Command): pass def run(self): - import grpc.tools.protoc as protoc + import grpc_tools.protoc as protoc include_regex = re.compile(self.include) exclude_regex = re.compile(self.exclude) if self.exclude else None @@ -116,7 +116,7 @@ class BuildProtoModules(setuptools.Command): # but we currently have name conflicts in src/proto for path in paths: command = [ - 'grpc.tools.protoc', + 'grpc_tools.protoc', '-I {}'.format(PROTO_STEM), '--python_out={}'.format(PROTO_STEM), '--grpc_python_out={}'.format(PROTO_STEM), diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py index 01d5fa875b..375fbd6c77 100644 --- a/src/python/grpcio_tests/setup.py +++ b/src/python/grpcio_tests/setup.py @@ -35,7 +35,7 @@ import sys import setuptools -import grpc.tools.command +import grpc_tools.command PY3 = sys.version_info.major == 3 @@ -68,7 +68,7 @@ COMMAND_CLASS = { # Run `preprocess` *before* doing any packaging! 'preprocess': commands.GatherProto, - 'build_package_protos': grpc.tools.command.BuildPackageProtos, + 'build_package_protos': grpc_tools.command.BuildPackageProtos, 'build_py': commands.BuildPy, 'run_interop': commands.RunInterop, 'test_lite': commands.TestLite diff --git a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py index 80300d13df..5dde72b169 100644 --- a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py +++ b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py @@ -27,14 +27,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Tests of grpc.health.v1.health.""" +"""Tests of grpc_health.v1.health.""" import unittest import grpc from grpc.framework.foundation import logging_pool -from grpc.health.v1 import health -from grpc.health.v1 import health_pb2 +from grpc_health.v1 import health +from grpc_health.v1 import health_pb2 from tests.unit.framework.common import test_constants diff --git a/src/python/grpcio_tests/tests/http2/_negative_http2_client.py b/src/python/grpcio_tests/tests/http2/_negative_http2_client.py new file mode 100644 index 0000000000..f8604683b3 --- /dev/null +++ b/src/python/grpcio_tests/tests/http2/_negative_http2_client.py @@ -0,0 +1,153 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""The Python client used to test negative http2 conditions.""" + +import argparse + +import grpc +from src.proto.grpc.testing import test_pb2 +from src.proto.grpc.testing import messages_pb2 + +def _validate_payload_type_and_length(response, expected_type, expected_length): + if response.payload.type is not expected_type: + raise ValueError( + 'expected payload type %s, got %s' % + (expected_type, type(response.payload.type))) + elif len(response.payload.body) != expected_length: + raise ValueError( + 'expected payload body size %d, got %d' % + (expected_length, len(response.payload.body))) + +def _expect_status_code(call, expected_code): + if call.code() != expected_code: + raise ValueError( + 'expected code %s, got %s' % (expected_code, call.code())) + +def _expect_status_details(call, expected_details): + if call.details() != expected_details: + raise ValueError( + 'expected message %s, got %s' % (expected_details, call.details())) + +def _validate_status_code_and_details(call, expected_code, expected_details): + _expect_status_code(call, expected_code) + _expect_status_details(call, expected_details) + +# common requests +_REQUEST_SIZE = 314159 +_RESPONSE_SIZE = 271828 + +_SIMPLE_REQUEST = messages_pb2.SimpleRequest( + response_type=messages_pb2.COMPRESSABLE, + response_size=_RESPONSE_SIZE, + payload=messages_pb2.Payload(body=b'\x00' * _REQUEST_SIZE)) + +def _goaway(stub): + first_response = stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(first_response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + second_response = stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(second_response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + +def _rst_after_header(stub): + resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST) + _validate_status_code_and_details(resp_future, grpc.StatusCode.UNAVAILABLE, "") + +def _rst_during_data(stub): + resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST) + _validate_status_code_and_details(resp_future, grpc.StatusCode.UNKNOWN, "") + +def _rst_after_data(stub): + resp_future = stub.UnaryCall.future(_SIMPLE_REQUEST) + _validate_payload_type_and_length(next(resp_future), + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + _validate_status_code_and_details(resp_future, grpc.StatusCode.UNKNOWN, "") + +def _ping(stub): + response = 
stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + +def _max_streams(stub): + # send one req to ensure server sets MAX_STREAMS + response = stub.UnaryCall(_SIMPLE_REQUEST) + _validate_payload_type_and_length(response, + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + + # give the streams a workout + futures = [] + for _ in range(15): + futures.append(stub.UnaryCall.future(_SIMPLE_REQUEST)) + for future in futures: + _validate_payload_type_and_length(future.result(), + messages_pb2.COMPRESSABLE, _RESPONSE_SIZE) + +def _run_test_case(test_case, stub): + if test_case == 'goaway': + _goaway(stub) + elif test_case == 'rst_after_header': + _rst_after_header(stub) + elif test_case == 'rst_during_data': + _rst_during_data(stub) + elif test_case == 'rst_after_data': + _rst_after_data(stub) + elif test_case =='ping': + _ping(stub) + elif test_case == 'max_streams': + _max_streams(stub) + else: + raise ValueError("Invalid test case: %s" % test_case) + +def _args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--server_host', help='the host to which to connect', type=str, + default="127.0.0.1") + parser.add_argument( + '--server_port', help='the port to which to connect', type=int, + default="8080") + parser.add_argument( + '--test_case', help='the test case to execute', type=str, + default="goaway") + return parser.parse_args() + +def _stub(server_host, server_port): + target = '{}:{}'.format(server_host, server_port) + channel = grpc.insecure_channel(target) + return test_pb2.TestServiceStub(channel) + +def main(): + args = _args() + stub = _stub(args.server_host, args.server_port) + _run_test_case(args.test_case, stub) + + +if __name__ == '__main__': + main() diff --git a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py index 936c895bd2..4fb22b4d9d 100644 --- a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py @@ -35,13 +35,13 @@ import unittest import grpc from src.proto.grpc.testing import test_pb2 -from tests.interop import _interop_test_case +from tests.interop import _intraop_test_case from tests.interop import methods from tests.interop import server -class InsecureInteropTest( - _interop_test_case.InteropTestCase, +class InsecureIntraopTest( + _intraop_test_case.IntraopTestCase, unittest.TestCase): def setUp(self): diff --git a/src/python/grpcio_tests/tests/interop/_interop_test_case.py b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py index ccea17a66d..fe1c173992 100644 --- a/src/python/grpcio_tests/tests/interop/_interop_test_case.py +++ b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py @@ -32,7 +32,7 @@ from tests.interop import methods -class InteropTestCase(object): +class IntraopTestCase(object): """Unit test methods. 
This class must be mixed in with unittest.TestCase and a class that defines diff --git a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py index eaca553e1b..3665c69726 100644 --- a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py +++ b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py @@ -35,15 +35,15 @@ import unittest import grpc from src.proto.grpc.testing import test_pb2 -from tests.interop import _interop_test_case +from tests.interop import _intraop_test_case from tests.interop import methods from tests.interop import resources _SERVER_HOST_OVERRIDE = 'foo.test.google.fr' -class SecureInteropTest( - _interop_test_case.InteropTestCase, +class SecureIntraopTest( + _intraop_test_case.IntraopTestCase, unittest.TestCase): def setUp(self): diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py index 52e56f3502..9038ae5751 100644 --- a/src/python/grpcio_tests/tests/interop/methods.py +++ b/src/python/grpcio_tests/tests/interop/methods.py @@ -33,7 +33,6 @@ import enum import json import os import threading -import time from oauth2client import client as oauth2client_client @@ -196,16 +195,6 @@ def _server_streaming(stub): response, messages_pb2.COMPRESSABLE, sizes[index]) -def _cancel_after_begin(stub): - sizes = (27182, 8, 1828, 45904,) - payloads = (messages_pb2.Payload(body=b'\x00' * size) for size in sizes) - requests = (messages_pb2.StreamingInputCallRequest(payload=payload) - for payload in payloads) - response_future = stub.StreamingInputCall.future(requests) - response_future.cancel() - if not response_future.cancelled(): - raise ValueError('expected call to be cancelled') - class _Pipe(object): @@ -265,6 +254,16 @@ def _ping_pong(stub): response, messages_pb2.COMPRESSABLE, response_size) +def _cancel_after_begin(stub): + with _Pipe() as pipe: + response_future = stub.StreamingInputCall.future(pipe) + response_future.cancel() + if not response_future.cancelled(): + raise ValueError('expected cancelled method to return True') + if response_future.code() is not grpc.StatusCode.CANCELLED: + raise ValueError('expected status code CANCELLED') + + def _cancel_after_first_response(stub): request_response_sizes = (31415, 9, 2653, 58979,) request_payload_sizes = (27182, 8, 1828, 45904,) @@ -302,7 +301,6 @@ def _timeout_on_sleeping_server(stub): response_type=messages_pb2.COMPRESSABLE, payload=messages_pb2.Payload(body=b'\x00' * request_payload_size)) pipe.add(request) - time.sleep(0.1) try: next(response_iterator) except grpc.RpcError as rpc_error: diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py index 64fd97256e..f8ae05bb7a 100644 --- a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py @@ -44,7 +44,7 @@ import threading import unittest import grpc -from grpc.tools import protoc +from grpc_tools import protoc from tests.unit.framework.common import test_constants _MESSAGES_IMPORT = b'import "messages.proto";' diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py index 87264cf9ba..c7bfeaeb95 100644 --- a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py +++ 
b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py @@ -27,14 +27,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Tests of grpc.reflection.v1alpha.reflection.""" +"""Tests of grpc_reflection.v1alpha.reflection.""" import unittest import grpc from grpc.framework.foundation import logging_pool -from grpc.reflection.v1alpha import reflection -from grpc.reflection.v1alpha import reflection_pb2 +from grpc_reflection.v1alpha import reflection +from grpc_reflection.v1alpha import reflection_pb2 from google.protobuf import descriptor_pool from google.protobuf import descriptor_pb2 diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json index 04a2e44178..0109ee2173 100644 --- a/src/python/grpcio_tests/tests/tests.json +++ b/src/python/grpcio_tests/tests/tests.json @@ -1,7 +1,7 @@ [ "health_check._health_servicer_test.HealthServicerTest", - "interop._insecure_interop_test.InsecureInteropTest", - "interop._secure_interop_test.SecureInteropTest", + "interop._insecure_intraop_test.InsecureIntraopTest", + "interop._secure_intraop_test.SecureIntraopTest", "protoc_plugin._python_plugin_test.PythonPluginTest", "protoc_plugin._split_definitions_test.SameCommonTest", "protoc_plugin._split_definitions_test.SameSeparateTest", @@ -27,7 +27,7 @@ "unit._cython.cygrpc_test.TypeSmokeTest", "unit._empty_message_test.EmptyMessageTest", "unit._exit_test.ExitTest", - "unit._exit_test.ShutdownHandlerTest", + "unit._invalid_metadata_test.InvalidMetadataTest", "unit._metadata_code_details_test.MetadataCodeDetailsTest", "unit._metadata_test.MetadataTest", "unit._rpc_test.RPCTest", diff --git a/src/python/grpcio_tests/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py index 2fe89499f5..51dc425420 100644 --- a/src/python/grpcio_tests/tests/unit/_api_test.py +++ b/src/python/grpcio_tests/tests/unit/_api_test.py @@ -65,6 +65,7 @@ class AllTest(unittest.TestCase): 'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler', + 'ServiceRpcHandler', 'Server', 'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler', diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py index e0a7d15aa7..46a964db8c 100644 --- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py +++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py @@ -64,7 +64,7 @@ class ChannelReadyFutureTest(unittest.TestCase): ready_future = grpc.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) with self.assertRaises(grpc.FutureTimeoutError): - ready_future.result(test_constants.SHORT_TIMEOUT) + ready_future.result(timeout=test_constants.SHORT_TIMEOUT) self.assertFalse(ready_future.cancelled()) self.assertFalse(ready_future.done()) self.assertTrue(ready_future.running()) @@ -85,7 +85,7 @@ class ChannelReadyFutureTest(unittest.TestCase): ready_future = grpc.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) - self.assertIsNone(ready_future.result(test_constants.SHORT_TIMEOUT)) + self.assertIsNone(ready_future.result(timeout=test_constants.LONG_TIMEOUT)) value_passed_to_callback = callback.block_until_called() self.assertIs(ready_future, value_passed_to_callback) self.assertFalse(ready_future.cancelled()) diff --git a/src/python/grpcio_tests/tests/unit/_exit_test.py 
b/src/python/grpcio_tests/tests/unit/_exit_test.py index 342f5fcc10..5a4a32887c 100644 --- a/src/python/grpcio_tests/tests/unit/_exit_test.py +++ b/src/python/grpcio_tests/tests/unit/_exit_test.py @@ -43,8 +43,6 @@ import threading import time import unittest -import grpc -from grpc.framework.foundation import logging_pool from tests.unit import _exit_scenarios SCENARIO_FILE = os.path.abspath(os.path.join( @@ -54,7 +52,7 @@ BASE_COMMAND = [INTERPRETER, SCENARIO_FILE] BASE_SIGTERM_COMMAND = BASE_COMMAND + ['--wait_for_interrupt'] INIT_TIME = 1.0 -SHUTDOWN_GRACE = 5.0 + processes = [] process_lock = threading.Lock() @@ -184,24 +182,5 @@ class ExitTest(unittest.TestCase): interrupt_and_wait(process) -class _ShutDownHandler(object): - - def __init__(self): - self.seen_handler_grace = None - - def shutdown_handler(self, handler_grace): - self.seen_handler_grace = handler_grace - - -class ShutdownHandlerTest(unittest.TestCase): - - def test_shutdown_handler(self): - server = grpc.server(logging_pool.pool(1)) - handler = _ShutDownHandler() - server.add_shutdown_handler(handler.shutdown_handler) - server.start() - server.stop(0, shutdown_handler_grace=SHUTDOWN_GRACE).wait() - self.assertEqual(SHUTDOWN_GRACE, handler.seen_handler_grace) - if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py new file mode 100644 index 0000000000..2dc225de29 --- /dev/null +++ b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py @@ -0,0 +1,175 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Test of RPCs made against gRPC Python's application-layer API.""" + +import unittest + +import grpc + +from tests.unit.framework.common import test_constants + +_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2 +_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:] +_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3 +_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3] + +_UNARY_UNARY = '/test/UnaryUnary' +_UNARY_STREAM = '/test/UnaryStream' +_STREAM_UNARY = '/test/StreamUnary' +_STREAM_STREAM = '/test/StreamStream' + + +def _unary_unary_multi_callable(channel): + return channel.unary_unary(_UNARY_UNARY) + + +def _unary_stream_multi_callable(channel): + return channel.unary_stream( + _UNARY_STREAM, + request_serializer=_SERIALIZE_REQUEST, + response_deserializer=_DESERIALIZE_RESPONSE) + + +def _stream_unary_multi_callable(channel): + return channel.stream_unary( + _STREAM_UNARY, + request_serializer=_SERIALIZE_REQUEST, + response_deserializer=_DESERIALIZE_RESPONSE) + + +def _stream_stream_multi_callable(channel): + return channel.stream_stream(_STREAM_STREAM) + + +class InvalidMetadataTest(unittest.TestCase): + + def setUp(self): + self._channel = grpc.insecure_channel('localhost:8080') + self._unary_unary = _unary_unary_multi_callable(self._channel) + self._unary_stream = _unary_stream_multi_callable(self._channel) + self._stream_unary = _stream_unary_multi_callable(self._channel) + self._stream_stream = _stream_stream_multi_callable(self._channel) + + def testUnaryRequestBlockingUnaryResponse(self): + request = b'\x07\x08' + metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + with self.assertRaises(ValueError) as exception_context: + self._unary_unary(request, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testUnaryRequestBlockingUnaryResponseWithCall(self): + request = b'\x07\x08' + metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponseWithCall'),) + expected_error_details = "metadata was invalid: %s" % metadata + with self.assertRaises(ValueError) as exception_context: + self._unary_unary.with_call(request, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testUnaryRequestFutureUnaryResponse(self): + request = b'\x07\x08' + metadata = (('InVaLiD', 'UnaryRequestFutureUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_future = self._unary_unary.future(request, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + response_future.result() + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + self.assertEqual(response_future.details(), expected_error_details) + self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL) + + def testUnaryRequestStreamResponse(self): + request = b'\x37\x58' + metadata = (('InVaLiD', 'UnaryRequestStreamResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_iterator = self._unary_stream(request, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + next(response_iterator) + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + 
self.assertEqual(response_iterator.details(), expected_error_details) + self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL) + + def testStreamRequestBlockingUnaryResponse(self): + request_iterator = (b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + with self.assertRaises(ValueError) as exception_context: + self._stream_unary(request_iterator, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testStreamRequestBlockingUnaryResponseWithCall(self): + request_iterator = ( + b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponseWithCall'),) + expected_error_details = "metadata was invalid: %s" % metadata + multi_callable = _stream_unary_multi_callable(self._channel) + with self.assertRaises(ValueError) as exception_context: + multi_callable.with_call(request_iterator, metadata=metadata) + self.assertIn(expected_error_details, str(exception_context.exception)) + + def testStreamRequestFutureUnaryResponse(self): + request_iterator = ( + b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestFutureUnaryResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_future = self._stream_unary.future( + request_iterator, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + response_future.result() + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + self.assertEqual(response_future.details(), expected_error_details) + self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL) + + def testStreamRequestStreamResponse(self): + request_iterator = ( + b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)) + metadata = (('InVaLiD', 'StreamRequestStreamResponse'),) + expected_error_details = "metadata was invalid: %s" % metadata + response_iterator = self._stream_stream(request_iterator, metadata=metadata) + with self.assertRaises(grpc.RpcError) as exception_context: + next(response_iterator) + self.assertEqual( + exception_context.exception.details(), expected_error_details) + self.assertEqual( + exception_context.exception.code(), grpc.StatusCode.INTERNAL) + self.assertEqual(response_iterator.details(), expected_error_details) + self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py index 90fe10c77c..9cce96cc85 100644 --- a/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py +++ b/src/python/grpcio_tests/tests/unit/beta/_utilities_test.py @@ -66,7 +66,7 @@ class ChannelConnectivityTest(unittest.TestCase): ready_future = utilities.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) with self.assertRaises(future.TimeoutError): - ready_future.result(test_constants.SHORT_TIMEOUT) + ready_future.result(timeout=test_constants.SHORT_TIMEOUT) self.assertFalse(ready_future.cancelled()) self.assertFalse(ready_future.done()) self.assertTrue(ready_future.running()) @@ -88,7 +88,7 @@ class ChannelConnectivityTest(unittest.TestCase): ready_future = 
utilities.channel_ready_future(channel) ready_future.add_done_callback(callback.accept_value) self.assertIsNone( - ready_future.result(test_constants.SHORT_TIMEOUT)) + ready_future.result(timeout=test_constants.LONG_TIMEOUT)) value_passed_to_callback = callback.block_until_called() self.assertIs(ready_future, value_passed_to_callback) self.assertFalse(ready_future.cancelled()) diff --git a/src/ruby/ext/grpc/rb_byte_buffer.c b/src/ruby/ext/grpc/rb_byte_buffer.c index f97890e4a2..47fd6d9120 100644 --- a/src/ruby/ext/grpc/rb_byte_buffer.c +++ b/src/ruby/ext/grpc/rb_byte_buffer.c @@ -65,5 +65,6 @@ VALUE grpc_rb_byte_buffer_to_s(grpc_byte_buffer *buffer) { GRPC_SLICE_LENGTH(next)); grpc_slice_unref(next); } + grpc_byte_buffer_reader_destroy(&reader); return rb_string; } diff --git a/src/ruby/ext/grpc/rb_server.c b/src/ruby/ext/grpc/rb_server.c index 2a6a246e67..c7b112c94b 100644 --- a/src/ruby/ext/grpc/rb_server.c +++ b/src/ruby/ext/grpc/rb_server.c @@ -37,6 +37,7 @@ #include "rb_server.h" #include <grpc/grpc.h> +#include <grpc/support/atm.h> #include <grpc/grpc_security.h> #include <grpc/support/log.h> #include "rb_call.h" @@ -59,22 +60,26 @@ typedef struct grpc_rb_server { /* The actual server */ grpc_server *wrapped; grpc_completion_queue *queue; + gpr_atm shutdown_started; } grpc_rb_server; static void destroy_server(grpc_rb_server *server, gpr_timespec deadline) { grpc_event ev; - if (server->wrapped != NULL) { - grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL); - ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL); - if (ev.type == GRPC_QUEUE_TIMEOUT) { - grpc_server_cancel_all_calls(server->wrapped); - rb_completion_queue_pluck(server->queue, NULL, - gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + // This can be started by app or implicitly by GC. Avoid a race between these. + if (gpr_atm_full_fetch_add(&server->shutdown_started, (gpr_atm)1) == 0) { + if (server->wrapped != NULL) { + grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL); + ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL); + if (ev.type == GRPC_QUEUE_TIMEOUT) { + grpc_server_cancel_all_calls(server->wrapped); + rb_completion_queue_pluck(server->queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + } + grpc_server_destroy(server->wrapped); + grpc_rb_completion_queue_destroy(server->queue); + server->wrapped = NULL; + server->queue = NULL; } - grpc_server_destroy(server->wrapped); - grpc_rb_completion_queue_destroy(server->queue); - server->wrapped = NULL; - server->queue = NULL; } } @@ -115,6 +120,7 @@ static const rb_data_type_t grpc_rb_server_data_type = { static VALUE grpc_rb_server_alloc(VALUE cls) { grpc_rb_server *wrapper = ALLOC(grpc_rb_server); wrapper->wrapped = NULL; + wrapper->shutdown_started = (gpr_atm)0; return TypedData_Wrap_Struct(cls, &grpc_rb_server_data_type, wrapper); } diff --git a/src/ruby/lib/grpc/errors.rb b/src/ruby/lib/grpc/errors.rb index 23b2bb7e12..f6998e17c4 100644 --- a/src/ruby/lib/grpc/errors.rb +++ b/src/ruby/lib/grpc/errors.rb @@ -35,9 +35,18 @@ module GRPC # either end of a GRPC connection. When raised, it indicates that a status # error should be returned to the other end of a GRPC connection; when # caught it means that this end received a status error. + # + # There is also subclass of BadStatus in this module for each GRPC status. + # E.g., the GRPC::Cancelled class corresponds to status CANCELLED. 
+ # + # See + # https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/status.h + # for detailed descriptions of each status code. class BadStatus < StandardError attr_reader :code, :details, :metadata + include GRPC::Core::StatusCodes + # @param code [Numeric] the status code # @param details [String] the details of the exception # @param metadata [Hash] the error's metadata @@ -55,9 +64,152 @@ module GRPC def to_status Struct::Status.new(code, details, @metadata) end + + def self.new_status_exception(code, details = 'unkown cause', metadata = {}) + codes = {} + codes[OK] = Ok + codes[CANCELLED] = Cancelled + codes[UNKNOWN] = Unknown + codes[INVALID_ARGUMENT] = InvalidArgument + codes[DEADLINE_EXCEEDED] = DeadlineExceeded + codes[NOT_FOUND] = NotFound + codes[ALREADY_EXISTS] = AlreadyExists + codes[PERMISSION_DENIED] = PermissionDenied + codes[UNAUTHENTICATED] = Unauthenticated + codes[RESOURCE_EXHAUSTED] = ResourceExhausted + codes[FAILED_PRECONDITION] = FailedPrecondition + codes[ABORTED] = Aborted + codes[OUT_OF_RANGE] = OutOfRange + codes[UNIMPLEMENTED] = Unimplemented + codes[INTERNAL] = Internal + codes[UNIMPLEMENTED] = Unimplemented + codes[UNAVAILABLE] = Unavailable + codes[DATA_LOSS] = DataLoss + + if codes[code].nil? + BadStatus.new(code, details, metadata) + else + codes[code].new(details, metadata) + end + end + end + + # GRPC status code corresponding to status OK + class Ok < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::OK, details, metadata) + end end - # Cancelled is an exception class that indicates that an rpc was cancelled. - class Cancelled < StandardError + # GRPC status code corresponding to status CANCELLED + class Cancelled < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::CANCELLED, details, metadata) + end + end + + # GRPC status code corresponding to status UNKNOWN + class Unknown < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNKNOWN, details, metadata) + end + end + + # GRPC status code corresponding to status INVALID_ARGUMENT + class InvalidArgument < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::INVALID_ARGUMENT, details, metadata) + end + end + + # GRPC status code corresponding to status DEADLINE_EXCEEDED + class DeadlineExceeded < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::DEADLINE_EXCEEDED, details, metadata) + end + end + + # GRPC status code corresponding to status NOT_FOUND + class NotFound < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::NOT_FOUND, details, metadata) + end + end + + # GRPC status code corresponding to status ALREADY_EXISTS + class AlreadyExists < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::ALREADY_EXISTS, details, metadata) + end + end + + # GRPC status code corresponding to status PERMISSION_DENIED + class PermissionDenied < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::PERMISSION_DENIED, details, metadata) + end + end + + # GRPC status code corresponding to status UNAUTHENTICATED + class Unauthenticated < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNAUTHENTICATED, details, metadata) + end + end + + # GRPC status code corresponding to status RESOURCE_EXHAUSTED + class 
ResourceExhausted < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::RESOURCE_EXHAUSTED, details, metadata) + end + end + + # GRPC status code corresponding to status FAILED_PRECONDITION + class FailedPrecondition < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::FAILED_PRECONDITION, details, metadata) + end + end + + # GRPC status code corresponding to status ABORTED + class Aborted < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::ABORTED, details, metadata) + end + end + + # GRPC status code corresponding to status OUT_OF_RANGE + class OutOfRange < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::OUT_OF_RANGE, details, metadata) + end + end + + # GRPC status code corresponding to status UNIMPLEMENTED + class Unimplemented < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNIMPLEMENTED, details, metadata) + end + end + + # GRPC status code corresponding to status INTERNAL + class Internal < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::INTERNAL, details, metadata) + end + end + + # GRPC status code corresponding to status UNAVAILABLE + class Unavailable < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::UNAVAILABLE, details, metadata) + end + end + + # GRPC status code corresponding to status DATA_LOSS + class DataLoss < BadStatus + def initialize(details = 'unknown cause', metadata = {}) + super(Core::StatusCodes::DATA_LOSS, details, metadata) + end end end diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index f5c426ebfc..3b31f77ec0 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -43,8 +43,8 @@ class Struct GRPC.logger.debug("Failing with status #{status}") # raise BadStatus, propagating the metadata if present. md = status.metadata - fail GRPC::BadStatus.new(status.code, status.details, md), - "status code: #{status.code}, details: #{status.details}" + fail GRPC::BadStatus.new_status_exception( + status.code, status.details, md) end status end diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb index d7cd9e6df2..8943f3f1fe 100644 --- a/src/ruby/lib/grpc/generic/bidi_call.rb +++ b/src/ruby/lib/grpc/generic/bidi_call.rb @@ -219,6 +219,10 @@ module GRPC GRPC.logger.debug('bidi-read-loop: finished') @reads_complete = true finished + # Make sure that the write loop is done done before finishing the call. 
+ # Note that blocking is ok at this point because we've already received + # a status + @enq_th.join if is_client end end end diff --git a/src/ruby/lib/grpc/generic/rpc_desc.rb b/src/ruby/lib/grpc/generic/rpc_desc.rb index cd17aed8e7..d46c4a1b5c 100644 --- a/src/ruby/lib/grpc/generic/rpc_desc.rb +++ b/src/ruby/lib/grpc/generic/rpc_desc.rb @@ -119,7 +119,7 @@ module GRPC # Send back a UNKNOWN status to the client GRPC.logger.warn("failed handler: #{active_call}; sending status:UNKNOWN") GRPC.logger.warn(e) - send_status(active_call, UNKNOWN, 'unkown error handling call on server') + send_status(active_call, UNKNOWN, "#{e.class}: #{e.message}") end def assert_arity_matches(mth) diff --git a/src/ruby/lib/grpc/generic/service.rb b/src/ruby/lib/grpc/generic/service.rb index 7cb9f1cc99..84f1ce7520 100644 --- a/src/ruby/lib/grpc/generic/service.rb +++ b/src/ruby/lib/grpc/generic/service.rb @@ -110,8 +110,9 @@ module GRPC rpc_descs[name] = RpcDesc.new(name, input, output, marshal_class_method, unmarshal_class_method) - define_method(name) do - fail GRPC::BadStatus, GRPC::Core::StatusCodes::UNIMPLEMENTED + define_method(GenericService.underscore(name.to_s).to_sym) do + fail GRPC::BadStatus.new_status_exception( + GRPC::Core::StatusCodes::UNIMPLEMENTED) end end diff --git a/src/ruby/pb/grpc/health/checker.rb b/src/ruby/pb/grpc/health/checker.rb index 4bce1744c4..6b2d852ebf 100644 --- a/src/ruby/pb/grpc/health/checker.rb +++ b/src/ruby/pb/grpc/health/checker.rb @@ -52,7 +52,9 @@ module Grpc @status_mutex.synchronize do status = @statuses["#{req.service}"] end - fail GRPC::BadStatus, StatusCodes::NOT_FOUND if status.nil? + if status.nil? + fail GRPC::BadStatus.new_status_exception(StatusCodes::NOT_FOUND) + end HealthCheckResponse.new(status: status) end diff --git a/src/ruby/pb/test/client.rb b/src/ruby/pb/test/client.rb index 1e3ae65630..f101f9d89e 100755 --- a/src/ruby/pb/test/client.rb +++ b/src/ruby/pb/test/client.rb @@ -459,11 +459,8 @@ class NamedTests deadline = GRPC::Core::TimeConsts::from_relative_time(1) resps = @stub.full_duplex_call(enum.each_item, deadline: deadline) resps.each { } # wait to receive each request (or timeout) - fail 'Should have raised GRPC::BadStatus(DEADLINE_EXCEEDED)' - rescue GRPC::BadStatus => e - assert("#{__callee__}: status was wrong") do - e.code == GRPC::Core::StatusCodes::DEADLINE_EXCEEDED - end + fail 'Should have raised GRPC::DeadlineExceeded' + rescue GRPC::DeadlineExceeded end def empty_stream diff --git a/src/ruby/qps/client.rb b/src/ruby/qps/client.rb index 8aed866da5..817192626b 100644 --- a/src/ruby/qps/client.rb +++ b/src/ruby/qps/client.rb @@ -134,6 +134,7 @@ class BenchmarkClient resp = stub.streaming_call(q.each_item) start = Time.now q.push(req) + pushed_sentinal = false resp.each do |r| @histogram.add((Time.now-start)*1e9) if !@done @@ -141,8 +142,9 @@ class BenchmarkClient start = Time.now q.push(req) else - q.push(self) - break + q.push(self) unless pushed_sentinal + # Continue polling on the responses to consume and release resources + pushed_sentinal = true end end end diff --git a/src/ruby/spec/error_sanity_spec.rb b/src/ruby/spec/error_sanity_spec.rb new file mode 100644 index 0000000000..77e94a8816 --- /dev/null +++ b/src/ruby/spec/error_sanity_spec.rb @@ -0,0 +1,64 @@ +# Copyright 2015, Google Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +require 'grpc' + +StatusCodes = GRPC::Core::StatusCodes + +describe StatusCodes do + # convert upper snake-case to camel case. + # e.g., DEADLINE_EXCEEDED -> DeadlineExceeded + def upper_snake_to_camel(name) + name.to_s.split('_').map(&:downcase).map(&:capitalize).join('') + end + + StatusCodes.constants.each do |status_name| + it 'there is a subclass of BadStatus corresponding to StatusCode: ' \ + "#{status_name} that has code: #{StatusCodes.const_get(status_name)}" do + camel_case = upper_snake_to_camel(status_name) + error_class = GRPC.const_get(camel_case) + # expect the error class to be a subclass of BadStatus + expect(error_class < GRPC::BadStatus) + + error_object = error_class.new + # check that the code matches the int value of the error's constant + status_code = StatusCodes.const_get(status_name) + expect(error_object.code).to eq(status_code) + + # check default parameters + expect(error_object.details).to eq('unknown cause') + expect(error_object.metadata).to eq({}) + + # check that the BadStatus factory for creates the correct + # exception too + from_factory = GRPC::BadStatus.new_status_exception(status_code) + expect(from_factory.is_a?(error_class)).to be(true) + end + end +end diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index 607a4a3c5d..b51b291cbd 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -190,15 +190,14 @@ describe 'ClientStub' do end creds = GRPC::Core::CallCredentials.new(failing_auth) - error_occured = false + unauth_error_occured = false begin get_response(stub, credentials: creds) - rescue GRPC::BadStatus => e - error_occured = true - expect(e.code).to eq(GRPC::Core::StatusCodes::UNAUTHENTICATED) + rescue GRPC::Unauthenticated => e + unauth_error_occured = true expect(e.details.include?(error_message)).to be true end - expect(error_occured).to eq(true) + expect(unauth_error_occured).to eq(true) # Kill the server thread so tests can complete th.kill diff --git 
a/src/ruby/spec/generic/rpc_desc_spec.rb b/src/ruby/spec/generic/rpc_desc_spec.rb index a3f0efa603..1ace7211e9 100644 --- a/src/ruby/spec/generic/rpc_desc_spec.rb +++ b/src/ruby/spec/generic/rpc_desc_spec.rb @@ -48,7 +48,6 @@ describe GRPC::RpcDesc do @bidi_streamer = RpcDesc.new('ss', Stream.new(Object.new), Stream.new(Object.new), 'encode', 'decode') @bs_code = INTERNAL - @no_reason = 'unkown error handling call on server' @ok_response = Object.new end @@ -62,8 +61,9 @@ describe GRPC::RpcDesc do it 'sends status UNKNOWN if other StandardErrors are raised' do expect(@call).to receive(:remote_read).once.and_return(Object.new) - expect(@call).to receive(:send_status) .once.with(UNKNOWN, @no_reason, - false, metadata: {}) + expect(@call).to receive(:send_status).once.with(UNKNOWN, + arg_error_msg, + false, metadata: {}) this_desc.run_server_method(@call, method(:other_error)) end @@ -112,7 +112,7 @@ describe GRPC::RpcDesc do end it 'sends status UNKNOWN if other StandardErrors are raised' do - expect(@call).to receive(:send_status).once.with(UNKNOWN, @no_reason, + expect(@call).to receive(:send_status).once.with(UNKNOWN, arg_error_msg, false, metadata: {}) @client_streamer.run_server_method(@call, method(:other_error_alt)) end @@ -174,8 +174,9 @@ describe GRPC::RpcDesc do end it 'sends status UNKNOWN if other StandardErrors are raised' do + error_msg = arg_error_msg(StandardError.new) expect(@call).to receive(:run_server_bidi).and_raise(StandardError) - expect(@call).to receive(:send_status).once.with(UNKNOWN, @no_reason, + expect(@call).to receive(:send_status).once.with(UNKNOWN, error_msg, false, metadata: {}) @bidi_streamer.run_server_method(@call, method(:other_error_alt)) end @@ -342,4 +343,9 @@ describe GRPC::RpcDesc do def other_error_alt(_call) fail(ArgumentError, 'other error') end + + def arg_error_msg(error = nil) + error ||= ArgumentError.new('other error') + "#{error.class}: #{error.message}" + end end diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb index c5694790fd..806ea8ce9f 100644 --- a/src/ruby/spec/generic/rpc_server_spec.rb +++ b/src/ruby/spec/generic/rpc_server_spec.rb @@ -408,21 +408,21 @@ describe GRPC::RpcServer do req = EchoMsg.new n = 20 # arbitrary, use as many to ensure the server pool is exceeded threads = [] - bad_status_code = nil + one_failed_as_unavailable = false n.times do threads << Thread.new do stub = SlowStub.new(alt_host, :this_channel_is_insecure) begin stub.an_rpc(req) - rescue GRPC::BadStatus => e - bad_status_code = e.code + rescue GRPC::ResourceExhausted + one_failed_as_unavailable = true end end end threads.each(&:join) alt_srv.stop t.join - expect(bad_status_code).to be(StatusCodes::RESOURCE_EXHAUSTED) + expect(one_failed_as_unavailable).to be(true) end end diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb index 4711e09e88..719510001c 100644 --- a/src/ruby/spec/pb/health/checker_spec.rb +++ b/src/ruby/spec/pb/health/checker_spec.rb @@ -122,7 +122,7 @@ describe Grpc::Health::Checker do checker.check(HCReq.new(service: t[:service]), nil) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error GRPC::BadStatus, expected_msg + expect(&blk).to raise_error GRPC::NotFound, expected_msg end end end @@ -141,7 +141,7 @@ describe Grpc::Health::Checker do checker.check(HCReq.new(service: t[:service]), nil) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error GRPC::BadStatus, expected_msg + expect(&blk).to raise_error 
GRPC::NotFound, expected_msg end end end @@ -163,7 +163,7 @@ describe Grpc::Health::Checker do checker.check(HCReq.new(service: t[:service]), nil) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error GRPC::BadStatus, expected_msg + expect(&blk).to raise_error GRPC::NotFound, expected_msg end end end @@ -214,7 +214,7 @@ describe Grpc::Health::Checker do stub.check(HCReq.new(service: 'unknown')) end expected_msg = /#{StatusCodes::NOT_FOUND}/ - expect(&blk).to raise_error GRPC::BadStatus, expected_msg + expect(&blk).to raise_error GRPC::NotFound, expected_msg @srv.stop t.join end diff --git a/src/ruby/spec/spec_helper.rb b/src/ruby/spec/spec_helper.rb index c891c1bf5e..c2be0afa72 100644 --- a/src/ruby/spec/spec_helper.rb +++ b/src/ruby/spec/spec_helper.rb @@ -67,3 +67,5 @@ RSpec.configure do |config| end RSpec::Expectations.configuration.warn_about_potential_false_positives = false + +Thread.abort_on_exception = true diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template index fbad1a3f70..1b97d18f16 100644 --- a/templates/gRPC-Core.podspec.template +++ b/templates/gRPC-Core.podspec.template @@ -62,7 +62,7 @@ %> Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '1.0.1' + version = '1.0.2' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -71,7 +71,9 @@ s.source = { :git => 'https://github.com/grpc/grpc.git', - :tag => "v#{version}", + # TODO(mxyan): Change back to "v#{version}" for next release + #:tag => "v#{version}", + :tag => "objective-c-v#{version}", # TODO(jcanizales): Depend explicitly on the nanopb pod, and disable submodules. :submodules => true, } diff --git a/templates/grpc.gemspec.template b/templates/grpc.gemspec.template index 62d61b75c1..82fbb69008 100644 --- a/templates/grpc.gemspec.template +++ b/templates/grpc.gemspec.template @@ -29,7 +29,7 @@ s.require_paths = %w( src/ruby/bin src/ruby/lib src/ruby/pb ) s.platform = Gem::Platform::RUBY - s.add_dependency 'google-protobuf', '~> 3.0.2' + s.add_dependency 'google-protobuf', '~> 3.1.0' s.add_dependency 'googleauth', '~> 0.5.1' s.add_development_dependency 'bundler', '~> 1.9' diff --git a/templates/tools/dockerfile/clang_format.include b/templates/tools/dockerfile/clang_format.include new file mode 100644 index 0000000000..9a2b60ba8c --- /dev/null +++ b/templates/tools/dockerfile/clang_format.include @@ -0,0 +1,5 @@ +RUN apt-get update && apt-get -y install wget +RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - +RUN apt-get update && apt-get -y install clang-format-3.8 diff --git a/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template b/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template new file mode 100644 index 0000000000..8360fc121c --- /dev/null +++ b/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template @@ -0,0 +1,37 @@ +%YAML 1.2 +--- | + # Copyright 2015, Google Inc. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. 
+ # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + FROM ubuntu:15.10 + + <%include file="../clang_format.include"/> + ADD clang_format_all_the_things.sh / + CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"] + diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template index 0168353933..8617666b21 100644 --- a/templates/tools/dockerfile/test/sanity/Dockerfile.template +++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template @@ -52,18 +52,12 @@ # ./compile.sh without a local protoc dependency # TODO(mattkwong): install dependencies to support latest Bazel version if newer # version is needed - RUN git clone https://github.com/bazelbuild/bazel.git /bazel && \ + RUN git clone https://github.com/bazelbuild/bazel.git /bazel && ${"\\"} cd /bazel && git checkout tags/0.4.1 && ./compile.sh RUN ln -s /bazel/output/bazel /bin/ - #=================== - # Docker "inception" - # Note this is quite the ugly hack. - # This makes sure that the docker binary we inject has its dependencies. - RUN curl https://get.docker.com/ | sh - RUN apt-get remove --purge -y docker-engine - - RUN mkdir /var/local/jenkins + <%include file="../../clang_format.include"/> + <%include file="../../run_tests_addons.include"/> # Define the default command. 
CMD ["bash"] diff --git a/templates/tools/run_tests/configs.json.template b/templates/tools/run_tests/generated/configs.json.template index 5c82dfb347..5c82dfb347 100644 --- a/templates/tools/run_tests/configs.json.template +++ b/templates/tools/run_tests/generated/configs.json.template diff --git a/templates/tools/run_tests/sources_and_headers.json.template b/templates/tools/run_tests/generated/sources_and_headers.json.template index 1c5c9747d6..1c5c9747d6 100644 --- a/templates/tools/run_tests/sources_and_headers.json.template +++ b/templates/tools/run_tests/generated/sources_and_headers.json.template diff --git a/templates/tools/run_tests/tests.json.template b/templates/tools/run_tests/generated/tests.json.template index 1e21465dd2..1e21465dd2 100644 --- a/templates/tools/run_tests/tests.json.template +++ b/templates/tools/run_tests/generated/tests.json.template diff --git a/templates/vsprojects/protobuf.props.template b/templates/vsprojects/protobuf.props.template index 48f9431c1c..3ae7c745de 100644 --- a/templates/vsprojects/protobuf.props.template +++ b/templates/vsprojects/protobuf.props.template @@ -6,7 +6,7 @@ <ItemDefinitionGroup>
<Link>
<AdditionalDependencies>libprotobuf.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemGroup />
diff --git a/templates/vsprojects/protoc.props.template b/templates/vsprojects/protoc.props.template
index ced6028a4b..2c3844a9a4 100644
--- a/templates/vsprojects/protoc.props.template
+++ b/templates/vsprojects/protoc.props.template
@@ -9,7 +9,7 @@
</ClCompile>
<Link>
<AdditionalDependencies>libprotoc.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemGroup />
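Note (editor's sketch, not part of the patch): the C and C++ test updates that follow track a channel filter API change in which init_channel_elem now returns grpc_error* instead of void, so each filter reports channel-initialization success or failure explicitly. A minimal init function under that contract might look like the sketch below; the function name and the int-sized channel_data are illustrative assumptions, mirroring the channel_stack_test.c hunk that follows.

    #include "src/core/lib/channel/channel_stack.h"

    /* Illustrative channel-element init under the grpc_error*-returning API. */
    static grpc_error *example_init_channel_elem(grpc_exec_ctx *exec_ctx,
                                                 grpc_channel_element *elem,
                                                 grpc_channel_element_args *args) {
      /* Per-channel state lives in elem->channel_data; zero it at startup. */
      *(int *)(elem->channel_data) = 0;
      /* Returning GRPC_ERROR_NONE signals success; a filter that cannot
         initialize would presumably return a non-NULL grpc_error* instead. */
      return GRPC_ERROR_NONE;
    }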
diff --git a/test/core/channel/channel_stack_test.c b/test/core/channel/channel_stack_test.c index 0840820cca..b43d05eec3 100644 --- a/test/core/channel/channel_stack_test.c +++ b/test/core/channel/channel_stack_test.c @@ -41,9 +41,9 @@ #include "test/core/util/test_config.h" -static void channel_init_func(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error *channel_init_func(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { GPR_ASSERT(args->channel_args->num_args == 1); GPR_ASSERT(args->channel_args->args[0].type == GRPC_ARG_INTEGER); GPR_ASSERT(0 == strcmp(args->channel_args->args[0].key, "test_key")); @@ -51,6 +51,7 @@ static void channel_init_func(grpc_exec_ctx *exec_ctx, GPR_ASSERT(args->is_first); GPR_ASSERT(args->is_last); *(int *)(elem->channel_data) = 0; + return GRPC_ERROR_NONE; } static grpc_error *call_init_func(grpc_exec_ctx *exec_ctx, diff --git a/test/core/client_channel/lb_policies_test.c b/test/core/client_channel/lb_policies_test.c index 6e4058fc21..016610763c 100644 --- a/test/core/client_channel/lb_policies_test.c +++ b/test/core/client_channel/lb_policies_test.c @@ -241,6 +241,8 @@ static request_sequences request_sequences_create(size_t n) { res.n = n; res.connections = gpr_malloc(sizeof(*res.connections) * n); res.connectivity_states = gpr_malloc(sizeof(*res.connectivity_states) * n); + memset(res.connections, 0, sizeof(*res.connections) * n); + memset(res.connectivity_states, 0, sizeof(*res.connectivity_states) * n); return res; } @@ -782,17 +784,15 @@ static void verify_total_carnage_round_robin(const servers_fixture *f, } } - /* no server is ever available. The persistent state is TRANSIENT_FAILURE. May - * also be CONNECTING if, under load, this check took too long to run and some - * subchannel already transitioned to retrying. */ + /* No server is ever available. There should be no READY states (or SHUTDOWN). + * Note that all other states (IDLE, CONNECTING, TRANSIENT_FAILURE) are still + * possible, as the policy transitions while attempting to reconnect. */ for (size_t i = 0; i < sequences->n; i++) { const grpc_connectivity_state actual = sequences->connectivity_states[i]; - if (actual != GRPC_CHANNEL_TRANSIENT_FAILURE && - actual != GRPC_CHANNEL_CONNECTING) { + if (actual == GRPC_CHANNEL_READY || actual == GRPC_CHANNEL_SHUTDOWN) { gpr_log(GPR_ERROR, - "CONNECTIVITY STATUS SEQUENCE FAILURE: expected " - "GRPC_CHANNEL_TRANSIENT_FAILURE or GRPC_CHANNEL_CONNECTING, got " - "'%s' at iteration #%d", + "CONNECTIVITY STATUS SEQUENCE FAILURE: got unexpected state " + "'%s' at iteration #%d.", grpc_connectivity_state_name(actual), (int)i); abort(); } @@ -841,17 +841,15 @@ static void verify_partial_carnage_round_robin( abort(); } - /* ... and that the last one should be TRANSIENT_FAILURE, after all servers - * are gone. May also be CONNECTING if, under load, this check took too long - * to run and the subchannel already transitioned to retrying. */ + /* ... and that the last one shouldn't be READY (or SHUTDOWN): all servers are + * gone. It may be all other states (IDLE, CONNECTING, TRANSIENT_FAILURE), as + * the policy transitions while attempting to reconnect. 
*/ actual = sequences->connectivity_states[num_iters - 1]; for (i = 0; i < sequences->n; i++) { - if (actual != GRPC_CHANNEL_TRANSIENT_FAILURE && - actual != GRPC_CHANNEL_CONNECTING) { + if (actual == GRPC_CHANNEL_READY || actual == GRPC_CHANNEL_SHUTDOWN) { gpr_log(GPR_ERROR, - "CONNECTIVITY STATUS SEQUENCE FAILURE: expected " - "GRPC_CHANNEL_TRANSIENT_FAILURE or GRPC_CHANNEL_CONNECTING, got " - "'%s' at iteration #%d", + "CONNECTIVITY STATUS SEQUENCE FAILURE: got unexpected state " + "'%s' at iteration #%d.", grpc_connectivity_state_name(actual), (int)i); abort(); } @@ -948,8 +946,8 @@ int main(int argc, char **argv) { const size_t NUM_ITERS = 10; const size_t NUM_SERVERS = 4; - grpc_test_init(argc, argv); grpc_init(); + grpc_test_init(argc, argv); grpc_tracer_set_enabled("round_robin", 1); GPR_ASSERT(grpc_lb_policy_create(&exec_ctx, "this-lb-policy-does-not-exist", diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c index ffa167a0e7..b421720492 100644 --- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c +++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c @@ -63,7 +63,8 @@ static grpc_error *my_resolve_address(const char *name, const char *addr, } } -static grpc_resolver *create_resolver(const char *name) { +static grpc_resolver *create_resolver(grpc_exec_ctx *exec_ctx, + const char *name) { grpc_resolver_factory *factory = grpc_resolver_factory_lookup("dns"); grpc_uri *uri = grpc_uri_parse(name, 0); GPR_ASSERT(uri); @@ -71,7 +72,7 @@ static grpc_resolver *create_resolver(const char *name) { memset(&args, 0, sizeof(args)); args.uri = uri; grpc_resolver *resolver = - grpc_resolver_factory_create_resolver(factory, &args); + grpc_resolver_factory_create_resolver(exec_ctx, factory, &args); grpc_resolver_factory_unref(factory); grpc_uri_destroy(uri); return resolver; @@ -101,12 +102,10 @@ int main(int argc, char **argv) { grpc_init(); gpr_mu_init(&g_mu); grpc_blocking_resolve_address = my_resolve_address; - - grpc_resolver *resolver = create_resolver("dns:test"); - grpc_channel_args *result = (grpc_channel_args *)1; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_resolver *resolver = create_resolver(&exec_ctx, "dns:test"); gpr_event ev1; gpr_event_init(&ev1); grpc_resolver_next(&exec_ctx, resolver, &result, diff --git a/test/core/client_channel/resolvers/dns_resolver_test.c b/test/core/client_channel/resolvers/dns_resolver_test.c index 41a9125431..5603a57b5f 100644 --- a/test/core/client_channel/resolvers/dns_resolver_test.c +++ b/test/core/client_channel/resolvers/dns_resolver_test.c @@ -48,7 +48,7 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver != NULL); GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_succeeds"); grpc_uri_destroy(uri); @@ -65,7 +65,7 @@ static void test_fails(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver == NULL); grpc_uri_destroy(uri); grpc_exec_ctx_finish(&exec_ctx); diff --git 
a/test/core/client_channel/resolvers/sockaddr_resolver_test.c b/test/core/client_channel/resolvers/sockaddr_resolver_test.c index ebf311ab83..a9fd85aea1 100644 --- a/test/core/client_channel/resolvers/sockaddr_resolver_test.c +++ b/test/core/client_channel/resolvers/sockaddr_resolver_test.c @@ -49,11 +49,6 @@ typedef struct on_resolution_arg { void on_resolution_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { on_resolution_arg *res = arg; - const grpc_arg *channel_arg = - grpc_channel_args_find(res->resolver_result, GRPC_ARG_SERVER_NAME); - GPR_ASSERT(channel_arg != NULL); - GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING); - GPR_ASSERT(strcmp(res->expected_server_name, channel_arg->value.string) == 0); grpc_channel_args_destroy(res->resolver_result); } @@ -67,7 +62,7 @@ static void test_succeeds(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver != NULL); on_resolution_arg on_res_arg; @@ -93,7 +88,7 @@ static void test_fails(grpc_resolver_factory *factory, const char *string) { GPR_ASSERT(uri); memset(&args, 0, sizeof(args)); args.uri = uri; - resolver = grpc_resolver_factory_create_resolver(factory, &args); + resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args); GPR_ASSERT(resolver == NULL); grpc_uri_destroy(uri); grpc_exec_ctx_finish(&exec_ctx); diff --git a/test/core/client_channel/set_initial_connect_string_test.c b/test/core/client_channel/set_initial_connect_string_test.c index b16a3ebf45..11e57439d5 100644 --- a/test/core/client_channel/set_initial_connect_string_test.c +++ b/test/core/client_channel/set_initial_connect_string_test.c @@ -92,6 +92,7 @@ static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); test_tcp_server *server = arg; grpc_closure_init(&on_read, handle_read, NULL); grpc_slice_buffer_init(&state.incoming_buffer); diff --git a/test/core/end2end/bad_server_response_test.c b/test/core/end2end/bad_server_response_test.c index 1c4a17fda8..30468558e8 100644 --- a/test/core/end2end/bad_server_response_test.c +++ b/test/core/end2end/bad_server_response_test.c @@ -145,6 +145,7 @@ static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); test_tcp_server *server = arg; grpc_closure_init(&on_read, handle_read, NULL); grpc_closure_init(&on_write, done_write, NULL); diff --git a/test/core/end2end/fake_resolver.c b/test/core/end2end/fake_resolver.c index 865b55de4d..ed85030797 100644 --- a/test/core/end2end/fake_resolver.c +++ b/test/core/end2end/fake_resolver.c @@ -140,7 +140,8 @@ static void fake_resolver_factory_unref(grpc_resolver_factory* factory) {} static void do_nothing(void* ignored) {} -static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory, +static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx, + grpc_resolver_factory* factory, grpc_resolver_args* args) { if (0 != strcmp(args->uri->authority, "")) { gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme", @@ -181,12 
+182,7 @@ static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory, // Instantiate resolver. fake_resolver* r = gpr_malloc(sizeof(fake_resolver)); memset(r, 0, sizeof(*r)); - grpc_arg server_name_arg; - server_name_arg.type = GRPC_ARG_STRING; - server_name_arg.key = GRPC_ARG_SERVER_NAME; - server_name_arg.value.string = args->uri->path; - r->channel_args = - grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1); + r->channel_args = grpc_channel_args_copy(args->args); r->addresses = addresses; gpr_mu_init(&r->mu); grpc_resolver_init(&r->base, &fake_resolver_vtable); diff --git a/test/core/end2end/fixtures/http_proxy.c b/test/core/end2end/fixtures/http_proxy.c index 57fc4a38f8..80865fc7a6 100644 --- a/test/core/end2end/fixtures/http_proxy.c +++ b/test/core/end2end/fixtures/http_proxy.c @@ -367,6 +367,7 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg, static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* endpoint, grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor) { + gpr_free(acceptor); grpc_end2end_http_proxy* proxy = arg; // Instantiate proxy_connection. proxy_connection* conn = gpr_malloc(sizeof(*conn)); diff --git a/test/core/end2end/fuzzers/api_fuzzer.c b/test/core/end2end/fuzzers/api_fuzzer.c index 19ac6ced14..746134c85b 100644 --- a/test/core/end2end/fuzzers/api_fuzzer.c +++ b/test/core/end2end/fuzzers/api_fuzzer.c @@ -361,7 +361,9 @@ static void finish_resolve(grpc_exec_ctx *exec_ctx, void *arg, } void my_resolve_address(grpc_exec_ctx *exec_ctx, const char *addr, - const char *default_port, grpc_closure *on_done, + const char *default_port, + grpc_pollset_set *interested_parties, + grpc_closure *on_done, grpc_resolved_addresses **addresses) { addr_req *r = gpr_malloc(sizeof(*r)); r->addr = gpr_strdup(addr); diff --git a/test/core/end2end/invalid_call_argument_test.c b/test/core/end2end/invalid_call_argument_test.c index 765b6ad1be..d974d2c8ff 100644 --- a/test/core/end2end/invalid_call_argument_test.c +++ b/test/core/end2end/invalid_call_argument_test.c @@ -573,6 +573,29 @@ static void test_recv_close_on_server_twice() { cleanup_test(); } +static void test_invalid_initial_metadata_reserved_key() { + gpr_log(GPR_INFO, "test_invalid_initial_metadata_reserved_key"); + + grpc_metadata metadata; + metadata.key = ":start_with_colon"; + metadata.value = "value"; + metadata.value_length = 6; + + grpc_op *op; + prepare_test(1); + op = g_state.ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 1; + op->data.send_initial_metadata.metadata = &metadata; + op->flags = 0; + op->reserved = NULL; + op++; + GPR_ASSERT(GRPC_CALL_ERROR_INVALID_METADATA == + grpc_call_start_batch(g_state.call, g_state.ops, + (size_t)(op - g_state.ops), tag(1), NULL)); + cleanup_test(); +} + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); @@ -595,6 +618,7 @@ int main(int argc, char **argv) { test_send_server_status_twice(); test_recv_close_on_server_with_invalid_flags(); test_recv_close_on_server_twice(); + test_invalid_initial_metadata_reserved_key(); grpc_shutdown(); return 0; diff --git a/test/core/end2end/tests/filter_call_init_fails.c b/test/core/end2end/tests/filter_call_init_fails.c index 41ae575fff..6d9351ed8c 100644 --- a/test/core/end2end/tests/filter_call_init_fails.c +++ b/test/core/end2end/tests/filter_call_init_fails.c @@ -216,9 +216,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, 
void *and_free_memory) {} -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) {} +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; +} static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} diff --git a/test/core/end2end/tests/filter_causes_close.c b/test/core/end2end/tests/filter_causes_close.c index bf9fd9073d..21905b98fa 100644 --- a/test/core/end2end/tests/filter_causes_close.c +++ b/test/core/end2end/tests/filter_causes_close.c @@ -243,9 +243,11 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, const grpc_call_final_info *final_info, void *and_free_memory) {} -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) {} +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; +} static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} diff --git a/test/core/end2end/tests/filter_latency.c b/test/core/end2end/tests/filter_latency.c index ea63d45420..e10204863b 100644 --- a/test/core/end2end/tests/filter_latency.c +++ b/test/core/end2end/tests/filter_latency.c @@ -281,9 +281,11 @@ static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&g_mu); } -static void init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) {} +static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, + grpc_channel_element *elem, + grpc_channel_element_args *args) { + return GRPC_ERROR_NONE; +} static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem) {} diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c index 2dd0d88b3f..e4136a7a7a 100644 --- a/test/core/iomgr/resolve_address_test.c +++ b/test/core/iomgr/resolve_address_test.c @@ -32,8 +32,10 @@ */ #include "src/core/lib/iomgr/resolve_address.h" +#include <grpc/support/alloc.h> #include <grpc/support/log.h> #include <grpc/support/sync.h> +#include <grpc/support/thd.h> #include <grpc/support/time.h> #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr.h" @@ -46,16 +48,72 @@ static gpr_timespec test_deadline(void) { typedef struct args_struct { gpr_event ev; grpc_resolved_addresses *addrs; + gpr_atm done_atm; + gpr_mu *mu; + grpc_pollset *pollset; + grpc_pollset_set *pollset_set; } args_struct; -void args_init(args_struct *args) { +static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {} + +void args_init(grpc_exec_ctx *exec_ctx, args_struct *args) { gpr_event_init(&args->ev); + args->pollset = gpr_malloc(grpc_pollset_size()); + grpc_pollset_init(args->pollset, &args->mu); + args->pollset_set = grpc_pollset_set_create(); + grpc_pollset_set_add_pollset(exec_ctx, args->pollset_set, args->pollset); args->addrs = NULL; } -void args_finish(args_struct *args) { +void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) { GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline())); grpc_resolved_addresses_destroy(args->addrs); + grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset); + grpc_pollset_set_destroy(args->pollset_set); + grpc_closure do_nothing_cb; + grpc_closure_init(&do_nothing_cb, do_nothing, NULL); + 
grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb); + // exec_ctx needs to be flushed before calling grpc_pollset_destroy() + grpc_exec_ctx_flush(exec_ctx); + grpc_pollset_destroy(args->pollset); + gpr_free(args->pollset); +} + +static gpr_timespec n_sec_deadline(int seconds) { + return gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_seconds(seconds, GPR_TIMESPAN)); +} + +static void actually_poll(void *argsp) { + args_struct *args = argsp; + gpr_timespec deadline = n_sec_deadline(10); + while (true) { + bool done = gpr_atm_acq_load(&args->done_atm) != 0; + if (done) { + break; + } + gpr_timespec time_left = + gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)); + gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done, + time_left.tv_sec, time_left.tv_nsec); + GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0); + grpc_pollset_worker *worker = NULL; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + gpr_mu_lock(args->mu); + GRPC_LOG_IF_ERROR( + "pollset_work", + grpc_pollset_work(&exec_ctx, args->pollset, &worker, + gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1))); + gpr_mu_unlock(args->mu); + grpc_exec_ctx_finish(&exec_ctx); + } + gpr_event_set(&args->ev, (void *)1); +} + +static void poll_pollset_until_request_done(args_struct *args) { + gpr_atm_rel_store(&args->done_atm, 0); + gpr_thd_id id; + gpr_thd_new(&id, actually_poll, args, NULL); } static void must_succeed(grpc_exec_ctx *exec_ctx, void *argsp, @@ -64,53 +122,57 @@ static void must_succeed(grpc_exec_ctx *exec_ctx, void *argsp, GPR_ASSERT(err == GRPC_ERROR_NONE); GPR_ASSERT(args->addrs != NULL); GPR_ASSERT(args->addrs->naddrs > 0); - gpr_event_set(&args->ev, (void *)1); + gpr_atm_rel_store(&args->done_atm, 1); } static void must_fail(grpc_exec_ctx *exec_ctx, void *argsp, grpc_error *err) { args_struct *args = argsp; GPR_ASSERT(err != GRPC_ERROR_NONE); - gpr_event_set(&args->ev, (void *)1); + gpr_atm_rel_store(&args->done_atm, 1); } static void test_localhost(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, "localhost:1", NULL, + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address(&exec_ctx, "localhost:1", NULL, args.pollset_set, grpc_closure_create(must_succeed, &args), &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_default_port(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, "localhost", "1", + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address(&exec_ctx, "localhost", "1", args.pollset_set, grpc_closure_create(must_succeed, &args), &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_missing_default_port(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, "localhost", NULL, + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address(&exec_ctx, "localhost", NULL, args.pollset_set, grpc_closure_create(must_fail, &args), &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_ipv6_with_port(void) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; 
- grpc_resolve_address(&exec_ctx, "[2001:db8::1]:1", NULL, + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address(&exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set, grpc_closure_create(must_succeed, &args), &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } static void test_ipv6_without_port(void) { @@ -119,13 +181,14 @@ static void test_ipv6_without_port(void) { }; unsigned i; for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, kCases[i], "80", + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address(&exec_ctx, kCases[i], "80", args.pollset_set, grpc_closure_create(must_succeed, &args), &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } } @@ -135,13 +198,14 @@ static void test_invalid_ip_addresses(void) { }; unsigned i; for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, kCases[i], NULL, + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address(&exec_ctx, kCases[i], NULL, args.pollset_set, grpc_closure_create(must_fail, &args), &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } } @@ -151,13 +215,14 @@ static void test_unparseable_hostports(void) { }; unsigned i; for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) { - args_struct args; - args_init(&args); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resolve_address(&exec_ctx, kCases[i], "1", + args_struct args; + args_init(&exec_ctx, &args); + poll_pollset_until_request_done(&args); + grpc_resolve_address(&exec_ctx, kCases[i], "1", args.pollset_set, grpc_closure_create(must_fail, &args), &args.addrs); + args_finish(&exec_ctx, &args); grpc_exec_ctx_finish(&exec_ctx); - args_finish(&args); } } diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c index 1b8a39c1be..9a7810e227 100644 --- a/test/core/iomgr/tcp_server_posix_test.c +++ b/test/core/iomgr/tcp_server_posix_test.c @@ -126,6 +126,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, on_connect_result temp_result; on_connect_result_set(&temp_result, acceptor); + gpr_free(acceptor); gpr_mu_lock(g_mu); g_result = temp_result; diff --git a/test/core/security/create_jwt.c b/test/core/security/create_jwt.c index 741ace9bdd..ac795f29d2 100644 --- a/test/core/security/create_jwt.c +++ b/test/core/security/create_jwt.c @@ -72,6 +72,7 @@ int main(int argc, char **argv) { char *scope = NULL; char *json_key_file_path = NULL; char *service_url = NULL; + grpc_init(); gpr_cmdline *cl = gpr_cmdline_create("create_jwt"); gpr_cmdline_add_string(cl, "json_key", "File path of the json key.", &json_key_file_path); @@ -102,5 +103,6 @@ int main(int argc, char **argv) { create_jwt(json_key_file_path, service_url, scope); gpr_cmdline_destroy(cl); + grpc_shutdown(); return 0; } diff --git a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c index f8afba8d6d..9a21814adc 100644 --- a/test/core/security/jwt_verifier_test.c +++ b/test/core/security/jwt_verifier_test.c @@ -166,6 +166,13 @@ static const char 
claims_without_time_constraint[] = " \"jti\": \"jwtuniqueid\"," " \"foo\": \"bar\"}"; +static const char claims_with_bad_subject[] = + "{ \"aud\": \"https://foo.com\"," + " \"iss\": \"evil@blah.foo.com\"," + " \"sub\": \"juju@blah.foo.com\"," + " \"jti\": \"jwtuniqueid\"," + " \"foo\": \"bar\"}"; + static const char invalid_claims[] = "{ \"aud\": \"https://foo.com\"," " \"iss\": 46," /* Issuer cannot be a number. */ @@ -179,6 +186,38 @@ typedef struct { const char *expected_subject; } verifier_test_config; +static void test_jwt_issuer_email_domain(void) { + const char *d = grpc_jwt_issuer_email_domain("https://foo.com"); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain("foo.com"); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain(""); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain("@"); + GPR_ASSERT(d == NULL); + d = grpc_jwt_issuer_email_domain("bar@foo"); + GPR_ASSERT(strcmp(d, "foo") == 0); + d = grpc_jwt_issuer_email_domain("bar@foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + d = grpc_jwt_issuer_email_domain("bar@blah.foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + d = grpc_jwt_issuer_email_domain("bar.blah@blah.foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + d = grpc_jwt_issuer_email_domain("bar.blah@baz.blah.foo.com"); + GPR_ASSERT(strcmp(d, "foo.com") == 0); + + /* This is not a very good parser but make sure we do not crash on these weird + inputs. */ + d = grpc_jwt_issuer_email_domain("@foo"); + GPR_ASSERT(strcmp(d, "foo") == 0); + d = grpc_jwt_issuer_email_domain("bar@."); + GPR_ASSERT(d != NULL); + d = grpc_jwt_issuer_email_domain("bar@.."); + GPR_ASSERT(d != NULL); + d = grpc_jwt_issuer_email_domain("bar@..."); + GPR_ASSERT(d != NULL); +} + static void test_claims_success(void) { grpc_jwt_claims *claims; grpc_slice s = grpc_slice_from_copied_string(claims_without_time_constraint); @@ -242,6 +281,19 @@ static void test_bad_audience_claims_failure(void) { grpc_jwt_claims_destroy(claims); } +static void test_bad_subject_claims_failure(void) { + grpc_jwt_claims *claims; + grpc_slice s = grpc_slice_from_copied_string(claims_with_bad_subject); + grpc_json *json = grpc_json_parse_string_with_len( + (char *)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s)); + GPR_ASSERT(json != NULL); + claims = grpc_jwt_claims_from_json(json, s); + GPR_ASSERT(claims != NULL); + GPR_ASSERT(grpc_jwt_claims_check(claims, "https://foo.com") == + GRPC_JWT_VERIFIER_BAD_SUBJECT); + grpc_jwt_claims_destroy(claims); +} + static char *json_key_str(const char *last_part) { size_t result_len = strlen(json_key_str_part1) + strlen(json_key_str_part2) + strlen(last_part); @@ -563,10 +615,12 @@ static void test_jwt_verifier_bad_format(void) { int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); + test_jwt_issuer_email_domain(); test_claims_success(); test_expired_claims_failure(); test_invalid_claims_failure(); test_bad_audience_claims_failure(); + test_bad_subject_claims_failure(); test_jwt_verifier_google_email_issuer_success(); test_jwt_verifier_custom_email_issuer_success(); test_jwt_verifier_url_issuer_success(); diff --git a/test/core/security/ssl_server_fuzzer.c b/test/core/security/ssl_server_fuzzer.c index ca629a6eba..8673225fef 100644 --- a/test/core/security/ssl_server_fuzzer.c +++ b/test/core/security/ssl_server_fuzzer.c @@ -111,8 +111,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { struct handshake_state state; state.done_callback_called = false; grpc_handshake_manager *handshake_mgr = 
grpc_handshake_manager_create(); - grpc_server_security_connector_create_handshakers(&exec_ctx, sc, - handshake_mgr); + grpc_server_security_connector_add_handshakers(&exec_ctx, sc, handshake_mgr); grpc_handshake_manager_do_handshake( &exec_ctx, handshake_mgr, mock_endpoint, NULL /* channel_args */, deadline, NULL /* acceptor */, on_handshake_done, &state); diff --git a/test/core/security/verify_jwt.c b/test/core/security/verify_jwt.c index 043d29e6bb..ccc85c9f32 100644 --- a/test/core/security/verify_jwt.c +++ b/test/core/security/verify_jwt.c @@ -93,6 +93,7 @@ int main(int argc, char **argv) { char *aud = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_init(); cl = gpr_cmdline_create("JWT verifier tool"); gpr_cmdline_add_string(cl, "jwt", "JSON web token to verify", &jwt); gpr_cmdline_add_string(cl, "aud", "Audience for the JWT", &aud); @@ -131,5 +132,6 @@ int main(int argc, char **argv) { grpc_jwt_verifier_destroy(verifier); gpr_cmdline_destroy(cl); + grpc_shutdown(); return !sync.success; } diff --git a/test/core/support/string_test.c b/test/core/support/string_test.c index 78b77fad8e..af232db350 100644 --- a/test/core/support/string_test.c +++ b/test/core/support/string_test.c @@ -243,6 +243,8 @@ static void test_int64toa() { static void test_leftpad() { char *padded; + LOG_TEST_NAME("test_leftpad"); + padded = gpr_leftpad("foo", ' ', 5); GPR_ASSERT(0 == strcmp(" foo", padded)); gpr_free(padded); @@ -273,12 +275,25 @@ static void test_leftpad() { } static void test_stricmp(void) { + LOG_TEST_NAME("test_stricmp"); + GPR_ASSERT(0 == gpr_stricmp("hello", "hello")); GPR_ASSERT(0 == gpr_stricmp("HELLO", "hello")); GPR_ASSERT(gpr_stricmp("a", "b") < 0); GPR_ASSERT(gpr_stricmp("b", "a") > 0); } +static void test_memrchr(void) { + LOG_TEST_NAME("test_memrchr"); + + GPR_ASSERT(NULL == gpr_memrchr(NULL, 'a', 0)); + GPR_ASSERT(NULL == gpr_memrchr("", 'a', 0)); + GPR_ASSERT(NULL == gpr_memrchr("hello", 'b', 5)); + GPR_ASSERT(0 == strcmp((const char *)gpr_memrchr("hello", 'h', 5), "hello")); + GPR_ASSERT(0 == strcmp((const char *)gpr_memrchr("hello", 'o', 5), "o")); + GPR_ASSERT(0 == strcmp((const char *)gpr_memrchr("hello", 'l', 5), "lo")); +} + int main(int argc, char **argv) { grpc_test_init(argc, argv); test_strdup(); @@ -291,5 +306,6 @@ int main(int argc, char **argv) { test_int64toa(); test_leftpad(); test_stricmp(); + test_memrchr(); return 0; } diff --git a/test/core/surface/channel_create_test.c b/test/core/surface/channel_create_test.c index ad7970aab9..654e5324d9 100644 --- a/test/core/surface/channel_create_test.c +++ b/test/core/surface/channel_create_test.c @@ -31,9 +31,14 @@ * */ +#include <string.h> + #include <grpc/grpc.h> #include <grpc/support/log.h> + #include "src/core/ext/client_channel/resolver_registry.h" +#include "src/core/lib/channel/channel_stack.h" +#include "src/core/lib/surface/channel.h" #include "test/core/util/test_config.h" void test_unknown_scheme_target(void) { @@ -44,6 +49,13 @@ void test_unknown_scheme_target(void) { chan = grpc_insecure_channel_create("blah://blah", NULL, NULL); GPR_ASSERT(chan != NULL); + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_channel_element *elem = + grpc_channel_stack_element(grpc_channel_get_channel_stack(chan), 0); + GPR_ASSERT(0 == strcmp(elem->filter->name, "lame-client")); + grpc_exec_ctx_finish(&exec_ctx); + grpc_channel_destroy(chan); } diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c index f9f4675454..93a4794222 100644 --- 
a/test/core/surface/concurrent_connectivity_test.c +++ b/test/core/surface/concurrent_connectivity_test.c @@ -105,8 +105,8 @@ void server_thread(void *vargs) { static void on_connect(grpc_exec_ctx *exec_ctx, void *vargs, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); struct server_thread_args *args = (struct server_thread_args *)vargs; - (void)acceptor; grpc_endpoint_shutdown(exec_ctx, tcp); grpc_endpoint_destroy(exec_ctx, tcp); GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(args->pollset, NULL)); diff --git a/test/core/util/reconnect_server.c b/test/core/util/reconnect_server.c index 6509cc5b68..7bf83a74a1 100644 --- a/test/core/util/reconnect_server.c +++ b/test/core/util/reconnect_server.c @@ -73,6 +73,7 @@ static void pretty_print_backoffs(reconnect_server *server) { static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, grpc_pollset *accepting_pollset, grpc_tcp_server_acceptor *acceptor) { + gpr_free(acceptor); char *peer; char *last_colon; reconnect_server *server = (reconnect_server *)arg; diff --git a/test/cpp/common/channel_filter_test.cc b/test/cpp/common/channel_filter_test.cc index 600a953d82..32246a4b76 100644 --- a/test/cpp/common/channel_filter_test.cc +++ b/test/cpp/common/channel_filter_test.cc @@ -41,14 +41,24 @@ namespace testing { class MyChannelData : public ChannelData { public: - MyChannelData(const grpc_channel_args& args, const char* peer) - : ChannelData(args, peer) {} + MyChannelData() {} + + grpc_error* Init(grpc_exec_ctx* exec_ctx, + grpc_channel_element_args* args) override { + (void)args->channel_args; // Make sure field is available. + return GRPC_ERROR_NONE; + } }; class MyCallData : public CallData { public: - explicit MyCallData(const ChannelData& channel_data) - : CallData(channel_data) {} + MyCallData() {} + + grpc_error* Init(grpc_exec_ctx* exec_ctx, ChannelData* channel_data, + grpc_call_element_args* args) override { + (void)args->path; // Make sure field is available. + return GRPC_ERROR_NONE; + } }; // This test ensures that when we make changes to the filter API in diff --git a/test/cpp/end2end/filter_end2end_test.cc b/test/cpp/end2end/filter_end2end_test.cc index ab6ed46de5..bd384f68b4 100644 --- a/test/cpp/end2end/filter_end2end_test.cc +++ b/test/cpp/end2end/filter_end2end_test.cc @@ -114,20 +114,17 @@ int GetCallCounterValue() { class ChannelDataImpl : public ChannelData { public: - ChannelDataImpl(const grpc_channel_args& args, const char* peer) - : ChannelData(args, peer) { + grpc_error* Init(grpc_exec_ctx* exec_ctx, grpc_channel_element_args* args) { IncrementConnectionCounter(); + return GRPC_ERROR_NONE; } }; class CallDataImpl : public CallData { public: - explicit CallDataImpl(const ChannelDataImpl& channel_data) - : CallData(channel_data) {} - void StartTransportStreamOp(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, TransportStreamOp* op) override { - // Incrementing the counter could be done from the ctor, but we want + // Incrementing the counter could be done from Init(), but we want // to test that the individual methods are actually called correctly. 
if (op->recv_initial_metadata() != nullptr) IncrementCallCounter(); grpc_call_next_op(exec_ctx, elem, op->op()); diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc index fcdcaba6a2..de304b9f89 100644 --- a/test/cpp/grpclb/grpclb_test.cc +++ b/test/cpp/grpclb/grpclb_test.cc @@ -659,7 +659,7 @@ static test_fixture setup_test_fixture(int lb_server_update_delay_ms) { char *server_uri; // The grpclb LB policy will be automatically selected by virtue of // the fact that the returned addresses are balancer addresses. - gpr_asprintf(&server_uri, "test:%s?lb_enabled=1", + gpr_asprintf(&server_uri, "test:///%s?lb_enabled=1", tf.lb_server.servers_hostport); setup_client(server_uri, &tf.client); gpr_free(server_uri); diff --git a/test/cpp/interop/stress_test.cc b/test/cpp/interop/stress_test.cc index fc35db5233..97e658869f 100644 --- a/test/cpp/interop/stress_test.cc +++ b/test/cpp/interop/stress_test.cc @@ -371,9 +371,9 @@ int main(int argc, char** argv) { } // Start metrics server before waiting for the stress test threads + std::unique_ptr<grpc::Server> metrics_server; if (FLAGS_metrics_port > 0) { - std::unique_ptr<grpc::Server> metrics_server = - metrics_service.StartServer(FLAGS_metrics_port); + metrics_server = metrics_service.StartServer(FLAGS_metrics_port); } // Wait for the stress test threads to complete diff --git a/test/cpp/microbenchmarks/bm_fullstack.cc b/test/cpp/microbenchmarks/bm_fullstack.cc index 6cc780d44a..6c0bf80488 100644 --- a/test/cpp/microbenchmarks/bm_fullstack.cc +++ b/test/cpp/microbenchmarks/bm_fullstack.cc @@ -59,7 +59,7 @@ extern "C" { } #include "src/cpp/client/create_channel_internal.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" -#include "third_party/google_benchmark/include/benchmark/benchmark.h" +#include "third_party/benchmark/include/benchmark/benchmark.h" namespace grpc { namespace testing { diff --git a/test/cpp/microbenchmarks/noop-benchmark.cc b/test/cpp/microbenchmarks/noop-benchmark.cc index 6b06c69c6e..99fa6d5f6e 100644 --- a/test/cpp/microbenchmarks/noop-benchmark.cc +++ b/test/cpp/microbenchmarks/noop-benchmark.cc @@ -31,10 +31,10 @@ * */ -/* This benchmark exists to ensure that the google_benchmark integration is +/* This benchmark exists to ensure that the benchmark integration is * working */ -#include "third_party/google_benchmark/include/benchmark/benchmark.h" +#include "third_party/benchmark/include/benchmark/benchmark.h" static void BM_NoOp(benchmark::State& state) { while (state.KeepRunning()) { diff --git a/test/cpp/qps/driver.cc b/test/cpp/qps/driver.cc index ea0b38e8ad..93ef32db77 100644 --- a/test/cpp/qps/driver.cc +++ b/test/cpp/qps/driver.cc @@ -44,6 +44,7 @@ #include <grpc/support/alloc.h> #include <grpc/support/host_port.h> #include <grpc/support/log.h> +#include <grpc/support/string_util.h> #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/env.h" @@ -99,23 +100,36 @@ static std::unordered_map<string, std::deque<int>> get_hosts_and_cores( return hosts; } -static deque<string> get_workers(const string& name) { - char* env = gpr_getenv(name.c_str()); - if (!env) return deque<string>(); - +static deque<string> get_workers(const string& env_name) { + char* env = gpr_getenv(env_name.c_str()); + if (!env) { + env = gpr_strdup(""); + } deque<string> out; char* p = env; - for (;;) { - char* comma = strchr(p, ','); - if (comma) { - out.emplace_back(p, comma); - p = comma + 1; - } else { - out.emplace_back(p); - gpr_free(env); - return out; + if (strlen(env) != 0) { + for (;;) { + char* comma 
= strchr(p, ','); + if (comma) { + out.emplace_back(p, comma); + p = comma + 1; + } else { + out.emplace_back(p); + break; + } } } + if (out.size() == 0) { + gpr_log(GPR_ERROR, + "Environment variable \"%s\" does not contain a list of QPS " + "workers to use. Set it to a comma-separated list of " + "hostname:port pairs, starting with hosts that should act as " + "servers. E.g. export " + "%s=\"serverhost1:1234,clienthost1:1234,clienthost2:1234\"", + env_name.c_str(), env_name.c_str()); + } + gpr_free(env); + return out; } // helpers for postprocess_scenario_result @@ -195,7 +209,8 @@ static void postprocess_scenario_result(ScenarioResult* result) { std::unique_ptr<ScenarioResult> RunScenario( const ClientConfig& initial_client_config, size_t num_clients, const ServerConfig& initial_server_config, size_t num_servers, - int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count) { + int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count, + const char* qps_server_target_override, bool configure_core_lists) { // Log everything from the driver gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG); @@ -240,9 +255,7 @@ std::unique_ptr<ScenarioResult> RunScenario( workers.push_back(addr); } } - - // Setup the hosts and core counts - auto hosts_cores = get_hosts_and_cores(workers); + GPR_ASSERT(workers.size() != 0); // if num_clients is set to <=0, do dynamic sizing: all workers // except for servers are clients @@ -264,6 +277,11 @@ std::unique_ptr<ScenarioResult> RunScenario( unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream; }; std::vector<ServerData> servers(num_servers); + std::unordered_map<string, std::deque<int>> hosts_cores; + + if (configure_core_lists) { + hosts_cores = get_hosts_and_cores(workers); + } for (size_t i = 0; i < num_servers; i++) { gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")", workers[i].c_str(), i); @@ -271,37 +289,36 @@ std::unique_ptr<ScenarioResult> RunScenario( CreateChannel(workers[i], InsecureChannelCredentials())); ServerConfig server_config = initial_server_config; - char* host; - char* driver_port; - char* cli_target; - gpr_split_host_port(workers[i].c_str(), &host, &driver_port); - string host_str(host); int server_core_limit = initial_server_config.core_limit(); int client_core_limit = initial_client_config.core_limit(); - if (server_core_limit == 0 && client_core_limit > 0) { - // In this case, limit the server cores if it matches the - // same host as one or more clients - const auto& dq = hosts_cores.at(host_str); - bool match = false; - int limit = dq.size(); - for (size_t cli = 0; cli < num_clients; cli++) { - if (host_str == get_host(workers[cli + num_servers])) { - limit -= client_core_limit; - match = true; + if (configure_core_lists) { + string host_str(get_host(workers[i])); + if (server_core_limit == 0 && client_core_limit > 0) { + // In this case, limit the server cores if it matches the + // same host as one or more clients + const auto& dq = hosts_cores.at(host_str); + bool match = false; + int limit = dq.size(); + for (size_t cli = 0; cli < num_clients; cli++) { + if (host_str == get_host(workers[cli + num_servers])) { + limit -= client_core_limit; + match = true; + } + } + if (match) { + GPR_ASSERT(limit > 0); + server_core_limit = limit; } } - if (match) { - GPR_ASSERT(limit > 0); - server_core_limit = limit; - } - } - if (server_core_limit > 0) { - auto& dq = hosts_cores.at(host_str); - GPR_ASSERT(dq.size() >= static_cast<size_t>(server_core_limit)); - for (int core = 0; core < 
server_core_limit; core++) { - server_config.add_core_list(dq.front()); - dq.pop_front(); + if (server_core_limit > 0) { + auto& dq = hosts_cores.at(host_str); + GPR_ASSERT(dq.size() >= static_cast<size_t>(server_core_limit)); + gpr_log(GPR_INFO, "Setting server core_list"); + for (int core = 0; core < server_core_limit; core++) { + server_config.add_core_list(dq.front()); + dq.pop_front(); + } } } @@ -315,11 +332,19 @@ std::unique_ptr<ScenarioResult> RunScenario( if (!servers[i].stream->Read(&init_status)) { gpr_log(GPR_ERROR, "Server %zu did not yield initial status", i); } - gpr_join_host_port(&cli_target, host, init_status.port()); - client_config.add_server_targets(cli_target); - gpr_free(host); - gpr_free(driver_port); - gpr_free(cli_target); + if (qps_server_target_override != NULL && + strlen(qps_server_target_override) > 0) { + // overriding the qps server target only works if there is 1 server + GPR_ASSERT(num_servers == 1); + client_config.add_server_targets(qps_server_target_override); + } else { + std::string host; + char* cli_target; + host = get_host(workers[i]); + gpr_join_host_port(&cli_target, host.c_str(), init_status.port()); + client_config.add_server_targets(cli_target); + gpr_free(cli_target); + } } // Targets are all set by now @@ -341,7 +366,8 @@ std::unique_ptr<ScenarioResult> RunScenario( int server_core_limit = initial_server_config.core_limit(); int client_core_limit = initial_client_config.core_limit(); - if ((server_core_limit > 0) || (client_core_limit > 0)) { + if (configure_core_lists && + ((server_core_limit > 0) || (client_core_limit > 0))) { auto& dq = hosts_cores.at(get_host(worker)); if (client_core_limit == 0) { // limit client cores if it matches a server host @@ -359,6 +385,7 @@ std::unique_ptr<ScenarioResult> RunScenario( } if (client_core_limit > 0) { GPR_ASSERT(dq.size() >= static_cast<size_t>(client_core_limit)); + gpr_log(GPR_INFO, "Setting client core_list"); for (int core = 0; core < client_core_limit; core++) { per_client_config.add_core_list(dq.front()); dq.pop_front(); @@ -548,6 +575,9 @@ bool RunQuit() { // Get client, server lists bool result = true; auto workers = get_workers("QPS_WORKERS"); + if (workers.size() == 0) { + return false; + } for (size_t i = 0; i < workers.size(); i++) { auto stub = WorkerService::NewStub( CreateChannel(workers[i], InsecureChannelCredentials())); diff --git a/test/cpp/qps/driver.h b/test/cpp/qps/driver.h index 93f4370caf..b5c8152e1b 100644 --- a/test/cpp/qps/driver.h +++ b/test/cpp/qps/driver.h @@ -45,7 +45,9 @@ namespace testing { std::unique_ptr<ScenarioResult> RunScenario( const grpc::testing::ClientConfig& client_config, size_t num_clients, const grpc::testing::ServerConfig& server_config, size_t num_servers, - int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count); + int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count, + const char* qps_server_target_override = "", + bool configure_core_lists = true); bool RunQuit(); } // namespace testing diff --git a/test/cpp/qps/qps_json_driver.cc b/test/cpp/qps/qps_json_driver.cc index 31b5917fb7..da835b995a 100644 --- a/test/cpp/qps/qps_json_driver.cc +++ b/test/cpp/qps/qps_json_driver.cc @@ -67,17 +67,25 @@ DEFINE_double(error_tolerance, 0.01, "range is narrower than the error_tolerance computed range, we " "stop the search."); +DEFINE_string(qps_server_target_override, "", + "Override QPS server target to configure in client configs." 
+ "Only applicable if there is a single benchmark server."); +DEFINE_bool(configure_core_lists, true, + "Provide 'core_list' parameters to workers. Value determined " + "by cores available and 'core_limit' parameters of the scenarios."); + namespace grpc { namespace testing { static std::unique_ptr<ScenarioResult> RunAndReport(const Scenario& scenario, bool* success) { std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; - auto result = - RunScenario(scenario.client_config(), scenario.num_clients(), - scenario.server_config(), scenario.num_servers(), - scenario.warmup_seconds(), scenario.benchmark_seconds(), - scenario.spawn_local_worker_count()); + auto result = RunScenario( + scenario.client_config(), scenario.num_clients(), + scenario.server_config(), scenario.num_servers(), + scenario.warmup_seconds(), scenario.benchmark_seconds(), + scenario.spawn_local_worker_count(), + FLAGS_qps_server_target_override.c_str(), FLAGS_configure_core_lists); // Amend the result with scenario config. Eventually we should adjust // RunScenario contract so we don't need to touch the result here. diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc index 03c33abe9f..b9900ca1b7 100644 --- a/test/cpp/util/grpc_tool.cc +++ b/test/cpp/util/grpc_tool.cc @@ -86,11 +86,12 @@ class GrpcTool { // callback); // bool PrintTypeId(int argc, const char** argv, GrpcToolOutputCallback // callback); - // bool ParseMessage(int argc, const char** argv, GrpcToolOutputCallback - // callback); - // bool ToText(int argc, const char** argv, GrpcToolOutputCallback callback); - // bool ToBinary(int argc, const char** argv, GrpcToolOutputCallback - // callback); + bool ParseMessage(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback); + bool ToText(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback); + bool ToBinary(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback); void SetPrintCommandMode(int exit_status) { print_command_usage_ = true; @@ -173,9 +174,9 @@ const Command ops[] = { {"list", BindWith5Args(&GrpcTool::ListServices), 1, 3}, {"call", BindWith5Args(&GrpcTool::CallMethod), 2, 3}, {"type", BindWith5Args(&GrpcTool::PrintType), 2, 2}, - // {"parse", BindWith5Args(&GrpcTool::ParseMessage), 2, 3}, - // {"totext", BindWith5Args(&GrpcTool::ToText), 2, 3}, - // {"tobinary", BindWith5Args(&GrpcTool::ToBinary), 2, 3}, + {"parse", BindWith5Args(&GrpcTool::ParseMessage), 2, 3}, + {"totext", BindWith5Args(&GrpcTool::ToText), 2, 3}, + {"tobinary", BindWith5Args(&GrpcTool::ToBinary), 2, 3}, }; void Usage(const grpc::string& msg) { @@ -185,9 +186,9 @@ void Usage(const grpc::string& msg) { " grpc_cli ls ... ; List services\n" " grpc_cli call ... ; Call method\n" " grpc_cli type ... ; Print type\n" - // " grpc_cli parse ... ; Parse message\n" - // " grpc_cli totext ... ; Convert binary message to text\n" - // " grpc_cli tobinary ... ; Convert text message to binary\n" + " grpc_cli parse ... ; Parse message\n" + " grpc_cli totext ... ; Convert binary message to text\n" + " grpc_cli tobinary ... ; Convert text message to binary\n" " grpc_cli help ... 
; Print this message, or per-command usage\n" "\n", msg.c_str()); @@ -414,6 +415,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, grpc::string request_text; grpc::string server_address(argv[0]); grpc::string method_name(argv[1]); + grpc::string formatted_method_name; std::unique_ptr<grpc::testing::ProtoFileParser> parser; grpc::string serialized_request_proto; @@ -450,7 +452,9 @@ bool GrpcTool::CallMethod(int argc, const char** argv, if (FLAGS_binary_input) { serialized_request_proto = request_text; + formatted_method_name = method_name; } else { + formatted_method_name = parser->GetFormattedMethodName(method_name); serialized_request_proto = parser->GetSerializedProtoFromMethod( method_name, request_text, true /* is_request */); if (parser->HasError()) { @@ -466,9 +470,9 @@ bool GrpcTool::CallMethod(int argc, const char** argv, ParseMetadataFlag(&client_metadata); PrintMetadata(client_metadata, "Sending client initial metadata:"); grpc::Status status = grpc::testing::CliCall::Call( - channel, parser->GetFormatedMethodName(method_name), - serialized_request_proto, &serialized_response_proto, client_metadata, - &server_initial_metadata, &server_trailing_metadata); + channel, formatted_method_name, serialized_request_proto, + &serialized_response_proto, client_metadata, &server_initial_metadata, + &server_trailing_metadata); PrintMetadata(server_initial_metadata, "Received initial metadata from server:"); PrintMetadata(server_trailing_metadata, @@ -493,5 +497,122 @@ bool GrpcTool::CallMethod(int argc, const char** argv, return callback(output_ss.str()); } +bool GrpcTool::ParseMessage(int argc, const char** argv, + const CliCredentials& cred, + GrpcToolOutputCallback callback) { + CommandUsage( + "Parse message\n" + " grpc_cli parse <address> <type> [<message>]\n" + " <address> ; host:port\n" + " <type> ; Protocol buffer type name\n" + " <message> ; Text protobuffer (overrides --infile)\n" + " --protofiles ; Comma separated proto files used as a" + " fallback when parsing request/response\n" + " --proto_path ; The search path of proto files, valid" + " only when --protofiles is given\n" + " --infile ; Input filename (defaults to stdin)\n" + " --outfile ; Output filename (defaults to stdout)\n" + " --binary_input ; Input in binary format\n" + " --binary_output ; Output in binary format\n" + + cred.GetCredentialUsage()); + + std::stringstream output_ss; + grpc::string message_text; + grpc::string server_address(argv[0]); + grpc::string type_name(argv[1]); + std::unique_ptr<grpc::testing::ProtoFileParser> parser; + grpc::string serialized_request_proto; + + if (argc == 3) { + message_text = argv[2]; + if (!FLAGS_infile.empty()) { + fprintf(stderr, "warning: message given in argv, ignoring --infile.\n"); + } + } else { + std::stringstream input_stream; + if (FLAGS_infile.empty()) { + if (isatty(STDIN_FILENO)) { + fprintf(stderr, "reading request message from stdin...\n"); + } + input_stream << std::cin.rdbuf(); + } else { + std::ifstream input_file(FLAGS_infile, std::ios::in | std::ios::binary); + input_stream << input_file.rdbuf(); + input_file.close(); + } + message_text = input_stream.str(); + } + + if (!FLAGS_binary_input || !FLAGS_binary_output) { + std::shared_ptr<grpc::Channel> channel = + grpc::CreateChannel(server_address, cred.GetCredentials()); + parser.reset( + new grpc::testing::ProtoFileParser(FLAGS_remotedb ? 
channel : nullptr, + FLAGS_proto_path, FLAGS_protofiles)); + if (parser->HasError()) { + return false; + } + } + + if (FLAGS_binary_input) { + serialized_request_proto = message_text; + } else { + serialized_request_proto = + parser->GetSerializedProtoFromMessageType(type_name, message_text); + if (parser->HasError()) { + return false; + } + } + + if (FLAGS_binary_output) { + output_ss << serialized_request_proto; + } else { + grpc::string output_text = parser->GetTextFormatFromMessageType( + type_name, serialized_request_proto); + if (parser->HasError()) { + return false; + } + output_ss << output_text << std::endl; + } + + return callback(output_ss.str()); +} + +bool GrpcTool::ToText(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback) { + CommandUsage( + "Convert binary message to text\n" + " grpc_cli totext <protofiles> <type>\n" + " <protofiles> ; Comma separated list of proto files\n" + " <type> ; Protocol buffer type name\n" + " --proto_path ; The search path of proto files\n" + " --infile ; Input filename (defaults to stdin)\n" + " --outfile ; Output filename (defaults to stdout)\n"); + + FLAGS_protofiles = argv[0]; + FLAGS_remotedb = false; + FLAGS_binary_input = true; + FLAGS_binary_output = false; + return ParseMessage(argc, argv, cred, callback); +} + +bool GrpcTool::ToBinary(int argc, const char** argv, const CliCredentials& cred, + GrpcToolOutputCallback callback) { + CommandUsage( + "Convert text message to binary\n" + " grpc_cli tobinary <protofiles> <type> [<message>]\n" + " <protofiles> ; Comma separated list of proto files\n" + " <type> ; Protocol buffer type name\n" + " --proto_path ; The search path of proto files\n" + " --infile ; Input filename (defaults to stdin)\n" + " --outfile ; Output filename (defaults to stdout)\n"); + + FLAGS_protofiles = argv[0]; + FLAGS_remotedb = false; + FLAGS_binary_input = false; + FLAGS_binary_output = true; + return ParseMessage(argc, argv, cred, callback); +} + } // namespace testing } // namespace grpc diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc index 1ff8172306..33ce611a60 100644 --- a/test/cpp/util/grpc_tool_test.cc +++ b/test/cpp/util/grpc_tool_test.cc @@ -86,9 +86,18 @@ using grpc::testing::EchoResponse; " rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ "{}\n" +#define ECHO_RESPONSE_MESSAGE \ + "message: \"echo\"\n" \ + "param {\n" \ + " host: \"localhost\"\n" \ + " peer: \"peer\"\n" \ + "}\n\n" + namespace grpc { namespace testing { +DECLARE_bool(binary_input); +DECLARE_bool(binary_output); DECLARE_bool(l); namespace { @@ -338,6 +347,47 @@ TEST_F(GrpcToolTest, CallCommand) { ShutdownServer(); } +TEST_F(GrpcToolTest, ParseCommand) { + // Test input "grpc_cli parse localhost:<port> grpc.testing.EchoResponse + // ECHO_RESPONSE_MESSAGE" + std::stringstream output_stream; + std::stringstream binary_output_stream; + + const grpc::string server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "parse", server_address.c_str(), + "grpc.testing.EchoResponse", ECHO_RESPONSE_MESSAGE}; + + FLAGS_binary_input = false; + FLAGS_binary_output = false; + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + // Expected output: ECHO_RESPONSE_MESSAGE + EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), ECHO_RESPONSE_MESSAGE)); + + // Parse text message to binary message and then parse it back to text message + 
output_stream.str(grpc::string()); + output_stream.clear(); + FLAGS_binary_output = true; + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + grpc::string binary_data = output_stream.str(); + output_stream.str(grpc::string()); + output_stream.clear(); + argv[4] = binary_data.c_str(); + FLAGS_binary_input = true; + FLAGS_binary_output = false; + EXPECT_TRUE(0 == GrpcToolMainLib(5, argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + + // Expected output: ECHO_RESPONSE_MESSAGE + EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), ECHO_RESPONSE_MESSAGE)); + + ShutdownServer(); +} + TEST_F(GrpcToolTest, TooFewArguments) { // Test input "grpc_cli call Echo" std::stringstream output_stream; diff --git a/test/cpp/util/proto_file_parser.cc b/test/cpp/util/proto_file_parser.cc index 3e524227e5..bc8a6083f4 100644 --- a/test/cpp/util/proto_file_parser.cc +++ b/test/cpp/util/proto_file_parser.cc @@ -172,19 +172,19 @@ grpc::string ProtoFileParser::GetFullMethodName(const grpc::string& method) { return method_descriptor->full_name(); } -grpc::string ProtoFileParser::GetFormatedMethodName( +grpc::string ProtoFileParser::GetFormattedMethodName( const grpc::string& method) { has_error_ = false; - grpc::string formated_method_name = GetFullMethodName(method); + grpc::string formatted_method_name = GetFullMethodName(method); if (has_error_) { return ""; } - size_t last_dot = formated_method_name.find_last_of('.'); + size_t last_dot = formatted_method_name.find_last_of('.'); if (last_dot != grpc::string::npos) { - formated_method_name[last_dot] = '/'; + formatted_method_name[last_dot] = '/'; } - formated_method_name.insert(formated_method_name.begin(), '/'); - return formated_method_name; + formatted_method_name.insert(formatted_method_name.begin(), '/'); + return formatted_method_name; } grpc::string ProtoFileParser::GetMessageTypeFromMethod( diff --git a/test/cpp/util/proto_file_parser.h b/test/cpp/util/proto_file_parser.h index eda3991e72..c1070a37b5 100644 --- a/test/cpp/util/proto_file_parser.h +++ b/test/cpp/util/proto_file_parser.h @@ -64,9 +64,9 @@ class ProtoFileParser { // descriptor database queries. grpc::string GetFullMethodName(const grpc::string& method); - // Formated method name is in the form of /Service/Method, it's good to be + // Formatted method name is in the form of /Service/Method, it's good to be // used as the argument of Stub::Call() - grpc::string GetFormatedMethodName(const grpc::string& method); + grpc::string GetFormattedMethodName(const grpc::string& method); grpc::string GetSerializedProtoFromMethod( const grpc::string& method, const grpc::string& text_format_proto, diff --git a/test/http2_test/http2_base_server.py b/test/http2_test/http2_base_server.py new file mode 100644 index 0000000000..ee7719b1a8 --- /dev/null +++ b/test/http2_test/http2_base_server.py @@ -0,0 +1,205 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import messages_pb2 +import struct + +import h2 +import h2.connection +import twisted +import twisted.internet +import twisted.internet.protocol + +_READ_CHUNK_SIZE = 16384 +_GRPC_HEADER_SIZE = 5 + +class H2ProtocolBaseServer(twisted.internet.protocol.Protocol): + def __init__(self): + self._conn = h2.connection.H2Connection(client_side=False) + self._recv_buffer = {} + self._handlers = {} + self._handlers['ConnectionMade'] = self.on_connection_made_default + self._handlers['DataReceived'] = self.on_data_received_default + self._handlers['WindowUpdated'] = self.on_window_update_default + self._handlers['RequestReceived'] = self.on_request_received_default + self._handlers['SendDone'] = self.on_send_done_default + self._handlers['ConnectionLost'] = self.on_connection_lost + self._handlers['PingAcknowledged'] = self.on_ping_acknowledged_default + self._stream_status = {} + self._send_remaining = {} + self._outstanding_pings = 0 + + def set_handlers(self, handlers): + self._handlers = handlers + + def connectionMade(self): + self._handlers['ConnectionMade']() + + def connectionLost(self, reason): + self._handlers['ConnectionLost'](reason) + + def on_connection_made_default(self): + logging.info('Connection Made') + self._conn.initiate_connection() + self.transport.setTcpNoDelay(True) + self.transport.write(self._conn.data_to_send()) + + def on_connection_lost(self, reason): + logging.info('Disconnected %s' % reason) + twisted.internet.reactor.callFromThread(twisted.internet.reactor.stop) + + def dataReceived(self, data): + try: + events = self._conn.receive_data(data) + except h2.exceptions.ProtocolError: + # this try/except block catches exceptions due to race between sending + # GOAWAY and processing a response in flight. 
+ return + if self._conn.data_to_send: + self.transport.write(self._conn.data_to_send()) + for event in events: + if isinstance(event, h2.events.RequestReceived) and self._handlers.has_key('RequestReceived'): + logging.info('RequestReceived Event for stream: %d' % event.stream_id) + self._handlers['RequestReceived'](event) + elif isinstance(event, h2.events.DataReceived) and self._handlers.has_key('DataReceived'): + logging.info('DataReceived Event for stream: %d' % event.stream_id) + self._handlers['DataReceived'](event) + elif isinstance(event, h2.events.WindowUpdated) and self._handlers.has_key('WindowUpdated'): + logging.info('WindowUpdated Event for stream: %d' % event.stream_id) + self._handlers['WindowUpdated'](event) + elif isinstance(event, h2.events.PingAcknowledged) and self._handlers.has_key('PingAcknowledged'): + logging.info('PingAcknowledged Event') + self._handlers['PingAcknowledged'](event) + self.transport.write(self._conn.data_to_send()) + + def on_ping_acknowledged_default(self, event): + logging.info('ping acknowledged') + self._outstanding_pings -= 1 + + def on_data_received_default(self, event): + self._conn.acknowledge_received_data(len(event.data), event.stream_id) + self._recv_buffer[event.stream_id] += event.data + + def on_request_received_default(self, event): + self._recv_buffer[event.stream_id] = '' + self._stream_id = event.stream_id + self._stream_status[event.stream_id] = True + self._conn.send_headers( + stream_id=event.stream_id, + headers=[ + (':status', '200'), + ('content-type', 'application/grpc'), + ('grpc-encoding', 'identity'), + ('grpc-accept-encoding', 'identity,deflate,gzip'), + ], + ) + self.transport.write(self._conn.data_to_send()) + + def on_window_update_default(self, event): + # send pending data, if any + self.default_send(event.stream_id) + + def send_reset_stream(self): + self._conn.reset_stream(self._stream_id) + self.transport.write(self._conn.data_to_send()) + + def setup_send(self, data_to_send, stream_id): + logging.info('Setting up data to send for stream_id: %d' % stream_id) + self._send_remaining[stream_id] = len(data_to_send) + self._send_offset = 0 + self._data_to_send = data_to_send + self.default_send(stream_id) + + def default_send(self, stream_id): + if not self._send_remaining.has_key(stream_id): + # not setup to send data yet + return + + while self._send_remaining[stream_id] > 0: + lfcw = self._conn.local_flow_control_window(stream_id) + if lfcw == 0: + break + chunk_size = min(lfcw, _READ_CHUNK_SIZE) + bytes_to_send = min(chunk_size, self._send_remaining[stream_id]) + logging.info('flow_control_window = %d. 
sending [%d:%d] stream_id %d' % + (lfcw, self._send_offset, self._send_offset + bytes_to_send, + stream_id)) + data = self._data_to_send[self._send_offset : self._send_offset + bytes_to_send] + try: + self._conn.send_data(stream_id, data, False) + except h2.exceptions.ProtocolError: + logging.info('Stream %d is closed' % stream_id) + break + self._send_remaining[stream_id] -= bytes_to_send + self._send_offset += bytes_to_send + if self._send_remaining[stream_id] == 0: + self._handlers['SendDone'](stream_id) + + def default_ping(self): + logging.info('sending ping') + self._outstanding_pings += 1 + self._conn.ping(b'\x00'*8) + self.transport.write(self._conn.data_to_send()) + + def on_send_done_default(self, stream_id): + if self._stream_status[stream_id]: + self._stream_status[stream_id] = False + self.default_send_trailer(stream_id) + else: + logging.error('Stream %d is already closed' % stream_id) + + def default_send_trailer(self, stream_id): + logging.info('Sending trailer for stream id %d' % stream_id) + self._conn.send_headers(stream_id, + headers=[ ('grpc-status', '0') ], + end_stream=True + ) + self.transport.write(self._conn.data_to_send()) + + @staticmethod + def default_response_data(response_size): + sresp = messages_pb2.SimpleResponse() + sresp.payload.body = b'\x00'*response_size + serialized_resp_proto = sresp.SerializeToString() + response_data = b'\x00' + struct.pack('i', len(serialized_resp_proto))[::-1] + serialized_resp_proto + return response_data + + def parse_received_data(self, stream_id): + """ returns a grpc framed string of bytes containing response proto of the size + asked in request """ + recv_buffer = self._recv_buffer[stream_id] + grpc_msg_size = struct.unpack('i',recv_buffer[1:5][::-1])[0] + if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size: + return None + req_proto_str = recv_buffer[5:5+grpc_msg_size] + sr = messages_pb2.SimpleRequest() + sr.ParseFromString(req_proto_str) + logging.info('Parsed request for stream %d: response_size=%s' % (stream_id, sr.response_size)) + return sr diff --git a/test/http2_test/http2_test_server.py b/test/http2_test/http2_test_server.py new file mode 100644 index 0000000000..44e36d34b6 --- /dev/null +++ b/test/http2_test/http2_test_server.py @@ -0,0 +1,90 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""HTTP2 Test Server""" + +import argparse +import logging +import twisted +import twisted.internet +import twisted.internet.endpoints +import twisted.internet.reactor + +import http2_base_server +import test_goaway +import test_max_streams +import test_ping +import test_rst_after_data +import test_rst_after_header +import test_rst_during_data + +_TEST_CASE_MAPPING = { + 'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader, + 'rst_after_data': test_rst_after_data.TestcaseRstStreamAfterData, + 'rst_during_data': test_rst_during_data.TestcaseRstStreamDuringData, + 'goaway': test_goaway.TestcaseGoaway, + 'ping': test_ping.TestcasePing, + 'max_streams': test_max_streams.TestcaseSettingsMaxStreams, +} + +class H2Factory(twisted.internet.protocol.Factory): + def __init__(self, testcase): + logging.info('Creating H2Factory for new connection.') + self._num_streams = 0 + self._testcase = testcase + + def buildProtocol(self, addr): + self._num_streams += 1 + logging.info('New Connection: %d' % self._num_streams) + if not _TEST_CASE_MAPPING.has_key(self._testcase): + logging.error('Unknown test case: %s' % self._testcase) + assert(0) + else: + t = _TEST_CASE_MAPPING[self._testcase] + + if self._testcase == 'goaway': + return t(self._num_streams).get_base_server() + else: + return t().get_base_server() + +if __name__ == '__main__': + logging.basicConfig( + format='%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s', + level=logging.INFO) + parser = argparse.ArgumentParser() + parser.add_argument('--test_case', choices=sorted(_TEST_CASE_MAPPING.keys()), + help='test case to run', required=True) + parser.add_argument('--port', type=int, default=8080, + help='port to run the server (default: 8080)') + args = parser.parse_args() + logging.info('Running test case %s on port %d' % (args.test_case, args.port)) + endpoint = twisted.internet.endpoints.TCP4ServerEndpoint( + twisted.internet.reactor, args.port, backlog=128) + endpoint.listen(H2Factory(args.test_case)) + twisted.internet.reactor.run() diff --git a/test/http2_test/messages_pb2.py b/test/http2_test/messages_pb2.py new file mode 100644 index 0000000000..86cf5a8970 --- /dev/null +++ b/test/http2_test/messages_pb2.py @@ -0,0 +1,661 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
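# [Editor's note -- illustrative aside, not part of this generated file:
#  default_response_data() and parse_received_data() in http2_base_server.py
#  above implement the standard gRPC length-prefixed message framing -- one
#  compression-flag byte, a 4-byte big-endian message length, then the
#  serialized proto. The [::-1] reversal of a native-order struct.pack there is
#  just a manual byte swap; an equivalent, more explicit sketch would be:
#
#    import struct
#
#    def frame_grpc_message(serialized_proto):
#      # 0x00 = uncompressed; '>I' packs the length in network (big-endian) order.
#      return b'\x00' + struct.pack('>I', len(serialized_proto)) + serialized_proto
#
#    def unframe_grpc_message(buf):
#      # Returns the serialized proto carried by one complete gRPC data frame.
#      msg_len = struct.unpack('>I', buf[1:5])[0]
#      return buf[5:5 + msg_len]
#
#  The helper names here are hypothetical and only mirror the framing logic
#  shown earlier in this diff.]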
+# source: messages.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='messages.proto', + package='grpc.testing', + syntax='proto3', + serialized_pb=_b('\n\x0emessages.proto\x12\x0cgrpc.testing\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"@\n\x07Payload\x12\'\n\x04type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\"+\n\nEchoStatus\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xce\x02\n\rSimpleRequest\x12\x30\n\rresponse_type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12\x15\n\rresponse_size\x18\x02 \x01(\x05\x12&\n\x07payload\x18\x03 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x15\n\rfill_username\x18\x04 \x01(\x08\x12\x18\n\x10\x66ill_oauth_scope\x18\x05 \x01(\x08\x12\x34\n\x13response_compressed\x18\x06 \x01(\x0b\x32\x17.grpc.testing.BoolValue\x12\x31\n\x0fresponse_status\x18\x07 \x01(\x0b\x32\x18.grpc.testing.EchoStatus\x12\x32\n\x11\x65xpect_compressed\x18\x08 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"_\n\x0eSimpleResponse\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x13\n\x0boauth_scope\x18\x03 \x01(\t\"w\n\x19StreamingInputCallRequest\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x32\n\x11\x65xpect_compressed\x18\x02 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"=\n\x1aStreamingInputCallResponse\x12\x1f\n\x17\x61ggregated_payload_size\x18\x01 \x01(\x05\"d\n\x12ResponseParameters\x12\x0c\n\x04size\x18\x01 \x01(\x05\x12\x13\n\x0binterval_us\x18\x02 \x01(\x05\x12+\n\ncompressed\x18\x03 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"\xe8\x01\n\x1aStreamingOutputCallRequest\x12\x30\n\rresponse_type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12=\n\x13response_parameters\x18\x02 \x03(\x0b\x32 .grpc.testing.ResponseParameters\x12&\n\x07payload\x18\x03 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x31\n\x0fresponse_status\x18\x07 \x01(\x0b\x32\x18.grpc.testing.EchoStatus\"E\n\x1bStreamingOutputCallResponse\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\"3\n\x0fReconnectParams\x12 \n\x18max_reconnect_backoff_ms\x18\x01 \x01(\x05\"3\n\rReconnectInfo\x12\x0e\n\x06passed\x18\x01 \x01(\x08\x12\x12\n\nbackoff_ms\x18\x02 \x03(\x05*\x1f\n\x0bPayloadType\x12\x10\n\x0c\x43OMPRESSABLE\x10\x00\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_PAYLOADTYPE = _descriptor.EnumDescriptor( + name='PayloadType', + full_name='grpc.testing.PayloadType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='COMPRESSABLE', index=0, number=0, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1303, + serialized_end=1334, +) +_sym_db.RegisterEnumDescriptor(_PAYLOADTYPE) + +PayloadType = enum_type_wrapper.EnumTypeWrapper(_PAYLOADTYPE) +COMPRESSABLE = 0 + + + +_BOOLVALUE = _descriptor.Descriptor( + name='BoolValue', + full_name='grpc.testing.BoolValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='value', full_name='grpc.testing.BoolValue.value', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=32, + serialized_end=58, +) + + +_PAYLOAD = _descriptor.Descriptor( + name='Payload', + full_name='grpc.testing.Payload', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='grpc.testing.Payload.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='body', full_name='grpc.testing.Payload.body', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=60, + serialized_end=124, +) + + +_ECHOSTATUS = _descriptor.Descriptor( + name='EchoStatus', + full_name='grpc.testing.EchoStatus', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='grpc.testing.EchoStatus.code', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='message', full_name='grpc.testing.EchoStatus.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=126, + serialized_end=169, +) + + +_SIMPLEREQUEST = _descriptor.Descriptor( + name='SimpleRequest', + full_name='grpc.testing.SimpleRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='response_type', full_name='grpc.testing.SimpleRequest.response_type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_size', full_name='grpc.testing.SimpleRequest.response_size', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.SimpleRequest.payload', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fill_username', full_name='grpc.testing.SimpleRequest.fill_username', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fill_oauth_scope', full_name='grpc.testing.SimpleRequest.fill_oauth_scope', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_compressed', full_name='grpc.testing.SimpleRequest.response_compressed', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_status', full_name='grpc.testing.SimpleRequest.response_status', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='expect_compressed', full_name='grpc.testing.SimpleRequest.expect_compressed', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=172, + serialized_end=506, +) + + +_SIMPLERESPONSE = _descriptor.Descriptor( + name='SimpleResponse', + full_name='grpc.testing.SimpleResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.SimpleResponse.payload', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='username', full_name='grpc.testing.SimpleResponse.username', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='oauth_scope', full_name='grpc.testing.SimpleResponse.oauth_scope', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=508, + serialized_end=603, +) + + +_STREAMINGINPUTCALLREQUEST = _descriptor.Descriptor( + name='StreamingInputCallRequest', + full_name='grpc.testing.StreamingInputCallRequest', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.StreamingInputCallRequest.payload', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='expect_compressed', full_name='grpc.testing.StreamingInputCallRequest.expect_compressed', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=605, + serialized_end=724, +) + + +_STREAMINGINPUTCALLRESPONSE = _descriptor.Descriptor( + name='StreamingInputCallResponse', + full_name='grpc.testing.StreamingInputCallResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='aggregated_payload_size', full_name='grpc.testing.StreamingInputCallResponse.aggregated_payload_size', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=726, + serialized_end=787, +) + + +_RESPONSEPARAMETERS = _descriptor.Descriptor( + name='ResponseParameters', + full_name='grpc.testing.ResponseParameters', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='size', full_name='grpc.testing.ResponseParameters.size', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='interval_us', full_name='grpc.testing.ResponseParameters.interval_us', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='compressed', full_name='grpc.testing.ResponseParameters.compressed', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=789, + serialized_end=889, +) + + +_STREAMINGOUTPUTCALLREQUEST = _descriptor.Descriptor( + name='StreamingOutputCallRequest', + full_name='grpc.testing.StreamingOutputCallRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='response_type', full_name='grpc.testing.StreamingOutputCallRequest.response_type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_parameters', full_name='grpc.testing.StreamingOutputCallRequest.response_parameters', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.StreamingOutputCallRequest.payload', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response_status', full_name='grpc.testing.StreamingOutputCallRequest.response_status', index=3, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=892, + serialized_end=1124, +) + + +_STREAMINGOUTPUTCALLRESPONSE = _descriptor.Descriptor( + name='StreamingOutputCallResponse', + full_name='grpc.testing.StreamingOutputCallResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='payload', full_name='grpc.testing.StreamingOutputCallResponse.payload', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1126, + serialized_end=1195, +) + + +_RECONNECTPARAMS = _descriptor.Descriptor( + name='ReconnectParams', + full_name='grpc.testing.ReconnectParams', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_reconnect_backoff_ms', full_name='grpc.testing.ReconnectParams.max_reconnect_backoff_ms', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1197, + serialized_end=1248, +) + + +_RECONNECTINFO = _descriptor.Descriptor( + name='ReconnectInfo', + full_name='grpc.testing.ReconnectInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='passed', full_name='grpc.testing.ReconnectInfo.passed', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='backoff_ms', full_name='grpc.testing.ReconnectInfo.backoff_ms', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1250, + serialized_end=1301, +) + +_PAYLOAD.fields_by_name['type'].enum_type = _PAYLOADTYPE +_SIMPLEREQUEST.fields_by_name['response_type'].enum_type = _PAYLOADTYPE +_SIMPLEREQUEST.fields_by_name['payload'].message_type = _PAYLOAD +_SIMPLEREQUEST.fields_by_name['response_compressed'].message_type = _BOOLVALUE +_SIMPLEREQUEST.fields_by_name['response_status'].message_type = _ECHOSTATUS +_SIMPLEREQUEST.fields_by_name['expect_compressed'].message_type = _BOOLVALUE +_SIMPLERESPONSE.fields_by_name['payload'].message_type = _PAYLOAD +_STREAMINGINPUTCALLREQUEST.fields_by_name['payload'].message_type = _PAYLOAD +_STREAMINGINPUTCALLREQUEST.fields_by_name['expect_compressed'].message_type = _BOOLVALUE +_RESPONSEPARAMETERS.fields_by_name['compressed'].message_type = _BOOLVALUE +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_type'].enum_type = _PAYLOADTYPE +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_parameters'].message_type = _RESPONSEPARAMETERS +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['payload'].message_type = _PAYLOAD +_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_status'].message_type = _ECHOSTATUS +_STREAMINGOUTPUTCALLRESPONSE.fields_by_name['payload'].message_type = _PAYLOAD +DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE +DESCRIPTOR.message_types_by_name['Payload'] = _PAYLOAD +DESCRIPTOR.message_types_by_name['EchoStatus'] = _ECHOSTATUS +DESCRIPTOR.message_types_by_name['SimpleRequest'] = _SIMPLEREQUEST +DESCRIPTOR.message_types_by_name['SimpleResponse'] = _SIMPLERESPONSE +DESCRIPTOR.message_types_by_name['StreamingInputCallRequest'] = _STREAMINGINPUTCALLREQUEST +DESCRIPTOR.message_types_by_name['StreamingInputCallResponse'] = _STREAMINGINPUTCALLRESPONSE +DESCRIPTOR.message_types_by_name['ResponseParameters'] = _RESPONSEPARAMETERS +DESCRIPTOR.message_types_by_name['StreamingOutputCallRequest'] = _STREAMINGOUTPUTCALLREQUEST +DESCRIPTOR.message_types_by_name['StreamingOutputCallResponse'] = _STREAMINGOUTPUTCALLRESPONSE +DESCRIPTOR.message_types_by_name['ReconnectParams'] = _RECONNECTPARAMS +DESCRIPTOR.message_types_by_name['ReconnectInfo'] = _RECONNECTINFO +DESCRIPTOR.enum_types_by_name['PayloadType'] = _PAYLOADTYPE + +BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), dict( + DESCRIPTOR = _BOOLVALUE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.BoolValue) + )) +_sym_db.RegisterMessage(BoolValue) + +Payload = _reflection.GeneratedProtocolMessageType('Payload', (_message.Message,), dict( + DESCRIPTOR = _PAYLOAD, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.Payload) + )) +_sym_db.RegisterMessage(Payload) + +EchoStatus = _reflection.GeneratedProtocolMessageType('EchoStatus', (_message.Message,), dict( + DESCRIPTOR = _ECHOSTATUS, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.EchoStatus) + )) +_sym_db.RegisterMessage(EchoStatus) + +SimpleRequest = _reflection.GeneratedProtocolMessageType('SimpleRequest', (_message.Message,), dict( + DESCRIPTOR = _SIMPLEREQUEST, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.SimpleRequest) + )) +_sym_db.RegisterMessage(SimpleRequest) + +SimpleResponse = 
_reflection.GeneratedProtocolMessageType('SimpleResponse', (_message.Message,), dict( + DESCRIPTOR = _SIMPLERESPONSE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.SimpleResponse) + )) +_sym_db.RegisterMessage(SimpleResponse) + +StreamingInputCallRequest = _reflection.GeneratedProtocolMessageType('StreamingInputCallRequest', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGINPUTCALLREQUEST, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingInputCallRequest) + )) +_sym_db.RegisterMessage(StreamingInputCallRequest) + +StreamingInputCallResponse = _reflection.GeneratedProtocolMessageType('StreamingInputCallResponse', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGINPUTCALLRESPONSE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingInputCallResponse) + )) +_sym_db.RegisterMessage(StreamingInputCallResponse) + +ResponseParameters = _reflection.GeneratedProtocolMessageType('ResponseParameters', (_message.Message,), dict( + DESCRIPTOR = _RESPONSEPARAMETERS, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.ResponseParameters) + )) +_sym_db.RegisterMessage(ResponseParameters) + +StreamingOutputCallRequest = _reflection.GeneratedProtocolMessageType('StreamingOutputCallRequest', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGOUTPUTCALLREQUEST, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingOutputCallRequest) + )) +_sym_db.RegisterMessage(StreamingOutputCallRequest) + +StreamingOutputCallResponse = _reflection.GeneratedProtocolMessageType('StreamingOutputCallResponse', (_message.Message,), dict( + DESCRIPTOR = _STREAMINGOUTPUTCALLRESPONSE, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.StreamingOutputCallResponse) + )) +_sym_db.RegisterMessage(StreamingOutputCallResponse) + +ReconnectParams = _reflection.GeneratedProtocolMessageType('ReconnectParams', (_message.Message,), dict( + DESCRIPTOR = _RECONNECTPARAMS, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.ReconnectParams) + )) +_sym_db.RegisterMessage(ReconnectParams) + +ReconnectInfo = _reflection.GeneratedProtocolMessageType('ReconnectInfo', (_message.Message,), dict( + DESCRIPTOR = _RECONNECTINFO, + __module__ = 'messages_pb2' + # @@protoc_insertion_point(class_scope:grpc.testing.ReconnectInfo) + )) +_sym_db.RegisterMessage(ReconnectInfo) + + +# @@protoc_insertion_point(module_scope) diff --git a/test/http2_test/test_goaway.py b/test/http2_test/test_goaway.py new file mode 100644 index 0000000000..61f4beb74a --- /dev/null +++ b/test/http2_test/test_goaway.py @@ -0,0 +1,77 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import time + +import http2_base_server + +class TestcaseGoaway(object): + """ + This test does the following: + Process incoming request normally, i.e. send headers, data and trailers. + Then send a GOAWAY frame with the stream id of the processed request. + It checks that the next request is made on a different TCP connection. + """ + def __init__(self, iteration): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['RequestReceived'] = self.on_request_received + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['SendDone'] = self.on_send_done + self._base_server._handlers['ConnectionLost'] = self.on_connection_lost + self._ready_to_send = False + self._iteration = iteration + + def get_base_server(self): + return self._base_server + + def on_connection_lost(self, reason): + logging.info('Disconnect received. Count %d' % self._iteration) + # _iteration == 2 => Two different connections have been used. + if self._iteration == 2: + self._base_server.on_connection_lost(reason) + + def on_send_done(self, stream_id): + self._base_server.on_send_done_default(stream_id) + logging.info('Sending GOAWAY for stream %d:' % stream_id) + self._base_server._conn.close_connection(error_code=0, additional_data=None, last_stream_id=stream_id) + self._base_server._stream_status[stream_id] = False + + def on_request_received(self, event): + self._ready_to_send = False + self._base_server.on_request_received_default(event) + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + logging.info('Creating response size = %s' % sr.response_size) + response_data = self._base_server.default_response_data(sr.response_size) + self._ready_to_send = True + self._base_server.setup_send(response_data, event.stream_id) diff --git a/test/http2_test/test_max_streams.py b/test/http2_test/test_max_streams.py new file mode 100644 index 0000000000..9942b1bb9a --- /dev/null +++ b/test/http2_test/test_max_streams.py @@ -0,0 +1,63 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import hyperframe.frame +import logging + +import http2_base_server + +class TestcaseSettingsMaxStreams(object): + """ + This test sets MAX_CONCURRENT_STREAMS to 1 and asserts that at any point + only 1 stream is active. + """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['ConnectionMade'] = self.on_connection_made + + def get_base_server(self): + return self._base_server + + def on_connection_made(self): + logging.info('Connection Made') + self._base_server._conn.initiate_connection() + self._base_server._conn.update_settings( + {hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS: 1}) + self._base_server.transport.setTcpNoDelay(True) + self._base_server.transport.write(self._base_server._conn.data_to_send()) + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + logging.info('Creating response of size = %s' % sr.response_size) + response_data = self._base_server.default_response_data(sr.response_size) + self._base_server.setup_send(response_data, event.stream_id) + # TODO (makdharma): Add assertion to check number of live streams diff --git a/test/http2_test/test_ping.py b/test/http2_test/test_ping.py new file mode 100644 index 0000000000..da41fd01bb --- /dev/null +++ b/test/http2_test/test_ping.py @@ -0,0 +1,67 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging + +import http2_base_server + +class TestcasePing(object): + """ + This test injects PING frames before and after header and data. Keeps count + of outstanding ping response and asserts when the count is non-zero at the + end of the test. + """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['RequestReceived'] = self.on_request_received + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['ConnectionLost'] = self.on_connection_lost + + def get_base_server(self): + return self._base_server + + def on_request_received(self, event): + self._base_server.default_ping() + self._base_server.on_request_received_default(event) + self._base_server.default_ping() + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + logging.info('Creating response size = %s' % sr.response_size) + response_data = self._base_server.default_response_data(sr.response_size) + self._base_server.default_ping() + self._base_server.setup_send(response_data, event.stream_id) + self._base_server.default_ping() + + def on_connection_lost(self, reason): + logging.info('Disconnect received. Ping Count %d' % self._base_server._outstanding_pings) + assert(self._base_server._outstanding_pings == 0) + self._base_server.on_connection_lost(reason) diff --git a/tools/run_tests/prepare_travis.sh b/test/http2_test/test_rst_after_data.py index 10546535e8..9236025395 100755..100644 --- a/tools/run_tests/prepare_travis.sh +++ b/test/http2_test/test_rst_after_data.py @@ -1,5 +1,4 @@ -#!/bin/bash -# Copyright 2015, Google Inc. +# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -28,40 +27,31 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -cd `dirname $0`/../.. -grpc_dir=`pwd` +import http2_base_server -distrib=`md5sum /etc/issue | cut -f1 -d\ ` -echo "Configuring for distribution $distrib" -git submodule | while read sha path extra ; do - cd /tmp - name=`basename $path` - file=$name-$sha-$CONFIG-prebuilt-$distrib.tar.gz - echo -n "Looking for $file ..." - url=http://storage.googleapis.com/grpc-prebuilt-packages/$file - wget -q $url && ( - echo " Found." - tar xfz $file - ) || echo " Not found." -done +class TestcaseRstStreamAfterData(object): + """ + In response to an incoming request, this test sends headers, followed by + data, followed by a reset stream frame. Client asserts that the RPC failed. + Client needs to deliver the complete message to the application layer. 
+ """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['SendDone'] = self.on_send_done -mkdir -p bins/$CONFIG/protobuf -mkdir -p libs/$CONFIG/protobuf -mkdir -p libs/$CONFIG/openssl + def get_base_server(self): + return self._base_server -function cpt { - cp /tmp/prebuilt/$1 $2/$CONFIG/$3 - touch $2/$CONFIG/$3/`basename $1` -} + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + response_data = self._base_server.default_response_data(sr.response_size) + self._ready_to_send = True + self._base_server.setup_send(response_data, event.stream_id) + # send reset stream -if [ -e /tmp/prebuilt/bin/protoc ] ; then - touch third_party/protobuf/configure - cpt bin/protoc bins protobuf - cpt lib/libprotoc.a libs protobuf - cpt lib/libprotobuf.a libs protobuf -fi - -if [ -e /tmp/prebuilt/lib/libssl.a ] ; then - cpt lib/libcrypto.a libs openssl - cpt lib/libssl.a libs openssl -fi + def on_send_done(self, stream_id): + self._base_server.send_reset_stream() + self._base_server._stream_status[stream_id] = False diff --git a/test/http2_test/test_rst_after_header.py b/test/http2_test/test_rst_after_header.py new file mode 100644 index 0000000000..41e1adb8ad --- /dev/null +++ b/test/http2_test/test_rst_after_header.py @@ -0,0 +1,48 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import http2_base_server + +class TestcaseRstStreamAfterHeader(object): + """ + In response to an incoming request, this test sends headers, followed by + a reset stream frame. Client asserts that the RPC failed. 
+ """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['RequestReceived'] = self.on_request_received + + def get_base_server(self): + return self._base_server + + def on_request_received(self, event): + # send initial headers + self._base_server.on_request_received_default(event) + # send reset stream + self._base_server.send_reset_stream() diff --git a/test/http2_test/test_rst_during_data.py b/test/http2_test/test_rst_during_data.py new file mode 100644 index 0000000000..7c859db267 --- /dev/null +++ b/test/http2_test/test_rst_during_data.py @@ -0,0 +1,58 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import http2_base_server + +class TestcaseRstStreamDuringData(object): + """ + In response to an incoming request, this test sends headers, followed by + some data, followed by a reset stream frame. Client asserts that the RPC + failed and does not deliver the message to the application. 
+ """ + def __init__(self): + self._base_server = http2_base_server.H2ProtocolBaseServer() + self._base_server._handlers['DataReceived'] = self.on_data_received + self._base_server._handlers['SendDone'] = self.on_send_done + + def get_base_server(self): + return self._base_server + + def on_data_received(self, event): + self._base_server.on_data_received_default(event) + sr = self._base_server.parse_received_data(event.stream_id) + if sr: + response_data = self._base_server.default_response_data(sr.response_size) + self._ready_to_send = True + response_len = len(response_data) + truncated_response_data = response_data[0:response_len/2] + self._base_server.setup_send(truncated_response_data, event.stream_id) + + def on_send_done(self, stream_id): + self._base_server.send_reset_stream() + self._base_server._stream_status[stream_id] = False diff --git a/third_party/google_benchmark b/third_party/benchmark -Subproject 44c25c892a6229b20db7cd9dc05584ea865896d +Subproject 44c25c892a6229b20db7cd9dc05584ea865896d diff --git a/tools/buildgen/generate_build_additions.sh b/tools/buildgen/generate_build_additions.sh index 1ea47042f4..a4373ed350 100644 --- a/tools/buildgen/generate_build_additions.sh +++ b/tools/buildgen/generate_build_additions.sh @@ -28,9 +28,11 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +set -e + gen_build_yaml_dirs=" \ src/boringssl \ - src/google_benchmark \ + src/benchmark \ src/proto \ src/zlib \ test/core/bad_client \ diff --git a/tools/buildgen/generate_projects.py b/tools/buildgen/generate_projects.py index 5e78ad52d6..f8ddaf4963 100755 --- a/tools/buildgen/generate_projects.py +++ b/tools/buildgen/generate_projects.py @@ -36,7 +36,7 @@ import shutil import sys import tempfile import multiprocessing -sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'run_tests')) +sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils')) assert sys.argv[1:], 'run generate_projects.sh instead of this directly' diff --git a/tools/codegen/core/gen_nano_proto.sh b/tools/codegen/core/gen_nano_proto.sh index df107c208f..99e49814b8 100755 --- a/tools/codegen/core/gen_nano_proto.sh +++ b/tools/codegen/core/gen_nano_proto.sh @@ -42,46 +42,6 @@ # 4: Output dir not an absolute path. # 5: Couldn't create output directory (2nd argument). -read -r -d '' COPYRIGHT <<'EOF' -/* - * - * Copyright <YEAR>, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -EOF - -CURRENT_YEAR=$(date +%Y) -COPYRIGHT_FILE=$(mktemp) -echo "${COPYRIGHT/<YEAR>/$CURRENT_YEAR}" > $COPYRIGHT_FILE - set -ex if [ $# -lt 2 ] || [ $# -gt 3 ]; then echo "Usage: $0 <input.proto> <absolute path to output dir> [grpc path]" @@ -143,13 +103,6 @@ readonly UC_PROTO_BASENAME=`echo $PROTO_BASENAME | tr [a-z] [A-Z]` sed -i "s:PB_${UC_PROTO_BASENAME}_PB_H_INCLUDED:GRPC_${INCLUDE_GUARD_BASE}_${UC_PROTO_BASENAME}_PB_H:g" \ "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" -# prepend copyright -TMPFILE=$(mktemp) -cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c" > $TMPFILE -mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c" -cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" > $TMPFILE -mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" - deactivate rm -rf $VENV_DIR diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py index f06e5f1d1a..718bb563f3 100755 --- a/tools/distrib/check_copyright.py +++ b/tools/distrib/check_copyright.py @@ -92,9 +92,23 @@ LICENSE_PREFIX = { 'LICENSE': '', } -KNOWN_BAD = set([ +_EXEMPT = frozenset(( + # Generated protocol compiler output. + 'examples/python/helloworld/helloworld_pb2.py', + 'examples/python/helloworld/helloworld_pb2_grpc.py', + 'examples/python/multiplex/helloworld_pb2.py', + 'examples/python/multiplex/helloworld_pb2_grpc.py', + 'examples/python/multiplex/route_guide_pb2.py', + 'examples/python/multiplex/route_guide_pb2_grpc.py', + 'examples/python/route_guide/route_guide_pb2.py', + 'examples/python/route_guide/route_guide_pb2_grpc.py', + + 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h', + 'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c', + + # An older file originally from outside gRPC. 'src/php/tests/bootstrap.php', -]) +)) RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+), Google Inc\.' 
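Aside (not part of the patch): the RE_YEAR pattern that closes the hunk above accepts both single-year and year-range copyright headers, which is what lets check_copyright.py validate files first touched in different years. A minimal, hedged sketch of how that regex behaves, written as a standalone snippet for illustration only:

```python
import re

# Same pattern as RE_YEAR in tools/distrib/check_copyright.py.
RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+), Google Inc\.'

for header in ('Copyright 2016, Google Inc.',
               'Copyright 2015-2016, Google Inc.'):
    m = re.search(RE_YEAR, header)
    # 'first_year' keeps its trailing '-' and is None for single-year headers;
    # 'last_year' always holds the most recent year.
    print(m.group('first_year'), m.group('last_year'))
```

The named groups make it easy for the checker to accept either form while still pinning the trailing ", Google Inc." text.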
@@ -140,7 +154,8 @@ except subprocess.CalledProcessError: sys.exit(0) for filename in filename_list: - if filename in KNOWN_BAD: continue + if filename in _EXEMPT: + continue ext = os.path.splitext(filename)[1] base = os.path.basename(filename) if ext in RE_LICENSE: diff --git a/tools/distrib/check_nanopb_output.sh b/tools/distrib/check_nanopb_output.sh index c0707051a6..eb64e23daf 100755 --- a/tools/distrib/check_nanopb_output.sh +++ b/tools/distrib/check_nanopb_output.sh @@ -37,7 +37,7 @@ readonly PROTOBUF_INSTALL_PREFIX="$(mktemp -d)" pushd third_party/protobuf ./autogen.sh ./configure --prefix="$PROTOBUF_INSTALL_PREFIX" -make +make -j 8 make install #ldconfig popd @@ -51,7 +51,7 @@ fi # stack up and change to nanopb's proto generator directory pushd third_party/nanopb/generator/proto export PATH="$PROTOC_BIN_PATH:$PATH" -make +make -j 8 # back to the root directory popd diff --git a/tools/distrib/clang_format_code.sh b/tools/distrib/clang_format_code.sh index 858e074898..13e018709f 100755 --- a/tools/distrib/clang_format_code.sh +++ b/tools/distrib/clang_format_code.sh @@ -32,9 +32,15 @@ set -ex # change to root directory cd $(dirname $0)/../.. +REPO_ROOT=$(pwd) -# build clang-format docker image -docker build -t grpc_clang_format tools/dockerfile/grpc_clang_format +if [ "$CLANG_FORMAT_SKIP_DOCKER" == "" ] +then + # build clang-format docker image + docker build -t grpc_clang_format tools/dockerfile/grpc_clang_format -# run clang-format against the checked out codebase -docker run -e TEST=$TEST -e CHANGED_FILES="$CHANGED_FILES" --rm=true -v ${HOST_GIT_ROOT:-`pwd`}:/local-code -t grpc_clang_format /clang_format_all_the_things.sh + # run clang-format against the checked out codebase + docker run -e TEST=$TEST -e CHANGED_FILES="$CHANGED_FILES" -e CLANG_FORMAT_ROOT="/local-code" --rm=true -v "${REPO_ROOT}":/local-code -t grpc_clang_format /clang_format_all_the_things.sh +else + CLANG_FORMAT_ROOT="${REPO_ROOT}" tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh +fi diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py index 622317920d..38ffcd6e0e 100755 --- a/tools/distrib/python/docgen.py +++ b/tools/distrib/python/docgen.py @@ -94,6 +94,7 @@ if args.submit: # specified repository, edit it, and push it. It's up to the user to then go # onto GitHub and make a PR against grpc/grpc:gh-pages. 
repo_parent_dir = tempfile.mkdtemp() + print('Documentation parent directory: {}'.format(repo_parent_dir)) repo_dir = os.path.join(repo_parent_dir, 'grpc') python_doc_dir = os.path.join(repo_dir, 'python') doc_branch = args.doc_branch diff --git a/tools/distrib/python/grpcio_tools/MANIFEST.in b/tools/distrib/python/grpcio_tools/MANIFEST.in index 7712834d64..11ce367747 100644 --- a/tools/distrib/python/grpcio_tools/MANIFEST.in +++ b/tools/distrib/python/grpcio_tools/MANIFEST.in @@ -2,6 +2,6 @@ include grpc_version.py include protoc_deps.py include protoc_lib_deps.py include README.rst -graft grpc +graft grpc_tools graft grpc_root graft third_party diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/__init__.py b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py index d5ad73a74a..d5ad73a74a 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/__init__.py +++ b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/_protoc_compiler.pyx b/tools/distrib/python/grpcio_tools/grpc_tools/_protoc_compiler.pyx index a6530127c0..81034fad5e 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/_protoc_compiler.pyx +++ b/tools/distrib/python/grpcio_tools/grpc_tools/_protoc_compiler.pyx @@ -29,7 +29,7 @@ from libc cimport stdlib -cdef extern from "grpc/tools/main.h": +cdef extern from "grpc_tools/main.h": int protoc_main(int argc, char *argv[]) def run_main(list args not None): diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/command.py b/tools/distrib/python/grpcio_tools/grpc_tools/command.py index 424fd90411..31b3331a66 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/command.py +++ b/tools/distrib/python/grpcio_tools/grpc_tools/command.py @@ -33,7 +33,7 @@ import sys import setuptools -from grpc.tools import protoc +from grpc_tools import protoc def build_package_protos(package_root): @@ -45,11 +45,11 @@ def build_package_protos(package_root): proto_files.append(os.path.abspath(os.path.join(root, filename))) well_known_protos_include = pkg_resources.resource_filename( - 'grpc.tools', '_proto') + 'grpc_tools', '_proto') for proto_file in proto_files: command = [ - 'grpc.tools.protoc', + 'grpc_tools.protoc', '--proto_path={}'.format(inclusion_root), '--proto_path={}'.format(well_known_protos_include), '--python_out={}'.format(inclusion_root), diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/main.cc b/tools/distrib/python/grpcio_tools/grpc_tools/main.cc index 8391839513..0c2fa3180a 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/main.cc +++ b/tools/distrib/python/grpcio_tools/grpc_tools/main.cc @@ -32,7 +32,7 @@ #include "src/compiler/python_generator.h" -#include "grpc/tools/main.h" +#include "grpc_tools/main.h" int protoc_main(int argc, char* argv[]) { google::protobuf::compiler::CommandLineInterface cli; diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/main.h b/tools/distrib/python/grpcio_tools/grpc_tools/main.h index ea2860ff02..ea2860ff02 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/main.h +++ b/tools/distrib/python/grpcio_tools/grpc_tools/main.h diff --git a/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py index e1256a7dd9..63fddb2f06 100644 --- a/tools/distrib/python/grpcio_tools/grpc/tools/protoc.py +++ b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py @@ -32,7 +32,7 @@ import pkg_resources import sys -from grpc.tools import _protoc_compiler +from grpc_tools import _protoc_compiler def 
main(command_arguments): """Run the protocol buffer compiler with the given command-line arguments. @@ -45,5 +45,5 @@ def main(command_arguments): return _protoc_compiler.run_main(command_arguments) if __name__ == '__main__': - proto_include = pkg_resources.resource_filename('grpc.tools', '_proto') + proto_include = pkg_resources.resource_filename('grpc_tools', '_proto') sys.exit(main(sys.argv + ['-I{}'.format(proto_include)])) diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py index a07a586fb2..502d7ef27b 100644 --- a/tools/distrib/python/grpcio_tools/setup.py +++ b/tools/distrib/python/grpcio_tools/setup.py @@ -108,7 +108,7 @@ PROTO_FILES = [ CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE) PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE) -GRPC_PYTHON_TOOLS_PACKAGE = 'grpc.tools' +GRPC_PYTHON_TOOLS_PACKAGE = 'grpc_tools' GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto' DEFINE_MACROS = () @@ -154,16 +154,16 @@ def package_data(): def extension_modules(): if BUILD_WITH_CYTHON: - plugin_sources = [os.path.join('grpc', 'tools', '_protoc_compiler.pyx')] + plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')] else: - plugin_sources = [os.path.join('grpc', 'tools', '_protoc_compiler.cpp')] + plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')] plugin_sources += [ - os.path.join('grpc', 'tools', 'main.cc'), + os.path.join('grpc_tools', 'main.cc'), os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')] + [ os.path.join(CC_INCLUDE, cc_file) for cc_file in CC_FILES] plugin_ext = extension.Extension( - name='grpc.tools._protoc_compiler', + name='grpc_tools._protoc_compiler', sources=plugin_sources, include_dirs=[ '.', @@ -184,12 +184,11 @@ def extension_modules(): return extensions setuptools.setup( - name='grpcio_tools', + name='grpcio-tools', version=grpc_version.VERSION, license='3-clause BSD', ext_modules=extension_modules(), packages=setuptools.find_packages('.'), - namespace_packages=['grpc'], install_requires=[ 'protobuf>=3.0.0', 'grpcio>={version}'.format(version=grpc_version.VERSION), diff --git a/tools/dockerfile/grpc_artifact_protoc/Dockerfile b/tools/dockerfile/grpc_artifact_protoc/Dockerfile index 1bbc6e021b..2904a8fa51 100644 --- a/tools/dockerfile/grpc_artifact_protoc/Dockerfile +++ b/tools/dockerfile/grpc_artifact_protoc/Dockerfile @@ -59,5 +59,11 @@ RUN yum install -y devtoolset-1.1 \ devtoolset-1.1-libstdc++-devel \ devtoolset-1.1-libstdc++-devel.i686 || true +# Update Git to version >1.7 to allow cloning submodules with --reference arg. 
+RUN yum remove -y git +RUN yum install -y epel-release +RUN yum install -y https://centos6.iuscommunity.org/ius-release.rpm +RUN yum install -y git2u + # Start in devtoolset environment that uses GCC 4.7 CMD ["scl", "enable", "devtoolset-1.1", "bash"] diff --git a/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile b/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile index 1d4e8e1a4a..69e624aa41 100644 --- a/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile +++ b/tools/dockerfile/grpc_artifact_python_manylinux_x64/Dockerfile @@ -34,6 +34,19 @@ FROM quay.io/pypa/manylinux1_x86_64 # Update the package manager RUN yum update -y +############################################################# +# Update Git to allow cloning submodules with --reference arg +RUN yum remove -y git +RUN yum install -y curl-devel expat-devel gettext-devel openssl-devel zlib-devel gcc +RUN cd /usr/src && \ + wget https://kernel.org/pub/software/scm/git/git-2.0.5.tar.gz && \ + tar xzf git-2.0.5.tar.gz +RUN cd /usr/src/git-2.0.5 && \ + make prefix=/usr/local/git all && \ + make prefix=/usr/local/git install +ENV PATH /usr/local/git/bin:$PATH +RUN source /etc/bashrc + ################################### # Install Python build requirements RUN /opt/python/cp27-cp27m/bin/pip install cython diff --git a/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile b/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile index 810499695e..9af80078ed 100644 --- a/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile +++ b/tools/dockerfile/grpc_artifact_python_manylinux_x86/Dockerfile @@ -34,6 +34,19 @@ FROM quay.io/pypa/manylinux1_i686 # Update the package manager RUN yum update -y +############################################################# +# Update Git to allow cloning submodules with --reference arg +RUN yum remove -y git +RUN yum install -y curl-devel expat-devel gettext-devel openssl-devel zlib-devel gcc +RUN cd /usr/src && \ + wget https://kernel.org/pub/software/scm/git/git-2.0.5.tar.gz && \ + tar xzf git-2.0.5.tar.gz +RUN cd /usr/src/git-2.0.5 && \ + make prefix=/usr/local/git all && \ + make prefix=/usr/local/git install +ENV PATH /usr/local/git/bin:$PATH +RUN source /etc/bashrc + ################################### # Install Python build requirements RUN /opt/python/cp27-cp27m/bin/pip install cython diff --git a/tools/dockerfile/grpc_clang_format/Dockerfile b/tools/dockerfile/grpc_clang_format/Dockerfile index ab58017a02..85f5e4db74 100644 --- a/tools/dockerfile/grpc_clang_format/Dockerfile +++ b/tools/dockerfile/grpc_clang_format/Dockerfile @@ -27,13 +27,13 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-FROM ubuntu:wily -RUN apt-get update -RUN apt-get -y install wget +FROM ubuntu:15.10 + +RUN apt-get update && apt-get -y install wget RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - -RUN apt-get update -RUN apt-get -y install clang-format-3.8 +RUN apt-get update && apt-get -y install clang-format-3.8 + ADD clang_format_all_the_things.sh / CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"] diff --git a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh index 462c65ab5e..c6e4aabfe6 100755 --- a/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh +++ b/tools/dockerfile/grpc_clang_format/clang_format_all_the_things.sh @@ -44,7 +44,7 @@ for dir in $DIRS do for glob in $GLOB do - files="$files `find /local-code/$dir -name $glob -and -not -name *.generated.* -and -not -name *.pb.h -and -not -name *.pb.c -and -not -name *.pb.cc`" + files="$files `find ${CLANG_FORMAT_ROOT}/$dir -name $glob -and -not -name *.generated.* -and -not -name *.pb.h -and -not -name *.pb.c -and -not -name *.pb.cc`" done done @@ -54,7 +54,7 @@ if [ -n "$CHANGED_FILES" ]; then files=$(comm -12 <(echo $files | tr ' ' '\n' | sort -u) <(echo $CHANGED_FILES | tr ' ' '\n' | sort -u)) fi -if [ "x$TEST" = "x" ] +if [ "$TEST" == "" ] then echo $files | xargs $CLANG_FORMAT -i else diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile index 6b19ac845b..811384fda1 100644 --- a/tools/dockerfile/test/sanity/Dockerfile +++ b/tools/dockerfile/test/sanity/Dockerfile @@ -97,17 +97,27 @@ RUN apt-get install -y openjdk-8-jdk # ./compile.sh without a local protoc dependency # TODO(mattkwong): install dependencies to support latest Bazel version if newer # version is needed -RUN git clone https://github.com/bazelbuild/bazel.git /bazel && cd /bazel && git checkout tags/0.4.1 && ./compile.sh +RUN git clone https://github.com/bazelbuild/bazel.git /bazel && \ + cd /bazel && git checkout tags/0.4.1 && ./compile.sh RUN ln -s /bazel/output/bazel /bin/ -#=================== -# Docker "inception" -# Note this is quite the ugly hack. -# This makes sure that the docker binary we inject has its dependencies. -RUN curl https://get.docker.com/ | sh -RUN apt-get remove --purge -y docker-engine +RUN apt-get update && apt-get -y install wget +RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list +RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - +RUN apt-get update && apt-get -y install clang-format-3.8 + +# Prepare ccache +RUN ln -s /usr/bin/ccache /usr/local/bin/gcc +RUN ln -s /usr/bin/ccache /usr/local/bin/g++ +RUN ln -s /usr/bin/ccache /usr/local/bin/cc +RUN ln -s /usr/bin/ccache /usr/local/bin/c++ +RUN ln -s /usr/bin/ccache /usr/local/bin/clang +RUN ln -s /usr/bin/ccache /usr/local/bin/clang++ + RUN mkdir /var/local/jenkins + # Define the default command. 
CMD ["bash"] diff --git a/tools/gce/linux_performance_worker_init.sh b/tools/gce/linux_performance_worker_init.sh index 523749ee81..ab29e015e0 100755 --- a/tools/gce/linux_performance_worker_init.sh +++ b/tools/gce/linux_performance_worker_init.sh @@ -150,3 +150,19 @@ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz # Put go on the PATH, keep the usual installation dir sudo ln -s /usr/local/go/bin/go /usr/bin/go rm go$GO_VERSION.$OS-$ARCH.tar.gz + +# Install perf, to profile benchmarks. (need to get the right linux-tools-<> for kernel version) +sudo apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r` +# see http://unix.stackexchange.com/questions/14227/do-i-need-root-admin-permissions-to-run-userspace-perf-tool-perf-events-ar +echo 0 | sudo tee /proc/sys/kernel/perf_event_paranoid +# see http://stackoverflow.com/questions/21284906/perf-couldnt-record-kernel-reference-relocation-symbol +echo 0 | sudo tee /proc/sys/kernel/kptr_restrict + +# qps workers under perf appear to need a lot of mmap pages under certain scenarios and perf args in +# order to not lose perf events or time out +echo 4096 | sudo tee /proc/sys/kernel/perf_event_mlock_kb + +# Fetch scripts to generate flame graphs from perf data collected +# on benchmarks +git clone -v https://github.com/brendangregg/FlameGraph ~/FlameGraph + diff --git a/src/python/grpcio_reflection/grpc/__init__.py b/tools/run_tests/artifacts/__init__.py index 70ac5edd48..100a624dc9 100644 --- a/src/python/grpcio_reflection/grpc/__init__.py +++ b/tools/run_tests/artifacts/__init__.py @@ -26,5 +26,3 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -__import__('pkg_resources').declare_namespace(__name__) diff --git a/tools/run_tests/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py index 65d34e17e1..005d99790a 100644 --- a/tools/run_tests/artifact_targets.py +++ b/tools/run_tests/artifacts/artifact_targets.py @@ -35,7 +35,8 @@ import random import string import sys -import jobset +sys.path.insert(0, os.path.abspath('..')) +import python_utils.jobset as jobset def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, @@ -113,7 +114,7 @@ class PythonArtifact: environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE' return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch, - 'tools/run_tests/build_artifact_python.sh', + 'tools/run_tests/artifacts/build_artifact_python.sh', environ=environ, timeout_seconds=60*60) elif self.platform == 'windows': @@ -125,7 +126,7 @@ class PythonArtifact: # seed. 
We create a random temp-dir here dir = ''.join(random.choice(string.ascii_uppercase) for _ in range(10)) return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_python.bat', + ['tools\\run_tests\\artifacts\\build_artifact_python.bat', self.py_version, '32' if self.arch == 'x86' else '64', dir @@ -136,7 +137,7 @@ class PythonArtifact: environ['PYTHON'] = self.py_version environ['SKIP_PIP_INSTALL'] = 'TRUE' return create_jobspec(self.name, - ['tools/run_tests/build_artifact_python.sh'], + ['tools/run_tests/artifacts/build_artifact_python.sh'], environ=environ) def __str__(self): @@ -165,11 +166,11 @@ class RubyArtifact: environ['SETARCH_CMD'] = 'linux32' return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_linux_%s' % self.arch, - 'tools/run_tests/build_artifact_ruby.sh', + 'tools/run_tests/artifacts/build_artifact_ruby.sh', environ=environ) else: return create_jobspec(self.name, - ['tools/run_tests/build_artifact_ruby.sh']) + ['tools/run_tests/artifacts/build_artifact_ruby.sh']) class CSharpExtArtifact: @@ -184,7 +185,7 @@ class CSharpExtArtifact: def pre_build_jobspecs(self): if self.platform == 'windows': return [create_jobspec('prebuild_%s' % self.name, - ['tools\\run_tests\\pre_build_c.bat'], + ['tools\\run_tests\\helper_scripts\\pre_build_c.bat'], shell=True, flake_retries=5, timeout_retries=2)] @@ -195,7 +196,7 @@ class CSharpExtArtifact: if self.platform == 'windows': msbuild_platform = 'Win32' if self.arch == 'x86' else self.arch return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_csharp.bat', + ['tools\\run_tests\\artifacts\\build_artifact_csharp.bat', 'vsprojects\\grpc_csharp_ext.sln', '/p:Configuration=Release', '/p:PlatformToolset=v120', @@ -210,14 +211,14 @@ class CSharpExtArtifact: if self.platform == 'linux': return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_linux_%s' % self.arch, - 'tools/run_tests/build_artifact_csharp.sh', + 'tools/run_tests/artifacts/build_artifact_csharp.sh', environ=environ) else: archflag = _ARCH_FLAG_MAP[self.arch] environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG) environ['LDFLAGS'] += ' %s' % archflag return create_jobspec(self.name, - ['tools/run_tests/build_artifact_csharp.sh'], + ['tools/run_tests/artifacts/build_artifact_csharp.sh'], environ=environ) def __str__(self): @@ -245,7 +246,7 @@ class NodeExtArtifact: def build_jobspec(self): if self.platform == 'windows': return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_node.bat', + ['tools\\run_tests\\artifacts\\build_artifact_node.bat', self.gyp_arch], shell=True) else: @@ -253,10 +254,10 @@ class NodeExtArtifact: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch), - 'tools/run_tests/build_artifact_node.sh {}'.format(self.gyp_arch)) + 'tools/run_tests/artifacts/build_artifact_node.sh {}'.format(self.gyp_arch)) else: return create_jobspec(self.name, - ['tools/run_tests/build_artifact_node.sh', + ['tools/run_tests/artifacts/build_artifact_node.sh', self.gyp_arch]) class PHPArtifact: @@ -276,10 +277,10 @@ class PHPArtifact: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch), - 'tools/run_tests/build_artifact_php.sh') + 'tools/run_tests/artifacts/build_artifact_php.sh') else: return create_jobspec(self.name, - ['tools/run_tests/build_artifact_php.sh']) + ['tools/run_tests/artifacts/build_artifact_php.sh']) class ProtocArtifact: """Builds protoc and protoc-plugin artifacts""" @@ -306,18 +307,18 @@ 
class ProtocArtifact: if self.platform == 'linux': return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_protoc', - 'tools/run_tests/build_artifact_protoc.sh', + 'tools/run_tests/artifacts/build_artifact_protoc.sh', environ=environ) else: environ['CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG return create_jobspec(self.name, - ['tools/run_tests/build_artifact_protoc.sh'], + ['tools/run_tests/artifacts/build_artifact_protoc.sh'], environ=environ) else: generator = 'Visual Studio 12 Win64' if self.arch == 'x64' else 'Visual Studio 12' vcplatform = 'x64' if self.arch == 'x64' else 'Win32' return create_jobspec(self.name, - ['tools\\run_tests\\build_artifact_protoc.bat'], + ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'], environ={'generator': generator, 'Platform': vcplatform}) diff --git a/tools/run_tests/build_artifact_csharp.bat b/tools/run_tests/artifacts/build_artifact_csharp.bat index 24c8d485f9..24c8d485f9 100644 --- a/tools/run_tests/build_artifact_csharp.bat +++ b/tools/run_tests/artifacts/build_artifact_csharp.bat diff --git a/tools/run_tests/build_artifact_csharp.sh b/tools/run_tests/artifacts/build_artifact_csharp.sh index 7438713f5c..aed04b2745 100755 --- a/tools/run_tests/build_artifact_csharp.sh +++ b/tools/run_tests/artifacts/build_artifact_csharp.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. make grpc_csharp_ext diff --git a/tools/run_tests/build_artifact_node.bat b/tools/run_tests/artifacts/build_artifact_node.bat index 57d55ef19e..2e0ecd21d0 100644 --- a/tools/run_tests/build_artifact_node.bat +++ b/tools/run_tests/artifacts/build_artifact_node.bat @@ -27,7 +27,7 @@ @rem (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE @rem OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -set node_versions=0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 +set node_versions=0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0 set PATH=%PATH%;C:\Program Files\nodejs\;%APPDATA%\npm diff --git a/tools/run_tests/build_artifact_node.sh b/tools/run_tests/artifacts/build_artifact_node.sh index 9d06472aa4..1066ebde19 100755 --- a/tools/run_tests/build_artifact_node.sh +++ b/tools/run_tests/artifacts/build_artifact_node.sh @@ -34,7 +34,7 @@ source ~/.nvm/nvm.sh nvm use 4 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. rm -rf build || true @@ -42,7 +42,7 @@ mkdir -p artifacts npm update -node_versions=( 0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 ) +node_versions=( 0.12.0 1.0.0 1.1.0 2.0.0 3.0.0 4.0.0 5.0.0 6.0.0 7.0.0 ) for version in ${node_versions[@]} do diff --git a/tools/run_tests/build_artifact_php.sh b/tools/run_tests/artifacts/build_artifact_php.sh index 669447fa9a..c8d55860c1 100755 --- a/tools/run_tests/build_artifact_php.sh +++ b/tools/run_tests/artifacts/build_artifact_php.sh @@ -31,7 +31,7 @@ PHP_TARGET_ARCH=$1 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. mkdir -p artifacts diff --git a/tools/run_tests/build_artifact_protoc.bat b/tools/run_tests/artifacts/build_artifact_protoc.bat index 3246a903d0..fd93318833 100644 --- a/tools/run_tests/build_artifact_protoc.bat +++ b/tools/run_tests/artifacts/build_artifact_protoc.bat @@ -30,15 +30,16 @@ mkdir artifacts setlocal -cd third_party/protobuf +cd third_party/protobuf/cmake -cd cmake -cmake -G "%generator%" -Dprotobuf_BUILD_TESTS=OFF || goto :error +mkdir build & cd build +mkdir solution & cd solution +cmake -G "%generator%" -Dprotobuf_BUILD_TESTS=OFF ../../.. 
|| goto :error endlocal call vsprojects/build_plugins.bat || goto :error -xcopy /Y third_party\protobuf\cmake\Release\protoc.exe artifacts\ || goto :error +xcopy /Y third_party\protobuf\cmake\build\solution\Release\protoc.exe artifacts\ || goto :error xcopy /Y vsprojects\Release\*_plugin.exe artifacts\ || xcopy /Y vsprojects\x64\Release\*_plugin.exe artifacts\ || goto :error goto :EOF diff --git a/tools/run_tests/build_artifact_protoc.sh b/tools/run_tests/artifacts/build_artifact_protoc.sh index 161d3a84d6..26c2280eff 100755 --- a/tools/run_tests/build_artifact_protoc.sh +++ b/tools/run_tests/artifacts/build_artifact_protoc.sh @@ -33,7 +33,7 @@ source scl_source enable devtoolset-1.1 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. make plugins diff --git a/tools/run_tests/build_artifact_python.bat b/tools/run_tests/artifacts/build_artifact_python.bat index 246713a6ce..246713a6ce 100644 --- a/tools/run_tests/build_artifact_python.bat +++ b/tools/run_tests/artifacts/build_artifact_python.bat diff --git a/tools/run_tests/build_artifact_python.sh b/tools/run_tests/artifacts/build_artifact_python.sh index 2a1d41fd68..5a5506029a 100755 --- a/tools/run_tests/build_artifact_python.sh +++ b/tools/run_tests/artifacts/build_artifact_python.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. export GRPC_PYTHON_USE_CUSTOM_BDIST=0 export GRPC_PYTHON_BUILD_WITH_CYTHON=1 diff --git a/tools/run_tests/build_artifact_ruby.sh b/tools/run_tests/artifacts/build_artifact_ruby.sh index 2d97b4068b..019efb01fd 100755 --- a/tools/run_tests/build_artifact_ruby.sh +++ b/tools/run_tests/artifacts/build_artifact_ruby.sh @@ -31,7 +31,7 @@ set -ex SYSTEM=`uname | cut -f 1 -d_` -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. set +ex [[ -s /etc/profile.d/rvm.sh ]] && . /etc/profile.d/rvm.sh [[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" diff --git a/tools/run_tests/build_package_node.sh b/tools/run_tests/artifacts/build_package_node.sh index a5636cf87a..8b5e8c0bc1 100755 --- a/tools/run_tests/build_package_node.sh +++ b/tools/run_tests/artifacts/build_package_node.sh @@ -33,7 +33,7 @@ source ~/.nvm/nvm.sh nvm use 4 set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. base=$(pwd) diff --git a/tools/run_tests/build_package_php.sh b/tools/run_tests/artifacts/build_package_php.sh index 56e3319ed9..42a8d9f8df 100755 --- a/tools/run_tests/build_package_php.sh +++ b/tools/run_tests/artifacts/build_package_php.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. mkdir -p artifacts/ cp -r $EXTERNAL_GIT_ROOT/architecture={x86,x64},language=php,platform={windows,linux,macos}/artifacts/* artifacts/ || true diff --git a/tools/run_tests/build_package_python.sh b/tools/run_tests/artifacts/build_package_python.sh index 2511a6ae46..4a1c15ceee 100755 --- a/tools/run_tests/build_package_python.sh +++ b/tools/run_tests/artifacts/build_package_python.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. mkdir -p artifacts/ diff --git a/tools/run_tests/build_package_ruby.sh b/tools/run_tests/artifacts/build_package_ruby.sh index 0a755bddb0..b4d20d8a4c 100755 --- a/tools/run_tests/build_package_ruby.sh +++ b/tools/run_tests/artifacts/build_package_ruby.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. 
base=$(pwd) diff --git a/tools/run_tests/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py index a16daac4fe..a7535b3852 100644 --- a/tools/run_tests/distribtest_targets.py +++ b/tools/run_tests/artifacts/distribtest_targets.py @@ -30,7 +30,11 @@ """Definition of targets run distribution package tests.""" -import jobset +import os.path +import sys + +sys.path.insert(0, os.path.abspath('..')) +import python_utils.jobset as jobset def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, diff --git a/tools/run_tests/package_targets.py b/tools/run_tests/artifacts/package_targets.py index 673affeac0..d490f571c3 100644 --- a/tools/run_tests/package_targets.py +++ b/tools/run_tests/artifacts/package_targets.py @@ -30,7 +30,12 @@ """Definition of targets to build distribution packages.""" -import jobset +import os.path +import sys + +sys.path.insert(0, os.path.abspath('..')) +import python_utils.jobset as jobset + def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={}, flake_retries=0, timeout_retries=0): @@ -114,7 +119,7 @@ class NodePackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_node.sh') + 'tools/run_tests/artifacts/build_package_node.sh') class RubyPackage: @@ -131,7 +136,7 @@ class RubyPackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_ruby.sh') + 'tools/run_tests/artifacts/build_package_ruby.sh') class PythonPackage: @@ -148,7 +153,7 @@ class PythonPackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_python.sh') + 'tools/run_tests/artifacts/build_package_python.sh') class PHPPackage: @@ -165,7 +170,7 @@ class PHPPackage: return create_docker_jobspec( self.name, 'tools/dockerfile/grpc_artifact_linux_x64', - 'tools/run_tests/build_package_php.sh') + 'tools/run_tests/artifacts/build_package_php.sh') def targets(): diff --git a/tools/run_tests/build_stats_schema.json b/tools/run_tests/build_stats/build_stats_schema.json index 021a349545..021a349545 100644 --- a/tools/run_tests/build_stats_schema.json +++ b/tools/run_tests/build_stats/build_stats_schema.json diff --git a/tools/run_tests/build_stats_schema_no_matrix.json b/tools/run_tests/build_stats/build_stats_schema_no_matrix.json index eeb067d7a5..eeb067d7a5 100644 --- a/tools/run_tests/build_stats_schema_no_matrix.json +++ b/tools/run_tests/build_stats/build_stats_schema_no_matrix.json diff --git a/tools/run_tests/dockerize/build_docker_and_run_tests.sh b/tools/run_tests/dockerize/build_docker_and_run_tests.sh index c3219c533d..b68ac89121 100755 --- a/tools/run_tests/dockerize/build_docker_and_run_tests.sh +++ b/tools/run_tests/dockerize/build_docker_and_run_tests.sh @@ -77,8 +77,6 @@ docker run \ -v /tmp/ccache:/tmp/ccache \ -v /tmp/npm-cache:/tmp/npm-cache \ -v /tmp/xdg-cache-home:/tmp/xdg-cache-home \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v $(which docker):/bin/docker \ -w /var/local/git/grpc \ --name=$CONTAINER_NAME \ $DOCKER_IMAGE_NAME \ diff --git a/tools/run_tests/configs.json b/tools/run_tests/generated/configs.json index b0839ef026..b0839ef026 100644 --- a/tools/run_tests/configs.json +++ b/tools/run_tests/generated/configs.json diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 2e6877ccac..6ae269cc20 100644 --- a/tools/run_tests/sources_and_headers.json +++ 
b/tools/run_tests/generated/sources_and_headers.json @@ -2263,7 +2263,7 @@ }, { "deps": [ - "google_benchmark", + "benchmark", "gpr", "gpr_test_util", "grpc", @@ -2913,7 +2913,7 @@ }, { "deps": [ - "google_benchmark" + "benchmark" ], "headers": [], "is_filegroup": false, @@ -6207,30 +6207,30 @@ { "deps": [], "headers": [ - "third_party/google_benchmark/include/benchmark/benchmark.h", - "third_party/google_benchmark/include/benchmark/benchmark_api.h", - "third_party/google_benchmark/include/benchmark/macros.h", - "third_party/google_benchmark/include/benchmark/reporter.h", - "third_party/google_benchmark/src/arraysize.h", - "third_party/google_benchmark/src/benchmark_api_internal.h", - "third_party/google_benchmark/src/check.h", - "third_party/google_benchmark/src/colorprint.h", - "third_party/google_benchmark/src/commandlineflags.h", - "third_party/google_benchmark/src/complexity.h", - "third_party/google_benchmark/src/cycleclock.h", - "third_party/google_benchmark/src/internal_macros.h", - "third_party/google_benchmark/src/log.h", - "third_party/google_benchmark/src/mutex.h", - "third_party/google_benchmark/src/re.h", - "third_party/google_benchmark/src/sleep.h", - "third_party/google_benchmark/src/stat.h", - "third_party/google_benchmark/src/string_util.h", - "third_party/google_benchmark/src/sysinfo.h", - "third_party/google_benchmark/src/timers.h" - ], - "is_filegroup": false, - "language": "c++", - "name": "google_benchmark", + "third_party/benchmark/include/benchmark/benchmark.h", + "third_party/benchmark/include/benchmark/benchmark_api.h", + "third_party/benchmark/include/benchmark/macros.h", + "third_party/benchmark/include/benchmark/reporter.h", + "third_party/benchmark/src/arraysize.h", + "third_party/benchmark/src/benchmark_api_internal.h", + "third_party/benchmark/src/check.h", + "third_party/benchmark/src/colorprint.h", + "third_party/benchmark/src/commandlineflags.h", + "third_party/benchmark/src/complexity.h", + "third_party/benchmark/src/cycleclock.h", + "third_party/benchmark/src/internal_macros.h", + "third_party/benchmark/src/log.h", + "third_party/benchmark/src/mutex.h", + "third_party/benchmark/src/re.h", + "third_party/benchmark/src/sleep.h", + "third_party/benchmark/src/stat.h", + "third_party/benchmark/src/string_util.h", + "third_party/benchmark/src/sysinfo.h", + "third_party/benchmark/src/timers.h" + ], + "is_filegroup": false, + "language": "c++", + "name": "benchmark", "src": [], "third_party": false, "type": "lib" diff --git a/tools/run_tests/tests.json b/tools/run_tests/generated/tests.json index b76263b8b9..b76263b8b9 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/generated/tests.json diff --git a/tools/run_tests/build_csharp.sh b/tools/run_tests/helper_scripts/build_csharp.sh index 48ce11a10b..84c5b1c777 100755 --- a/tools/run_tests/build_csharp.sh +++ b/tools/run_tests/helper_scripts/build_csharp.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../../src/csharp +cd $(dirname $0)/../../../src/csharp # overriding NativeDependenciesConfigurationUnix is needed to make gcov code coverage work. xbuild /p:Configuration=$MSBUILD_CONFIG /p:NativeDependenciesConfigurationUnix=$CONFIG Grpc.sln diff --git a/tools/run_tests/build_csharp_coreclr.bat b/tools/run_tests/helper_scripts/build_csharp_coreclr.bat index b6e3ccbd2b..78e5f5998b 100644 --- a/tools/run_tests/build_csharp_coreclr.bat +++ b/tools/run_tests/helper_scripts/build_csharp_coreclr.bat @@ -29,7 +29,7 @@ setlocal -cd /d %~dp0\..\..\src\csharp +cd /d %~dp0\..\..\..\src\csharp dotnet restore . 
|| goto :error diff --git a/tools/run_tests/build_csharp_coreclr.sh b/tools/run_tests/helper_scripts/build_csharp_coreclr.sh index 02cf0d39cb..dd5fd31c75 100755 --- a/tools/run_tests/build_csharp_coreclr.sh +++ b/tools/run_tests/helper_scripts/build_csharp_coreclr.sh @@ -30,7 +30,7 @@ set -ex -cd $(dirname $0)/../../src/csharp +cd $(dirname $0)/../../../src/csharp # TODO(jtattermusch): introduce caching dotnet restore . diff --git a/tools/run_tests/build_node.bat b/tools/run_tests/helper_scripts/build_node.bat index 82e8208348..82e8208348 100644 --- a/tools/run_tests/build_node.bat +++ b/tools/run_tests/helper_scripts/build_node.bat diff --git a/tools/run_tests/build_node.sh b/tools/run_tests/helper_scripts/build_node.sh index d9292fd8aa..8a928bb762 100755 --- a/tools/run_tests/build_node.sh +++ b/tools/run_tests/helper_scripts/build_node.sh @@ -38,6 +38,6 @@ set -ex CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. npm install --unsafe-perm --build-from-source diff --git a/tools/run_tests/build_php.sh b/tools/run_tests/helper_scripts/build_php.sh index 77a8abcfe7..acaaa23adf 100755 --- a/tools/run_tests/build_php.sh +++ b/tools/run_tests/helper_scripts/build_php.sh @@ -33,7 +33,7 @@ set -ex CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. root=`pwd` export GRPC_LIB_SUBDIR=libs/$CONFIG diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/helper_scripts/build_python.sh index fb884ad166..0e88e96765 100755 --- a/tools/run_tests/build_python.sh +++ b/tools/run_tests/helper_scripts/build_python.sh @@ -31,7 +31,7 @@ set -ex # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. ########################## # Portability operations # @@ -171,8 +171,7 @@ pip_install_dir() { } $VENV_PYTHON -m pip install --upgrade pip -# TODO(https://github.com/pypa/setuptools/issues/709) get the latest setuptools -$VENV_PYTHON -m pip install setuptools==25.1.1 +$VENV_PYTHON -m pip install setuptools $VENV_PYTHON -m pip install cython pip_install_dir $ROOT $VENV_PYTHON $ROOT/tools/distrib/python/make_grpcio_tools.py diff --git a/tools/run_tests/build_python_msys2.sh b/tools/run_tests/helper_scripts/build_python_msys2.sh index 6e9d369018..6e9d369018 100644 --- a/tools/run_tests/build_python_msys2.sh +++ b/tools/run_tests/helper_scripts/build_python_msys2.sh diff --git a/tools/run_tests/build_ruby.sh b/tools/run_tests/helper_scripts/build_ruby.sh index 10343fce69..32638dede9 100755 --- a/tools/run_tests/build_ruby.sh +++ b/tools/run_tests/helper_scripts/build_ruby.sh @@ -34,7 +34,7 @@ set -ex export GRPC_CONFIG=${CONFIG:-opt} # change to grpc's ruby directory -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. rm -rf ./tmp rake compile diff --git a/tools/run_tests/post_tests_c.sh b/tools/run_tests/helper_scripts/post_tests_c.sh index 4409526dab..a83a59e23b 100755 --- a/tools/run_tests/post_tests_c.sh +++ b/tools/run_tests/helper_scripts/post_tests_c.sh @@ -32,7 +32,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) 
out=$root/reports/c_cxx_coverage tmp1=$(mktemp) tmp2=$(mktemp) diff --git a/tools/run_tests/post_tests_csharp.bat b/tools/run_tests/helper_scripts/post_tests_csharp.bat index 0d49a00b2a..2359f148ce 100644 --- a/tools/run_tests/post_tests_csharp.bat +++ b/tools/run_tests/helper_scripts/post_tests_csharp.bat @@ -36,7 +36,7 @@ if not "%CONFIG%" == "gcov" ( ) @rem enter src/csharp directory -cd /d %~dp0\..\..\src\csharp +cd /d %~dp0\..\..\..\src\csharp @rem Generate code coverage report @rem TODO(jtattermusch): currently the report list is hardcoded diff --git a/tools/run_tests/post_tests_csharp.sh b/tools/run_tests/helper_scripts/post_tests_csharp.sh index bb6f5c6e18..762c1f8827 100755 --- a/tools/run_tests/post_tests_csharp.sh +++ b/tools/run_tests/helper_scripts/post_tests_csharp.sh @@ -33,7 +33,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi # change to gRPC repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. # Generate the csharp extension coverage report gcov objs/gcov/src/csharp/ext/*.o diff --git a/tools/run_tests/post_tests_php.sh b/tools/run_tests/helper_scripts/post_tests_php.sh index b4098066ea..23dc202322 100755 --- a/tools/run_tests/post_tests_php.sh +++ b/tools/run_tests/helper_scripts/post_tests_php.sh @@ -32,7 +32,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) out=$root/reports/php_ext_coverage tmp1=$(mktemp) tmp2=$(mktemp) diff --git a/tools/run_tests/post_tests_ruby.sh b/tools/run_tests/helper_scripts/post_tests_ruby.sh index 0877e44805..300edfe8a3 100755 --- a/tools/run_tests/post_tests_ruby.sh +++ b/tools/run_tests/helper_scripts/post_tests_ruby.sh @@ -32,7 +32,7 @@ set -ex if [ "$CONFIG" != "gcov" ] ; then exit ; fi -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) out=$root/reports/ruby_ext_coverage tmp1=$(mktemp) tmp2=$(mktemp) diff --git a/tools/run_tests/pre_build_c.bat b/tools/run_tests/helper_scripts/pre_build_c.bat index e4ab69384c..75b90f85b2 100644 --- a/tools/run_tests/pre_build_c.bat +++ b/tools/run_tests/helper_scripts/pre_build_c.bat @@ -32,7 +32,7 @@ setlocal @rem enter repo root -cd /d %~dp0\..\.. +cd /d %~dp0\..\..\.. @rem Location of nuget.exe set NUGET=C:\nuget\nuget.exe diff --git a/tools/run_tests/pre_build_csharp.bat b/tools/run_tests/helper_scripts/pre_build_csharp.bat index f15979a96b..139955d4da 100644 --- a/tools/run_tests/pre_build_csharp.bat +++ b/tools/run_tests/helper_scripts/pre_build_csharp.bat @@ -32,7 +32,7 @@ setlocal @rem enter repo root -cd /d %~dp0\..\.. +cd /d %~dp0\..\..\.. 
@rem Location of nuget.exe set NUGET=C:\nuget\nuget.exe diff --git a/tools/run_tests/pre_build_csharp.sh b/tools/run_tests/helper_scripts/pre_build_csharp.sh index ee678ddce5..1f808556f4 100755 --- a/tools/run_tests/pre_build_csharp.sh +++ b/tools/run_tests/helper_scripts/pre_build_csharp.sh @@ -31,7 +31,7 @@ set -ex # cd to gRPC csharp directory -cd $(dirname $0)/../../src/csharp +cd $(dirname $0)/../../../src/csharp root=`pwd` diff --git a/tools/run_tests/pre_build_node.bat b/tools/run_tests/helper_scripts/pre_build_node.bat index addb01a2a4..addb01a2a4 100644 --- a/tools/run_tests/pre_build_node.bat +++ b/tools/run_tests/helper_scripts/pre_build_node.bat diff --git a/tools/run_tests/pre_build_node.sh b/tools/run_tests/helper_scripts/pre_build_node.sh index e63be9da52..e63be9da52 100755 --- a/tools/run_tests/pre_build_node.sh +++ b/tools/run_tests/helper_scripts/pre_build_node.sh diff --git a/tools/run_tests/pre_build_ruby.sh b/tools/run_tests/helper_scripts/pre_build_ruby.sh index e7074c45c2..56b58df544 100755 --- a/tools/run_tests/pre_build_ruby.sh +++ b/tools/run_tests/helper_scripts/pre_build_ruby.sh @@ -34,6 +34,6 @@ set -ex export GRPC_CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. bundle install diff --git a/tools/run_tests/run_lcov.sh b/tools/run_tests/helper_scripts/run_lcov.sh index 796a0b5ceb..bc7b44cd3e 100755 --- a/tools/run_tests/run_lcov.sh +++ b/tools/run_tests/helper_scripts/run_lcov.sh @@ -32,7 +32,7 @@ set -ex out=$(readlink -f ${1:-coverage}) -root=$(readlink -f $(dirname $0)/../..) +root=$(readlink -f $(dirname $0)/../../..) shift || true tmp=$(mktemp) cd $root diff --git a/tools/run_tests/run_node.bat b/tools/run_tests/helper_scripts/run_node.bat index 0987fbee55..0987fbee55 100644 --- a/tools/run_tests/run_node.bat +++ b/tools/run_tests/helper_scripts/run_node.bat diff --git a/tools/run_tests/run_node.sh b/tools/run_tests/helper_scripts/run_node.sh index 44f75645f5..0fafe9481a 100755 --- a/tools/run_tests/run_node.sh +++ b/tools/run_tests/helper_scripts/run_node.sh @@ -37,7 +37,7 @@ set -ex CONFIG=${CONFIG:-opt} # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. root=`pwd` diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/helper_scripts/run_python.sh index 17e0186f2a..7be473428f 100755 --- a/tools/run_tests/run_python.sh +++ b/tools/run_tests/helper_scripts/run_python.sh @@ -31,7 +31,7 @@ set -ex # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. PYTHON=`realpath -s "${1:-py27/bin/python}"` diff --git a/tools/run_tests/run_ruby.sh b/tools/run_tests/helper_scripts/run_ruby.sh index 73a84ac361..ab153b7e25 100755 --- a/tools/run_tests/run_ruby.sh +++ b/tools/run_tests/helper_scripts/run_ruby.sh @@ -31,6 +31,6 @@ set -ex # change to grpc repo root -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. rake diff --git a/tools/run_tests/run_tests_in_workspace.sh b/tools/run_tests/helper_scripts/run_tests_in_workspace.sh index 9c6c5b76e0..002c8d6de2 100755 --- a/tools/run_tests/run_tests_in_workspace.sh +++ b/tools/run_tests/helper_scripts/run_tests_in_workspace.sh @@ -34,7 +34,7 @@ # newly created workspace) set -ex -cd $(dirname $0)/../.. +cd $(dirname $0)/../../.. 
export repo_root=$(pwd) rm -rf "${WORKSPACE_NAME}" diff --git a/tools/run_tests/interop_html_report.template b/tools/run_tests/interop/interop_html_report.template index 46cce426b7..46cce426b7 100644 --- a/tools/run_tests/interop_html_report.template +++ b/tools/run_tests/interop/interop_html_report.template diff --git a/src/python/grpcio_health_checking/grpc/__init__.py b/tools/run_tests/performance/process_local_perf_flamegraphs.sh index fcc7048815..d15610f137 100644..100755 --- a/src/python/grpcio_health_checking/grpc/__init__.py +++ b/tools/run_tests/performance/process_local_perf_flamegraphs.sh @@ -1,3 +1,4 @@ +#!/bin/bash # Copyright 2015, Google Inc. # All rights reserved. # @@ -27,4 +28,13 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -__import__('pkg_resources').declare_namespace(__name__) +mkdir -p $OUTPUT_DIR + +PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data +PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf + +# Generate Flame graphs +echo "running perf script on $PERF_DATA_FILE" +perf script -i $PERF_DATA_FILE > $PERF_SCRIPT_OUTPUT + +~/FlameGraph/stackcollapse-perf.pl $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg diff --git a/tools/run_tests/performance/process_remote_perf_flamegraphs.sh b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh new file mode 100755 index 0000000000..cc075354cc --- /dev/null +++ b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +mkdir -p $OUTPUT_DIR + +PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data +PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf + +# Generate Flame graphs +echo "running perf script on $USER_AT_HOST with perf.data" +ssh $USER_AT_HOST "cd ~/performance_workspace/grpc && perf script -i $PERF_DATA_FILE | gzip > ${PERF_SCRIPT_OUTPUT}.gz" + +scp $USER_AT_HOST:~/performance_workspace/grpc/$PERF_SCRIPT_OUTPUT.gz . 
+ +gzip -d -f $PERF_SCRIPT_OUTPUT.gz + +~/FlameGraph/stackcollapse-perf.pl --kernel $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl --color=java --hash > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg diff --git a/tools/distrib/python/grpcio_tools/grpc/__init__.py b/tools/run_tests/python_utils/__init__.py index 70ac5edd48..100a624dc9 100644 --- a/tools/distrib/python/grpcio_tools/grpc/__init__.py +++ b/tools/run_tests/python_utils/__init__.py @@ -26,5 +26,3 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -__import__('pkg_resources').declare_namespace(__name__) diff --git a/tools/run_tests/antagonist.py b/tools/run_tests/python_utils/antagonist.py index 857addfb38..857addfb38 100755 --- a/tools/run_tests/antagonist.py +++ b/tools/run_tests/python_utils/antagonist.py diff --git a/tools/run_tests/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py index 4a7e61b3c4..0869c5cee9 100755 --- a/tools/run_tests/dockerjob.py +++ b/tools/run_tests/python_utils/dockerjob.py @@ -31,13 +31,14 @@ from __future__ import print_function -import jobset import tempfile import time import uuid import os import subprocess +import jobset + _DEVNULL = open(os.devnull, 'w') diff --git a/tools/run_tests/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py index ca1d6d4eb5..ca1d6d4eb5 100644 --- a/tools/run_tests/filter_pull_request_tests.py +++ b/tools/run_tests/python_utils/filter_pull_request_tests.py diff --git a/tools/run_tests/jobset.py b/tools/run_tests/python_utils/jobset.py index 2acc7971f6..7b2c62d1a2 100755 --- a/tools/run_tests/jobset.py +++ b/tools/run_tests/python_utils/jobset.py @@ -139,16 +139,16 @@ def message(tag, msg, explanatory_text=None, do_newline=False): if explanatory_text: print(explanatory_text) print('%s: %s' % (tag, msg)) - return - sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % ( - _BEGINNING_OF_LINE, - _CLEAR_LINE, - '\n%s' % explanatory_text if explanatory_text is not None else '', - _COLORS[_TAG_COLOR[tag]][1], - _COLORS[_TAG_COLOR[tag]][0], - tag, - msg, - '\n' if do_newline or explanatory_text is not None else '')) + else: + sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % ( + _BEGINNING_OF_LINE, + _CLEAR_LINE, + '\n%s' % explanatory_text if explanatory_text is not None else '', + _COLORS[_TAG_COLOR[tag]][1], + _COLORS[_TAG_COLOR[tag]][0], + tag, + msg, + '\n' if do_newline or explanatory_text is not None else '')) sys.stdout.flush() except: pass @@ -219,7 +219,8 @@ class JobResult(object): class Job(object): """Manages one job.""" - def __init__(self, spec, newline_on_success, travis, add_env): + def __init__(self, spec, newline_on_success, travis, add_env, + quiet_success=False): self._spec = spec self._newline_on_success = newline_on_success self._travis = travis @@ -227,7 +228,9 @@ class Job(object): self._retries = 0 self._timeout_retries = 0 self._suppress_failure_message = False - message('START', spec.shortname, do_newline=self._travis) + self._quiet_success = quiet_success + if not self._quiet_success: + message('START', spec.shortname, do_newline=self._travis) self.result = JobResult() self.start() @@ -302,10 +305,11 @@ class Job(object): if real > 0.5: cores = (user + sys) / real measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost) - message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % ( - self._spec.shortname, elapsed, 
self._retries, self._timeout_retries, measurement), - stdout() if self._spec.verbose_success else None, - do_newline=self._newline_on_success or self._travis) + if not self._quiet_success: + message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % ( + self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement), + stdout() if self._spec.verbose_success else None, + do_newline=self._newline_on_success or self._travis) self.result.state = 'PASSED' elif (self._state == _RUNNING and self._spec.timeout_seconds is not None and @@ -341,7 +345,7 @@ class Jobset(object): """Manages one run of jobs.""" def __init__(self, check_cancelled, maxjobs, newline_on_success, travis, - stop_on_failure, add_env): + stop_on_failure, add_env, quiet_success): self._running = set() self._check_cancelled = check_cancelled self._cancelled = False @@ -352,6 +356,7 @@ class Jobset(object): self._travis = travis self._stop_on_failure = stop_on_failure self._add_env = add_env + self._quiet_success = quiet_success self.resultset = {} self._remaining = None self._start_time = time.time() @@ -380,7 +385,8 @@ class Jobset(object): job = Job(spec, self._newline_on_success, self._travis, - self._add_env) + self._add_env, + self._quiet_success) self._running.add(job) if job.GetSpec().shortname not in self.resultset: self.resultset[job.GetSpec().shortname] = [] @@ -403,10 +409,11 @@ class Jobset(object): break for job in dead: self._completed += 1 - self.resultset[job.GetSpec().shortname].append(job.result) + if not self._quiet_success or job.result.state != 'PASSED': + self.resultset[job.GetSpec().shortname].append(job.result) self._running.remove(job) if dead: return - if (not self._travis): + if not self._travis and platform_string() != 'windows': rstr = '' if self._remaining is None else '%d queued, ' % self._remaining if self._remaining is not None and self._completed > 0: now = time.time() @@ -463,7 +470,8 @@ def run(cmdlines, infinite_runs=False, stop_on_failure=False, add_env={}, - skip_jobs=False): + skip_jobs=False, + quiet_success=False): if skip_jobs: results = {} skipped_job_result = JobResult() @@ -474,7 +482,8 @@ def run(cmdlines, return results js = Jobset(check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS, - newline_on_success, travis, stop_on_failure, add_env) + newline_on_success, travis, stop_on_failure, add_env, + quiet_success) for cmdline, remaining in tag_remaining(cmdlines): if not js.start(cmdline): break diff --git a/tools/run_tests/port_server.py b/tools/run_tests/python_utils/port_server.py index e9b3f7ff79..e9b3f7ff79 100755 --- a/tools/run_tests/port_server.py +++ b/tools/run_tests/python_utils/port_server.py diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/python_utils/report_utils.py index 90055e3530..352cf7abe7 100644 --- a/tools/run_tests/report_utils.py +++ b/tools/run_tests/python_utils/report_utils.py @@ -84,7 +84,7 @@ def render_interop_html_report( client_langs, server_langs, test_cases, auth_test_cases, http2_cases, resultset, num_failures, cloud_to_prod, prod_servers, http2_interop): """Generate HTML report for interop tests.""" - template_file = 'tools/run_tests/interop_html_report.template' + template_file = 'tools/run_tests/interop/interop_html_report.template' try: mytemplate = Template(filename=template_file, format_exceptions=True) except NameError: @@ -122,3 +122,10 @@ def render_interop_html_report( except: print(exceptions.text_error_template().render()) raise + +def render_perf_profiling_results(output_filepath, 
profile_names): + with open(output_filepath, 'w') as output_file: + output_file.write('<ul>\n') + for name in profile_names: + output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name)) + output_file.write('</ul>\n') diff --git a/tools/run_tests/watch_dirs.py b/tools/run_tests/python_utils/watch_dirs.py index 21ef23e158..21ef23e158 100755 --- a/tools/run_tests/watch_dirs.py +++ b/tools/run_tests/python_utils/watch_dirs.py diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py index 83cfc429f9..c14f18af81 100755 --- a/tools/run_tests/run_interop_tests.py +++ b/tools/run_tests/run_interop_tests.py @@ -34,20 +34,21 @@ from __future__ import print_function import argparse import atexit -import dockerjob import itertools -import jobset import json import multiprocessing import os import re -import report_utils import subprocess import sys import tempfile import time import uuid +import python_utils.dockerjob as dockerjob +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils + # Docker doesn't clean up after itself, so we do it on exit. atexit.register(lambda: subprocess.call(['stty', 'echo'])) diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py index 1d0c98fb69..b7b742d7af 100755 --- a/tools/run_tests/run_performance_tests.py +++ b/tools/run_tests/run_performance_tests.py @@ -35,14 +35,11 @@ from __future__ import print_function import argparse import collections import itertools -import jobset import json import multiprocessing import os -import performance.scenario_config as scenario_config import pipes import re -import report_utils import subprocess import sys import tempfile @@ -50,6 +47,10 @@ import time import traceback import uuid +import performance.scenario_config as scenario_config +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils + _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) os.chdir(_ROOT) @@ -57,15 +58,18 @@ os.chdir(_ROOT) _REMOTE_HOST_USERNAME = 'jenkins' +_PERF_REPORT_OUTPUT_DIR = 'perf_reports' + class QpsWorkerJob: """Encapsulates a qps worker server job.""" - def __init__(self, spec, language, host_and_port): + def __init__(self, spec, language, host_and_port, perf_file_base_name=None): self._spec = spec self.language = language self.host_and_port = host_and_port self._job = None + self.perf_file_base_name = perf_file_base_name def start(self): self._job = jobset.Job(self._spec, newline_on_success=True, travis=True, add_env={}) @@ -80,24 +84,32 @@ class QpsWorkerJob: self._job = None -def create_qpsworker_job(language, shortname=None, - port=10000, remote_host=None): - cmdline = language.worker_cmdline() + ['--driver_port=%s' % port] +def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None, perf_cmd=None): + cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port]) + if remote_host: - user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host) - cmdline = ['ssh', - str(user_at_host), - 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)] host_and_port='%s:%s' % (remote_host, port) else: host_and_port='localhost:%s' % port + perf_file_base_name = None + if perf_cmd: + perf_file_base_name = '%s-%s' % (host_and_port, shortname) + # specify -o output file so perf.data gets collected when worker stopped + cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name] + cmdline + + if remote_host: + user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, 
remote_host) + ssh_cmd = ['ssh'] + ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)]) + cmdline = ssh_cmd + jobspec = jobset.JobSpec( cmdline=cmdline, shortname=shortname, timeout_seconds=5*60, # workers get restarted after each scenario verbose_success=True) - return QpsWorkerJob(jobspec, language, host_and_port) + return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name) def create_scenario_jobspec(scenario_json, workers, remote_host=None, @@ -259,7 +271,7 @@ def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), bui sys.exit(1) -def create_qpsworkers(languages, worker_hosts): +def create_qpsworkers(languages, worker_hosts, perf_cmd=None): """Creates QPS workers (but does not start them).""" if not worker_hosts: # run two workers locally (for each language) @@ -275,11 +287,32 @@ def create_qpsworkers(languages, worker_hosts): shortname= 'qps_worker_%s_%s' % (language, worker_idx), port=worker[1] + language.worker_port_offset(), - remote_host=worker[0]) + remote_host=worker[0], + perf_cmd=perf_cmd) for language in languages for worker_idx, worker in enumerate(workers)] +def perf_report_processor_job(worker_host, perf_base_name, output_filename): + print('Creating perf report collection job for %s' % worker_host) + cmd = '' + if worker_host != 'localhost': + user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host) + cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ + tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \ + % (user_at_host, output_filename, _PERF_REPORT_OUTPUT_DIR, perf_base_name) + else: + cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ + tools/run_tests/performance/process_local_perf_flamegraphs.sh" \ + % (output_filename, _PERF_REPORT_OUTPUT_DIR, perf_base_name) + + return jobset.JobSpec(cmdline=cmd, + timeout_seconds=3*60, + shell=True, + verbose_success=True, + shortname='process perf report') + + Scenario = collections.namedtuple('Scenario', 'jobspec workers name') @@ -372,6 +405,31 @@ def finish_qps_workers(jobs): print('All QPS workers finished.') return num_killed +profile_output_files = [] + +# Collect perf text reports and flamegraphs if perf_cmd was used +# Note the base names of perf text reports are used when creating and processing +# perf data. The scenario name uniquifies the output name in the final +# perf reports directory. +# Also, the perf profiles need to be fetched and processed after each scenario +# in order to avoid clobbering the output files.
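+# For example (illustrative names): a local worker registered as, say, +# localhost:10000 with shortname qps_worker_csharp_0 writes its raw profile to +# localhost:10000-qps_worker_csharp_0-perf.data in the repo root, and running a +# scenario named example_scenario against it then yields +# perf_reports/example_scenario-localhost:10000-qps_worker_csharp_0.svg.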
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name): + perf_report_jobs = [] + global profile_output_files + for host_and_port in hosts_and_base_names: + perf_base_name = hosts_and_base_names[host_and_port] + output_filename = '%s-%s' % (scenario_name, perf_base_name) + # from the base filename, create .svg output filename + host = host_and_port.split(':')[0] + profile_output_files.append('%s.svg' % output_filename) + perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename)) + + jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True) + failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1) + jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True) + return failures + + argp = argparse.ArgumentParser(description='Run performance tests.') argp.add_argument('-l', '--language', choices=['all'] + sorted(scenario_config.LANGUAGES.keys()), @@ -405,6 +463,33 @@ argp.add_argument('--netperf', help='Run netperf benchmark as one of the scenarios.') argp.add_argument('-x', '--xml_report', default='report.xml', type=str, help='Name of XML report file to generate.') +argp.add_argument('--perf_args', + help=('Example usage: "--perf_args=record -F 99 -g". ' + 'Wrap QPS workers in a perf command ' + 'with the arguments to perf specified here. ' + '".svg" flame graph profiles will be ' + 'created for each Qps Worker on each scenario. ' + 'Files will be output to "<repo_root>/perf_reports" ' + 'directory. Output files from running the worker ' + 'under perf are saved in the repo root where it is run. ' + 'Note that the perf "-g" flag is necessary for ' + 'flame graph generation to work (assuming the binary ' + 'being profiled uses frame pointers, check out ' + '"--call-graph dwarf" option using libunwind otherwise.) ' + 'Also note that the entire "--perf_args=<arg(s)>" must ' + 'be wrapped in quotes as in the example usage. ' + 'If "--perf_args" is unspecified, "perf" will ' + 'not be used at all. ' + 'See http://www.brendangregg.com/perf.html ' + 'for more general perf examples.')) +argp.add_argument('--skip_generate_flamegraphs', + default=False, + action='store_const', + const=True, + help=('Turn flame graph generation off. ' + 'May be useful if "perf_args" arguments do not make sense for ' + 'generating flamegraphs (e.g., "--perf_args=stat ...")')) + args = argp.parse_args() @@ -435,7 +520,13 @@ if not args.remote_driver_host: if not args.dry_run: build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local) -qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host) +perf_cmd = None +if args.perf_args: + # Expect /usr/bin/perf to be installed here, as is usual + perf_cmd = ['/usr/bin/perf'] + perf_cmd.extend(re.split('\s+', args.perf_args)) + +qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd) # get list of worker addresses for each language.
workers_by_lang = dict([(str(language), []) for language in languages]) @@ -457,16 +548,20 @@ if not scenarios: total_scenario_failures = 0 qps_workers_killed = 0 merged_resultset = {} +perf_report_failures = 0 + for scenario in scenarios: if args.dry_run: print(scenario.name) else: + scenario_failures = 0 try: for worker in scenario.workers: worker.start() - scenario_failures, resultset = jobset.run([scenario.jobspec, - create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host)], - newline_on_success=True, maxjobs=1) + jobs = [scenario.jobspec] + if scenario.workers: + jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host)) + scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1) total_scenario_failures += scenario_failures merged_resultset = dict(itertools.chain(merged_resultset.iteritems(), resultset.iteritems())) @@ -474,10 +569,27 @@ for scenario in scenarios: # Consider qps workers that need to be killed as failures qps_workers_killed += finish_qps_workers(scenario.workers) + if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs: + workers_and_base_names = {} + for worker in scenario.workers: + if not worker.perf_file_base_name: + raise Exception('using perf but perf report filename is unspecified') + workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name + perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name) -report_utils.render_junit_xml_report(merged_resultset, args.xml_report, - suite_name='benchmarks') + +# Still write the index.html even if some scenarios failed. +# 'profile_output_files' will only have names for scenarios that passed +if perf_cmd and not args.skip_generate_flamegraphs: + # write the index file to the output dir, with all profiles from all scenarios/workers + report_utils.render_perf_profiling_results('%s/index.html' % _PERF_REPORT_OUTPUT_DIR, profile_output_files) if total_scenario_failures > 0 or qps_workers_killed > 0: - print ("%s scenarios failed and %s qps worker jobs killed" % (total_scenario_failures, qps_workers_killed)) + print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed)) + sys.exit(1) + +report_utils.render_junit_xml_report(merged_resultset, args.xml_report, + suite_name='benchmarks') +if perf_report_failures > 0: + print('%s perf profile collection jobs failed' % perf_report_failures) sys.exit(1) diff --git a/tools/run_tests/run_stress_tests.py b/tools/run_tests/run_stress_tests.py index de4a22877c..a94a615b88 100755 --- a/tools/run_tests/run_stress_tests.py +++ b/tools/run_tests/run_stress_tests.py @@ -33,9 +33,7 @@ from __future__ import print_function import argparse import atexit -dockerjob import itertools -jobset import json import multiprocessing import os @@ -46,6 +44,9 @@ import tempfile import time import uuid +import python_utils.dockerjob as dockerjob +import python_utils.jobset as jobset + # Docker doesn't clean up after itself, so we do it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo'])) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index c49ee4a6cc..924274191e 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -54,9 +54,9 @@ import time from six.moves import urllib import uuid -import jobset -import report_utils -import watch_dirs +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils +import python_utils.watch_dirs as watch_dirs _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) @@ -116,7 +116,7 @@ class Config(object): def get_c_tests(travis, test_lang) : out = [] platforms_str = 'ci_platforms' if travis else 'platforms' - with open('tools/run_tests/tests.json') as f: + with open('tools/run_tests/generated/tests.json') as f: js = json.load(f) return [tgt for tgt in js @@ -300,7 +300,7 @@ class CLanguage(object): def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_c.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']] else: return [] @@ -311,7 +311,7 @@ class CLanguage(object): if self.platform == 'windows': return [] else: - return [['tools/run_tests/post_tests_c.sh']] + return [['tools/run_tests/helper_scripts/post_tests_c.sh']] def makefile_name(self): return 'Makefile' @@ -382,17 +382,16 @@ class NodeLanguage(object): def test_specs(self): if self.platform == 'windows': - return [self.config.job_spec(['tools\\run_tests\\run_node.bat'], None)] + return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])] else: - return [self.config.job_spec(['tools/run_tests/run_node.sh', self.node_version], - None, + return [self.config.job_spec(['tools/run_tests/helper_scripts/run_node.sh', self.node_version], environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_node.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']] else: - return [['tools/run_tests/pre_build_node.sh', self.node_version]] + return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]] def make_targets(self): return [] @@ -402,9 +401,9 @@ class NodeLanguage(object): def build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\build_node.bat']] + return [['tools\\run_tests\\helper_scripts\\build_node.bat']] else: - return [['tools/run_tests/build_node.sh', self.node_version]] + return [['tools/run_tests/helper_scripts/build_node.sh', self.node_version]] def post_tests_steps(self): return [] @@ -427,7 +426,7 @@ class PhpLanguage(object): _check_compiler(self.args.compiler, ['default']) def test_specs(self): - return [self.config.job_spec(['src/php/bin/run_tests.sh'], None, + return [self.config.job_spec(['src/php/bin/run_tests.sh'], environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): @@ -440,10 +439,10 @@ class PhpLanguage(object): return [] def build_steps(self): - return [['tools/run_tests/build_php.sh']] + return [['tools/run_tests/helper_scripts/build_php.sh']] def post_tests_steps(self): - return [['tools/run_tests/post_tests_php.sh']] + return [['tools/run_tests/helper_scripts/post_tests_php.sh']] def makefile_name(self): return 'Makefile' @@ -463,7 +462,7 @@ class Php7Language(object): _check_compiler(self.args.compiler, ['default']) def test_specs(self): - return [self.config.job_spec(['src/php/bin/run_tests.sh'], None, + return [self.config.job_spec(['src/php/bin/run_tests.sh'], 
environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): @@ -476,10 +475,10 @@ class Php7Language(object): return [] def build_steps(self): - return [['tools/run_tests/build_php.sh']] + return [['tools/run_tests/helper_scripts/build_php.sh']] def post_tests_steps(self): - return [['tools/run_tests/post_tests_php.sh']] + return [['tools/run_tests/helper_scripts/post_tests_php.sh']] def makefile_name(self): return 'Makefile' @@ -548,18 +547,18 @@ class PythonLanguage(object): if os.name == 'nt': shell = ['bash'] - builder = [os.path.abspath('tools/run_tests/build_python_msys2.sh')] + builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')] builder_prefix_arguments = ['MINGW{}'.format(bits)] venv_relative_python = ['Scripts/python.exe'] toolchain = ['mingw32'] else: shell = [] - builder = [os.path.abspath('tools/run_tests/build_python.sh')] + builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')] builder_prefix_arguments = [] venv_relative_python = ['bin/python'] toolchain = ['unix'] - runner = [os.path.abspath('tools/run_tests/run_python.sh')] + runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')] config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments, venv_relative_python, toolchain, runner) python27_config = _python_config_generator(name='py27', major='2', @@ -611,12 +610,12 @@ class RubyLanguage(object): _check_compiler(self.args.compiler, ['default']) def test_specs(self): - return [self.config.job_spec(['tools/run_tests/run_ruby.sh'], + return [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'], timeout_seconds=10*60, environ=_FORCE_ENVIRON_FOR_WRAPPERS)] def pre_build_steps(self): - return [['tools/run_tests/pre_build_ruby.sh']] + return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']] def make_targets(self): return [] @@ -625,10 +624,10 @@ class RubyLanguage(object): return [] def build_steps(self): - return [['tools/run_tests/build_ruby.sh']] + return [['tools/run_tests/helper_scripts/build_ruby.sh']] def post_tests_steps(self): - return [['tools/run_tests/post_tests_ruby.sh']] + return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']] def makefile_name(self): return 'Makefile' @@ -702,7 +701,6 @@ class CSharpLanguage(object): for test in tests_by_assembly[assembly]: cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args specs.append(self.config.job_spec(cmdline, - None, shortname='csharp.%s' % test, environ=_FORCE_ENVIRON_FOR_WRAPPERS)) else: @@ -720,7 +718,6 @@ class CSharpLanguage(object): # to prevent problems with registering the profiler. 
run_exclusive = 1000000 specs.append(self.config.job_spec(cmdline, - None, shortname='csharp.coverage.%s' % assembly, cpu_cost=run_exclusive, environ=_FORCE_ENVIRON_FOR_WRAPPERS)) @@ -728,9 +725,9 @@ class CSharpLanguage(object): def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_csharp.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']] else: - return [['tools/run_tests/pre_build_csharp.sh']] + return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']] def make_targets(self): return ['grpc_csharp_ext'] @@ -741,22 +738,22 @@ class CSharpLanguage(object): def build_steps(self): if self.args.compiler == 'coreclr': if self.platform == 'windows': - return [['tools\\run_tests\\build_csharp_coreclr.bat']] + return [['tools\\run_tests\\helper_scripts\\build_csharp_coreclr.bat']] else: - return [['tools/run_tests/build_csharp_coreclr.sh']] + return [['tools/run_tests/helper_scripts/build_csharp_coreclr.sh']] else: if self.platform == 'windows': return [[_windows_build_bat(self.args.compiler), 'src/csharp/Grpc.sln', '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]] else: - return [['tools/run_tests/build_csharp.sh']] + return [['tools/run_tests/helper_scripts/build_csharp.sh']] def post_tests_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\post_tests_csharp.bat']] + return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']] else: - return [['tools/run_tests/post_tests_csharp.sh']] + return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']] def makefile_name(self): return 'Makefile' @@ -779,7 +776,7 @@ class ObjCLanguage(object): def test_specs(self): return [ self.config.job_spec(['src/objective-c/tests/run_tests.sh'], - timeout_seconds=None, + timeout_seconds=60*60, shortname='objc-tests', environ=_FORCE_ENVIRON_FOR_WRAPPERS), self.config.job_spec(['src/objective-c/tests/build_example_test.sh'], @@ -823,8 +820,12 @@ class Sanity(object): def test_specs(self): import yaml with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f: + environ={'TEST': 'true'} + if _is_use_docker_child(): + environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true' return [self.config.job_spec(cmd['script'].split(), - timeout_seconds=None, environ={'TEST': 'true'}, + timeout_seconds=30*60, + environ=environ, cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)] @@ -875,9 +876,9 @@ class NodeExpressLanguage(object): def pre_build_steps(self): if self.platform == 'windows': - return [['tools\\run_tests\\pre_build_node.bat']] + return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']] else: - return [['tools/run_tests/pre_build_node.sh', self.node_version]] + return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]] def make_targets(self): return [] @@ -901,7 +902,7 @@ class NodeExpressLanguage(object): return 'node_express' # different configurations we can run under -with open('tools/run_tests/configs.json') as f: +with open('tools/run_tests/generated/configs.json') as f: _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read())) @@ -1097,6 +1098,12 @@ argp.add_argument('-x', '--xml_report', default=None, type=str, help='Generates a JUnit-compatible XML report') argp.add_argument('--report_suite_name', default='tests', type=str, help='Test suite name to use in generated JUnit XML report') +argp.add_argument('--quiet_success', + default=False, + action='store_const', + const=True, + help='Dont print anything when a test passes. 
Passing tests also will not be reported in XML report. ' + + 'Useful when running many iterations of each test (argument -n).') argp.add_argument('--force_default_poller', default=False, action='store_const', const=True, help='Dont try to iterate over many polling strategies when they exist') args = argp.parse_args() @@ -1296,7 +1303,7 @@ def _start_port_server(port_server_port): running = False if running: current_version = int(subprocess.check_output( - [sys.executable, os.path.abspath('tools/run_tests/port_server.py'), + [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'), 'dump_version'])) print('my port server is version %d' % current_version) running = (version >= current_version) @@ -1308,7 +1315,7 @@ def _start_port_server(port_server_port): fd, logfile = tempfile.mkstemp() os.close(fd) print('starting port_server, with log file %s' % logfile) - args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'), + args = [sys.executable, os.path.abspath('tools/run_tests/python_utils/port_server.py'), '-p', '%d' % port_server_port, '-l', logfile] env = dict(os.environ) env['BUILD_ID'] = 'pleaseDontKillMeJenkins' @@ -1414,7 +1421,7 @@ def _build_and_run( return [] # start antagonists - antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py']) + antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py']) for _ in range(0, args.antagonists)] port_server_port = 32766 _start_port_server(port_server_port) @@ -1444,20 +1451,24 @@ def _build_and_run( else itertools.repeat(massaged_one_run, runs_per_test)) all_runs = itertools.chain.from_iterable(runs_sequence) + if args.quiet_success: + jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True) num_test_failures, resultset = jobset.run( all_runs, check_cancelled, newline_on_success=newline_on_success, travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs, stop_on_failure=args.stop_on_failure, - add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port}) + add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port}, + quiet_success=args.quiet_success) if resultset: for k, v in sorted(resultset.items()): num_runs, num_failures = _calculate_num_runs_failures(v) - if num_failures == num_runs: # what about infinite_runs??? - jobset.message('FAILED', k, do_newline=True) - elif num_failures > 0: - jobset.message( - 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs), - do_newline=True) + if num_failures > 0: + if num_failures == num_runs: # what about infinite_runs??? 
+ jobset.message('FAILED', k, do_newline=True) + else: + jobset.message( + 'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs), + do_newline=True) finally: for antagonist in antagonists: antagonist.kill() diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index 989bc7eb21..6e83180c66 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -31,12 +31,13 @@ """Run test matrix.""" import argparse -jobset import multiprocessing import os -report_utils import sys -from filter_pull_request_tests import filter_tests + +import python_utils.jobset as jobset +import python_utils.report_utils as report_utils +from python_utils.filter_pull_request_tests import filter_tests _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) os.chdir(_ROOT) @@ -69,7 +70,7 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None, inner_jobs=_ workspace_name = 'workspace_%s' % name env = {'WORKSPACE_NAME': workspace_name} test_job = jobset.JobSpec( - cmdline=['tools/run_tests/run_tests_in_workspace.sh', + cmdline=['tools/run_tests/helper_scripts/run_tests_in_workspace.sh', '-t', '-j', str(inner_jobs), '-x', '../report_%s.xml' % name, @@ -242,6 +243,17 @@ def _allowed_labels(): return sorted(all_labels) +def _runs_per_test_type(arg_str): + """Auxiliary function to parse the "runs_per_test" flag.""" + try: + n = int(arg_str) + if n <= 0: raise ValueError + return n + except: + msg = '\'{}\' is not a positive integer'.format(arg_str) + raise argparse.ArgumentTypeError(msg) + + if __name__ == "__main__": argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') argp.add_argument('-j', '--jobs', @@ -253,6 +265,11 @@ if __name__ == "__main__": nargs='+', default=[], help='Filter targets to run by label with AND semantics.') + argp.add_argument('--exclude', + choices=_allowed_labels(), + nargs='+', + default=[], + help='Exclude targets with any of given labels.') argp.add_argument('--build_only', default=False, action='store_const', const=True, @@ -269,7 +286,7 @@ if __name__ == "__main__": default=False, action='store_const', const=True, - help='Filters out tests irrelavant to pull request changes.') + help='Filters out tests irrelevant to pull request changes.') argp.add_argument('--base_branch', default='origin/master', type=str, @@ -278,6 +295,9 @@ if __name__ == "__main__": default=_DEFAULT_INNER_JOBS, type=int, help='Number of jobs in each run_tests.py instance') + argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type, + help='How many times to run each test.
>1 runs implies ' + + 'omitting passing test from the output & reports.') args = argp.parse_args() extra_args = [] @@ -285,6 +305,10 @@ if __name__ == "__main__": extra_args.append('--build_only') if args.force_default_poller: extra_args.append('--force_default_poller') + if args.runs_per_test > 1: + extra_args.append('-n') + extra_args.append('%s' % args.runs_per_test) + extra_args.append('--quiet_success') all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) @@ -292,7 +316,8 @@ if __name__ == "__main__": jobs = [] for job in all_jobs: if not args.filter or all(filter in job.labels for filter in args.filter): - jobs.append(job) + if not any(exclude_label in job.labels for exclude_label in args.exclude): + jobs.append(job) if not jobs: jobset.message('FAILED', 'No test suites match given criteria.', diff --git a/tools/run_tests/sanity/check_sources_and_headers.py b/tools/run_tests/sanity/check_sources_and_headers.py index b733ba173f..a86db02b80 100755 --- a/tools/run_tests/sanity/check_sources_and_headers.py +++ b/tools/run_tests/sanity/check_sources_and_headers.py @@ -34,7 +34,7 @@ import re import sys root = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) -with open(os.path.join(root, 'tools', 'run_tests', 'sources_and_headers.json')) as f: +with open(os.path.join(root, 'tools', 'run_tests', 'generated', 'sources_and_headers.json')) as f: js = json.loads(f.read()) re_inc1 = re.compile(r'^#\s*include\s*"([^"]*)"') diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh index 6ec0786c96..be12f968d2 100755 --- a/tools/run_tests/sanity/check_submodules.sh +++ b/tools/run_tests/sanity/check_submodules.sh @@ -43,7 +43,7 @@ git submodule | awk '{ print $1 }' | sort > $submodules cat << EOF | awk '{ print $1 }' | sort > $want_submodules c880e42ba1c8032d4cdde2aba0541d8a9d9fa2e9 third_party/boringssl (version_for_cocoapods_2.0-100-gc880e42) 05b155ff59114735ec8cd089f669c4c3d8f59029 third_party/gflags (v2.1.0-45-g05b155f) - 44c25c892a6229b20db7cd9dc05584ea865896de third_party/google_benchmark (v0.1.0-343-g44c25c8) + 44c25c892a6229b20db7cd9dc05584ea865896de third_party/benchmark (v0.1.0-343-g44c25c8) c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0) a428e42072765993ff674fda72863c9f1aa2d268 third_party/protobuf (v3.1.0) 50893291621658f355bc5b4d450a8d06a563053d third_party/zlib (v1.2.8) diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py index b522cdeb49..290a6e2ddf 100755 --- a/tools/run_tests/sanity/check_test_filtering.py +++ b/tools/run_tests/sanity/check_test_filtering.py @@ -38,7 +38,7 @@ import re # hack import paths to pick up extra code sys.path.insert(0, os.path.abspath('tools/run_tests/')) from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs -import filter_pull_request_tests +import python_utils.filter_pull_request_tests as filter_pull_request_tests _LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] _LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows'] diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py index 2e3fa443b9..fdc4668222 100755 --- a/tools/run_tests/task_runner.py +++ b/tools/run_tests/task_runner.py @@ -33,14 +33,13 @@ from __future__ import print_function import argparse -import atexit -import jobset import 
multiprocessing import sys -import artifact_targets -import distribtest_targets -import package_targets +import artifacts.artifact_targets as artifact_targets +import artifacts.distribtest_targets as distribtest_targets +import artifacts.package_targets as package_targets +import python_utils.jobset as jobset _TARGETS = [] _TARGETS += artifact_targets.targets() diff --git a/vsprojects/README.md b/vsprojects/README.md index 56d9f56009..7af69c2726 100644 --- a/vsprojects/README.md +++ b/vsprojects/README.md @@ -83,10 +83,12 @@ Windows .exe binaries of gRPC protoc plugins. 1. Follow instructions in `third_party\protobuf\cmake\README.md` to create Visual Studio 2013 projects for protobuf. ``` $ cd third_party/protobuf/cmake -$ cmake -G "Visual Studio 12 2013" +$ mkdir build & cd build +$ mkdir solution & cd solution +$ cmake -G "Visual Studio 12 2013" -Dprotobuf_BUILD_TESTS=OFF ../.. ``` -2. Open solution `third_party\protobuf\cmake\protobuf.sln` and build it in Release mode. That will build libraries `libprotobuf.lib` and `libprotoc.lib` needed for the next step. +2. Open solution `third_party\protobuf\cmake\build\solution\protobuf.sln` and build it in Release mode. That will build libraries `libprotobuf.lib` and `libprotoc.lib` needed for the next step. 3. Open solution `vsprojects\grpc_protoc_plugins.sln` and build it in Release mode. As a result, you should obtain a set of gRPC protoc plugin binaries (`grpc_cpp_plugin.exe`, `grpc_csharp_plugin.exe`, ...) diff --git a/vsprojects/build_plugins.bat b/vsprojects/build_plugins.bat index 7c8e056dc4..ae5c5f09be 100644 --- a/vsprojects/build_plugins.bat +++ b/vsprojects/build_plugins.bat @@ -38,7 +38,7 @@ cd /d %~dp0 @call "%VS120COMNTOOLS%\..\..\vc\vcvarsall.bat" x86 @rem Build third_party/protobuf -msbuild ..\third_party\protobuf\cmake\protobuf.sln /p:Configuration=Release || goto :error +msbuild ..\third_party\protobuf\cmake\build\solution\protobuf.sln /p:Configuration=Release || goto :error @rem Build the C# protoc plugins msbuild grpc_protoc_plugins.sln /p:Configuration=Release || goto :error diff --git a/vsprojects/protobuf.props b/vsprojects/protobuf.props index b1de8af27a..b828313572 100644 --- a/vsprojects/protobuf.props +++ b/vsprojects/protobuf.props @@ -1 +1 @@ -<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <Link> <AdditionalDependencies>libprotobuf.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file +<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <Link> <AdditionalDependencies>libprotobuf.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file diff --git a/vsprojects/protoc.props b/vsprojects/protoc.props index 1bdc07193b..87fff8f128 100644 --- a/vsprojects/protoc.props +++ b/vsprojects/protoc.props @@ -1 +1 @@ -<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <ClCompile> <DisableSpecificWarnings>4244;4267;%(DisableSpecificWarnings)</DisableSpecificWarnings> </ClCompile> <Link> <AdditionalDependencies>libprotoc.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file +<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ImportGroup Label="PropertySheets" /> <PropertyGroup Label="UserMacros" /> <PropertyGroup /> <ItemDefinitionGroup> <ClCompile> <DisableSpecificWarnings>4244;4267;%(DisableSpecificWarnings)</DisableSpecificWarnings> </ClCompile> <Link> <AdditionalDependencies>libprotoc.lib;%(AdditionalDependencies)</AdditionalDependencies> <AdditionalLibraryDirectories>$(SolutionDir)\..\third_party\protobuf\cmake\build\solution\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories> </Link> </ItemDefinitionGroup> <ItemGroup /> </Project>
\ No newline at end of file diff --git a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj b/vsprojects/vcxproj/benchmark/benchmark.vcxproj index 52774e0802..9f262b3b00 100644 --- a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj +++ b/vsprojects/vcxproj/benchmark/benchmark.vcxproj @@ -19,7 +19,7 @@ </ProjectConfiguration> </ItemGroup> <PropertyGroup Label="Globals"> - <ProjectGuid>{AAD4AEF3-DF1E-7A6D-EC35-233BD1031BF4}</ProjectGuid> + <ProjectGuid>{07978586-E47C-8709-A63E-895FBF3C3C7D}</ProjectGuid> <IgnoreWarnIntDirInTempDetected>true</IgnoreWarnIntDirInTempDetected> <IntDir>$(SolutionDir)IntDir\$(MSBuildProjectName)\</IntDir> </PropertyGroup> @@ -57,10 +57,10 @@ </ImportGroup> <PropertyGroup Label="UserMacros" /> <PropertyGroup Condition="'$(Configuration)'=='Debug'"> - <TargetName>google_benchmark</TargetName> + <TargetName>benchmark</TargetName> </PropertyGroup> <PropertyGroup Condition="'$(Configuration)'=='Release'"> - <TargetName>google_benchmark</TargetName> + <TargetName>benchmark</TargetName> </PropertyGroup> <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> <ClCompile> @@ -147,53 +147,53 @@ </ItemDefinitionGroup> <ItemGroup> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark_api.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\macros.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\reporter.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\arraysize.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_api_internal.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\check.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\cycleclock.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\internal_macros.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\log.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\mutex.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\re.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\stat.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.h" /> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark_api.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\macros.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\reporter.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\arraysize.h" /> + <ClInclude 
Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_api_internal.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\check.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\cycleclock.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\internal_macros.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\log.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\mutex.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\re.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\stat.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.h" /> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\timers.h" /> </ItemGroup> <ItemGroup> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_register.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_register.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\console_reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\console_reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\csv_reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\csv_reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\json_reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\json_reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\reporter.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\reporter.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.cc"> </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.cc"> </ClCompile> - <ClCompile 
Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.cc"> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\timers.cc"> </ClCompile> </ItemGroup> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> diff --git a/vsprojects/vcxproj/benchmark/benchmark.vcxproj.filters b/vsprojects/vcxproj/benchmark/benchmark.vcxproj.filters new file mode 100644 index 0000000000..ccc9ca2cae --- /dev/null +++ b/vsprojects/vcxproj/benchmark/benchmark.vcxproj.filters @@ -0,0 +1,125 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_register.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\console_reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\csv_reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\json_reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\reporter.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + <ClCompile Include="$(SolutionDir)\..\third_party\benchmark\src\timers.cc"> + <Filter>third_party\benchmark\src</Filter> + </ClCompile> + </ItemGroup> + <ItemGroup> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\benchmark_api.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\macros.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\include\benchmark\reporter.h"> + <Filter>third_party\benchmark\include\benchmark</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\arraysize.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\benchmark_api_internal.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude 
Include="$(SolutionDir)\..\third_party\benchmark\src\check.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\colorprint.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\commandlineflags.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\complexity.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\cycleclock.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\internal_macros.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\log.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\mutex.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\re.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sleep.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\stat.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\string_util.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\sysinfo.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + <ClInclude Include="$(SolutionDir)\..\third_party\benchmark\src\timers.h"> + <Filter>third_party\benchmark\src</Filter> + </ClInclude> + </ItemGroup> + + <ItemGroup> + <Filter Include="third_party"> + <UniqueIdentifier>{7b593518-9fee-107e-6b64-24bdce73f939}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark"> + <UniqueIdentifier>{f0d35de1-6b41-778d-0ba0-faad514fb0f4}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark\include"> + <UniqueIdentifier>{cbc02dfa-face-8cc6-0efb-efacc0c3369c}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark\include\benchmark"> + <UniqueIdentifier>{4f2f03fc-b82d-df33-63ee-bedebeb2c0ee}</UniqueIdentifier> + </Filter> + <Filter Include="third_party\benchmark\src"> + <UniqueIdentifier>{f42a8e0a-5a76-0e6f-d708-f0306858f673}</UniqueIdentifier> + </Filter> + </ItemGroup> +</Project> + diff --git a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj.filters b/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj.filters deleted file mode 100644 index 9db6ed4657..0000000000 --- a/vsprojects/vcxproj/google_benchmark/google_benchmark.vcxproj.filters +++ /dev/null @@ -1,125 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?> -<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> - <ItemGroup> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_register.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.cc"> - 
<Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\console_reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\csv_reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\json_reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\reporter.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - <ClCompile Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.cc"> - <Filter>third_party\google_benchmark\src</Filter> - </ClCompile> - </ItemGroup> - <ItemGroup> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\benchmark_api.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\macros.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\include\benchmark\reporter.h"> - <Filter>third_party\google_benchmark\include\benchmark</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\arraysize.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\benchmark_api_internal.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\check.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\colorprint.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\commandlineflags.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\complexity.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\cycleclock.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude 
Include="$(SolutionDir)\..\third_party\google_benchmark\src\internal_macros.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\log.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\mutex.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\re.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sleep.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\stat.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\string_util.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\sysinfo.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - <ClInclude Include="$(SolutionDir)\..\third_party\google_benchmark\src\timers.h"> - <Filter>third_party\google_benchmark\src</Filter> - </ClInclude> - </ItemGroup> - - <ItemGroup> - <Filter Include="third_party"> - <UniqueIdentifier>{7458b63d-7ba4-103d-2bed-3e3ad30d8237}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark"> - <UniqueIdentifier>{54a154e8-669b-a7c1-9b6e-bd1aab2f86e3}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark\include"> - <UniqueIdentifier>{f54c3cb1-ec20-a651-6956-78379b51e1a5}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark\include\benchmark"> - <UniqueIdentifier>{0483a457-8050-4565-bc15-09695bf7b822}</UniqueIdentifier> - </Filter> - <Filter Include="third_party\google_benchmark\src"> - <UniqueIdentifier>{c39ff2d1-691e-4614-4d75-4bc20db05e09}</UniqueIdentifier> - </Filter> - </ItemGroup> -</Project> - diff --git a/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj b/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj index 1ce993e323..3809beb508 100644 --- a/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj +++ b/vsprojects/vcxproj/test/bm_fullstack/bm_fullstack.vcxproj @@ -164,8 +164,8 @@ </ClCompile> </ItemGroup> <ItemGroup> - <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\google_benchmark\google_benchmark.vcxproj"> - <Project>{AAD4AEF3-DF1E-7A6D-EC35-233BD1031BF4}</Project> + <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\benchmark\benchmark.vcxproj"> + <Project>{07978586-E47C-8709-A63E-895FBF3C3C7D}</Project> </ProjectReference> <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\grpc++_test_util\grpc++_test_util.vcxproj"> <Project>{0BE77741-552A-929B-A497-4EF7ECE17A64}</Project> diff --git a/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj b/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj index 99f33b2165..15a82c0ed6 100644 --- a/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj +++ b/vsprojects/vcxproj/test/noop-benchmark/noop-benchmark.vcxproj @@ -164,8 +164,8 @@ </ClCompile> </ItemGroup> <ItemGroup> - <ProjectReference Include="$(SolutionDir)\..\vsprojects\vcxproj\.\google_benchmark\google_benchmark.vcxproj"> - <Project>{AAD4AEF3-DF1E-7A6D-EC35-233BD1031BF4}</Project> + <ProjectReference 
Include="$(SolutionDir)\..\vsprojects\vcxproj\.\benchmark\benchmark.vcxproj"> + <Project>{07978586-E47C-8709-A63E-895FBF3C3C7D}</Project> </ProjectReference> </ItemGroup> <ItemGroup> |